apischema-0.18.3/.github/FUNDING.yml
github: [wyfo]

apischema-0.18.3/.github/dependabot.yml
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "monthly"
    groups:
      actions:
        patterns: ["*"]
  - package-ecosystem: "pip"
    directory: "tests"
    schedule:
      interval: "monthly"
    groups:
      tests:
        patterns: ["*"]
  - package-ecosystem: "pip"
    directory: "docs"
    schedule:
      interval: "monthly"
    groups:
      docs:
        patterns: ["*"]
  - package-ecosystem: "pip"
    directory: "benchmark"
    schedule:
      interval: "monthly"
    groups:
      benchmark:
        patterns: ["*"]
  - package-ecosystem: "pip"
    directory: "scripts"
    schedule:
      interval: "monthly"
    groups:
      scripts:
        patterns: ["*"]

apischema-0.18.3/.github/workflows/cd.yml
name: CD
on:
  workflow_dispatch:
  release:
    types: [published]
jobs:
  build_sdist:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - run: scripts/cythonize.sh
      - run: pipx run build --sdist
      - uses: actions/upload-artifact@v4
        with:
          name: dist-sdist
          path: dist/*.tar.gz
  build_pure_wheel:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - run: scripts/cythonize.sh
      - run: NO_EXTENSION=1 pipx run build
      - uses: actions/upload-artifact@v4
        with:
          name: dist-wheel
          path: dist/*.whl
  build_wheels:
    name: Wheel on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python 3.12 (MacOS)
        if: matrix.os == 'macos-latest'
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"
      - run: scripts/cythonize.sh
      - run: scripts/generate_tests_from_examples.py
      - uses: pypa/cibuildwheel@v2.21.1
        env:
          CIBW_SKIP: pp*
          CIBW_TEST_COMMAND: pytest {project}/tests
          # TODO execute tests on Windows (https://github.com/wyfo/apischema/runs/4622330189)
          CIBW_TEST_COMMAND_WINDOWS: python -c "import apischema"
          CIBW_BEFORE_TEST: pip install -r tests/requirements.txt
          # TODO is skipping still necessary?
CIBW_TEST_SKIP: "*universal2:arm64" - uses: actions/upload-artifact@v4 with: name: dist-${{ matrix.os }} path: wheelhouse/*.whl publish: needs: [build_sdist, build_wheels] runs-on: ubuntu-latest if: github.event_name == 'release' && github.event.action == 'published' steps: - uses: actions/download-artifact@v4 with: path: dist pattern: dist-* merge-multiple: true - uses: pypa/gh-action-pypi-publish@v1.8.10 with: password: ${{ secrets.PYPI_TOKEN }} apischema-0.18.3/.github/workflows/ci.yml000066400000000000000000000042071467672046000202670ustar00rootroot00000000000000name: CI on: push: branches: - master - v[0-9]*.* paths: - apischema/** - examples/** - scripts/** - tests/** pull_request: paths: - apischema/** - examples/** - scripts/** - tests/** jobs: test: name: Test ${{ matrix.python-version }} runs-on: ubuntu-latest strategy: fail-fast: false matrix: python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13-dev', 'pypy-3.9', 'pypy-3.10'] include: - python-version: '3.12' pytest-args: --cov=apischema --cov-branch --cov-report=xml --cov-report=html steps: - uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('tests/requirements.txt') }} restore-keys: | ${{ runner.os }}-pip- - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install requirements run: pip install -r tests/requirements.txt - name: Generate tests from documentation example run: scripts/generate_tests_from_examples.py - name: Run tests run: pytest tests ${{ matrix.pytest-args }} - uses: codecov/codecov-action@v4 # https://github.community/t/run-step-if-file-exists/16445/3 if: hashFiles('coverage.xml') != '' - uses: actions/upload-artifact@v4 if: hashFiles('coverage.xml') != '' with: name: coverage path: | coverage.xml htmlcov - name: Cythonize run: scripts/cythonize.sh if: matrix.python-version != 'pypy-3.9' && matrix.python-version != 'pypy-3.10' - name: Compile run: pip install -e . if: matrix.python-version != 'pypy-3.9' && matrix.python-version != 'pypy-3.10' - name: Run tests (compiled) run: pytest tests if: matrix.python-version != 'pypy-3.9' && matrix.python-version != 'pypy-3.10' concurrency: group: ci-${{ github.head_ref }} cancel-in-progress: true apischema-0.18.3/.github/workflows/doc.yml000066400000000000000000000113511467672046000204370ustar00rootroot00000000000000name: Documentation on: workflow_dispatch: release: types: [published] push: branches: - master paths: - apischema/** - benchmark/** - docs/** - examples/** - mkdocs.yml - pyproject.toml pull_request: paths: - apischema/** - benchmark/** - docs/** - examples/** - mkdocs.yml jobs: run_benchmark: runs-on: ubuntu-latest steps: - uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('tests/requirements.txt') }} restore-keys: | ${{ runner.os }}-pip- - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: '3.12' - name: Cythonize run: scripts/cythonize.sh - name: Install apischema run: pip install -e . 
- name: Install requirements run: pip install -r benchmark/requirements.txt - name: Run benchmark run: python benchmark/main.py - uses: actions/upload-artifact@v4 with: name: benchmark_table path: examples/benchmark_table.md - uses: actions/upload-artifact@v4 with: name: benchmark_chart_light path: docs/benchmark_chart_light.svg - uses: actions/upload-artifact@v4 with: name: benchmark_chart_dark path: docs/benchmark_chart_dark.svg upload_doc: needs: [run_benchmark] runs-on: ubuntu-latest steps: - uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('tests/requirements.txt') }} restore-keys: | ${{ runner.os }}-pip- - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: '3.12' - uses: actions/download-artifact@v4 with: name: benchmark_table path: examples - uses: actions/download-artifact@v4 with: name: benchmark_chart_light path: docs - uses: actions/download-artifact@v4 with: name: benchmark_chart_dark path: docs - name: Install requirements run: pip install -r docs/requirements.txt - name: Build documentation run: mkdocs build - uses: actions/upload-artifact@v4 with: name: documentation path: site/** publish_doc: needs: [run_benchmark] runs-on: ubuntu-latest if: github.event_name == 'push' || github.event_name == 'release' steps: - uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('tests/requirements.txt') }} restore-keys: | ${{ runner.os }}-pip- - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: # TODO bump to 3.12 when mike will support it python-version: '3.11' - uses: actions/download-artifact@v4 with: name: benchmark_table path: examples - uses: actions/download-artifact@v4 with: name: benchmark_chart_light path: docs - uses: actions/download-artifact@v4 with: name: benchmark_chart_dark path: docs - name: Install requirements run: pip install -r docs/requirements.txt - name: Setup git run: | git config user.name github-actions git config user.email github-actions@github.com git fetch origin gh-pages --depth=1 - name: Retrieve current version # TODO use a better thing that parsing pyproject.toml run: | echo "version=$(cat pyproject.toml | grep "version =" | cut -d' ' -f3 | cut -d'"' -f2 | cut -d. -f-2)" >> $GITHUB_ENV echo "revision=$(cat pyproject.toml | grep "version =" | cut -d' ' -f3 | cut -d'"' -f2 | cut -d. 
-f3)" >> $GITHUB_ENV - name: Deploy latest documentation if: github.event_name == 'release' && env.revision == '0' run: | mike retitle latest "$(mike list latest -j | jq .version -r)" mike deploy $version latest -t "$version (latest)" -u - name: Deploy patch documentation if: github.event_name == 'release' && env.revision != '0' run: mike deploy $version - name: Deploy dev documentation if: github.event_name == 'push' run: mike deploy dev - name: Publish documentation if: github.event_name == 'push' || github.event_name == 'release' run: | git switch gh-pages cat versions.json | jq '[.[-1], .[:-1][]]' -r | tee versions.json git add versions.json git commit -m "sort versions.json" git push origin gh-pages apischema-0.18.3/.gitignore000066400000000000000000000023361467672046000155450ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ .idea __generated__ cov-* *.c *.pyx *.pxd apischema-0.18.3/.pre-commit-config.yaml000066400000000000000000000017711467672046000200400ustar00rootroot00000000000000default_language_version: python: python3.12 repos: - repo: local hooks: - id: update_readme name: Update Readme entry: python scripts/generate_readme.py language: system - id: sort_all name: Sort __all__ entry: python scripts/sort_all.py language: system types: [python] - repo: https://github.com/hadialqattan/pycln rev: v2.4.0 hooks: - id: pycln - repo: https://github.com/pycqa/isort rev: 5.12.0 hooks: - id: isort - repo: https://github.com/psf/black rev: 23.10.0 hooks: - id: black args: [-C] - repo: https://github.com/PyCQA/flake8 rev: 6.1.0 hooks: - id: flake8 exclude: ^examples/.*\.py$ - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.6.1 hooks: - id: mypy args: [--ignore-missing-imports, --scripts-are-modules, --warn-unused-ignores, --warn-redundant-cast, --check-untyped-defs] exclude: ^examples/.*\.py$ apischema-0.18.3/LICENSE.txt000066400000000000000000000020551467672046000153760ustar00rootroot00000000000000MIT License Copyright (c) [year] [fullname] Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to 
the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

apischema-0.18.3/README.md
# apischema

JSON (de)serialization, GraphQL and JSON schema generation using Python typing.

*apischema* makes your life easier when dealing with API data.

## Documentation

[https://wyfo.github.io/apischema/](https://wyfo.github.io/apischema/)

## Install

```shell
pip install apischema
```

It requires only Python 3.8+. *PyPy3* is also fully supported.

## Why another library?

(If you wonder how this differs from the *pydantic* library, see the [dedicated section of the documentation](https://wyfo.github.io/apischema/dev/difference_with_pydantic) — there are many differences.)

This library fulfills the following goals:

- stay as close as possible to the standard library (dataclasses, typing, etc.) — as a consequence, it does not need plugins for editors/linters/etc.;
- avoid object-oriented limitations — no base class is required — and thus handle every type (`Foo`, `list[Bar]`, `NewType(Id, int)`, etc.) the same way;
- be adaptable: provide tools to support any type (ORM, etc.);
- avoid dynamic constructs like raw strings for attribute names;
- play nicely with your IDE.

No known alternative achieves all of this, and apischema is also [(a lot) faster](https://wyfo.github.io/apischema/dev/optimizations_and_benchmark#benchmark) than all of them.
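As a small illustration of these points (a sketch, with the `Id` and `Bar` names made up for the example), the same `deserialize` entry point handles `NewType`s, dataclasses and builtin generics without any base class:

```python
from dataclasses import dataclass
from typing import NewType

from apischema import deserialize

Id = NewType("Id", int)


@dataclass
class Bar:
    id: Id
    label: str


# No registration and no base class: plain typing constructs are enough
assert deserialize(list[Bar], [{"id": 1, "label": "spam"}]) == [Bar(Id(1), "spam")]
```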
On top of that, because APIs are not only JSON, *apischema* is also a complete GraphQL library.

> Actually, *apischema* is even adaptable enough to support competitor libraries in a few dozen lines of code ([pydantic support example](https://wyfo.github.io/apischema/dev/examples/pydantic_support) using the [conversions feature](https://wyfo.github.io/apischema/dev/conversions)).

## Example

```python
from collections.abc import Collection
from dataclasses import dataclass, field
from uuid import UUID, uuid4

import pytest
from graphql import print_schema

from apischema import ValidationError, deserialize, serialize
from apischema.graphql import graphql_schema
from apischema.json_schema import deserialization_schema


# Define a schema with standard dataclasses
@dataclass
class Resource:
    id: UUID
    name: str
    tags: set[str] = field(default_factory=set)


# Get some data
uuid = uuid4()
data = {"id": str(uuid), "name": "wyfo", "tags": ["some_tag"]}
# Deserialize data
resource = deserialize(Resource, data)
assert resource == Resource(uuid, "wyfo", {"some_tag"})
# Serialize objects
assert serialize(Resource, resource) == data
# Validate during deserialization
with pytest.raises(ValidationError) as err:  # pytest checks exception is raised
    deserialize(Resource, {"id": "42", "name": "wyfo"})
assert err.value.errors == [
    {"loc": ["id"], "err": "badly formed hexadecimal UUID string"}
]
# Generate JSON Schema
assert deserialization_schema(Resource) == {
    "$schema": "http://json-schema.org/draft/2020-12/schema#",
    "type": "object",
    "properties": {
        "id": {"type": "string", "format": "uuid"},
        "name": {"type": "string"},
        "tags": {
            "type": "array",
            "items": {"type": "string"},
            "uniqueItems": True,
            "default": [],
        },
    },
    "required": ["id", "name"],
    "additionalProperties": False,
}


# Define GraphQL operations
def resources(tags: Collection[str] | None = None) -> Collection[Resource] | None:
    ...


# Generate GraphQL schema
schema = graphql_schema(query=[resources], id_types={UUID})
schema_str = """\
type Query {
  resources(tags: [String!]): [Resource!]
}

type Resource {
  id: ID!
  name: String!
  tags: [String!]!
}"""
assert print_schema(schema) == schema_str
```

*apischema* works out of the box with your data model.

> This example and further ones use the *pytest* API because they are in fact run as tests in the library CI.

### Run the documentation examples

All documentation examples are written using the latest Python minor version — currently 3.10 — in order to provide up-to-date documentation. Because Python 3.10 specificities (like [PEP 604](https://www.python.org/dev/peps/pep-0604/)) are used, this version is "mandatory" to execute the examples as-is.

In addition to *pytest*, some examples use third-party libraries like *SQLAlchemy* or *attrs*. All of these dependencies can be installed using the `examples` extra:

```shell
pip install apischema[examples]
```

Once the dependencies are installed, you can simply copy-paste the examples and execute them with the proper Python version.
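The conversions feature linked above is also the main extension point for types that typing alone does not describe. A minimal sketch, assuming a hypothetical `Rgb` class that should travel as a plain string:

```python
from apischema import deserialize, deserializer, serialize, serializer


class Rgb:  # hypothetical non-dataclass type, unknown to apischema by itself
    def __init__(self, value: str):
        self.value = value


@deserializer
def to_rgb(value: str) -> Rgb:  # target type is inferred from the return annotation
    return Rgb(value)


@serializer
def rgb_value(color: Rgb) -> str:  # source type is inferred from the parameter annotation
    return color.value


assert serialize(Rgb, deserialize(Rgb, "#ffffff")) == "#ffffff"
```

Registered conversions are also reflected in the generated JSON and GraphQL schemas, which is how the pydantic support example mentioned above is built.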
apischema-0.18.3/apischema/000077500000000000000000000000001467672046000155035ustar00rootroot00000000000000apischema-0.18.3/apischema/__init__.py000066400000000000000000000044471467672046000176250ustar00rootroot00000000000000__all__ = [ "PassThroughOptions", "Undefined", "UndefinedType", "Unsupported", "ValidationError", "alias", "dependent_required", "deserialization_method", "deserialize", "deserializer", "discriminator", "identity", "order", "properties", "schema", "serialization_default", "serialization_method", "serialize", "serialized", "serializer", "settings", "type_name", "validator", ] import warnings from . import ( # noqa: F401 cache, conversions, dataclasses, fields, json_schema, metadata, objects, tagged_unions, validation, ) from .aliases import alias from .conversions import deserializer, serializer from .dependencies import dependent_required from .deserialization import deserialization_method, deserialize from .discriminators import discriminator from .metadata import properties from .ordering import order from .schemas import schema from .serialization import ( PassThroughOptions, serialization_default, serialization_method, serialize, ) from .serialization.serialized_methods import serialized from .settings import settings from .type_names import type_name from .types import Undefined, UndefinedType from .utils import identity from .validation import ValidationError, validator from .visitor import Unsupported try: import graphql as _gql if _gql.__version__.startswith("2."): warnings.warn( f"graphql-core version {_gql.__version__} is incompatible with apischema;\n" "GraphQL schema generation is thus not available." ) else: from . import graphql # noqa: F401 __all__.append("graphql") del _gql except ImportError: pass def __getattr__(name): if name == "graphql": raise AttributeError( "GraphQL feature requires graphql-core library\n" "Run `pip install apischema[graphql]` to install it" ) raise AttributeError(f"module {__name__!r} has no attribute {name!r}") def register_default_conversions(): """Handle standard library + internal types""" from . import std_types # noqa: F401 deserializer(ValidationError.from_errors) serializer(ValidationError.errors) register_default_conversions() del register_default_conversions apischema-0.18.3/apischema/aliases.py000066400000000000000000000025231467672046000175000ustar00rootroot00000000000000from typing import Callable, MutableMapping, TypeVar, overload from apischema.cache import CacheAwareDict from apischema.types import Metadata, MetadataImplem Aliaser = Callable[[str], str] Cls = TypeVar("Cls", bound=type) _class_aliasers: MutableMapping[type, Aliaser] = CacheAwareDict({}) get_class_aliaser = _class_aliasers.get @overload def alias(alias_: str, *, override: bool = True) -> Metadata: ... @overload def alias(override: bool) -> Metadata: ... @overload def alias(aliaser: Aliaser) -> Callable[[Cls], Cls]: ... 
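# Usage sketch (editor's illustration, not part of this module; names are made up):
#
#     @alias(str.upper)  # class decorator: aliaser applied to the class' fields
#     @dataclass
#     class Foo:
#         bar: int = field(metadata=alias("barAlias", override=False))
#
# With override=False, the field alias is not overridden by a class aliaser.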
def alias(arg=None, *, override: bool = True): # type: ignore """Field alias or class aliaser :param alias_: alias of the field :param override: alias can be overridden by a class aliaser :param aliaser: compute alias for each (overridable) field of the class decorated """ from apischema.metadata.keys import ALIAS_METADATA, ALIAS_NO_OVERRIDE_METADATA if callable(arg): def aliaser(cls: Cls) -> Cls: _class_aliasers[cls] = arg return cls return aliaser else: metadata = MetadataImplem() if arg is not None: metadata[ALIAS_METADATA] = arg if not override: metadata[ALIAS_NO_OVERRIDE_METADATA] = True if not metadata: raise NotImplementedError return metadata apischema-0.18.3/apischema/cache.py000066400000000000000000000022011467672046000171130ustar00rootroot00000000000000__all__ = ["cache", "reset", "set_size"] import sys from functools import lru_cache from typing import Callable, Iterator, MutableMapping, TypeVar, cast _cached: list = [] Func = TypeVar("Func", bound=Callable) def cache(func: Func) -> Func: cached = cast(Func, lru_cache()(func)) _cached.append(cached) return cached def reset(): for cached in _cached: cached.cache_clear() def set_size(size: int): for cached in _cached: wrapped = cached.__wrapped__ setattr( sys.modules[wrapped.__module__], wrapped.__name__, lru_cache(size)(wrapped) ) K = TypeVar("K") V = TypeVar("V") class CacheAwareDict(MutableMapping[K, V]): def __init__(self, wrapped: MutableMapping[K, V]): self.wrapped = wrapped def __getitem__(self, key: K) -> V: return self.wrapped[key] def __setitem__(self, key: K, value: V): self.wrapped[key] = value reset() def __delitem__(self, key: K): del self.wrapped[key] def __len__(self) -> int: return len(self.wrapped) def __iter__(self) -> Iterator[K]: return iter(self.wrapped) apischema-0.18.3/apischema/constraints.py000066400000000000000000000063031467672046000204260ustar00rootroot00000000000000import operator as op from dataclasses import dataclass, field, fields from math import gcd from typing import Any, Callable, Collection, Dict, Optional, Pattern, Tuple, TypeVar from apischema.types import Number from apischema.utils import merge_opts T = TypeVar("T") U = TypeVar("U") CONSTRAINT_METADATA_KEY = "constraint" @dataclass class ConstraintMetadata: alias: str cls: type merge: Callable[[T, T], T] @property def field(self) -> Any: return field(default=None, metadata={CONSTRAINT_METADATA_KEY: self}) def constraint(alias: str, cls: type, merge: Callable[[T, T], T]) -> Any: return field( default=None, metadata={CONSTRAINT_METADATA_KEY: ConstraintMetadata(alias, cls, merge)}, ) def merge_mult_of(m1: Number, m2: Number) -> Number: if not isinstance(m1, int) and not isinstance(m2, int): raise TypeError("multipleOf merging is only supported with integers") return m1 * m2 / gcd(m1, m2) # type: ignore def merge_pattern(p1: Pattern, p2: Pattern) -> Pattern: raise TypeError("Cannot merge patterns") min_, max_ = min, max @dataclass(frozen=True) class Constraints: # number min: Optional[Number] = constraint("minimum", float, max_) max: Optional[Number] = constraint("maximum", float, min_) exc_min: Optional[Number] = constraint("exclusiveMinimum", float, max_) exc_max: Optional[Number] = constraint("exclusiveMaximum", float, min_) mult_of: Optional[Number] = constraint("multipleOf", float, merge_mult_of) # string min_len: Optional[int] = constraint("minLength", str, max_) max_len: Optional[int] = constraint("maxLength", str, min_) pattern: Optional[Pattern] = constraint("pattern", str, merge_pattern) # array min_items: Optional[int] = 
constraint("minItems", list, max_) max_items: Optional[int] = constraint("maxItems", list, min_) unique: Optional[bool] = constraint("uniqueItems", list, op.or_) # object min_props: Optional[int] = constraint("minProperties", dict, max_) max_props: Optional[int] = constraint("maxProperties", dict, min_) @property def attr_and_metata( self, ) -> Collection[Tuple[str, Optional[Any], ConstraintMetadata]]: return [ (f.name, getattr(self, f.name), f.metadata[CONSTRAINT_METADATA_KEY]) for f in fields(self) if CONSTRAINT_METADATA_KEY in f.metadata ] def merge_into(self, base_schema: Dict[str, Any]): for name, attr, metadata in self.attr_and_metata: if attr is not None: alias = metadata.alias if alias in base_schema: base_schema[alias] = metadata.merge(attr, base_schema[alias]) else: base_schema[alias] = attr @merge_opts def merge_constraints(c1: Constraints, c2: Constraints) -> Constraints: constraints: Dict[str, Any] = {} for name, attr1, metadata in c1.attr_and_metata: attr2 = getattr(c2, name) if attr1 is None: constraints[name] = attr2 elif attr2 is None: constraints[name] = attr1 else: constraints[name] = metadata.merge(attr1, attr2) return Constraints(**constraints) apischema-0.18.3/apischema/conversions/000077500000000000000000000000001467672046000200535ustar00rootroot00000000000000apischema-0.18.3/apischema/conversions/__init__.py000066400000000000000000000006741467672046000221730ustar00rootroot00000000000000__all__ = [ "AnyConversion", "Conversion", "LazyConversion", "as_names", "as_str", "catch_value_error", "deserializer", "reset_deserializers", "reset_serializer", "serializer", ] from .conversions import AnyConversion, Conversion, LazyConversion from .converters import ( as_names, as_str, catch_value_error, deserializer, reset_deserializers, reset_serializer, serializer, ) apischema-0.18.3/apischema/conversions/conversions.py000066400000000000000000000064421467672046000230030ustar00rootroot00000000000000from dataclasses import dataclass from functools import lru_cache from typing import ( TYPE_CHECKING, Any, Callable, Collection, Dict, Generic, List, NewType, Optional, Tuple, TypeVar, Union, ) from apischema.conversions.utils import Converter, converter_types from apischema.dataclasses import replace from apischema.methods import is_method, method_class, method_wrapper from apischema.types import AnyType from apischema.typing import is_type_var from apischema.utils import identity if TYPE_CHECKING: pass ConvOrProp = TypeVar("ConvOrProp", Converter, property) @dataclass(frozen=True) class Conversion(Generic[ConvOrProp]): converter: ConvOrProp source: AnyType = None target: AnyType = None sub_conversion: Optional["AnyConversion"] = None inherited: Optional[bool] = None @dataclass(frozen=True) class LazyConversion: get: Callable[[], Optional["AnyConversion"]] def __post_init__(self): object.__setattr__(self, "get", lru_cache(1)(self.get)) @property def inherited(self) -> Optional[bool]: conversion = self.get() return isinstance(conversion, Conversion) and conversion.inherited ConvOrFunc = Union[Conversion, Converter, property, LazyConversion] AnyConversion = Union[ConvOrFunc, Tuple[ConvOrFunc, ...]] DefaultConversion = Callable[[AnyType], Optional[AnyConversion]] ResolvedConversion = NewType("ResolvedConversion", Conversion[Converter]) ResolvedConversions = Tuple[ResolvedConversion, ...] 
# Tuple in order to be hashable def resolve_conversion( conversion: Union[Converter, property, Conversion], namespace: Optional[Dict[str, Any]] = None, ) -> ResolvedConversion: if not isinstance(conversion, Conversion): conversion = Conversion(conversion) if is_method(conversion.converter): if conversion.source is None: conversion = replace(conversion, source=method_class(conversion.converter)) conversion = replace(conversion, converter=method_wrapper(conversion.converter)) assert not isinstance(conversion.converter, property) source, target = converter_types( conversion.converter, conversion.source, conversion.target, namespace ) return ResolvedConversion(replace(conversion, source=source, target=target)) def resolve_any_conversion(conversion: Optional[AnyConversion]) -> ResolvedConversions: if not conversion: return () result: List[ResolvedConversion] = [] for conv in conversion if isinstance(conversion, Collection) else [conversion]: if isinstance(conv, LazyConversion): result.extend(resolve_any_conversion(conv.get())) else: result.append(resolve_conversion(conv)) return tuple(result) def handle_identity_conversion( conversion: ResolvedConversion, tp: AnyType ) -> ResolvedConversion: if ( is_identity(conversion) and conversion.source == conversion.target and is_type_var(conversion.source) ): return ResolvedConversion(replace(conversion, source=tp, target=tp)) else: return conversion def is_identity(conversion: ResolvedConversion) -> bool: return ( conversion.converter == identity and conversion.source == conversion.target and conversion.sub_conversion is None ) apischema-0.18.3/apischema/conversions/converters.py000066400000000000000000000145071467672046000226260ustar00rootroot00000000000000from collections import defaultdict from enum import Enum from functools import partial, wraps from types import new_class from typing import ( TYPE_CHECKING, Any, Callable, MutableMapping, Optional, Tuple, Type, TypeVar, Union, cast, overload, ) from apischema.cache import CacheAwareDict from apischema.conversions import LazyConversion from apischema.conversions.conversions import ( AnyConversion, Conversion, ConvOrFunc, resolve_conversion, ) from apischema.conversions.utils import Converter, is_convertible from apischema.methods import MethodOrProperty, MethodWrapper, is_method, method_class from apischema.type_names import type_name from apischema.types import AnyType from apischema.typing import is_type_var from apischema.utils import get_args2, get_origin_or_type, stop_signature_abuse from apischema.validation.errors import ValidationError if TYPE_CHECKING: pass _deserializers: MutableMapping[AnyType, Tuple[ConvOrFunc, ...]] = CacheAwareDict( defaultdict(tuple) ) _serializers: MutableMapping[AnyType, ConvOrFunc] = CacheAwareDict({}) Deserializer = TypeVar( "Deserializer", bound=Union[Callable, Conversion, staticmethod, type] ) Serializer = TypeVar("Serializer", bound=Union[Callable, Conversion, property, type]) default_deserialization: Callable[[type], Optional[AnyConversion]] = _deserializers.get def default_serialization(tp: Type) -> Optional[AnyConversion]: for sub_cls in getattr(tp, "__mro__", [tp]): if sub_cls in _serializers: conversion = _serializers[sub_cls] if ( sub_cls == tp or not isinstance(conversion, (Conversion, LazyConversion)) or conversion.inherited in (None, True) ): return conversion else: return None def check_converter_type(tp: AnyType) -> AnyType: origin = get_origin_or_type(tp) if not is_convertible(tp): raise TypeError(f"{origin} is not convertible") if not 
all(map(is_type_var, get_args2(tp))): raise TypeError("Generic conversion doesn't support specialization") return origin def _add_deserializer(conversion: ConvOrFunc, target: AnyType): target = check_converter_type(target) if conversion not in _deserializers[target]: _deserializers[target] = *_deserializers[target], conversion class DeserializerDescriptor(MethodWrapper[staticmethod]): def __set_name__(self, owner, name): super().__set_name__(owner, name) method = self._method.__get__(None, object) resolved = resolve_conversion(method, {owner.__name__: owner}) _add_deserializer(method, resolved.target) @overload def deserializer(deserializer: Deserializer) -> Deserializer: ... @overload def deserializer( *, lazy: Callable[[], Union[Converter, Conversion]], target: Type ) -> None: ... def deserializer( deserializer: Optional[Deserializer] = None, *, lazy: Optional[Callable[[], Union[Converter, Conversion]]] = None, target: Optional[Type] = None, ): if deserializer is not None: if isinstance(deserializer, staticmethod): return DeserializerDescriptor(deserializer) elif isinstance(deserializer, LazyConversion): stop_signature_abuse() else: resolved = resolve_conversion(deserializer) _add_deserializer(deserializer, resolved.target) return deserializer elif lazy is not None and target is not None: _add_deserializer(LazyConversion(lazy), target) else: stop_signature_abuse() def _add_serializer(conversion: ConvOrFunc, source: AnyType): source = check_converter_type(source) _serializers[source] = conversion class SerializerDescriptor(MethodWrapper[MethodOrProperty]): def __set_name__(self, owner, name): super().__set_name__(owner, name) _add_serializer(self._method, source=owner) @overload def serializer(serializer: Serializer) -> Serializer: ... @overload def serializer( *, lazy: Callable[[], Union[Converter, Conversion]], source: Type ) -> Callable[[Serializer], Serializer]: ... def serializer( serializer: Optional[Serializer] = None, *, lazy: Optional[Callable[[], Union[Converter, Conversion]]] = None, source: Optional[Type] = None, ): if serializer is not None: if is_method(serializer) and method_class(serializer) is None: # type: ignore return SerializerDescriptor(serializer) # type: ignore elif isinstance(serializer, LazyConversion): stop_signature_abuse() else: resolved = resolve_conversion(serializer) _add_serializer(serializer, resolved.source) return serializer elif lazy is not None and source is not None: _add_serializer(LazyConversion(lazy), source) else: stop_signature_abuse() def reset_deserializers(cls: Type): _deserializers.pop(cls, ...) def reset_serializer(cls: Type): _serializers.pop(cls, ...) 
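# Registration sketch (editor's illustration, not part of this module; ``Foo`` and its
# methods are hypothetical):
#
#     deserializer(Conversion(Foo.from_dict, source=dict, target=Foo))
#     serializer(Conversion(Foo.to_dict, source=Foo, target=dict))
#     # lazy variants defer conversion resolution, e.g. until forward references exist:
#     deserializer(lazy=lambda: Conversion(Foo.from_dict, source=dict, target=Foo), target=Foo)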
Func = TypeVar("Func", bound=Callable) class ValueErrorCatcher: def __init__(self, func: Callable[[Any], Any]): wraps(func)(self) self.func = func def __call__(self, arg): try: return self.func(arg) except ValueError as err: raise ValidationError(str(err)) def catch_value_error(func: Func) -> Func: return cast(Func, ValueErrorCatcher(func)) Cls = TypeVar("Cls", bound=type) def as_str(cls: Cls) -> Cls: deserializer(Conversion(catch_value_error(cls), source=str, target=cls)) serializer(Conversion(str, source=cls)) return cls EnumCls = TypeVar("EnumCls", bound=Type[Enum]) def as_names(cls: EnumCls, aliaser: Callable[[str], str] = lambda s: s) -> EnumCls: # Enum requires to call namespace __setitem__ def exec_body(namespace: dict): for elt in cls: # type: ignore namespace[elt.name] = aliaser(elt.name) if not issubclass(cls, Enum): raise TypeError("as_names must be called with Enum subclass") name_cls = type_name(None)( new_class(cls.__name__, (str, Enum), exec_body=exec_body) ) deserializer(Conversion(partial(getattr, cls), source=name_cls, target=cls)) def get_name(obj): return getattr(name_cls, obj.name) serializer(Conversion(get_name, source=cls, target=name_cls)) return cls apischema-0.18.3/apischema/conversions/utils.py000066400000000000000000000056131467672046000215720ustar00rootroot00000000000000from inspect import Parameter, signature from typing import Any, Callable, Dict, Generic, Optional, Tuple, Type, cast from apischema.types import AnyType from apischema.typing import ( get_type_hints, is_annotated, is_literal, is_new_type, is_type, is_union, ) from apischema.utils import get_origin_or_type Converter = Callable[[Any], Any] def converter_types( converter: Converter, source: AnyType = None, target: AnyType = None, namespace: Optional[Dict[str, Any]] = None, ) -> Tuple[AnyType, AnyType]: try: # in pre 3.9, Generic __new__ perturb signature of types if ( isinstance(converter, type) and converter.__new__ is Generic.__new__ is not object.__new__ and converter.__init__ is not object.__init__ # type: ignore ): parameters = list(signature(converter.__init__).parameters.values())[1:] # type: ignore else: parameters = list(signature(converter).parameters.values()) except ValueError: # builtin types if target is None and is_type(converter): target = cast(Type[Any], converter) if source is None: raise TypeError("Converter source is unknown") from None else: if not parameters: raise TypeError("converter must have at least one parameter") first_param, *other_params = parameters for p in other_params: if p.default is Parameter.empty and p.kind not in ( Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD, ): raise TypeError( "converter must have at most one parameter without default" ) if source is not None and target is not None: return source, target types = get_type_hints(converter, None, namespace, include_extras=True) if not types and is_type(converter): types = get_type_hints( converter.__new__, None, namespace, include_extras=True ) or get_type_hints( converter.__init__, None, namespace, include_extras=True # type: ignore ) if source is None: try: source = types.pop(first_param.name) except KeyError: raise TypeError("converter source is unknown") from None if target is None: if is_type(converter): target = cast(Type, converter) else: try: target = types.pop("return") except KeyError: raise TypeError("converter target is unknown") from None return source, target def is_convertible(tp: AnyType) -> bool: origin = get_origin_or_type(tp) return is_new_type(tp) or ( is_type(origin) and not (tp is 
Any or is_literal(tp) or is_annotated(tp) or is_union(origin)) ) apischema-0.18.3/apischema/conversions/visitor.py000066400000000000000000000171271467672046000221340ustar00rootroot00000000000000from contextlib import contextmanager, suppress from dataclasses import replace from typing import ( Any, Collection, Generic, Iterable, Mapping, Optional, Sequence, Tuple, TypeVar, Union, ) from apischema.conversions import LazyConversion from apischema.conversions.conversions import ( AnyConversion, DefaultConversion, ResolvedConversion, ResolvedConversions, handle_identity_conversion, is_identity, resolve_any_conversion, ) from apischema.conversions.utils import is_convertible from apischema.metadata.implem import ConversionMetadata from apischema.metadata.keys import CONVERSION_METADATA from apischema.types import AnyType from apischema.typing import is_type_var from apischema.utils import ( context_setter, get_args2, get_origin_or_type, identity, is_subclass, substitute_type_vars, subtyping_substitution, ) from apischema.visitor import Result, Unsupported, Visitor Deserialization = ResolvedConversions Serialization = ResolvedConversion Conv = TypeVar("Conv") class ConversionsVisitor(Visitor[Result], Generic[Conv, Result]): def __init__(self, default_conversion: DefaultConversion): self.default_conversion = default_conversion self._conversion: Optional[AnyConversion] = None def _has_conversion( self, tp: AnyType, conversion: Optional[AnyConversion] ) -> Tuple[bool, Optional[Conv]]: raise NotImplementedError def _annotated_conversion( self, annotation: ConversionMetadata ) -> Optional[AnyConversion]: raise NotImplementedError def annotated(self, tp: AnyType, annotations: Sequence[Any]) -> Result: for annotation in reversed(annotations): if isinstance(annotation, Mapping) and CONVERSION_METADATA in annotation: with self._replace_conversion( self._annotated_conversion(annotation[CONVERSION_METADATA]) ): return super().annotated(tp, annotations) return super().annotated(tp, annotations) def _union_results(self, types: Iterable[AnyType]) -> Sequence[Result]: results = [] for alt in types: with suppress(Unsupported): results.append(self.visit(alt)) if not results: raise Unsupported(Union[tuple(types)]) return results def _visited_union(self, results: Sequence[Result]) -> Result: raise NotImplementedError def union(self, types: Sequence[AnyType]) -> Result: return self._visited_union(self._union_results(types)) @contextmanager def _replace_conversion(self, conversion: Optional[AnyConversion]): with context_setter(self): self._conversion = resolve_any_conversion(conversion) or None yield def visit_with_conv( self, tp: AnyType, conversion: Optional[AnyConversion] ) -> Result: with self._replace_conversion(conversion): return self.visit(tp) def _visit_conversion( self, tp: AnyType, conversion: Conv, dynamic: bool, next_conversion: Optional[AnyConversion], ) -> Result: raise NotImplementedError def visit_conversion( self, tp: AnyType, conversion: Optional[Conv], dynamic: bool, next_conversion: Optional[AnyConversion] = None, ) -> Result: if conversion is not None: return self._visit_conversion(tp, conversion, dynamic, next_conversion) else: with self._replace_conversion(next_conversion): return super().visit(tp) def visit(self, tp: AnyType) -> Result: if not is_convertible(tp): return self.visit_conversion(tp, None, False, self._conversion) dynamic, conversion = self._has_conversion(tp, self._conversion) if not dynamic: _, conversion = self._has_conversion( tp, 
self.default_conversion(get_origin_or_type(tp)) ) next_conversion = None if not dynamic and is_subclass(tp, Collection) and not is_subclass(tp, str): next_conversion = self._conversion return self.visit_conversion(tp, conversion, dynamic, next_conversion) def sub_conversion( conversion: ResolvedConversion, next_conversion: Optional[AnyConversion] ) -> Optional[AnyConversion]: # TODO why did I use LazyConversion here? return ( LazyConversion(lambda: conversion.sub_conversion), LazyConversion(lambda: next_conversion), ) class DeserializationVisitor(ConversionsVisitor[Deserialization, Result]): @staticmethod def _has_conversion( tp: AnyType, conversion: Optional[AnyConversion] ) -> Tuple[bool, Optional[Deserialization]]: identity_conv, result = False, [] for conv in resolve_any_conversion(conversion): conv = handle_identity_conversion(conv, tp) if is_subclass(conv.target, tp): if is_identity(conv): if identity_conv: continue identity_conv = True conv = ResolvedConversion(replace(conv, sub_conversion=identity)) if is_type_var(conv.source) or any( map(is_type_var, get_args2(conv.source)) ): _, substitution = subtyping_substitution(tp, conv.target) conv = replace( conv, source=substitute_type_vars(conv.source, substitution) ) result.append(ResolvedConversion(replace(conv, target=tp))) if identity_conv and len(result) == 1: return True, None else: return bool(result), tuple(result) or None def _annotated_conversion( self, annotation: ConversionMetadata ) -> Optional[AnyConversion]: return annotation.deserialization def _visit_conversion( self, tp: AnyType, conversion: Deserialization, dynamic: bool, next_conversion: Optional[AnyConversion], ) -> Result: results = [ self.visit_with_conv(conv.source, sub_conversion(conv, next_conversion)) for conv in conversion ] return self._visited_union(results) class SerializationVisitor(ConversionsVisitor[Serialization, Result]): @staticmethod def _has_conversion( tp: AnyType, conversion: Optional[AnyConversion] ) -> Tuple[bool, Optional[Serialization]]: for conv in resolve_any_conversion(conversion): conv = handle_identity_conversion(conv, tp) if is_subclass(tp, conv.source): if is_identity(conv): return True, None if is_type_var(conv.target) or any( map(is_type_var, get_args2(conv.target)) ): substitution, _ = subtyping_substitution(conv.source, tp) conv = replace( conv, target=substitute_type_vars(conv.target, substitution) ) return True, ResolvedConversion(replace(conv, source=tp)) else: return False, None def _annotated_conversion( self, annotation: ConversionMetadata ) -> Optional[AnyConversion]: return annotation.serialization def _visit_conversion( self, tp: AnyType, conversion: Serialization, dynamic: bool, next_conversion: Optional[AnyConversion], ) -> Result: return self.visit_with_conv( conversion.target, sub_conversion(conversion, next_conversion) ) apischema-0.18.3/apischema/dataclasses.py000066400000000000000000000016021467672046000203430ustar00rootroot00000000000000# flake8: noqa from dataclasses import * def _replace(__obj, **changes): from dataclasses import _FIELD_INITVAR, _FIELDS # type:ignore from dataclasses import replace as replace_ from apischema.fields import FIELDS_SET_ATTR, fields_set, set_fields # Fix https://bugs.python.org/issue36470 assert is_dataclass(__obj) for name, field in getattr(__obj, _FIELDS).items(): if field._field_type == _FIELD_INITVAR and name not in changes: if field.default is not MISSING: changes[name] = field.default elif field.default_factory is not MISSING: changes[name] = field.default_factory() result = 
replace_(__obj, **changes) if hasattr(__obj, FIELDS_SET_ATTR): set_fields(result, *fields_set(__obj), *changes, overwrite=True) return result globals()[replace.__name__] = _replace del _replace apischema-0.18.3/apischema/dependencies.py000066400000000000000000000041621467672046000205060ustar00rootroot00000000000000from collections import defaultdict from dataclasses import dataclass from typing import ( AbstractSet, Any, Collection, Dict, List, Mapping, MutableMapping, Optional, Set, Tuple, overload, ) from apischema.cache import CacheAwareDict from apischema.objects.fields import check_field_or_name, get_field_name _dependent_requireds: MutableMapping[ type, List[Tuple[Any, Collection[Any]]] ] = CacheAwareDict(defaultdict(list)) DependentRequired = Mapping[str, AbstractSet[str]] def get_dependent_required(cls: type) -> DependentRequired: result: Dict[str, Set[str]] = defaultdict(set) for sub_cls in cls.__mro__: for field, required in _dependent_requireds[sub_cls]: result[get_field_name(field)].update(map(get_field_name, required)) return result @dataclass class DependentRequiredDescriptor: fields: Mapping[Any, Collection[Any]] groups: Collection[Collection[Any]] def __set_name__(self, owner, name): setattr(owner, name, None) dependent_required(self.fields, *self.groups, owner=owner) @overload def dependent_required( fields: Mapping[Any, Collection[Any]], *groups: Collection[Any], owner: Optional[type] = None, ): ... @overload def dependent_required(*groups: Collection[Any], owner: Optional[type] = None): ... def dependent_required(*groups: Collection[Any], owner: Optional[type] = None): # type: ignore if not groups: return fields: Mapping[Any, Collection[Any]] = {} if isinstance(groups[0], Mapping): fields, *groups = groups # type: ignore if owner is None: return DependentRequiredDescriptor(fields, groups) else: dep_req = _dependent_requireds[owner] for field, required in fields.items(): dep_req.append((field, required)) check_field_or_name(field) for req in required: check_field_or_name(req) for group in map(list, groups): for i, field in enumerate(group): check_field_or_name(field) dep_req.append((field, [group[:i], group[i:]])) apischema-0.18.3/apischema/deserialization/000077500000000000000000000000001467672046000206715ustar00rootroot00000000000000apischema-0.18.3/apischema/deserialization/__init__.py000066400000000000000000000755621467672046000230210ustar00rootroot00000000000000import collections.abc import dataclasses import inspect import re from collections import defaultdict from enum import Enum from functools import lru_cache, partial from typing import ( TYPE_CHECKING, Any, Callable, Collection, Dict, Mapping, Optional, Pattern, Sequence, Set, Tuple, Type, TypeVar, Union, overload, ) from apischema.aliases import Aliaser from apischema.cache import cache from apischema.constraints import Constraints, merge_constraints from apischema.conversions.conversions import AnyConversion, DefaultConversion from apischema.conversions.converters import ValueErrorCatcher from apischema.conversions.visitor import ( Deserialization, DeserializationVisitor, sub_conversion, ) from apischema.dependencies import get_dependent_required from apischema.deserialization.coercion import Coerce, Coercer from apischema.deserialization.flattened import get_deserialization_flattened_aliases from apischema.deserialization.methods import ( AdditionalField, AnyMethod, BoolMethod, CoercerMethod, ConstrainedFloatMethod, ConstrainedIntMethod, ConstrainedStrMethod, Constraint, Constructor, ConversionAlternative, 
ConversionMethod, ConversionUnionMethod, ConversionWithValueErrorMethod, DefaultField, DeserializationMethod, DiscriminatorMethod, FactoryField, Field, FieldsConstructor, FlattenedField, FloatMethod, FrozenSetMethod, IntMethod, ListCheckOnlyMethod, ListMethod, LiteralMethod, MappingCheckOnly, MappingMethod, NoConstructor, NoneMethod, ObjectMethod, OptionalMethod, PatternField, RawConstructor, RawConstructorCopy, RecMethod, SetMethod, SimpleObjectMethod, StrMethod, SubprimitiveMethod, TupleMethod, TypeCheckMethod, UnionByTypeMethod, UnionMethod, ValidatorMethod, VariadicTupleMethod, ) from apischema.discriminators import Discriminator, get_inherited_discriminator from apischema.json_schema.patterns import infer_pattern from apischema.metadata.implem import ValidatorsMetadata from apischema.metadata.keys import ( DISCRIMINATOR_METADATA, SCHEMA_METADATA, VALIDATORS_METADATA, ) from apischema.objects import ObjectField from apischema.objects.fields import FieldKind from apischema.objects.visitor import DeserializationObjectVisitor from apischema.recursion import RecursiveConversionsVisitor from apischema.schemas import Schema, get_schema from apischema.types import PRIMITIVE_TYPES, AnyType, NoneType from apischema.typing import get_args, get_origin, is_type, is_typed_dict, is_union from apischema.utils import ( CollectionOrPredicate, Lazy, as_predicate, get_origin_or_type, literal_values, opt_or, to_pascal_case, to_snake_case, ) from apischema.validation import get_validators from apischema.validation.validators import Validator if TYPE_CHECKING: from apischema.settings import ConstraintError MISSING_PROPERTY = "missing property" UNEXPECTED_PROPERTY = "unexpected property" T = TypeVar("T") Factory = Callable[[Optional[Constraints], Sequence[Validator]], DeserializationMethod] JSON_TYPES = {dict, list, *PRIMITIVE_TYPES} # FloatMethod can require "copy", because it can cast integer to float CHECK_ONLY_METHODS = ( NoneMethod, BoolMethod, IntMethod, StrMethod, ListCheckOnlyMethod, MappingCheckOnly, ) def check_only(method: DeserializationMethod) -> bool: return ( isinstance(method, CHECK_ONLY_METHODS) or ( isinstance(method, OptionalMethod) and method.coercer is None and check_only(method.value_method) ) or ( isinstance(method, UnionMethod) and all(map(check_only, method.alt_methods)) ) or ( isinstance(method, UnionByTypeMethod) and all(map(check_only, method.method_by_cls.values())) ) or (isinstance(method, TypeCheckMethod) and check_only(method.fallback)) ) def is_raw_dataclass(cls: type) -> bool: return ( dataclasses.is_dataclass(cls) and type(cls) is type # no metaclass and "__slots__" not in cls.__dict__ and not hasattr(cls, "__post_init__") and all(f.init for f in dataclasses.fields(cls)) and cls.__new__ is object.__new__ and ( cls.__setattr__ is object.__setattr__ or getattr(cls, dataclasses._PARAMS).frozen # type: ignore ) and ( list(inspect.signature(cls.__init__, follow_wrapped=False).parameters) == ["__dataclass_self__" if "self" in dataclasses.fields(cls) else "self"] + [f.name for f in dataclasses.fields(cls)] ) ) @dataclasses.dataclass(frozen=True) class DeserializationMethodFactory: factory: Factory cls: Optional[type] = None constraints: Optional[Constraints] = None validators: Tuple[Validator, ...] 
= () def merge( self, constraints: Optional[Constraints], validators: Sequence[Validator] = () ) -> "DeserializationMethodFactory": if constraints is None and not validators: return self return dataclasses.replace( self, constraints=merge_constraints(self.constraints, constraints), validators=(*validators, *self.validators), ) # private intermediate method instead of decorated property because of mypy @lru_cache() def _method(self) -> DeserializationMethod: return self.factory(self.constraints, self.validators) @property def method(self) -> DeserializationMethod: return self._method() def get_constraints(schema: Optional[Schema]) -> Optional[Constraints]: return schema.constraints if schema is not None else None constraint_classes = {cls.__name__: cls for cls in Constraint.__subclasses__()} def preformat_error( error: "ConstraintError", constraint: Any ) -> Union[str, Callable[[Any], str]]: return ( error.format(constraint) if isinstance(error, str) else partial(error, constraint) ) def constraints_validators( constraints: Optional[Constraints], ) -> Mapping[type, Tuple[Constraint, ...]]: from apischema import settings result: Dict[type, Tuple[Constraint, ...]] = defaultdict(tuple) if constraints is not None: for name, attr, metadata in constraints.attr_and_metata: if attr is None or attr is False: continue error = preformat_error( getattr(settings.errors, to_snake_case(metadata.alias)), attr if not isinstance(attr, type(re.compile(r""))) else attr.pattern, ) constraint_cls = constraint_classes[ to_pascal_case(metadata.alias) + "Constraint" ] result[metadata.cls] = (*result[metadata.cls], constraint_cls(error, attr)) # type: ignore if float in result: result[int] = result[float] return result class DeserializationMethodVisitor( RecursiveConversionsVisitor[Deserialization, DeserializationMethodFactory], DeserializationVisitor[DeserializationMethodFactory], DeserializationObjectVisitor[DeserializationMethodFactory], ): def __init__( self, additional_properties: bool, aliaser: Aliaser, coercer: Optional[Coercer], default_conversion: DefaultConversion, fall_back_on_default: bool, no_copy: bool, pass_through: CollectionOrPredicate[type], ): super().__init__(default_conversion) self.additional_properties = additional_properties self.aliaser = aliaser self.coercer = coercer self.fall_back_on_default = fall_back_on_default self.no_copy = no_copy self.pass_through = pass_through self.pass_through_type = as_predicate(pass_through) def _recursive_result( self, lazy: Lazy[DeserializationMethodFactory] ) -> DeserializationMethodFactory: def factory( constraints: Optional[Constraints], validators: Sequence[Validator] ) -> DeserializationMethod: return RecMethod(lambda: lazy().merge(constraints, validators).method) return DeserializationMethodFactory(factory) def visit_not_recursive(self, tp: AnyType) -> DeserializationMethodFactory: return deserialization_method_factory( tp, self.additional_properties, self.aliaser, self.coercer, self._conversion, self.default_conversion, self.fall_back_on_default, self.no_copy, self.pass_through, ) def discriminate( self, discriminator: Discriminator, types: Sequence[AnyType] ) -> DeserializationMethodFactory: mapping = {} for key, tp in discriminator.get_mapping(types).items(): mapping[key] = self.visit(tp) def factory(constraints: Optional[Constraints], _) -> DeserializationMethod: from apischema import settings return DiscriminatorMethod( self.aliaser(discriminator.alias), {key: fact.merge(constraints).method for key, fact in mapping.items()}, 
settings.errors.missing_property, preformat_error(settings.errors.one_of, list(mapping)), ) return self._factory(factory) def annotated( self, tp: AnyType, annotations: Sequence[Any] ) -> DeserializationMethodFactory: for annotation in reversed(annotations): if ( isinstance(annotation, Mapping) and DISCRIMINATOR_METADATA in annotation and is_union(get_origin(tp)) ): factory = self.discriminate( annotation[DISCRIMINATOR_METADATA], get_args(tp) ) break else: factory = super().annotated(tp, annotations) for annotation in reversed(annotations): if isinstance(annotation, Mapping): factory = factory.merge( get_constraints(annotation.get(SCHEMA_METADATA)), annotation.get( VALIDATORS_METADATA, ValidatorsMetadata(()) ).validators, ) return factory def _factory( self, factory: Factory, cls: Optional[type] = None, validation: bool = True ) -> DeserializationMethodFactory: def wrapper( constraints: Optional[Constraints], validators: Sequence[Validator] ) -> DeserializationMethod: method: DeserializationMethod if validation and validators: method = ValidatorMethod( factory(constraints, ()), validators, self.aliaser ) else: method = factory(constraints, validators) if cls is not None and self.coercer is not None: method = CoercerMethod(self.coercer, cls, method) return method return DeserializationMethodFactory(wrapper, cls) def any(self) -> DeserializationMethodFactory: def factory(constraints: Optional[Constraints], _) -> DeserializationMethod: return AnyMethod(dict(constraints_validators(constraints))) return self._factory(factory) def collection( self, cls: Type[Collection], value_type: AnyType ) -> DeserializationMethodFactory: value_factory = self.visit(value_type) def factory(constraints: Optional[Constraints], _) -> DeserializationMethod: value_method = value_factory.method list_constraints = constraints_validators(constraints)[list] method: DeserializationMethod if issubclass(cls, collections.abc.Set) and not issubclass(cls, frozenset): return SetMethod(list_constraints, value_method) if self.no_copy and check_only(value_method): method = ListCheckOnlyMethod(list_constraints, value_method) else: method = ListMethod(list_constraints, value_method) if issubclass(cls, tuple): return VariadicTupleMethod(method) if issubclass(cls, frozenset): return FrozenSetMethod(method) return method return self._factory(factory, list) def enum(self, cls: Type[Enum]) -> DeserializationMethodFactory: return self.literal(list(cls)) def literal(self, values: Sequence[Any]) -> DeserializationMethodFactory: def factory(constraints: Optional[Constraints], _) -> DeserializationMethod: from apischema import settings value_map = dict(zip(literal_values(values), values)) return LiteralMethod( value_map, preformat_error(settings.errors.one_of, list(value_map)), self.coercer, tuple(set(map(type, value_map))), ) return self._factory(factory) def mapping( self, cls: Type[Mapping], key_type: AnyType, value_type: AnyType ) -> DeserializationMethodFactory: key_factory, value_factory = self.visit(key_type), self.visit(value_type) def factory(constraints: Optional[Constraints], _) -> DeserializationMethod: key_method, value_method = key_factory.method, value_factory.method dict_constraints = constraints_validators(constraints)[dict] if self.no_copy and check_only(key_method) and check_only(value_method): return MappingCheckOnly(dict_constraints, key_method, value_method) else: return MappingMethod(dict_constraints, key_method, value_method) return self._factory(factory, dict) def object( self, tp: Type, fields: 
Sequence[ObjectField] ) -> DeserializationMethodFactory: cls = get_origin_or_type(tp) field_factories = [ self.visit_with_conv(f.type, f.deserialization).merge( get_constraints(f.schema), f.validators ) for f in fields ] def factory( constraints: Optional[Constraints], validators: Sequence[Validator] ) -> DeserializationMethod: from apischema import settings alias_by_name = {field.name: self.aliaser(field.alias) for field in fields} requiring: Dict[str, Set[str]] = defaultdict(set) for f, reqs in get_dependent_required(cls).items(): for req in reqs: requiring[req].add(alias_by_name[f]) normal_fields, flattened_fields, pattern_fields = [], [], [] additional_field = None for field, field_factory in zip(fields, field_factories): field_method: DeserializationMethod = field_factory.method fall_back_on_default = ( field.fall_back_on_default or self.fall_back_on_default ) if field.flattened: flattened_aliases = get_deserialization_flattened_aliases( cls, field, self.default_conversion ) flattened_fields.append( FlattenedField( field.name, tuple(set(map(self.aliaser, flattened_aliases))), field_method, fall_back_on_default, ) ) elif field.pattern_properties is not None: field_pattern = field.pattern_properties if field_pattern is ...: field_pattern = infer_pattern( field.type, self.default_conversion ) assert isinstance(field_pattern, Pattern) pattern_fields.append( PatternField( field.name, field_pattern, field_method, fall_back_on_default, ) ) elif field.additional_properties: additional_field = AdditionalField( field.name, field_method, fall_back_on_default ) else: normal_fields.append( Field( field.name, self.aliaser(field.alias), field_method, field.required, requiring[field.name], fall_back_on_default, ) ) object_constraints = constraints_validators(constraints)[dict] all_alliases = set(alias_by_name.values()) constructor: Optional[Constructor] = None if is_typed_dict(cls): constructor = NoConstructor(cls) elif ( settings.deserialization.override_dataclass_constructors and is_raw_dataclass(cls) ): constructor = FieldsConstructor( cls, len(fields), tuple( DefaultField(f.name, f.default) for f in dataclasses.fields(cls) if f.default is not dataclasses.MISSING ), tuple( FactoryField(f.name, f.default_factory) for f in dataclasses.fields(cls) if f.default_factory is not dataclasses.MISSING ), ) if ( not object_constraints and not flattened_fields and not pattern_fields and not additional_field and (is_typed_dict(cls) == self.additional_properties) and (not is_typed_dict(cls) or self.no_copy) and not validators and all( check_only(f.method) and f.alias == f.name and not f.fall_back_on_default and not f.required_by for f in normal_fields ) ): return SimpleObjectMethod( constructor or RawConstructorCopy(cls), tuple(normal_fields), all_alliases, is_typed_dict(cls), settings.errors.missing_property, settings.errors.unexpected_property, ) return ObjectMethod( constructor or RawConstructor(cls), object_constraints, tuple(normal_fields), tuple(flattened_fields), tuple(pattern_fields), additional_field, all_alliases, self.additional_properties, is_typed_dict(cls), tuple(validators), tuple( (f.name, f.default_factory) for f in fields if f.kind == FieldKind.WRITE_ONLY ), {field.name for field in fields if field.post_init}, self.aliaser, settings.errors.missing_property, settings.errors.unexpected_property, ) return self._factory(factory, dict, validation=False) def primitive(self, cls: Type) -> DeserializationMethodFactory: def factory(constraints: Optional[Constraints], _) -> DeserializationMethod: 
validators = constraints_validators(constraints)[cls] if cls is NoneType: return NoneMethod() elif cls is bool: return BoolMethod() elif cls is str: return ConstrainedStrMethod(validators) if validators else StrMethod() elif cls is int: return ConstrainedIntMethod(validators) if validators else IntMethod() elif cls is float: return ( ConstrainedFloatMethod(validators) if validators else FloatMethod() ) else: raise NotImplementedError return self._factory(factory, cls) def subprimitive(self, cls: Type, superclass: Type) -> DeserializationMethodFactory: primitive_factory = self.primitive(superclass) def factory( constraints: Optional[Constraints], validators: Sequence[Validator] ) -> DeserializationMethod: return SubprimitiveMethod(cls, primitive_factory.merge(constraints).method) return self._factory(factory) def tuple(self, types: Sequence[AnyType]) -> DeserializationMethodFactory: elt_factories = [self.visit(tp) for tp in types] def factory(constraints: Optional[Constraints], _) -> DeserializationMethod: def len_error(constraints: Constraints) -> Union[str, Callable[[Any], str]]: return constraints_validators(constraints)[list][0].error return TupleMethod( constraints_validators(constraints)[list], len_error(Constraints(min_items=len(types))), len_error(Constraints(max_items=len(types))), tuple(fact.method for fact in elt_factories), ) return self._factory(factory, list) def union(self, types: Sequence[AnyType]) -> DeserializationMethodFactory: if discriminator := get_inherited_discriminator(types): return self.discriminate(discriminator, types) alt_factories = self._union_results(types) if len(alt_factories) == 1: return alt_factories[0] def factory(constraints: Optional[Constraints], _) -> DeserializationMethod: alt_methods = tuple( fact.merge(constraints).method for fact in alt_factories ) # method_by_cls cannot replace alt_methods, because there could be several # methods for one class method_by_cls = dict( zip((f.cls for f in alt_factories if f.cls is not None), alt_methods) ) if NoneType in types and len(alt_methods) == 2: value_method = next( meth for fact, meth in zip(alt_factories, alt_methods) if fact.cls is not NoneType ) return OptionalMethod(value_method, self.coercer) elif len(method_by_cls) == len(alt_factories) and not any( isinstance(x, CoercerMethod) for x in alt_methods ): # Coercion induces a different type in data than type to deserialize. # Prefer UnionMethod in this case. 
return UnionByTypeMethod(method_by_cls) else: return UnionMethod(alt_methods) return self._factory(factory) def _visit_conversion( self, tp: AnyType, conversion: Deserialization, dynamic: bool, next_conversion: Optional[AnyConversion], ) -> DeserializationMethodFactory: assert conversion conv_factories = [ self.visit_with_conv(conv.source, sub_conversion(conv, next_conversion)) for conv in conversion ] def factory(constraints: Optional[Constraints], _) -> DeserializationMethod: conv_alternatives = tuple( ConversionAlternative( conv.converter.func if isinstance(conv.converter, ValueErrorCatcher) else conv.converter, (fact if dynamic else fact.merge(constraints)).method, isinstance(conv.converter, ValueErrorCatcher), ) for conv, fact in zip(conversion, conv_factories) ) if len(conv_alternatives) > 1: return ConversionUnionMethod(conv_alternatives) elif conv_alternatives[0].value_error: return ConversionWithValueErrorMethod( conv_alternatives[0].converter, conv_alternatives[0].method ) else: return ConversionMethod( conv_alternatives[0].converter, conv_alternatives[0].method ) return self._factory(factory, validation=not dynamic) def visit_conversion( self, tp: AnyType, conversion: Optional[Deserialization], dynamic: bool, next_conversion: Optional[AnyConversion] = None, ) -> DeserializationMethodFactory: factory = super().visit_conversion(tp, conversion, dynamic, next_conversion) if not dynamic: factory = factory.merge(get_constraints(get_schema(tp)), get_validators(tp)) if get_args(tp): factory = factory.merge( get_constraints(get_schema(get_origin(tp))), get_validators(get_origin(tp)), ) cls = get_origin_or_type(tp) if ( is_type(cls) # check for type first in order to have it hashable and cls not in JSON_TYPES # eliminate most common types and self.pass_through_type(cls) and not is_typed_dict(cls) # typed dict isinstance cannot be checked ): def wrapper( constraints: Optional[Constraints], _: Sequence[Validator] ) -> DeserializationMethod: return TypeCheckMethod(cls, factory.merge(constraints, ()).method) return self._factory(wrapper) return factory @cache def deserialization_method_factory( tp: AnyType, additional_properties: bool, aliaser: Aliaser, coercer: Optional[Coercer], conversion: Optional[AnyConversion], default_conversion: DefaultConversion, fall_back_on_default: bool, no_copy: bool, pass_through: CollectionOrPredicate[type], ) -> DeserializationMethodFactory: return DeserializationMethodVisitor( additional_properties, aliaser, coercer, default_conversion, fall_back_on_default, no_copy, pass_through, ).visit_with_conv(tp, conversion) @overload def deserialization_method( type: Type[T], *, additional_properties: Optional[bool] = None, aliaser: Optional[Aliaser] = None, coerce: Optional[Coerce] = None, conversion: Optional[AnyConversion] = None, default_conversion: Optional[DefaultConversion] = None, fall_back_on_default: Optional[bool] = None, no_copy: Optional[bool] = None, pass_through: Optional[CollectionOrPredicate[type]] = None, schema: Optional[Schema] = None, validators: Collection[Callable] = (), ) -> Callable[[Any], T]: ... 
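# Illustrative usage sketch, not part of the library source. `deserialization_method`
# (implemented below, after its overloads) pre-compiles a deserializer for a given
# type so it can be reused without per-call setup; `Foo` is a hypothetical dataclass
# introduced only for this example.
#
#     from dataclasses import dataclass
#     from apischema import deserialization_method
#
#     @dataclass
#     class Foo:
#         bar: int
#
#     deserialize_foo = deserialization_method(Foo, coerce=True)
#     assert deserialize_foo({"bar": "0"}) == Foo(0)  # "0" coerced to int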
@overload def deserialization_method( type: AnyType, *, additional_properties: Optional[bool] = None, aliaser: Optional[Aliaser] = None, coerce: Optional[Coerce] = None, conversion: Optional[AnyConversion] = None, default_conversion: Optional[DefaultConversion] = None, fall_back_on_default: Optional[bool] = None, no_copy: Optional[bool] = None, pass_through: Optional[CollectionOrPredicate[type]] = None, schema: Optional[Schema] = None, validators: Collection[Callable] = (), ) -> Callable[[Any], Any]: ... def deserialization_method( type: AnyType, *, additional_properties: Optional[bool] = None, aliaser: Optional[Aliaser] = None, coerce: Optional[Coerce] = None, conversion: Optional[AnyConversion] = None, default_conversion: Optional[DefaultConversion] = None, fall_back_on_default: Optional[bool] = None, no_copy: Optional[bool] = None, pass_through: Optional[CollectionOrPredicate[type]] = None, schema: Optional[Schema] = None, validators: Collection[Callable] = (), ) -> Callable[[Any], Any]: from apischema import settings coercer: Optional[Coercer] = None if callable(coerce): coercer = coerce elif opt_or(coerce, settings.deserialization.coerce): coercer = settings.deserialization.coercer pass_through = opt_or(pass_through, settings.deserialization.pass_through) if isinstance(pass_through, Collection) and not isinstance(pass_through, tuple): pass_through = tuple(pass_through) return ( deserialization_method_factory( type, opt_or(additional_properties, settings.additional_properties), opt_or(aliaser, settings.aliaser), coercer, conversion, opt_or(default_conversion, settings.deserialization.default_conversion), opt_or(fall_back_on_default, settings.deserialization.fall_back_on_default), opt_or(no_copy, settings.deserialization.no_copy), pass_through, # type: ignore ) .merge(get_constraints(schema), tuple(map(Validator, validators))) .method.deserialize ) @overload def deserialize( type: Type[T], data: Any, *, additional_properties: Optional[bool] = None, aliaser: Optional[Aliaser] = None, coerce: Optional[Coerce] = None, conversion: Optional[AnyConversion] = None, default_conversion: Optional[DefaultConversion] = None, fall_back_on_default: Optional[bool] = None, no_copy: Optional[bool] = None, pass_through: Optional[CollectionOrPredicate[type]] = None, schema: Optional[Schema] = None, validators: Collection[Callable] = (), ) -> T: ... @overload def deserialize( type: AnyType, data: Any, *, additional_properties: Optional[bool] = None, aliaser: Optional[Aliaser] = None, coerce: Optional[Coerce] = None, conversion: Optional[AnyConversion] = None, default_conversion: Optional[DefaultConversion] = None, fall_back_on_default: Optional[bool] = None, no_copy: Optional[bool] = None, pass_through: Optional[CollectionOrPredicate[type]] = None, schema: Optional[Schema] = None, validators: Collection[Callable] = (), ) -> Any: ... 
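# Illustrative usage sketch, not part of the library source. `deserialize`
# (implemented below) is the one-shot entry point: it builds a deserialization
# method through the cached factory above and applies it to `data`, raising
# `ValidationError` on invalid input.
#
#     from typing import List
#     from apischema import ValidationError, deserialize
#
#     assert deserialize(List[int], ["1", 2], coerce=True) == [1, 2]
#     try:
#         deserialize(List[int], "not a list")
#     except ValidationError:
#         pass  # bad type: expected a list, got a str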
def deserialize( type: AnyType, data: Any, *, additional_properties: Optional[bool] = None, aliaser: Optional[Aliaser] = None, coerce: Optional[Coerce] = None, conversion: Optional[AnyConversion] = None, default_conversion: Optional[DefaultConversion] = None, fall_back_on_default: Optional[bool] = None, no_copy: Optional[bool] = None, pass_through: Optional[CollectionOrPredicate[type]] = None, schema: Optional[Schema] = None, validators: Collection[Callable] = (), ) -> Any: return deserialization_method( type, additional_properties=additional_properties, aliaser=aliaser, coerce=coerce, conversion=conversion, default_conversion=default_conversion, fall_back_on_default=fall_back_on_default, no_copy=no_copy, pass_through=pass_through, schema=schema, validators=validators, )(data) apischema-0.18.3/apischema/deserialization/coercion.py000066400000000000000000000026731467672046000230540ustar00rootroot00000000000000from typing import Any, Callable, Dict, Type, TypeVar, Union from apischema.json_schema.types import bad_type from apischema.types import NoneType T = TypeVar("T") Coercer = Callable[[Type[T], Any], T] _bool_pairs = ( ("0", "1"), ("f", "t"), ("n", "y"), ("no", "yes"), ("false", "true"), ("off", "on"), ("ko", "ok"), ) STR_TO_BOOL: Dict[str, bool] = {} for false, true in _bool_pairs: for s, value in ((false, False), (true, True)): STR_TO_BOOL[s.lower()] = value STR_NONE_VALUES = {""} def coerce(cls: Type[T], data: Any) -> T: if cls is NoneType: if data is None or data in STR_NONE_VALUES: return None # type: ignore else: raise bad_type(data, cls) elif isinstance(data, cls): return data elif cls is bool: if isinstance(data, str): return STR_TO_BOOL[data.lower()] # type: ignore elif isinstance(data, int): return bool(data) # type: ignore else: raise bad_type(data, cls) elif cls in (int, float): try: return cls(data) # type: ignore except ValueError: raise bad_type(data, cls) elif cls is str: if isinstance(data, (int, float)) and not isinstance(data, bool): return str(data) # type: ignore else: raise bad_type(data, cls) else: raise bad_type(data, cls) Coerce = Union[bool, Coercer] apischema-0.18.3/apischema/deserialization/flattened.py000066400000000000000000000032711467672046000232140ustar00rootroot00000000000000from typing import Iterator, Mapping, Sequence, Type from apischema.conversions.conversions import DefaultConversion from apischema.conversions.visitor import DeserializationVisitor from apischema.objects import ObjectField from apischema.objects.visitor import DeserializationObjectVisitor from apischema.types import AnyType from apischema.utils import get_origin_or_type from apischema.visitor import Unsupported class InitFlattenedAliasVisitor( DeserializationObjectVisitor[Iterator[str]], DeserializationVisitor[Iterator[str]] ): def mapping( self, cls: Type[Mapping], key_type: AnyType, value_type: AnyType ) -> Iterator[str]: yield from () def object(self, tp: AnyType, fields: Sequence[ObjectField]) -> Iterator[str]: for field in fields: if field.flattened: yield from get_deserialization_flattened_aliases( get_origin_or_type(tp), field, self.default_conversion ) elif not field.is_aggregate: yield field.alias def _visited_union(self, results: Sequence[Iterator[str]]) -> Iterator[str]: if len(results) != 1: raise NotImplementedError return results[0] def get_deserialization_flattened_aliases( cls: Type, field: ObjectField, default_conversion: DefaultConversion ) -> Iterator[str]: assert field.flattened try: yield from InitFlattenedAliasVisitor(default_conversion).visit_with_conv( 
field.type, field.deserialization ) except (NotImplementedError, Unsupported): raise TypeError( f"Flattened field {cls.__name__}.{field.name} must have an object type" ) from None apischema-0.18.3/apischema/deserialization/methods.py000066400000000000000000000716551467672046000227240ustar00rootroot00000000000000from dataclasses import dataclass, field from typing import ( AbstractSet, Any, Callable, Dict, List, Mapping, Optional, Pattern, Sequence, Tuple, Union, ) from apischema.aliases import Aliaser from apischema.conversions.utils import Converter from apischema.deserialization.coercion import Coercer from apischema.json_schema.types import bad_type from apischema.types import AnyType, NoneType from apischema.utils import Lazy from apischema.validation.errors import ( ErrorKey, ErrorMsg, ValidationError, merge_errors, ) from apischema.validation.mock import ValidatorMock from apischema.validation.validators import Validator, validate @dataclass class Constraint: error: Union[str, Callable[[Any], str]] def validate(self, data: Any) -> bool: raise NotImplementedError @dataclass class MinimumConstraint(Constraint): minimum: int def validate(self, data: Any) -> bool: return data >= self.minimum @dataclass class MaximumConstraint(Constraint): maximum: int def validate(self, data: Any) -> bool: return data <= self.maximum @dataclass class ExclusiveMinimumConstraint(Constraint): exc_min: int def validate(self, data: Any) -> bool: return data > self.exc_min @dataclass class ExclusiveMaximumConstraint(Constraint): exc_max: int def validate(self, data: Any) -> bool: return data < self.exc_max @dataclass class MultipleOfConstraint(Constraint): mult_of: int def validate(self, data: Any) -> bool: return not (data % self.mult_of) @dataclass class MinLengthConstraint(Constraint): min_len: int def validate(self, data: Any) -> bool: return len(data) >= self.min_len @dataclass class MaxLengthConstraint(Constraint): max_len: int def validate(self, data: Any) -> bool: return len(data) <= self.max_len @dataclass class PatternConstraint(Constraint): pattern: Pattern def validate(self, data: Any) -> bool: return self.pattern.match(data) is not None @dataclass class MinItemsConstraint(Constraint): min_items: int def validate(self, data: Any) -> bool: return len(data) >= self.min_items @dataclass class MaxItemsConstraint(Constraint): max_items: int def validate(self, data: Any) -> bool: return len(data) <= self.max_items def to_hashable(data: Any) -> Any: if isinstance(data, list): return tuple(map(to_hashable, data)) elif isinstance(data, dict): sorted_keys = sorted(data) return tuple(sorted_keys + [to_hashable(data[k]) for k in sorted_keys]) else: return data @dataclass class UniqueItemsConstraint(Constraint): unique: bool def __post_init__(self): assert self.unique def validate(self, data: Any) -> bool: return len(set(map(to_hashable, data))) == len(data) @dataclass class MinPropertiesConstraint(Constraint): min_properties: int def validate(self, data: Any) -> bool: return len(data) >= self.min_properties @dataclass class MaxPropertiesConstraint(Constraint): max_properties: int def validate(self, data: Any) -> bool: return len(data) <= self.max_properties def format_error(err: Union[str, Callable[[Any], str]], data: Any) -> str: return err if isinstance(err, str) else err(data) ErrorDict = Dict[ErrorKey, ValidationError] def validate_constraints( data: Any, constraints: Tuple[Constraint, ...], children_errors: Optional[ErrorDict] ) -> Any: for i in range(len(constraints)): constraint: Constraint = 
constraints[i] if not constraint.validate(data): errors: List[str] = [format_error(constraint.error, data)] for j in range(i + 1, len(constraints)): constraint = constraints[j] if not constraint.validate(data): errors.append(format_error(constraint.error, data)) raise ValidationError(errors, children_errors or {}) if children_errors: raise ValidationError([], children_errors) return data def set_child_error( errors: Optional[ErrorDict], key: ErrorKey, error: ValidationError ) -> ErrorDict: if errors is None: return {key: error} else: errors[key] = error return errors class DeserializationMethod: def deserialize(self, data: Any) -> Any: raise NotImplementedError @dataclass class RecMethod(DeserializationMethod): lazy: Lazy[DeserializationMethod] method: Optional[DeserializationMethod] = field(init=False) def __post_init__(self): self.method = None def deserialize(self, data: Any) -> Any: if self.method is None: self.method = self.lazy() return self.method.deserialize(data) @dataclass class ValidatorMethod(DeserializationMethod): method: DeserializationMethod validators: Sequence[Validator] aliaser: Aliaser def deserialize(self, data: Any) -> Any: return validate( self.method.deserialize(data), self.validators, aliaser=self.aliaser ) @dataclass class CoercerMethod(DeserializationMethod): coercer: Coercer cls: type method: DeserializationMethod def deserialize(self, data: Any) -> Any: return self.method.deserialize(self.coercer(self.cls, data)) @dataclass class TypeCheckMethod(DeserializationMethod): expected: AnyType # `type` would require exact match (i.e. no EnumMeta) fallback: DeserializationMethod def deserialize(self, data: Any) -> Any: if isinstance(data, self.expected): return data return self.fallback.deserialize(data) @dataclass class AnyMethod(DeserializationMethod): constraints: Dict[type, Tuple[Constraint, ...]] def deserialize(self, data: Any) -> Any: if type(data) in self.constraints: validate_constraints(data, self.constraints[type(data)], None) return data @dataclass class ListCheckOnlyMethod(DeserializationMethod): constraints: Tuple[Constraint, ...] value_method: DeserializationMethod def deserialize(self, data: Any) -> Any: if not isinstance(data, list): raise bad_type(data, list) elt_errors: Optional[ErrorDict] = None for i, elt in enumerate(data): try: self.value_method.deserialize(elt) except ValidationError as err: elt_errors = set_child_error(elt_errors, i, err) validate_constraints(data, self.constraints, elt_errors) return data @dataclass class ListMethod(DeserializationMethod): constraints: Tuple[Constraint, ...] value_method: DeserializationMethod def deserialize(self, data: Any) -> Any: if not isinstance(data, list): raise bad_type(data, list) elt_errors: Optional[ErrorDict] = None values: list = [None] * len(data) for i, elt in enumerate(data): try: values[i] = self.value_method.deserialize(elt) except ValidationError as err: elt_errors = set_child_error(elt_errors, i, err) validate_constraints(data, self.constraints, elt_errors) return values @dataclass class SetMethod(DeserializationMethod): constraints: Tuple[Constraint, ...] 
value_method: DeserializationMethod def deserialize(self, data: Any) -> Any: if not isinstance(data, list): raise bad_type(data, list) elt_errors: ErrorDict = {} values: set = set() for i, elt in enumerate(data): try: values.add(self.value_method.deserialize(elt)) except ValidationError as err: elt_errors = set_child_error(elt_errors, i, err) validate_constraints(data, self.constraints, elt_errors) return values @dataclass class FrozenSetMethod(DeserializationMethod): method: DeserializationMethod def deserialize(self, data: Any) -> Any: return frozenset(self.method.deserialize(data)) @dataclass class VariadicTupleMethod(DeserializationMethod): method: DeserializationMethod def deserialize(self, data: Any) -> Any: return tuple(self.method.deserialize(data)) @dataclass class LiteralMethod(DeserializationMethod): value_map: dict error: Union[str, Callable[[Any], str]] coercer: Optional[Coercer] types: Tuple[type, ...] def deserialize(self, data: Any) -> Any: try: return self.value_map[data] except KeyError: if self.coercer is not None: for cls in self.types: try: return self.value_map[self.coercer(cls, data)] except IndexError: pass raise ValidationError(format_error(self.error, data)) except TypeError: raise bad_type(data, *self.types) @dataclass class MappingCheckOnly(DeserializationMethod): constraints: Tuple[Constraint, ...] key_method: DeserializationMethod value_method: DeserializationMethod def deserialize(self, data: Any) -> Any: if not isinstance(data, dict): raise bad_type(data, dict) item_errors: Optional[ErrorDict] = None for key, value in data.items(): try: self.key_method.deserialize(key) self.value_method.deserialize(value) except ValidationError as err: item_errors = set_child_error(item_errors, key, err) validate_constraints(data, self.constraints, item_errors) return data @dataclass class MappingMethod(DeserializationMethod): constraints: Tuple[Constraint, ...] key_method: DeserializationMethod value_method: DeserializationMethod def deserialize(self, data: Any) -> Any: if not isinstance(data, dict): raise bad_type(data, dict) item_errors: Optional[ErrorDict] = None items: dict = {} for key, value in data.items(): try: items[self.key_method.deserialize(key)] = self.value_method.deserialize( value ) except ValidationError as err: item_errors = set_child_error(item_errors, key, err) validate_constraints(data, self.constraints, item_errors) return items @dataclass class Field: name: str alias: str method: DeserializationMethod required: bool required_by: Optional[AbstractSet[str]] fall_back_on_default: bool @dataclass class FlattenedField: name: str aliases: Tuple[str, ...] 
method: DeserializationMethod fall_back_on_default: bool @dataclass class PatternField: name: str pattern: Pattern method: DeserializationMethod fall_back_on_default: bool @dataclass class AdditionalField: name: str method: DeserializationMethod fall_back_on_default: bool @dataclass class Constructor: cls: Any # cython doesn't handle type subclasses properly def construct(self, fields: Dict[str, Any]) -> Any: raise NotImplementedError class NoConstructor(Constructor): def construct(self, fields: Dict[str, Any]) -> Any: return fields def PyObject_Call(obj, args, kwargs): return obj(*args, **kwargs) class RawConstructor(Constructor): def construct(self, fields: Dict[str, Any]) -> Any: return PyObject_Call(self.cls, (), fields) class RawConstructorCopy(Constructor): def construct(self, fields: Dict[str, Any]) -> Any: return self.cls(**fields) @dataclass class DefaultField: name: str default_value: Any # https://github.com/cython/cython/issues/4383 @dataclass class FactoryField: name: str factory: Callable @dataclass class FieldsConstructor(Constructor): nb_fields: int default_fields: Tuple[DefaultField, ...] factory_fields: Tuple[FactoryField, ...] def construct(self, fields: Any) -> Any: # fields can be a dict subclass obj = object.__new__(self.cls) obj_dict: dict = obj.__dict__ obj_dict.update(fields) if len(fields) != self.nb_fields: for default_field in self.default_fields: if default_field.name not in obj_dict: obj_dict[default_field.name] = default_field.default_value for factory_field in self.factory_fields: if factory_field.name not in obj_dict: obj_dict[factory_field.name] = factory_field.factory() return obj @dataclass class SimpleObjectMethod(DeserializationMethod): constructor: Constructor fields: Tuple[Field, ...] all_aliases: AbstractSet[str] typed_dict: bool missing: str unexpected: str def deserialize(self, data: Any) -> Any: discriminator: Optional[str] = None if not isinstance(data, dict): if isinstance(data, Discriminated): discriminator = data.discriminator data = data.data if not isinstance(data, dict): raise bad_type(data, dict) else: raise bad_type(data, dict) fields_count: int = 0 field_errors: Optional[dict] = None for field in self.fields: if field.alias in data: fields_count += 1 try: field.method.deserialize(data[field.alias]) except ValidationError as err: if field.required or not field.fall_back_on_default: field_errors = set_child_error(field_errors, field.alias, err) elif field.required: field_errors = set_child_error( field_errors, field.alias, ValidationError(self.missing) ) has_discriminator = False if len(data) != fields_count and not self.typed_dict: for key in data.keys() - self.all_aliases: if key == discriminator: has_discriminator = True else: field_errors = set_child_error( field_errors, key, ValidationError(self.unexpected) ) if field_errors: raise ValidationError([], field_errors) if has_discriminator: data = data.copy() del data[discriminator] return self.constructor.construct(data) def extend_errors( errors: Optional[List[ErrorMsg]], messages: Sequence[ErrorMsg] ) -> List[ErrorMsg]: if errors is None: return list(messages) else: errors.extend(messages) return errors def update_children_errors( errors: Optional[Dict[ErrorKey, ValidationError]], children: Mapping[ErrorKey, ValidationError], ) -> Dict[ErrorKey, ValidationError]: if errors is None: return dict(children) else: errors.update(children) return errors @dataclass class ObjectMethod(DeserializationMethod): constructor: Constructor constraints: Tuple[Constraint, ...] fields: Tuple[Field, ...] 
flattened_fields: Tuple[FlattenedField, ...] pattern_fields: Tuple[PatternField, ...] additional_field: Optional[AdditionalField] all_aliases: AbstractSet[str] additional_properties: bool typed_dict: bool validators: Tuple[Validator, ...] init_defaults: Tuple[Tuple[str, Optional[Callable[[], Any]]], ...] post_init_modified: AbstractSet[str] aliaser: Aliaser missing: str unexpected: str aggregate_fields: bool = field(init=False) def __post_init__(self): self.aggregate_fields = bool( self.flattened_fields or self.pattern_fields or self.additional_field is not None ) def deserialize(self, data: Any) -> Any: discriminator: Optional[str] = None if not isinstance(data, dict): if isinstance(data, Discriminated): discriminator = data.discriminator data = data.data if not isinstance(data, dict): raise bad_type(data, dict) else: raise bad_type(data, dict) values: dict = {} fields_count: int = 0 errors: Optional[list] = None try: validate_constraints(data, self.constraints, None) except ValidationError as err: errors = list(err.messages) field_errors: Optional[dict] = None for field in self.fields: if field.alias in data: fields_count += 1 try: values[field.name] = field.method.deserialize(data[field.alias]) except ValidationError as err: if field.required or not field.fall_back_on_default: field_errors = set_child_error(field_errors, field.alias, err) elif field.required: field_errors = set_child_error( field_errors, field.alias, ValidationError(self.missing) ) elif field.required_by is not None and not field.required_by.isdisjoint( data ): requiring = sorted(field.required_by & data.keys()) error = ValidationError([self.missing + f" (required by {requiring})"]) field_errors = set_child_error(field_errors, field.alias, error) if self.aggregate_fields: remain = data.keys() - self.all_aliases for flattened_field in self.flattened_fields: flattened: dict = { alias: data[alias] for alias in flattened_field.aliases if alias in data } remain.difference_update(flattened) try: values[flattened_field.name] = flattened_field.method.deserialize( flattened ) except ValidationError as err: if not flattened_field.fall_back_on_default: errors = extend_errors(errors, err.messages) field_errors = update_children_errors( field_errors, err.children ) for pattern_field in self.pattern_fields: matched: dict = { key: data[key] for key in remain if pattern_field.pattern.match(key) } remain.difference_update(matched) try: values[pattern_field.name] = pattern_field.method.deserialize( matched ) except ValidationError as err: if not pattern_field.fall_back_on_default: errors = extend_errors(errors, err.messages) field_errors = update_children_errors( field_errors, err.children ) if self.additional_field is not None: additional: dict = {key: data[key] for key in remain} try: values[ self.additional_field.name ] = self.additional_field.method.deserialize(additional) except ValidationError as err: if not self.additional_field.fall_back_on_default: errors = extend_errors(errors, err.messages) field_errors = update_children_errors( field_errors, err.children ) elif remain: if not self.additional_properties: for key in remain: if key != discriminator: field_errors = set_child_error( field_errors, key, ValidationError(self.unexpected) ) elif self.typed_dict: for key in remain: values[key] = data[key] elif len(data) != fields_count: if not self.additional_properties: for key in data.keys() - self.all_aliases: if key != discriminator: field_errors = set_child_error( field_errors, key, ValidationError(self.unexpected) ) elif 
self.typed_dict: for key in data.keys() - self.all_aliases: values[key] = data[key] if self.validators: init = None if self.init_defaults: init = {} for name, default_factory in self.init_defaults: if name in values: init[name] = values[name] elif not field_errors or name not in field_errors: assert default_factory is not None init[name] = default_factory() aliases = values.keys() # Don't keep validators when all dependencies are default validators = [ v for v in self.validators if not v.dependencies.isdisjoint(aliases) ] if field_errors or errors: error = ValidationError(errors or [], field_errors or {}) invalid_fields = self.post_init_modified if field_errors: invalid_fields = invalid_fields | field_errors.keys() try: validate( ValidatorMock(self.constructor.cls, values), [ v for v in validators if v.dependencies.isdisjoint(invalid_fields) ], init, aliaser=self.aliaser, ) except ValidationError as err: error = merge_errors(error, err) raise error obj = self.constructor.construct(values) return validate(obj, validators, init, aliaser=self.aliaser) elif field_errors or errors: raise ValidationError(errors or [], field_errors or {}) return self.constructor.construct(values) class NoneMethod(DeserializationMethod): def deserialize(self, data: Any) -> Any: if data is not None: raise bad_type(data, NoneType) return data class IntMethod(DeserializationMethod): def deserialize(self, data: Any) -> Any: if not isinstance(data, int) or isinstance(data, bool): raise bad_type(data, int) return data class FloatMethod(DeserializationMethod): def deserialize(self, data: Any) -> Any: if isinstance(data, float): return data elif isinstance(data, int): return float(data) else: raise bad_type(data, float) class StrMethod(DeserializationMethod): def deserialize(self, data: Any) -> Any: if not isinstance(data, str): raise bad_type(data, str) return data class BoolMethod(DeserializationMethod): def deserialize(self, data: Any) -> Any: if not isinstance(data, bool): raise bad_type(data, bool) return data @dataclass class ConstrainedIntMethod(IntMethod): constraints: Tuple[Constraint, ...] def deserialize(self, data: Any) -> Any: return validate_constraints(super().deserialize(data), self.constraints, None) @dataclass class ConstrainedFloatMethod(FloatMethod): constraints: Tuple[Constraint, ...] def deserialize(self, data: Any) -> Any: return validate_constraints(super().deserialize(data), self.constraints, None) @dataclass class ConstrainedStrMethod(StrMethod): constraints: Tuple[Constraint, ...] def deserialize(self, data: Any) -> Any: return validate_constraints(super().deserialize(data), self.constraints, None) @dataclass class SubprimitiveMethod(DeserializationMethod): cls: type method: DeserializationMethod def deserialize(self, data: Any) -> Any: return self.cls(self.method.deserialize(data)) @dataclass class TupleMethod(DeserializationMethod): constraints: Tuple[Constraint, ...] min_len_error: Union[str, Callable[[Any], str]] max_len_error: Union[str, Callable[[Any], str]] elt_methods: Tuple[DeserializationMethod, ...] 
def deserialize(self, data: Any) -> Any: if not isinstance(data, list): raise bad_type(data, list) data_len = len(data) if data_len != len(self.elt_methods): if data_len < len(self.elt_methods): raise ValidationError(format_error(self.min_len_error, data)) elif data_len > len(self.elt_methods): raise ValidationError(format_error(self.max_len_error, data)) else: raise NotImplementedError elt_errors: Optional[ErrorDict] = None elts: list = [None] * len(self.elt_methods) for i, elt_method in enumerate(self.elt_methods): try: elts[i] = elt_method.deserialize(data[i]) except ValidationError as err: set_child_error(elt_errors, i, err) validate_constraints(data, self.constraints, elt_errors) return tuple(elts) @dataclass class OptionalMethod(DeserializationMethod): value_method: DeserializationMethod coercer: Optional[Coercer] def deserialize(self, data: Any) -> Any: if data is None: return None try: return self.value_method.deserialize(data) except ValidationError as err: if self.coercer is not None and self.coercer(NoneType, data) is None: return None else: raise merge_errors(err, bad_type(data, NoneType)) @dataclass class UnionByTypeMethod(DeserializationMethod): method_by_cls: Dict[type, DeserializationMethod] def deserialize(self, data: Any) -> Any: try: method: DeserializationMethod = self.method_by_cls[type(data)] return method.deserialize(data) except KeyError: raise bad_type(data, *self.method_by_cls) from None except ValidationError as err: other_classes = (cls for cls in self.method_by_cls if cls is not type(data)) raise merge_errors(err, bad_type(data, *other_classes)) @dataclass class UnionMethod(DeserializationMethod): alt_methods: Tuple[DeserializationMethod, ...] def deserialize(self, data: Any) -> Any: error = None for i, alt_method in enumerate(self.alt_methods): try: return alt_method.deserialize(data) except ValidationError as err: error = merge_errors(error, err) assert error is not None raise error @dataclass class ConversionMethod(DeserializationMethod): converter: Converter method: DeserializationMethod def deserialize(self, data: Any) -> Any: return self.converter(self.method.deserialize(data)) @dataclass class ConversionWithValueErrorMethod(ConversionMethod): def deserialize(self, data: Any) -> Any: value = self.method.deserialize(data) try: return self.converter(value) except ValueError as err: raise ValidationError(str(err)) @dataclass class ConversionAlternative: converter: Converter method: DeserializationMethod value_error: bool @dataclass class ConversionUnionMethod(DeserializationMethod): alternatives: Tuple[ConversionAlternative, ...] 
def deserialize(self, data: Any) -> Any: error = None for alternative in self.alternatives: try: value = alternative.method.deserialize(data) except ValidationError as err: error = merge_errors(error, err) continue try: return alternative.converter(value) except ValidationError as err: error = merge_errors(error, err) except ValueError as err: if not alternative.value_error: raise error = merge_errors(error, ValidationError(str(err))) assert error is not None raise error @dataclass class Discriminated: discriminator: str data: Any @dataclass class DiscriminatorMethod(DeserializationMethod): alias: str mapping: Dict[str, DeserializationMethod] missing: str error: Union[str, Callable[[Any], str]] def deserialize(self, data: Any): if not isinstance(data, dict): raise bad_type(data, dict) if self.alias not in data: raise ValidationError([], {self.alias: ValidationError(self.missing)}) try: method: DeserializationMethod = self.mapping[data[self.alias]] except (TypeError, KeyError): raise ValidationError( [], { self.alias: ValidationError( format_error(self.error, data[self.alias]) ) }, ) else: return method.deserialize(Discriminated(self.alias, data)) apischema-0.18.3/apischema/discriminators.py000066400000000000000000000103131467672046000211050ustar00rootroot00000000000000import operator from dataclasses import dataclass from functools import reduce from typing import ( Callable, Iterable, Mapping, MutableMapping, Optional, Sequence, TypeVar, Union, ) from apischema.cache import CacheAwareDict from apischema.conversions import Conversion, deserializer, serializer from apischema.metadata.keys import DISCRIMINATOR_METADATA from apischema.objects import object_fields from apischema.type_names import get_type_name from apischema.types import AnyType, MetadataMixin from apischema.typing import get_args, is_literal, is_typed_dict from apischema.utils import get_origin_or_type2, identity, no_annotated Cls = TypeVar("Cls", bound=type) def get_discriminated(alias: str, tp: AnyType) -> Sequence[str]: cls = get_origin_or_type2(tp) try: has_field = False for field in object_fields(cls).values(): if field.alias == alias: has_field = True field_type = no_annotated(field.type) if is_literal(field_type): return [v for v in get_args(field_type) if isinstance(v, str)] if ( is_typed_dict(cls) and not has_field ): # TypedDict must have a discriminator field return () return [name for name in [get_type_name(tp).json_schema] if name is not None] except TypeError: return () def default_discriminator_mapping( alias: str, types: Sequence[AnyType] ) -> Mapping[str, AnyType]: mapping = {} for tp in types: discriminated = get_discriminated(alias, tp) if not discriminated: raise TypeError(f"{tp} can't be discriminated") for key in discriminated: mapping[key] = tp return mapping def rec_subclasses(cls: type) -> Iterable[type]: for sub_cls in cls.__subclasses__(): yield sub_cls yield from rec_subclasses(sub_cls) @dataclass(frozen=True, unsafe_hash=False) class Discriminator(MetadataMixin): key = DISCRIMINATOR_METADATA alias: str mapping: Union[ Mapping[str, AnyType], Callable[[str, Sequence[AnyType]], Mapping[str, AnyType]] ] = default_discriminator_mapping override_implicit: bool = True def get_mapping(self, types: Sequence[AnyType]) -> Mapping[str, AnyType]: default_mapping = default_discriminator_mapping(self.alias, types) if self.mapping is default_discriminator_mapping: return default_mapping mapping = ( self.mapping(self.alias, types) if callable(self.mapping) else self.mapping ) if self.override_implicit: 
mapping_types = set(mapping.values()) mapping = dict(mapping) for key, tp in default_mapping.items(): if tp not in mapping_types: mapping[key] = tp return mapping else: return {**default_mapping, **mapping} # Make it hashable to be used in Annotated def __hash__(self): return hash(id(self)) def __call__(self, cls: Cls) -> Cls: _discriminators[cls] = self deserializer( lazy=lambda: Conversion( identity, source=Union[tuple(rec_subclasses(cls))], target=cls ), target=cls, ) serializer( lazy=lambda: Conversion( identity, source=cls, target=Union[tuple(rec_subclasses(cls))], inherited=False, ), source=cls, ) return cls _discriminators: MutableMapping[type, Discriminator] = CacheAwareDict({}) get_discriminator = _discriminators.get discriminator = Discriminator def get_discriminated_parent(cls: type) -> Optional[type]: for base in cls.__mro__: if base in _discriminators: return base return None def get_inherited_discriminator(types: Iterable[AnyType]) -> Optional[Discriminator]: discriminators = [ { base for base in getattr(get_origin_or_type2(tp), "__mro__", ()) if base in _discriminators } for tp in types ] for cls in reduce(operator.and_, discriminators): return _discriminators[cls] return None apischema-0.18.3/apischema/fields.py000066400000000000000000000105221467672046000173230ustar00rootroot00000000000000__all__ = ["fields_set", "is_set", "set_fields", "unset_fields", "with_fields_set"] from dataclasses import ( # type: ignore _FIELD, _FIELD_INITVAR, _FIELDS, Field, is_dataclass, ) from functools import wraps from inspect import signature from typing import AbstractSet, Any, Collection, Set, Type, TypeVar, cast from apischema.objects.fields import get_field_name from apischema.utils import PREFIX FIELDS_SET_ATTR = f"{PREFIX}fields_set" _ALREADY_SET = f"{PREFIX}already_set" Cls = TypeVar("Cls", bound=Type) _fields_set_classes: Set[type] = set() def support_fields_set(cls: type) -> bool: return any(base in _fields_set_classes for base in cls.__mro__) def with_fields_set(cls: Cls) -> Cls: from apischema.metadata.keys import DEFAULT_AS_SET_METADATA init_fields = set() post_init_fields = set() if is_dataclass(cls): for field in getattr(cls, _FIELDS).values(): assert isinstance(field, Field) if field._field_type == _FIELD_INITVAR: # type: ignore init_fields.add(field.name) if field._field_type == _FIELD and not field.init: # type: ignore post_init_fields.add(field.name) if field.metadata.get(DEFAULT_AS_SET_METADATA): post_init_fields.add(field.name) params = list(signature(cls.__init__).parameters)[1:] old_new = cls.__new__ old_init = cls.__init__ old_setattr = cls.__setattr__ def new_new(*args, **kwargs): if old_new is object.__new__: obj = object.__new__(args[0]) else: obj = old_new(*args, **kwargs) # Initialize FIELD_SET_ATTR in order to prevent inherited class which override # __init__ to raise in __setattr__ obj.__dict__[FIELDS_SET_ATTR] = set() return obj def new_init(self, *args, **kwargs): prev_fields_set = self.__dict__.get(FIELDS_SET_ATTR, set()).copy() self.__dict__[FIELDS_SET_ATTR] = set() try: old_init(self, *args, **kwargs) except TypeError as err: if str(err) == no_dataclass_init_error: raise RuntimeError(dataclass_before_error) from None else: raise arg_fields = {*params[: len(args)], *kwargs} - init_fields self.__dict__[FIELDS_SET_ATTR] = prev_fields_set | arg_fields | post_init_fields def new_setattr(self, attr, value): try: self.__dict__[FIELDS_SET_ATTR].add(attr) except KeyError: raise RuntimeError(dataclass_before_error) from None old_setattr(self, attr, value) # type: 
ignore for attr, old, new in [ ("__new__", old_new, new_new), ("__init__", old_init, new_init), ("__setattr__", old_setattr, new_setattr), ]: if hasattr(old, _ALREADY_SET): continue setattr(new, _ALREADY_SET, True) setattr(cls, attr, wraps(old)(new)) _fields_set_classes.add(cls) return cls no_dataclass_init_error = ( "object.__init__() takes exactly one argument (the instance to initialize)" ) dataclass_before_error = ( f"{with_fields_set.__name__} must be put before dataclass decorator" ) T = TypeVar("T") def _field_names(fields: Collection) -> AbstractSet[str]: result: Set[str] = set() for field in fields: result.add(get_field_name(field)) return result def _fields_set(obj: Any) -> Set[str]: try: return getattr(obj, FIELDS_SET_ATTR) except AttributeError: raise TypeError( f"Type {obj.__class__} is not decorated" f" with {with_fields_set.__name__}" ) def set_fields(obj: T, *fields: Any, overwrite=False) -> T: if overwrite: _fields_set(obj).clear() _fields_set(obj).update(map(get_field_name, fields)) return obj def unset_fields(obj: T, *fields: Any) -> T: _fields_set(obj).difference_update(map(get_field_name, fields)) return obj # This could just be an alias with a specified type, but it's better handled by IDE # like this def fields_set(obj: Any) -> AbstractSet[str]: return _fields_set(obj) class FieldIsSet: def __init__(self, obj: Any): self.fields_set = fields_set(obj) def __getattribute__(self, name: str) -> bool: return name in object.__getattribute__(self, "fields_set") def is_set(obj: T) -> T: return cast(T, FieldIsSet(obj)) apischema-0.18.3/apischema/graphql/000077500000000000000000000000001467672046000171415ustar00rootroot00000000000000apischema-0.18.3/apischema/graphql/__init__.py000066400000000000000000000005361467672046000212560ustar00rootroot00000000000000__all__ = [ "ID", "Mutation", "Query", "Subscription", "graphql_schema", "interface", "relay", "resolver", ] try: from . 
import relay from .interfaces import interface from .resolvers import resolver from .schema import ID, Mutation, Query, Subscription, graphql_schema except ImportError: raise apischema-0.18.3/apischema/graphql/interfaces.py000066400000000000000000000005571467672046000216450ustar00rootroot00000000000000from typing import Collection, Set, Type, TypeVar _interfaces: Set[Type] = set() Cls = TypeVar("Cls", bound=Type) def interface(cls: Cls) -> Cls: _interfaces.add(cls) return cls def is_interface(cls: Type) -> bool: return cls in _interfaces def get_interfaces(cls: Type) -> Collection[Type]: return list(filter(is_interface, cls.__mro__[1:])) apischema-0.18.3/apischema/graphql/relay/000077500000000000000000000000001467672046000202555ustar00rootroot00000000000000apischema-0.18.3/apischema/graphql/relay/__init__.py000066400000000000000000000006241467672046000223700ustar00rootroot00000000000000__all__ = [ "ClientMutationId", "Connection", "Edge", "GlobalId", "Mutation", "Node", "PageInfo", "base64_encoding", "mutations", "node", "nodes", ] from .connections import Connection, Edge, PageInfo from .global_identification import GlobalId, Node, node, nodes from .mutations import ClientMutationId, Mutation, mutations from .utils import base64_encoding apischema-0.18.3/apischema/graphql/relay/connections.py000066400000000000000000000050431467672046000231530ustar00rootroot00000000000000from dataclasses import dataclass from typing import Generic, Optional, Sequence, Type, TypeVar from apischema.type_names import get_type_name, type_name from apischema.types import NoneType from apischema.typing import generic_mro, get_args, get_origin from apischema.utils import get_args2, is_union_of Cursor_ = TypeVar("Cursor_") Node_ = TypeVar("Node_") def get_node_name(tp): if is_union_of(tp, NoneType) and len(get_args2(tp)): tp = next(arg for arg in get_args2(tp) if arg is not NoneType) ref = get_type_name(tp).graphql if ref is None: raise TypeError( f"Node {tp} must have a ref registered to be used with connection" ) return ref def edge_name(tp: Type["Edge"], *args) -> str: for base in generic_mro(tp[tuple(args)] if args else tp): # type: ignore if get_origin(base) == Edge: return f"{get_node_name(get_args(base)[0])}Edge" raise NotImplementedError @type_name(graphql=edge_name) @dataclass class Edge(Generic[Node_, Cursor_]): node: Node_ cursor: Cursor_ def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) type_name(graphql=edge_name)(cls) @type_name(graphql=lambda *_: "PageInfo") @dataclass class PageInfo(Generic[Cursor_]): has_previous_page: bool = False has_next_page: bool = False start_cursor: Optional[Cursor_] = None end_cursor: Optional[Cursor_] = None @staticmethod def from_edges( edges: Sequence[Optional[Edge[Node_, Cursor_]]], has_previous_page: bool = False, has_next_page: bool = False, ) -> "PageInfo": start_cursor, end_cursor = None, None if edges is not None: if edges[0] is not None: start_cursor = edges[0].cursor if edges[-1] is not None: end_cursor = edges[-1].cursor return PageInfo(has_previous_page, has_next_page, start_cursor, end_cursor) def connection_name(tp: Type["Connection"], *args) -> str: for base in generic_mro(tp[tuple(args)] if args else tp): # type: ignore if get_origin(base) == Connection: return f"{get_node_name(get_args(base)[0])}Connection" raise NotImplementedError Edge_ = TypeVar("Edge_", bound=Edge) @type_name(graphql=connection_name) @dataclass class Connection(Generic[Node_, Cursor_, Edge_]): edges: Optional[Sequence[Optional[Edge_]]] page_info: 
PageInfo[Cursor_] def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) type_name(graphql=connection_name)(cls) apischema-0.18.3/apischema/graphql/relay/global_identification.py000066400000000000000000000105721467672046000251450ustar00rootroot00000000000000from abc import ABC, abstractmethod from dataclasses import dataclass, field from typing import ( Awaitable, Collection, Dict, Generic, List, Optional, Type, TypeVar, Union, cast, ) import graphql from apischema import deserialize, deserializer, serialize, serializer, type_name from apischema.graphql.interfaces import interface from apischema.graphql.resolvers import resolver from apischema.graphql.schema import ID from apischema.metadata import skip from apischema.ordering import order from apischema.type_names import get_type_name from apischema.typing import generic_mro, get_args, get_origin from apischema.utils import PREFIX, has_type_vars ID_TYPE_ATTR = f"{PREFIX}id_type" class InvalidGlobalId(Exception): def __init__(self, value: str): self.value = value def __str__(self): return f"{self.value} is not a valid global id" class NotANode(Exception): def __init__(self, node_type: str): self.node_type = node_type def __str__(self): return f"{self.node_type} is not a Node" Node_ = TypeVar("Node_", bound="Node") @dataclass class GlobalId(Generic[Node_]): id: str node_type: Type[Node_] @deserializer def deserialize_global_id(global_id: ID) -> GlobalId: try: node_key, id = global_id.split(":") except ValueError: raise InvalidGlobalId(global_id) from None try: return GlobalId(id, _nodes[node_key]) except KeyError: raise NotANode(node_key) from None @serializer def serialize_global_id(global_id: GlobalId) -> ID: return ID(f"{global_id.node_type._node_key()}:{global_id.id}") Id = TypeVar("Id") @type_name(graphql=lambda *_: "Node") @interface @dataclass class Node(Generic[Id], ABC): id: Id = field(metadata=skip) @resolver("id", order=order(-1)) # type: ignore @property def global_id(self: Node_) -> GlobalId[Node_]: return self.id_to_global(self.id) @classmethod def id_from_global(cls: Type[Node_], global_id: GlobalId[Node_]) -> Id: if global_id.node_type != cls: raise ValueError( f"Expected {cls.__name__} global id," f" found {global_id.node_type.__name__} global id" ) id_type = getattr(cls, ID_TYPE_ATTR) # Use coercion to handle integer id return cast(Id, deserialize(id_type, global_id.id, coerce=True)) @classmethod def id_to_global(cls: Type[Node_], id: Id) -> GlobalId[Node_]: return GlobalId(str(serialize(getattr(cls, ID_TYPE_ATTR), id)), cls) @classmethod @abstractmethod def get_by_id( cls: Type[Node_], id: Id, info: Optional[graphql.GraphQLResolveInfo] = None ) -> Union[Node_, Awaitable[Node_]]: raise NotImplementedError @classmethod def _node_key(cls) -> str: node_name = get_type_name(cls).graphql if node_name is None: raise TypeError(f"Node {cls} has no type_name registered") return node_name def __init_subclass__(cls, not_a_node: bool = False, **kwargs): super().__init_subclass__(**kwargs) if not not_a_node: _tmp_nodes.append(cls) _tmp_nodes: List[Type[Node]] = [] _nodes: Dict[str, Type[Node]] = {} def process_node(node_cls: Type[Node]): if has_type_vars(node_cls) or node_cls.get_by_id is Node.get_by_id: return for base in node_cls.__mro__: if base != Node and Node.get_by_id.__name__ in base.__dict__: if not isinstance( base.__dict__[Node.get_by_id.__name__], (classmethod, staticmethod) ): raise TypeError( f"{node_cls.__name__}.get_by_id must be a" f" classmethod/staticmethod" ) break for base in 
generic_mro(node_cls): if get_origin(base) == Node: setattr(node_cls, ID_TYPE_ATTR, get_args(base)[0]) _nodes[node_cls._node_key()] = node_cls break else: raise TypeError("Node type parameter Id must be specialized") def nodes() -> Collection[Type[Node]]: for node_cls in _tmp_nodes: process_node(node_cls) return list(_nodes.values()) def node(id: ID, info: Optional[graphql.GraphQLResolveInfo] = None) -> Node: global_id = deserialize_global_id(id) node_type = global_id.node_type return node_type.get_by_id(node_type.id_from_global(global_id), info) apischema-0.18.3/apischema/graphql/relay/mutations.py000066400000000000000000000124601467672046000226550ustar00rootroot00000000000000from dataclasses import MISSING, Field, field, make_dataclass from functools import wraps from inspect import Parameter, signature from typing import ( Awaitable, Callable, ClassVar, Collection, Iterator, List, NewType, Optional, Tuple, Type, TypeVar, ) from graphql.pyutils import camel_to_snake from apischema.aliases import alias from apischema.graphql.schema import Mutation as Mutation_ from apischema.schemas import Schema from apischema.serialization.serialized_methods import ErrorHandler from apischema.type_names import type_name from apischema.types import AnyType, Undefined from apischema.typing import get_type_hints from apischema.utils import is_async, is_union_of ClientMutationId = NewType("ClientMutationId", str) type_name(None)(ClientMutationId) CLIENT_MUTATION_ID = "client_mutation_id" M = TypeVar("M", bound="Mutation") class Mutation: _error_handler: ClassVar[ErrorHandler] = Undefined _schema: ClassVar[Optional[Schema]] = None _client_mutation_id: ClassVar[Optional[bool]] = None _mutation: ClassVar[Mutation_] # set in __init_subclass__ # Mutate is not defined to prevent Mypy warning about signature of superclass mutate: ClassVar[Callable] def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) if not hasattr(cls, "mutate"): return if not isinstance(cls.__dict__["mutate"], (classmethod, staticmethod)): raise TypeError(f"{cls.__name__}.mutate must be a classmethod/staticmethod") mutate = getattr(cls, "mutate") type_name(f"{cls.__name__}Payload")(cls) types = get_type_hints(mutate, localns={cls.__name__: cls}, include_extras=True) async_mutate = is_async(mutate, types) fields: List[Tuple[str, AnyType, Field]] = [] cmi_param = None for param_name, param in signature(mutate).parameters.items(): if param.kind is Parameter.POSITIONAL_ONLY: raise TypeError("Positional only parameters are not supported") if param.kind in {Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY}: if param_name not in types: raise TypeError("Mutation parameters must be typed") field_type = types[param_name] field_ = MISSING if param.default is Parameter.empty else param.default if is_union_of(field_type, ClientMutationId): cmi_param = param_name if cls._client_mutation_id is False: if field_ is MISSING: raise TypeError( "Cannot have a ClientMutationId parameter" " when _client_mutation_id = False" ) continue elif cls._client_mutation_id is True: field_ = MISSING field_ = field(default=field_, metadata=alias(CLIENT_MUTATION_ID)) fields.append((param_name, field_type, field_)) field_names = [name for (name, _, _) in fields] if cmi_param is None and cls._client_mutation_id is not False: fields.append( ( CLIENT_MUTATION_ID, ClientMutationId if cls._client_mutation_id else Optional[ClientMutationId], # TODO why missing here? 
MISSING if cls._client_mutation_id else None, # type: ignore ) ) cmi_param = CLIENT_MUTATION_ID input_cls = make_dataclass(f"{cls.__name__}Input", fields) def wrapper(input): return mutate(**{name: getattr(input, name) for name in field_names}) wrapper.__annotations__["input"] = input_cls wrapper.__annotations__["return"] = Awaitable[cls] if async_mutate else cls # type: ignore if cls._client_mutation_id is not False: assert cmi_param is not None cls.__annotations__[CLIENT_MUTATION_ID] = input_cls.__annotations__[ cmi_param ] setattr(cls, CLIENT_MUTATION_ID, field(init=False)) wrapped = wrapper if async_mutate: async def wrapper(input): assert cmi_param is not None result = await wrapped(input) setattr(result, CLIENT_MUTATION_ID, getattr(input, cmi_param)) return result else: def wrapper(input): assert cmi_param is not None result = wrapped(input) setattr(result, CLIENT_MUTATION_ID, getattr(input, cmi_param)) return result wrapper = wraps(wrapped)(wrapper) cls._mutation = Mutation_( function=wrapper, alias=camel_to_snake(cls.__name__), schema=cls._schema, error_handler=cls._error_handler, ) def _mutations(cls: Type[Mutation] = Mutation) -> Iterator[Type[Mutation]]: for base in cls.__subclasses__(): if hasattr(base, "_mutation"): yield base yield from _mutations(base) def mutations() -> Collection[Mutation_]: return [mut._mutation for mut in _mutations()] apischema-0.18.3/apischema/graphql/relay/utils.py000066400000000000000000000003601467672046000217660ustar00rootroot00000000000000from base64 import b64decode, b64encode def decode_base_64(s: str) -> str: return b64decode(s).decode() def encode_base64(s: str) -> str: return b64encode(s.encode()).decode() base64_encoding = (decode_base_64, encode_base64) apischema-0.18.3/apischema/graphql/resolvers.py000066400000000000000000000251461467672046000215470ustar00rootroot00000000000000from collections import defaultdict from dataclasses import dataclass from enum import Enum from functools import lru_cache from inspect import Parameter, signature from typing import ( Any, Awaitable, Callable, Collection, Dict, Iterator, Mapping, MutableMapping, Optional, Sequence, Tuple, Type, TypeVar, overload, ) import graphql from apischema import UndefinedType from apischema.aliases import Aliaser from apischema.cache import CacheAwareDict, cache from apischema.conversions import Conversion from apischema.conversions.conversions import AnyConversion, DefaultConversion from apischema.deserialization import deserialization_method from apischema.methods import method_registerer from apischema.objects import ObjectField from apischema.ordering import Ordering from apischema.schemas import Schema from apischema.serialization import ( IDENTITY_METHOD, METHODS, PassThroughOptions, SerializationMethod, SerializationMethodVisitor, ) from apischema.serialization.serialized_methods import ( ErrorHandler, SerializedMethod, _get_methods, ) from apischema.serialization.serialized_methods import serialized as register_serialized from apischema.types import AnyType, NoneType, Undefined from apischema.typing import is_type from apischema.utils import ( awaitable_origin, empty_dict, get_args2, get_origin_or_type2, identity, is_async, is_union_of, keep_annotations, ) from apischema.validation.errors import ValidationError class PartialSerializationMethodVisitor(SerializationMethodVisitor): use_cache = False def __init__( self, aliaser: Aliaser, default_conversion: DefaultConversion, pass_through_options: PassThroughOptions, ): super().__init__( False, aliaser, False, 
default_conversion, False, False, False, False, True, pass_through_options, ) def enum(self, cls: Type[Enum]) -> SerializationMethod: return IDENTITY_METHOD def object(self, tp: AnyType, fields: Sequence[ObjectField]) -> SerializationMethod: return IDENTITY_METHOD def visit(self, tp: AnyType) -> SerializationMethod: return METHODS[NoneType] if tp is UndefinedType else super().visit(tp) @cache def partial_serialization_method_factory( aliaser: Aliaser, conversion: Optional[AnyConversion], default_conversion: DefaultConversion, ) -> Callable[[AnyType], SerializationMethod]: @lru_cache() def factory(tp: AnyType) -> SerializationMethod: return PartialSerializationMethodVisitor( aliaser, default_conversion, PassThroughOptions() ).visit_with_conv(tp, conversion) return factory def unwrap_awaitable(tp: AnyType) -> AnyType: if get_origin_or_type2(tp) == awaitable_origin: return keep_annotations(get_args2(tp)[0] if get_args2(tp) else Any, tp) else: return tp @dataclass(frozen=True) class Resolver(SerializedMethod): parameters: Sequence[Parameter] parameters_metadata: Mapping[str, Mapping] def error_type(self) -> AnyType: return unwrap_awaitable(super().error_type()) def return_type(self, return_type: AnyType) -> AnyType: return super().return_type(unwrap_awaitable(return_type)) _resolvers: MutableMapping[Type, Dict[str, Resolver]] = CacheAwareDict( defaultdict(dict) ) def get_resolvers(tp: AnyType) -> Collection[Tuple[Resolver, Mapping[str, AnyType]]]: return _get_methods(tp, _resolvers) def none_error_handler( __error: Exception, __obj: Any, __info: graphql.GraphQLResolveInfo, **kwargs ) -> None: return None def resolver_parameters( resolver: Callable, *, check_first: bool ) -> Iterator[Parameter]: first = True for param in signature(resolver).parameters.values(): if param.kind is Parameter.POSITIONAL_ONLY: raise TypeError("Resolver can not have positional only parameters") if param.kind in {Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY}: if param.annotation is Parameter.empty and (check_first or not first): raise TypeError("Resolver parameters must be typed") yield param first = False MethodOrProp = TypeVar("MethodOrProp", Callable, property) @overload def resolver(__method_or_property: MethodOrProp) -> MethodOrProp: ... @overload def resolver( alias: Optional[str] = None, *, conversion: Optional[AnyConversion] = None, error_handler: ErrorHandler = Undefined, order: Optional[Ordering] = None, schema: Optional[Schema] = None, parameters_metadata: Optional[Mapping[str, Mapping]] = None, serialized: bool = False, owner: Optional[Type] = None, ) -> Callable[[MethodOrProp], MethodOrProp]: ... 
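# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): how the two `resolver`
# overloads above are typically used, either bare on a method or with
# configuration arguments. Assumes the public exports
# `apischema.graphql.graphql_schema` and `apischema.graphql.resolver`; the
# Post/latest_post names are made up for the example.
if __name__ == "__main__":  # documentation sketch only, never executed on import
    from dataclasses import dataclass
    from typing import Optional

    from graphql import print_schema

    from apischema.graphql import graphql_schema, resolver

    @dataclass
    class Post:
        title: str
        body: str

        @resolver  # bare form: exposes a "summary" field on the Post type
        def summary(self, length: int = 20) -> str:
            # typed parameters become GraphQL arguments (here with a default)
            return self.body[:length]

    def latest_post() -> Optional[Post]:
        return Post("Hello", "First post body")

    # query operations are plain typed functions passed to graphql_schema
    sketch_schema = graphql_schema(query=[latest_post])
    print(print_schema(sketch_schema))
# ---------------------------------------------------------------------------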
def resolver( __arg=None, *, alias: Optional[str] = None, conversion: Optional[AnyConversion] = None, error_handler: ErrorHandler = Undefined, order: Optional[Ordering] = None, schema: Optional[Schema] = None, parameters_metadata: Optional[Mapping[str, Mapping]] = None, serialized: bool = False, owner: Optional[Type] = None, ): def register(func: Callable, owner: Type, alias2: str): alias2 = alias or alias2 _, *parameters = resolver_parameters(func, check_first=owner is None) error_handler2 = error_handler if error_handler2 is None: error_handler2 = none_error_handler elif error_handler2 is Undefined: error_handler2 = None resolver = Resolver( func, alias2, conversion, error_handler2, order, schema, parameters, parameters_metadata or {}, ) _resolvers[owner][alias2] = resolver if serialized: if is_async(func): raise TypeError("Async resolver cannot be used as a serialized method") try: register_serialized( alias=alias2, conversion=conversion, schema=schema, error_handler=error_handler, owner=owner, )(func) except Exception: raise TypeError("Resolver cannot be used as a serialized method") if isinstance(__arg, str): alias = __arg __arg = None return method_registerer(__arg, owner, register) T = TypeVar("T") U = TypeVar("U") def as_async(func: Callable[[T], U]) -> Callable[[Awaitable[T]], Awaitable[U]]: async def wrapper(arg: Awaitable[T]) -> U: return func(await arg) return wrapper def resolver_resolve( resolver: Resolver, types: Mapping[str, AnyType], aliaser: Aliaser, default_deserialization: DefaultConversion, default_serialization: DefaultConversion, serialized: bool = True, ) -> Callable: # graphql deserialization will give Enum objects instead of strings def handle_enum(tp: AnyType) -> Optional[AnyConversion]: if is_type(tp) and issubclass(tp, Enum): return Conversion(identity, source=Any, target=tp) return default_deserialization(tp) parameters, info_parameter = [], None for param in resolver.parameters: param_type = types[param.name] if is_union_of(param_type, graphql.GraphQLResolveInfo): info_parameter = param.name else: param_field = ObjectField( param.name, param_type, param.default is Parameter.empty, resolver.parameters_metadata.get(param.name, empty_dict), param.default, ) deserializer = deserialization_method( param_type, additional_properties=False, aliaser=aliaser, coerce=False, conversion=param_field.deserialization, default_conversion=handle_enum, fall_back_on_default=False, schema=param_field.schema, ) opt_param = is_union_of(param_type, NoneType) or param.default is None parameters.append( ( aliaser(param_field.alias), param.name, deserializer, opt_param, param_field.required, ) ) func, error_handler = resolver.func, resolver.error_handler method_factory = partial_serialization_method_factory( aliaser, resolver.conversion, default_serialization ) serialize_result: Callable[[Any], Any] if not serialized: serialize_result = identity elif is_async(resolver.func): serialize_result = as_async(method_factory(types["return"]).serialize) else: serialize_result = method_factory(types["return"]).serialize serialize_error: Optional[Callable[[Any], Any]] if error_handler is None: serialize_error = None elif is_async(error_handler): serialize_error = as_async(method_factory(resolver.error_type()).serialize) else: serialize_error = method_factory(resolver.error_type()).serialize def resolve(__self, __info, **kwargs): values = {} errors: Dict[str, ValidationError] = {} for alias, param_name, deserializer, opt_param, required in parameters: if alias in kwargs: # It is possible for the 
parameter to be non-optional in Python # type hints but optional in the generated schema. In this case # we should ignore it. # See: https://github.com/wyfo/apischema/pull/130#issuecomment-845497392 if not opt_param and kwargs[alias] is None: assert not required continue try: values[param_name] = deserializer(kwargs[alias]) except ValidationError as err: errors[aliaser(param_name)] = err elif opt_param and required: values[param_name] = None if errors: # TODO raise a mypy issue raise ValueError(ValidationError(children=errors).errors) # type: ignore if info_parameter: values[info_parameter] = __info try: return serialize_result(func(__self, **values)) except Exception as error: if error_handler is None: raise assert serialize_error is not None return serialize_error(error_handler(error, __self, __info, **kwargs)) return resolve apischema-0.18.3/apischema/graphql/schema.py000066400000000000000000001071041467672046000207560ustar00rootroot00000000000000from dataclasses import dataclass from dataclasses import field as field_ from dataclasses import replace from enum import Enum from functools import wraps from inspect import Parameter, iscoroutinefunction from typing import ( Any, AsyncIterable, AsyncIterator, Callable, Collection, Dict, Generic, Iterable, List, Literal, Mapping, NewType, Optional, Sequence, Tuple, Type, TypeVar, Union, cast, ) import graphql from apischema import settings from apischema.aliases import Aliaser from apischema.conversions.conversions import AnyConversion, DefaultConversion from apischema.conversions.visitor import ( Conv, Deserialization, DeserializationVisitor, Serialization, SerializationVisitor, ) from apischema.graphql.interfaces import get_interfaces, is_interface from apischema.graphql.resolvers import ( Resolver, get_resolvers, none_error_handler, partial_serialization_method_factory, resolver_parameters, resolver_resolve, ) from apischema.json_schema.schema import get_field_schema, get_method_schema, get_schema from apischema.metadata.keys import SCHEMA_METADATA from apischema.objects import ObjectField from apischema.objects.visitor import ( DeserializationObjectVisitor, ObjectVisitor, SerializationObjectVisitor, ) from apischema.ordering import Ordering, sort_by_order from apischema.recursion import RecursiveConversionsVisitor from apischema.schemas import Schema, merge_schema from apischema.serialization import SerializationMethod, serialize from apischema.serialization.serialized_methods import ErrorHandler from apischema.type_names import TypeName, TypeNameFactory, get_type_name from apischema.types import AnyType, NoneType, Undefined, UndefinedType from apischema.typing import get_args, get_origin, is_annotated from apischema.utils import ( Lazy, as_predicate, context_setter, empty_dict, get_args2, get_origin2, get_origin_or_type, identity, is_union_of, to_camel_case, ) JSON_SCALAR = graphql.GraphQLScalarType("JSON") GRAPHQL_PRIMITIVE_TYPES = { int: graphql.GraphQLInt, float: graphql.GraphQLFloat, str: graphql.GraphQLString, bool: graphql.GraphQLBoolean, } ID = NewType("ID", str) class MissingName(Exception): pass class Nullable(Exception): pass T = TypeVar("T") Thunk = Union[Callable[[], T], T] TypeThunk = Thunk[graphql.GraphQLType] def exec_thunk(thunk: TypeThunk, *, non_null=None) -> Any: result = thunk if isinstance(thunk, graphql.GraphQLType) else thunk() if non_null is True and not isinstance(result, graphql.GraphQLNonNull): return graphql.GraphQLNonNull(result) if non_null is False and isinstance(result, graphql.GraphQLNonNull): return 
result.of_type return result def get_parameter_schema( func: Callable, parameter: Parameter, field: ObjectField ) -> Optional[Schema]: from apischema import settings return merge_schema( settings.base_schema.parameter(func, parameter, field.alias), field.schema ) def merged_schema( schema: Optional[Schema], tp: Optional[AnyType] ) -> Tuple[Optional[Schema], Mapping[str, Any]]: if is_annotated(tp): for annotation in reversed(get_args(tp)[1:]): if isinstance(annotation, TypeNameFactory): break elif isinstance(annotation, Mapping) and SCHEMA_METADATA in annotation: schema = merge_schema(annotation[SCHEMA_METADATA], schema) schema_dict: Dict[str, Any] = {} if schema is not None: schema.merge_into(schema_dict) return schema, schema_dict def get_description( schema: Optional[Schema], tp: Optional[AnyType] = None ) -> Optional[str]: _, schema_dict = merged_schema(schema, tp) return schema_dict.get("description") def get_deprecated( schema: Optional[Schema], tp: Optional[AnyType] = None ) -> Optional[str]: schema, schema_dict = merged_schema(schema, tp) if not schema_dict.get("deprecated", False): return None while schema is not None: if isinstance(schema.deprecated, str): return schema.deprecated elif schema.deprecated: return graphql.DEFAULT_DEPRECATION_REASON schema = schema.child return graphql.DEFAULT_DEPRECATION_REASON @dataclass(frozen=True) class ResolverField: resolver: Resolver types: Mapping[str, AnyType] parameters: Sequence[Parameter] metadata: Mapping[str, Mapping] subscribe: Optional[Callable] = None IdPredicate = Callable[[AnyType], bool] UnionNameFactory = Callable[[Sequence[str]], str] GraphQLTp = TypeVar("GraphQLTp", graphql.GraphQLInputType, graphql.GraphQLOutputType) FactoryFunction = Callable[[Optional[str], Optional[str]], GraphQLTp] @dataclass(frozen=True) class TypeFactory(Generic[GraphQLTp]): factory: FactoryFunction[GraphQLTp] name: Optional[str] = None description: Optional[str] = None # non_null cannot be a field because it can not be forward to factories called in # wrapping factories (e.g. recursive wrapper) def merge( self, type_name: TypeName = TypeName(), schema: Optional[Schema] = None ) -> "TypeFactory[GraphQLTp]": if type_name == TypeName() and schema is None: return self return replace( self, name=type_name.graphql or self.name, description=get_description(schema) or self.description, ) @property def type(self) -> GraphQLTp: return self.factory(self.name, self.description) @property def raw_type(self) -> GraphQLTp: tp = self.type return tp.of_type if isinstance(tp, graphql.GraphQLNonNull) else tp def unwrap_name(name: Optional[str], tp: AnyType) -> str: if name is None: raise TypeError(f"Missing name for {tp}") return name Method = TypeVar("Method", bound=Callable[..., TypeFactory]) def cache_type(method: Method) -> Method: @wraps(method) def wrapper(self: "SchemaBuilder", *args, **kwargs): factory = method(self, *args, **kwargs) @wraps(factory.factory) def name_cache( name: Optional[str], description: Optional[str] ) -> graphql.GraphQLNonNull: if name is None: tp = factory.factory(name, description) return graphql.GraphQLNonNull(tp) if tp is not JSON_SCALAR else tp # Method is in cache key because scalar types will have the same method, # and then be shared by both visitors, while input/output types will have # their own cache entry. 
if (name, method, description) in self._cache_by_name: tp, cached_args = self._cache_by_name[(name, method, description)] if cached_args == (args, kwargs): return tp tp = graphql.GraphQLNonNull(factory.factory(name, description)) # Don't put args in cache in order to avoid hashable issue self._cache_by_name[(name, method, description)] = (tp, (args, kwargs)) return tp return replace(factory, factory=name_cache) return cast(Method, wrapper) class SchemaBuilder( RecursiveConversionsVisitor[Conv, TypeFactory[GraphQLTp]], ObjectVisitor[TypeFactory[GraphQLTp]], ): types: Tuple[Type[graphql.GraphQLType], ...] def __init__( self, aliaser: Aliaser, enum_aliaser: Aliaser, enum_schemas: Mapping[Enum, Schema], default_conversion: DefaultConversion, id_type: graphql.GraphQLScalarType, is_id: Optional[IdPredicate], ): super().__init__(default_conversion) self.aliaser = aliaser self.enum_aliaser = enum_aliaser self.enum_schemas = enum_schemas self.id_type = id_type self.is_id = is_id or (lambda t: False) self._cache_by_name: Dict[ Tuple[str, Callable, Optional[str]], Tuple[graphql.GraphQLNonNull, Tuple[tuple, dict]], ] = {} def _recursive_result( self, lazy: Lazy[TypeFactory[GraphQLTp]] ) -> TypeFactory[GraphQLTp]: def factory(name: Optional[str], description: Optional[str]) -> GraphQLTp: cached_fact = lazy() return cached_fact.factory( name or cached_fact.name, description or cached_fact.description ) return TypeFactory(factory) def annotated( self, tp: AnyType, annotations: Sequence[Any] ) -> TypeFactory[GraphQLTp]: factory = super().annotated(tp, annotations) type_name = False for annotation in reversed(annotations): if isinstance(annotation, TypeNameFactory): if type_name: break type_name = True factory = factory.merge(annotation.to_type_name(tp)) if isinstance(annotation, Mapping): if type_name: factory = factory.merge(schema=annotation.get(SCHEMA_METADATA)) return factory @cache_type def any(self) -> TypeFactory[GraphQLTp]: def factory( name: Optional[str], description: Optional[str] ) -> graphql.GraphQLScalarType: if name is None: return JSON_SCALAR else: return graphql.GraphQLScalarType(name, description=description) return TypeFactory(factory) @cache_type def collection( self, cls: Type[Collection], value_type: AnyType ) -> TypeFactory[GraphQLTp]: return TypeFactory(lambda *_: graphql.GraphQLList(self.visit(value_type).type)) @cache_type def enum(self, cls: Type[Enum]) -> TypeFactory[GraphQLTp]: def factory( name: Optional[str], description: Optional[str] ) -> graphql.GraphQLEnumType: return graphql.GraphQLEnumType( unwrap_name(name, cls), { self.enum_aliaser(name): graphql.GraphQLEnumValue( member, get_description(self.enum_schemas.get(member)), get_deprecated(self.enum_schemas.get(member)), ) for name, member in cls.__members__.items() }, description=description, ) return TypeFactory(factory) @cache_type def literal(self, values: Sequence[Any]) -> TypeFactory[GraphQLTp]: if not all(isinstance(v, str) for v in values): raise TypeError("apischema GraphQL only support Literal of strings") def factory( name: Optional[str], description: Optional[str] ) -> graphql.GraphQLEnumType: return graphql.GraphQLEnumType( unwrap_name(name, Literal[tuple(values)]), dict(zip(map(self.enum_aliaser, values), values)), description=description, ) return TypeFactory(factory) @cache_type def mapping( self, cls: Type[Mapping], key_type: AnyType, value_type: AnyType ) -> TypeFactory[GraphQLTp]: def factory( name: Optional[str], description: Optional[str] ) -> graphql.GraphQLScalarType: if name is not None: return 
graphql.GraphQLScalarType(name, description=description) else: return JSON_SCALAR return TypeFactory(factory) def object( self, tp: AnyType, fields: Sequence[ObjectField] ) -> TypeFactory[GraphQLTp]: raise NotImplementedError @cache_type def primitive(self, cls: Type) -> TypeFactory[GraphQLTp]: def factory( name: Optional[str], description: Optional[str] ) -> graphql.GraphQLScalarType: assert cls is not NoneType if name is not None: return graphql.GraphQLScalarType(name, description=description) else: return GRAPHQL_PRIMITIVE_TYPES[cls] return TypeFactory(factory) def tuple(self, types: Sequence[AnyType]) -> TypeFactory[GraphQLTp]: raise TypeError("Tuple are not supported") def union(self, types: Sequence[AnyType]) -> TypeFactory[GraphQLTp]: factories = self._union_results((alt for alt in types if alt is not NoneType)) if len(factories) == 1: factory = factories[0] else: factory = self._visited_union(factories) if NoneType in types or UndefinedType in types: def nullable(name: Optional[str], description: Optional[str]) -> GraphQLTp: res = factory.factory(name, description) return res.of_type if isinstance(res, graphql.GraphQLNonNull) else res return replace(factory, factory=nullable) else: return factory def visit_conversion( self, tp: AnyType, conversion: Optional[Conv], dynamic: bool, next_conversion: Optional[AnyConversion] = None, ) -> TypeFactory[GraphQLTp]: if not dynamic and self.is_id(tp) or tp == ID: return TypeFactory(lambda *_: graphql.GraphQLNonNull(self.id_type)) factory = super().visit_conversion(tp, conversion, dynamic, next_conversion) if not dynamic: factory = factory.merge(get_type_name(tp), get_schema(tp)) if get_args(tp): factory = factory.merge(schema=get_schema(get_origin(tp))) return factory FieldType = TypeVar("FieldType", graphql.GraphQLInputField, graphql.GraphQLField) class BaseField(Generic[FieldType]): name: str ordering: Optional[Ordering] def items(self) -> Iterable[Tuple[str, FieldType]]: raise NotImplementedError @dataclass class NormalField(BaseField[FieldType]): alias: str name: str field: Lazy[FieldType] ordering: Optional[Ordering] def items(self) -> Iterable[Tuple[str, FieldType]]: yield self.alias, self.field() @dataclass class FlattenedField(BaseField[FieldType]): name: str ordering: Optional[Ordering] type: TypeFactory def items(self) -> Iterable[Tuple[str, FieldType]]: tp = self.type.raw_type if not isinstance( tp, ( graphql.GraphQLObjectType, graphql.GraphQLInterfaceType, graphql.GraphQLInputObjectType, ), ): raise FlattenedError(self) yield from tp.fields.items() class FlattenedError(Exception): def __init__(self, field: FlattenedField): self.field = field def merge_fields(cls: type, fields: Sequence[BaseField]) -> Dict[str, FieldType]: try: sorted_fields = sort_by_order( cls, fields, lambda f: f.name, lambda f: f.ordering ) except FlattenedError as err: raise TypeError( f"Flattened field {cls.__name__}.{err.field.name}" f" must have an object type" ) return {k: v for f in sorted_fields for k, v in f.items()} class InputSchemaBuilder( SchemaBuilder[Deserialization, graphql.GraphQLInputType], DeserializationVisitor[TypeFactory[graphql.GraphQLInputType]], DeserializationObjectVisitor[TypeFactory[graphql.GraphQLInputType]], ): types = graphql.type.definition.graphql_input_types def _field( self, tp: AnyType, field: ObjectField ) -> Lazy[graphql.GraphQLInputField]: field_type = field.type field_default = graphql.Undefined if field.required else field.get_default() default: Any = graphql.Undefined # Don't put `null` default + handle Undefined as None 
if field_default in {None, Undefined}: field_type = Optional[field_type] elif field_default is not graphql.Undefined: try: default = serialize( field_type, field_default, aliaser=self.aliaser, conversion=field.deserialization, ) except Exception: field_type = Optional[field_type] factory = self.visit_with_conv(field_type, field.deserialization) return lambda: graphql.GraphQLInputField( factory.type, default_value=default, description=get_description(get_field_schema(tp, field), field.type), ) @cache_type def object( self, tp: AnyType, fields: Sequence[ObjectField] ) -> TypeFactory[graphql.GraphQLInputType]: visited_fields: List[BaseField] = [] for field in fields: if not field.is_aggregate: normal_field = NormalField( self.aliaser(field.alias), field.name, self._field(tp, field), field.ordering, ) visited_fields.append(normal_field) elif field.flattened: flattened_fields = FlattenedField( field.name, field.ordering, self.visit_with_conv(field.type, field.deserialization), ) visited_fields.append(flattened_fields) def factory( name: Optional[str], description: Optional[str] ) -> graphql.GraphQLInputObjectType: name = unwrap_name(name, tp) if not name.endswith("Input"): name += "Input" return graphql.GraphQLInputObjectType( name, lambda: merge_fields(get_origin_or_type(tp), visited_fields), description, ) return TypeFactory(factory) def _visited_union( self, results: Sequence[TypeFactory] ) -> TypeFactory[graphql.GraphQLInputType]: # Check must be done here too because _union_result is used by visit_conversion if len(results) != 1: raise TypeError("Union are not supported for input") return results[0] Func = TypeVar("Func", bound=Callable) class OutputSchemaBuilder( SchemaBuilder[Serialization, graphql.GraphQLOutputType], SerializationVisitor[TypeFactory[graphql.GraphQLOutputType]], SerializationObjectVisitor[TypeFactory[graphql.GraphQLOutputType]], ): types = graphql.type.definition.graphql_output_types def __init__( self, aliaser: Aliaser, enum_aliaser: Aliaser, enum_schemas: Mapping[Enum, Schema], default_conversion: DefaultConversion, id_type: graphql.GraphQLScalarType, is_id: Optional[IdPredicate], union_name_factory: UnionNameFactory, default_deserialization: DefaultConversion, ): super().__init__( aliaser, enum_aliaser, enum_schemas, default_conversion, id_type, is_id ) self.union_name_factory = union_name_factory self.input_builder = InputSchemaBuilder( self.aliaser, self.enum_aliaser, self.enum_schemas, default_deserialization, self.id_type, self.is_id, ) # Share the same cache for input_builder in order to share scalar types self.input_builder._cache_by_name = self._cache_by_name self.get_flattened: Optional[Callable[[Any], Any]] = None def _field_serialization_method(self, field: ObjectField) -> SerializationMethod: return partial_serialization_method_factory( self.aliaser, field.serialization, self.default_conversion )(Optional[field.type] if field.none_as_undefined else field.type) def _wrap_resolve(self, resolve: Func) -> Func: if self.get_flattened is None: return resolve else: get_flattened = self.get_flattened def resolve_wrapper(__obj, __info, **kwargs): return resolve(get_flattened(__obj), __info, **kwargs) return cast(Func, resolve_wrapper) def _field(self, tp: AnyType, field: ObjectField) -> Lazy[graphql.GraphQLField]: field_name = field.name partial_serialize = self._field_serialization_method(field).serialize @self._wrap_resolve def resolve(obj, _): return partial_serialize(getattr(obj, field_name)) factory = self.visit_with_conv(field.type, field.serialization) 
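# Note: the GraphQLField below is produced lazily (as a thunk) so that forward
# and recursive type references can be resolved when the enclosing object type
# builds its field map; description and deprecation_reason are derived from the
# field's merged apischema Schema (get_field_schema).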
field_schema = get_field_schema(tp, field) return lambda: graphql.GraphQLField( factory.type, None, resolve, description=get_description(field_schema, field.type), deprecation_reason=get_deprecated(field_schema, field.type), ) def _resolver( self, tp: AnyType, field: ResolverField ) -> Lazy[graphql.GraphQLField]: resolve = self._wrap_resolve( resolver_resolve( field.resolver, field.types, self.aliaser, self.input_builder.default_conversion, self.default_conversion, ) ) args = None if field.parameters is not None: args = {} for param in field.parameters: default: Any = graphql.Undefined param_type = field.types[param.name] if is_union_of(param_type, graphql.GraphQLResolveInfo): break param_field = ObjectField( param.name, param_type, param.default is Parameter.empty, field.metadata.get(param.name, empty_dict), default=param.default, ) if param_field.required: pass # Don't put `null` default + handle Undefined as None # also https://github.com/python/typing/issues/775 elif param.default in {None, Undefined}: param_type = Optional[param_type] # param.default == graphql.Undefined means the parameter is required # even if it has a default elif param.default not in {Parameter.empty, graphql.Undefined}: try: default = serialize( param_type, param.default, fall_back_on_any=False, check_type=True, ) except Exception: param_type = Optional[param_type] arg_factory = self.input_builder.visit_with_conv( param_type, param_field.deserialization ) description = get_description( get_parameter_schema(field.resolver.func, param, param_field), param_field.type, ) def arg_thunk( arg_factory=arg_factory, default=default, description=description ) -> graphql.GraphQLArgument: return graphql.GraphQLArgument( arg_factory.type, default, description ) args[self.aliaser(param_field.alias)] = arg_thunk factory = self.visit_with_conv(field.types["return"], field.resolver.conversion) field_schema = get_method_schema(tp, field.resolver) return lambda: graphql.GraphQLField( factory.type, {name: arg() for name, arg in args.items()} if args else None, resolve, field.subscribe, get_description(field_schema), get_deprecated(field_schema), ) def _visit_flattened( self, field: ObjectField ) -> TypeFactory[graphql.GraphQLOutputType]: get_prev_flattened = ( self.get_flattened if self.get_flattened is not None else identity ) field_name = field.name partial_serialize = self._field_serialization_method(field).serialize def get_flattened(obj): return partial_serialize(getattr(get_prev_flattened(obj), field_name)) with context_setter(self): self.get_flattened = get_flattened return self.visit_with_conv(field.type, field.serialization) @cache_type def object( self, tp: AnyType, fields: Sequence[ObjectField], resolvers: Sequence[ResolverField] = (), ) -> TypeFactory[graphql.GraphQLOutputType]: cls = get_origin_or_type(tp) visited_fields: List[BaseField[graphql.GraphQLField]] = [] flattened_factories = [] for field in fields: if not field.is_aggregate: normal_field = NormalField( self.aliaser(field.name), field.name, self._field(tp, field), field.ordering, ) visited_fields.append(normal_field) elif field.flattened: flattened_factory = self._visit_flattened(field) flattened_factories.append(flattened_factory) visited_fields.append( FlattenedField(field.name, field.ordering, flattened_factory) ) resolvers = list(resolvers) for resolver, types in get_resolvers(tp): resolver_field = ResolverField( resolver, types, resolver.parameters, resolver.parameters_metadata ) resolvers.append(resolver_field) for resolver_field in resolvers: 
normal_field = NormalField( self.aliaser(resolver_field.resolver.alias), resolver_field.resolver.func.__name__, self._resolver(tp, resolver_field), resolver_field.resolver.ordering, ) visited_fields.append(normal_field) interface_thunk = None interfaces = list(map(self.visit, get_interfaces(cls))) if interfaces or flattened_factories: def interface_thunk() -> Collection[graphql.GraphQLInterfaceType]: # noqa all_interfaces = { cast(graphql.GraphQLInterfaceType, i.raw_type) for i in interfaces } for flattened_factory in flattened_factories: flattened = flattened_factory.raw_type if isinstance(flattened, graphql.GraphQLObjectType): all_interfaces.update(flattened.interfaces) elif isinstance(flattened, graphql.GraphQLInterfaceType): all_interfaces.add(flattened) return sorted(all_interfaces, key=lambda i: i.name) def factory( name: Optional[str], description: Optional[str] ) -> Union[graphql.GraphQLObjectType, graphql.GraphQLInterfaceType]: name = unwrap_name(name, cls) if is_interface(cls): return graphql.GraphQLInterfaceType( name, lambda: merge_fields(cls, visited_fields), interface_thunk, description=description, ) else: return graphql.GraphQLObjectType( name, lambda: merge_fields(cls, visited_fields), interface_thunk, is_type_of=lambda obj, _: isinstance(obj, cls), description=description, ) return TypeFactory(factory) def typed_dict( self, tp: Type, types: Mapping[str, AnyType], required_keys: Collection[str] ) -> TypeFactory[graphql.GraphQLOutputType]: raise TypeError("TypedDict are not supported in output schema") @cache_type def _visited_union( self, results: Sequence[TypeFactory] ) -> TypeFactory[graphql.GraphQLOutputType]: def factory( name: Optional[str], description: Optional[str] ) -> graphql.GraphQLOutputType: types = [factory.raw_type for factory in results] if name is None: name = self.union_name_factory([t.name for t in types]) return graphql.GraphQLUnionType(name, types, description=description) return TypeFactory(factory) async_iterable_origins = set(map(get_origin, (AsyncIterable[Any], AsyncIterator[Any]))) _fake_type = cast(type, ...) 
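# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): _visited_union above turns a
# Python Union result into a GraphQL union, and with the default
# union_name_factory ("Or".join, see graphql_schema below) Union[Cat, Dog]
# is expected to be exposed as "CatOrDog". Assumes the public export
# `apischema.graphql.graphql_schema`; Cat/Dog/animal are made-up names.
if __name__ == "__main__":  # documentation sketch only, never executed on import
    from dataclasses import dataclass
    from typing import Union

    from graphql import print_schema

    from apischema.graphql import graphql_schema

    @dataclass
    class Cat:
        name: str
        lives: int

    @dataclass
    class Dog:
        name: str
        good_boy: bool

    def animal() -> Union[Cat, Dog]:
        return Dog("Rex", True)

    sketch_schema = graphql_schema(query=[animal])
    # the printed SDL should contain something like: union CatOrDog = Cat | Dog
    print(print_schema(sketch_schema))
# ---------------------------------------------------------------------------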
@dataclass(frozen=True) class Operation(Generic[T]): function: Callable[..., T] alias: Optional[str] = None conversion: Optional[AnyConversion] = None error_handler: ErrorHandler = Undefined order: Optional[Ordering] = None schema: Optional[Schema] = None parameters_metadata: Mapping[str, Mapping] = field_(default_factory=dict) class Query(Operation): pass class Mutation(Operation): pass @dataclass(frozen=True) class Subscription(Operation[AsyncIterable]): resolver: Optional[Callable] = None Op = TypeVar("Op", bound=Operation) def operation_resolver(operation: Union[Callable, Op], op_class: Type[Op]) -> Resolver: if not isinstance(operation, op_class): operation = op_class(operation) # type: ignore error_handler: Optional[Callable] if operation.error_handler is Undefined: error_handler = None elif operation.error_handler is None: error_handler = none_error_handler else: error_handler = operation.error_handler op = operation.function if iscoroutinefunction(op): @wraps(op) async def wrapper(_, *args, **kwargs): return await op(*args, **kwargs) else: @wraps(op) def wrapper(_, *args, **kwargs): return op(*args, **kwargs) wrapper.__annotations__ = op.__annotations__ (*parameters,) = resolver_parameters(operation.function, check_first=True) return Resolver( wrapper, operation.alias or operation.function.__name__, operation.conversion, error_handler, operation.order, operation.schema, parameters, operation.parameters_metadata, ) def graphql_schema( *, query: Iterable[Union[Callable, Query]] = (), mutation: Iterable[Union[Callable, Mutation]] = (), subscription: Iterable[Union[Callable[..., AsyncIterable], Subscription]] = (), types: Iterable[Type] = (), directives: Optional[Collection[graphql.GraphQLDirective]] = None, description: Optional[str] = None, extensions: Optional[Dict[str, Any]] = None, aliaser: Optional[Aliaser] = to_camel_case, enum_aliaser: Optional[Aliaser] = str.upper, enum_schemas: Optional[Mapping[Enum, Schema]] = None, id_types: Union[Collection[AnyType], IdPredicate] = (), id_encoding: Tuple[ Optional[Callable[[str], Any]], Optional[Callable[[Any], str]] ] = (None, None), union_name: UnionNameFactory = "Or".join, default_deserialization: Optional[DefaultConversion] = None, default_serialization: Optional[DefaultConversion] = None, ) -> graphql.GraphQLSchema: if aliaser is None: aliaser = settings.aliaser if enum_aliaser is None: enum_aliaser = lambda s: s if default_deserialization is None: default_deserialization = settings.deserialization.default_conversion if default_serialization is None: default_serialization = settings.serialization.default_conversion query_fields: List[ResolverField] = [] mutation_fields: List[ResolverField] = [] subscription_fields: List[ResolverField] = [] for operations, op_class, fields in [ (query, Query, query_fields), (mutation, Mutation, mutation_fields), ]: for operation in operations: resolver = operation_resolver(operation, op_class) resolver_field = ResolverField( resolver, resolver.types(), resolver.parameters, resolver.parameters_metadata, ) fields.append(resolver_field) for sub_op in subscription: if not isinstance(sub_op, Subscription): sub_op = Subscription(sub_op) sub_parameters: Sequence[Parameter] if sub_op.resolver is not None: subscriber2 = operation_resolver(sub_op, Subscription) _, *sub_parameters = resolver_parameters(sub_op.resolver, check_first=False) resolver = Resolver( sub_op.resolver, sub_op.alias or sub_op.resolver.__name__, sub_op.conversion, subscriber2.error_handler, sub_op.order, sub_op.schema, sub_parameters, 
sub_op.parameters_metadata, ) sub_types = resolver.types() subscriber = replace(subscriber2, error_handler=None) subscribe = resolver_resolve( subscriber, subscriber.types(), aliaser, default_deserialization, default_serialization, serialized=False, ) else: subscriber2 = operation_resolver(sub_op, Subscription) resolver = Resolver( lambda _: _, subscriber2.alias, sub_op.conversion, subscriber2.error_handler, sub_op.order, sub_op.schema, (), {}, ) subscriber = replace(subscriber2, error_handler=None) sub_parameters = subscriber.parameters sub_types = subscriber.types() if get_origin2(sub_types["return"]) not in async_iterable_origins: raise TypeError( "Subscriptions must return an AsyncIterable/AsyncIterator" ) event_type = get_args2(sub_types["return"])[0] subscribe = resolver_resolve( subscriber, sub_types, aliaser, default_deserialization, default_serialization, serialized=False, ) sub_types = {**sub_types, "return": resolver.return_type(event_type)} resolver_field = ResolverField( resolver, sub_types, sub_parameters, sub_op.parameters_metadata, subscribe ) subscription_fields.append(resolver_field) is_id = as_predicate(id_types) if id_encoding == (None, None): id_type: graphql.GraphQLScalarType = graphql.GraphQLID else: id_deserializer, id_serializer = id_encoding id_type = graphql.GraphQLScalarType( name="ID", serialize=id_serializer or graphql.GraphQLID.serialize, parse_value=id_deserializer or graphql.GraphQLID.parse_value, parse_literal=graphql.GraphQLID.parse_literal, description=graphql.GraphQLID.description, ) output_builder = OutputSchemaBuilder( aliaser, enum_aliaser, enum_schemas or {}, default_serialization, id_type, is_id, union_name, default_deserialization, ) def root_type( name: str, fields: Sequence[ResolverField] ) -> Optional[graphql.GraphQLObjectType]: if not fields: return None tp, type_name = type(name, (), {}), TypeName(graphql=name) root = output_builder.object(tp, (), fields).merge(type_name, None).raw_type assert isinstance(root, graphql.GraphQLObjectType) return root def check_named(tp: graphql.GraphQLType) -> graphql.GraphQLNamedType: if not isinstance(tp, graphql.GraphQLNamedType): raise TypeError(f"schema type {tp} is not named") return tp return graphql.GraphQLSchema( query=root_type("Query", query_fields), mutation=root_type("Mutation", mutation_fields), subscription=root_type("Subscription", subscription_fields), types=[check_named(output_builder.visit(cls).raw_type) for cls in types], directives=directives, description=description, extensions=extensions, ) apischema-0.18.3/apischema/json_schema/000077500000000000000000000000001467672046000177745ustar00rootroot00000000000000apischema-0.18.3/apischema/json_schema/__init__.py000066400000000000000000000003711467672046000221060ustar00rootroot00000000000000__all__ = [ "JsonSchemaVersion", "definitions_schema", "deserialization_schema", "serialization_schema", ] from .schema import definitions_schema, deserialization_schema, serialization_schema from .versions import JsonSchemaVersion apischema-0.18.3/apischema/json_schema/conversions_resolver.py000066400000000000000000000104661467672046000246460ustar00rootroot00000000000000from contextlib import suppress from typing import ( Any, Collection, Iterable, Iterator, Mapping, Optional, Sequence, Set, Tuple, Type, Union, ) from apischema.conversions.conversions import AnyConversion, DefaultConversion from apischema.conversions.visitor import ( Conv, ConversionsVisitor, DeserializationVisitor, SerializationVisitor, ) from apischema.types import AnyType from 
apischema.utils import is_hashable from apischema.visitor import Unsupported try: from apischema.typing import Annotated, is_union except ImportError: Annotated = ... # type: ignore def merge_results( results: Iterable[Sequence[AnyType]], origin: AnyType ) -> Sequence[AnyType]: def rec(index=0) -> Iterator[Sequence[AnyType]]: if index < len(result_list): for next_ in rec(index + 1): for res in result_list[index]: yield (res, *next_) else: yield () result_list = list(results) return [(Union if is_union(origin) else origin)[tuple(r)] for r in rec()] class ConversionsResolver(ConversionsVisitor[Conv, Sequence[AnyType]]): def __init__(self, default_conversion: DefaultConversion): super().__init__(default_conversion) self._skip_conversion = True self._rec_guard: Set[Tuple[AnyType, Conv]] = set() def annotated(self, tp: AnyType, annotations: Sequence[Any]) -> Sequence[AnyType]: return [ Annotated[(res, *annotations)] for res in super().annotated(tp, annotations) ] def collection( self, cls: Type[Collection], value_type: AnyType ) -> Sequence[AnyType]: return merge_results([self.visit(value_type)], Collection) def mapping( self, cls: Type[Mapping], key_type: AnyType, value_type: AnyType ) -> Sequence[AnyType]: return merge_results([self.visit(key_type), self.visit(value_type)], Mapping) def new_type(self, tp: AnyType, super_type: AnyType) -> Sequence[AnyType]: raise NotImplementedError def tuple(self, types: Sequence[AnyType]) -> Sequence[AnyType]: return merge_results(map(self.visit, types), Tuple) def _visited_union(self, results: Sequence[Sequence[AnyType]]) -> Sequence[AnyType]: return merge_results(results, Union) def visit_conversion( self, tp: AnyType, conversion: Any, dynamic: bool, next_conversion: Optional[AnyConversion] = None, ) -> Sequence[AnyType]: if conversion is not None and self._skip_conversion: return [] if dynamic else [tp] self._skip_conversion = False results: Sequence[AnyType] = [] if not is_hashable(tp): with suppress(NotImplementedError, Unsupported): results = super().visit_conversion( tp, conversion, dynamic, next_conversion ) elif (tp, conversion) not in self._rec_guard: self._rec_guard.add((tp, conversion)) with suppress(NotImplementedError, Unsupported): results = super().visit_conversion( tp, conversion, dynamic, next_conversion ) self._rec_guard.remove((tp, conversion)) if not dynamic and (conversion is not None or not results): results = [tp, *results] return results class WithConversionsResolver: def resolve_conversion(self, tp: AnyType) -> Sequence[AnyType]: raise NotImplementedError def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) Resolver: Type[ConversionsResolver] if issubclass(cls, DeserializationVisitor): class Resolver(ConversionsResolver, DeserializationVisitor): # type: ignore pass elif issubclass(cls, SerializationVisitor): class Resolver(ConversionsResolver, SerializationVisitor): # type: ignore pass else: return def resolve_conversion( self: ConversionsVisitor, tp: AnyType ) -> Sequence[AnyType]: return Resolver(self.default_conversion).visit_with_conv( tp, self._conversion ) assert issubclass(cls, WithConversionsResolver) cls.resolve_conversion = resolve_conversion # type: ignore apischema-0.18.3/apischema/json_schema/patterns.py000066400000000000000000000014251467672046000222100ustar00rootroot00000000000000from typing import Pattern from apischema.conversions.conversions import DefaultConversion from apischema.types import AnyType def infer_pattern(tp: AnyType, default_conversion: DefaultConversion) -> Pattern: from 
apischema.json_schema.schema import DeserializationSchemaBuilder try: builder = DeserializationSchemaBuilder( False, default_conversion, False, lambda r: r, {} ) prop_schema = builder.visit(tp) except RecursionError: pass else: if ( len(prop_schema.get("patternProperties", {})) == 1 and "additionalProperties" not in prop_schema ): return next(iter(prop_schema["patternProperties"])) raise TypeError("Cannot inferred pattern from type schema") from None apischema-0.18.3/apischema/json_schema/refs.py000066400000000000000000000130061467672046000213050ustar00rootroot00000000000000from collections import defaultdict from enum import Enum from typing import ( Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Type, TypeVar, ) from apischema.conversions.conversions import AnyConversion, DefaultConversion from apischema.conversions.visitor import ( ConversionsVisitor, DeserializationVisitor, SerializationVisitor, ) from apischema.discriminators import ( get_discriminated_parent, get_inherited_discriminator, ) from apischema.json_schema.conversions_resolver import WithConversionsResolver from apischema.metadata.keys import DISCRIMINATOR_METADATA from apischema.objects import ObjectField from apischema.objects.visitor import ( DeserializationObjectVisitor, ObjectVisitor, SerializationObjectVisitor, ) from apischema.type_names import TypeNameFactory, get_type_name from apischema.types import AnyType from apischema.utils import get_origin_or_type, is_hashable, replace_builtins from apischema.visitor import Unsupported try: from apischema.typing import Annotated, get_origin, is_union except ImportError: Annotated = ... # type: ignore Refs = Dict[str, Tuple[AnyType, int]] class Recursive(Exception): pass T = TypeVar("T") class RefsExtractor(ConversionsVisitor, ObjectVisitor, WithConversionsResolver): def __init__(self, default_conversion: DefaultConversion, refs: Refs): super().__init__(default_conversion) self.refs = refs self._rec_guard: Dict[ Tuple[AnyType, Optional[AnyConversion]], int ] = defaultdict(lambda: 0) def _incr_ref(self, ref: Optional[str], tp: AnyType) -> bool: if ref is None: return False else: ref_cls, count = self.refs.get(ref, (tp, 0)) if replace_builtins(ref_cls) != replace_builtins(tp): raise ValueError( f"Types {tp} and {self.refs[ref][0]} share same reference '{ref}'" ) self.refs[ref] = (ref_cls, count + 1) return count > 0 def annotated(self, tp: AnyType, annotations: Sequence[Any]): for i, annotation in enumerate(reversed(annotations)): if isinstance(annotation, TypeNameFactory): ref = annotation.to_type_name(tp).json_schema if not isinstance(ref, str): continue ref_annotations = annotations[: len(annotations) - i] annotated = Annotated[(tp, *ref_annotations)] if self._incr_ref(ref, annotated): return if ( isinstance(annotation, Mapping) and DISCRIMINATOR_METADATA in annotation and is_union(get_origin(tp)) ): # Visit one more time discriminated union in order to ensure ref count > 1 self.visit(tp) return super().annotated(tp, annotations) def any(self): pass def collection(self, cls: Type[Collection], value_type: AnyType): self.visit(value_type) def enum(self, cls: Type[Enum]): pass def literal(self, values: Sequence[Any]): pass def mapping(self, cls: Type[Mapping], key_type: AnyType, value_type: AnyType): self.visit(key_type) self.visit(value_type) def object(self, tp: AnyType, fields: Sequence[ObjectField]): if parent := get_discriminated_parent(get_origin_or_type(tp)): self._incr_ref(get_type_name(parent).json_schema, parent) for field in fields: 
self.visit_with_conv(field.type, self._field_conversion(field)) def primitive(self, cls: Type): pass def tuple(self, types: Sequence[AnyType]): for cls in types: self.visit(cls) def _visited_union(self, results: Sequence): pass def union(self, types: Sequence[AnyType]): super().union(types) if get_inherited_discriminator(types): # Visit one more time discriminated union in order to ensure ref count > 1 super().union(types) def visit_conversion( self, tp: AnyType, conversion: Optional[Any], dynamic: bool, next_conversion: Optional[AnyConversion] = None, ): ref_types = [] if not dynamic: for ref_tp in self.resolve_conversion(tp): ref_types.append(ref_tp) if self._incr_ref(get_type_name(ref_tp).json_schema, ref_tp): return if not is_hashable(tp): return super().visit_conversion(tp, conversion, dynamic, next_conversion) # 2 because the first type encountered of the recursive cycle can have no ref # (see test_recursive_by_conversion_schema) if self._rec_guard[(tp, self._conversion)] > 2: raise TypeError( f"Recursive type {tp} needs a ref. " "You can supply one using the type_name() decorator." ) self._rec_guard[(tp, self._conversion)] += 1 try: super().visit_conversion(tp, conversion, dynamic, next_conversion) except Unsupported: for ref_tp in ref_types: self.refs.pop(get_type_name(ref_tp).json_schema, ...) # type: ignore finally: self._rec_guard[(tp, self._conversion)] -= 1 class DeserializationRefsExtractor( RefsExtractor, DeserializationVisitor, DeserializationObjectVisitor ): pass class SerializationRefsExtractor( RefsExtractor, SerializationVisitor, SerializationObjectVisitor ): pass apischema-0.18.3/apischema/json_schema/schema.py000066400000000000000000000710001467672046000216040ustar00rootroot00000000000000from contextlib import suppress from dataclasses import dataclass from enum import Enum from itertools import chain from typing import ( AbstractSet, Any, Callable, ClassVar, Collection, Dict, List, Mapping, Optional, Pattern, Sequence, Tuple, Type, TypeVar, Union, ) from apischema.aliases import Aliaser from apischema.conversions import converters from apischema.conversions.conversions import AnyConversion, DefaultConversion from apischema.conversions.visitor import ( Conv, ConversionsVisitor, Deserialization, DeserializationVisitor, Serialization, SerializationVisitor, ) from apischema.dependencies import get_dependent_required from apischema.discriminators import ( Discriminator, get_discriminated_parent, get_discriminator, get_inherited_discriminator, rec_subclasses, ) from apischema.json_schema.conversions_resolver import WithConversionsResolver from apischema.json_schema.patterns import infer_pattern from apischema.json_schema.refs import DeserializationRefsExtractor, Refs from apischema.json_schema.refs import RefsExtractor as RefsExtractor_ from apischema.json_schema.refs import SerializationRefsExtractor from apischema.json_schema.types import JsonSchema, JsonType, json_schema from apischema.json_schema.versions import JsonSchemaVersion, RefFactory from apischema.metadata.keys import DISCRIMINATOR_METADATA, SCHEMA_METADATA from apischema.objects import AliasedStr, ObjectField from apischema.objects.visitor import ( DeserializationObjectVisitor, ObjectVisitor, SerializationObjectVisitor, ) from apischema.ordering import Ordering, sort_by_order from apischema.schemas import Schema from apischema.schemas import get_schema as _get_schema from apischema.schemas import merge_schema from apischema.serialization import serialize from apischema.serialization.serialized_methods import 
( SerializedMethod, get_serialized_methods, ) from apischema.type_names import TypeNameFactory, get_type_name from apischema.types import AnyType, UndefinedType from apischema.typing import get_args, get_origin, is_typed_dict, is_union from apischema.utils import ( context_setter, get_origin_or_type, identity, is_hashable, is_union_of, literal_values, ) from apischema.visitor import Unsupported def get_schema(tp: AnyType) -> Optional[Schema]: from apischema import settings return merge_schema(settings.base_schema.type(tp), _get_schema(tp)) def get_field_schema(tp: AnyType, field: ObjectField) -> Optional[Schema]: from apischema import settings assert not field.is_aggregate return merge_schema( settings.base_schema.field(tp, field.name, field.alias), field.schema ) def get_method_schema(tp: AnyType, method: SerializedMethod) -> Optional[Schema]: from apischema import settings return merge_schema( settings.base_schema.method(tp, method.func, method.alias), method.schema ) def full_schema(base_schema: JsonSchema, schema: Optional[Schema]) -> JsonSchema: if schema is not None: base_schema = JsonSchema(base_schema) schema.merge_into(base_schema) return base_schema Method = TypeVar("Method", bound=Callable) @dataclass(frozen=True) class Property: alias: AliasedStr name: str ordering: Optional[Ordering] required: bool schema: JsonSchema class SchemaBuilder( ConversionsVisitor[Conv, JsonSchema], ObjectVisitor[JsonSchema], WithConversionsResolver, ): def __init__( self, additional_properties: bool, default_conversion: DefaultConversion, ignore_first_ref: bool, ref_factory: RefFactory, refs: Collection[str], ): super().__init__(default_conversion) self.additional_properties = additional_properties self._ignore_first_ref = ignore_first_ref self.ref_factory = ref_factory self.refs = refs def ref_schema(self, ref: Optional[str]) -> Optional[JsonSchema]: if ref not in self.refs: return None elif self._ignore_first_ref: self._ignore_first_ref = False return None else: assert isinstance(ref, str) return JsonSchema({"$ref": self.ref_factory(ref)}) def discriminator_schema( self, discriminator: Discriminator, types: Sequence[type] ) -> Mapping[str, Any]: discriminator_schema: Dict[str, Any] = { "propertyName": AliasedStr(discriminator.alias) } mapping = { key: self.ref_factory(type_name) for key, tp in discriminator.get_mapping(types).items() for type_name in [get_type_name(tp).json_schema] if type_name is not None and type_name != key } if mapping: discriminator_schema["mapping"] = mapping return {"discriminator": discriminator_schema} def annotated(self, tp: AnyType, annotations: Sequence[Any]) -> JsonSchema: schema = None discriminator: Optional[Discriminator] = None for annotation in reversed(annotations): if isinstance(annotation, TypeNameFactory): ref = annotation.to_type_name(tp).json_schema ref_schema = self.ref_schema(ref) if ref_schema is not None: return full_schema(ref_schema, schema) if isinstance(annotation, Mapping): schema = merge_schema(annotation.get(SCHEMA_METADATA), schema) if DISCRIMINATOR_METADATA in annotation and is_union(get_origin(tp)): discriminator = annotation[DISCRIMINATOR_METADATA] res = full_schema(super().annotated(tp, annotations), schema) return ( json_schema( oneOf=res["anyOf"], **self.discriminator_schema(discriminator, get_args(tp)), ) if discriminator is not None else res ) def any(self) -> JsonSchema: return JsonSchema() def collection(self, cls: Type[Collection], value_type: AnyType) -> JsonSchema: return json_schema( type=JsonType.ARRAY, 
items=self.visit(value_type), uniqueItems=issubclass(cls, AbstractSet), ) def enum(self, cls: Type[Enum]) -> JsonSchema: if len(cls) == 0: raise TypeError("Empty enum") return self.literal(list(cls)) def literal(self, values: Sequence[Any]) -> JsonSchema: if not values: raise TypeError("Empty Literal") types = {JsonType.from_type(type(v)) for v in literal_values(values)} # Mypy issue type_: Any = types.pop() if len(types) == 1 else types if len(values) == 1: return json_schema(type=type_, const=values[0]) else: return json_schema(type=type_, enum=values) def mapping( self, cls: Type[Mapping], key_type: AnyType, value_type: AnyType ) -> JsonSchema: with context_setter(self): self._ignore_first_ref = True key = self.visit(key_type) if "type" not in key or key["type"] != JsonType.STRING: raise ValueError("Mapping types must have string-convertible keys") value = self.visit(value_type) if "pattern" in key: return json_schema( type=JsonType.OBJECT, patternProperties={key["pattern"]: value} ) else: return json_schema(type=JsonType.OBJECT, additionalProperties=value) def visit_field( self, tp: AnyType, field: ObjectField, required: bool = True ) -> JsonSchema: assert not field.is_aggregate result = full_schema( self.visit_with_conv(field.type, self._field_conversion(field)), get_field_schema(tp, field) if tp is not None else field.schema, ) if not required and "default" not in result: result = JsonSchema(result) with suppress(Exception): result["default"] = serialize( field.type, field.get_default(), fall_back_on_any=False, check_type=True, conversion=field.serialization, ) return result def _object_schema(self, cls: type, field: ObjectField) -> JsonSchema: assert field.is_aggregate with context_setter(self): self._ignore_first_ref = True object_schema = full_schema( self.visit_with_conv(field.type, self._field_conversion(field)), field.schema, ) if object_schema.get("type") not in {JsonType.OBJECT, "object"}: field_type = "Flattened" if field.flattened else "Properties" raise TypeError( f"{field_type} field {cls.__name__}.{field.name}" f" must have an object type" ) return object_schema def _properties_schema( self, object_schema: JsonSchema, pattern: Optional[Pattern] = None ): if "patternProperties" in object_schema: if pattern is not None: for p in (pattern, pattern.pattern): if p in object_schema["patternProperties"]: return object_schema["patternProperties"][p] elif ( len(object_schema["patternProperties"]) == 1 and "additionalProperties" not in object_schema ): return next(iter(object_schema["patternProperties"].values())) if isinstance(object_schema.get("additionalProperties"), Mapping): return object_schema["additionalProperties"] return JsonSchema() def properties( self, tp: AnyType, fields: Sequence[ObjectField] ) -> Sequence[Property]: raise NotImplementedError def object(self, tp: AnyType, fields: Sequence[ObjectField]) -> JsonSchema: cls = get_origin_or_type(tp) properties = sort_by_order( cls, self.properties(tp, fields), lambda p: p.name, lambda p: p.ordering ) flattened_schemas: List[JsonSchema] = [] pattern_properties = {} additional_properties: Union[bool, JsonSchema] = self.additional_properties for field in fields: if field.flattened: self._object_schema(cls, field) # check the field is an object flattened_schemas.append( full_schema( self.visit_with_conv(field.type, self._field_conversion(field)), field.schema, ) ) elif field.pattern_properties is not None: if field.pattern_properties is ...: pattern = infer_pattern(field.type, self.default_conversion) else: assert 
isinstance(field.pattern_properties, Pattern) pattern = field.pattern_properties pattern_properties[pattern] = self._properties_schema( self._object_schema(cls, field), pattern ) elif field.additional_properties: additional_properties = self._properties_schema( self._object_schema(cls, field) ) alias_by_names = {f.name: f.alias for f in fields}.__getitem__ dependent_required = get_dependent_required(cls) result = [] if discriminator_parent := get_discriminated_parent(cls): discriminator_ref = self.ref_schema( get_type_name(discriminator_parent).json_schema ) assert discriminator_ref is not None result.append(discriminator_ref) additional_properties = True result.append( json_schema( type=JsonType.OBJECT, properties={p.alias: p.schema for p in properties}, required=[p.alias for p in properties if p.required], additionalProperties=additional_properties, patternProperties=pattern_properties, dependentRequired={ alias_by_names(f): sorted( map(alias_by_names, dependent_required[f]) ) for f in sorted(dependent_required, key=alias_by_names) }, ) ) if flattened_schemas: return json_schema( allOf=result + flattened_schemas, unevaluatedProperties=False ) elif len(result) == 1: return result[0] else: return json_schema(allOf=result) def primitive(self, cls: Type) -> JsonSchema: return JsonSchema(type=JsonType.from_type(cls)) def tuple(self, types: Sequence[AnyType]) -> JsonSchema: return json_schema( type=JsonType.ARRAY, prefixItems=[self.visit(cls) for cls in types], items=False, minItems=len(types), maxItems=len(types), ) def _visited_union(self, results: Sequence[JsonSchema]) -> JsonSchema: if len(results) == 1: return results[0] elif any(alt == {} for alt in results): return JsonSchema() elif all(alt.keys() == {"type"} for alt in results): types: Any = chain.from_iterable( [res["type"]] if isinstance(res["type"], (str, JsonType)) else res["type"] for res in results ) return json_schema(type=list(types)) elif ( len(results) == 2 and all("type" in res for res in results) and {"type": "null"} in results ): for result in results: if result != {"type": "null"}: types = result["type"] if isinstance(types, (str, JsonType)): types = [types] if "null" not in types: result = JsonSchema({**result, "type": [*types, "null"]}) return result else: raise NotImplementedError else: return json_schema(anyOf=results) def union(self, types: Sequence[AnyType]) -> JsonSchema: result = super().union(types) if get_inherited_discriminator(types) is not None: result = json_schema(oneOf=result["anyOf"]) return result def visit_conversion( self, tp: AnyType, conversion: Optional[Conv], dynamic: bool, next_conversion: Optional[AnyConversion] = None, ) -> JsonSchema: schema = None if not dynamic: for ref_tp in self.resolve_conversion(tp): ref_schema = self.ref_schema(get_type_name(ref_tp).json_schema) if ref_schema is not None: if ( conversion is not None and is_hashable(tp) and get_discriminator(tp) is not None ): return self.visit(Union[tuple(rec_subclasses(tp))]) return ref_schema if get_args(tp): schema = merge_schema(schema, get_schema(get_origin_or_type(tp))) schema = merge_schema(schema, get_schema(tp)) if ( conversion is not None and is_hashable(tp) and (discriminator := get_discriminator(tp)) ): discriminator_alias = AliasedStr(discriminator.alias) try: parent_schema = self.visit_with_conv(tp, conversion=identity) except Unsupported: parent_schema = json_schema(type=JsonType.OBJECT) required = parent_schema.get("required", []) if discriminator_alias not in required: required = required + [discriminator_alias] 
properties = parent_schema.get("properties", {}) if discriminator_alias not in properties: properties = { **properties, discriminator_alias: json_schema(type=JsonType.STRING), } return full_schema( JsonSchema( { **parent_schema, "required": required, "properties": properties, **self.discriminator_schema( discriminator, list(rec_subclasses(tp)) ), } ), schema, ) result = super().visit_conversion(tp, conversion, dynamic, next_conversion) return full_schema(result, schema) RefsExtractor: ClassVar[Type[RefsExtractor_]] class DeserializationSchemaBuilder( SchemaBuilder[Deserialization], DeserializationVisitor[JsonSchema], DeserializationObjectVisitor[JsonSchema], ): RefsExtractor = DeserializationRefsExtractor def properties( self, tp: AnyType, fields: Sequence[ObjectField] ) -> Sequence[Property]: return [ Property( AliasedStr(field.alias), field.name, field.ordering, field.required, self.visit_field(tp, field, field.required), ) for field in fields if not field.is_aggregate ] class SerializationSchemaBuilder( SchemaBuilder[Serialization], SerializationVisitor[JsonSchema], SerializationObjectVisitor[JsonSchema], ): RefsExtractor = SerializationRefsExtractor @staticmethod def _field_required(field: ObjectField): from apischema import settings return not field.skippable( settings.serialization.exclude_defaults, settings.serialization.exclude_none ) def properties( self, tp: AnyType, fields: Sequence[ObjectField] ) -> Sequence[Property]: from apischema import settings return [ Property( AliasedStr(field.alias), field.name, field.ordering, required, self.visit_field(tp, field, required), ) for field in fields if not field.is_aggregate for required in [ field.required if is_typed_dict(get_origin_or_type(tp)) else not field.skippable( settings.serialization.exclude_defaults, settings.serialization.exclude_none, ) ] ] + [ Property( AliasedStr(serialized.alias), serialized.func.__name__, serialized.ordering, not is_union_of(types["return"], UndefinedType), full_schema( self.visit_with_conv(types["return"], serialized.conversion), get_method_schema(tp, serialized), ), ) for serialized, types in get_serialized_methods(tp) ] TypesWithConversion = Collection[Union[AnyType, Tuple[AnyType, AnyConversion]]] def _default_version( version: Optional[JsonSchemaVersion], ref_factory: Optional[RefFactory], all_refs: Optional[bool], ) -> Tuple[JsonSchemaVersion, RefFactory, bool]: from apischema import settings if version is None: version = settings.json_schema_version if ref_factory is None: ref_factory = version.ref_factory if all_refs is None: all_refs = version.all_refs return version, ref_factory, all_refs def _extract_refs( types: TypesWithConversion, default_conversion: DefaultConversion, builder: Type[SchemaBuilder], all_refs: bool, ) -> Mapping[str, AnyType]: refs: Refs = {} for tp in types: conversion = None if isinstance(tp, tuple): tp, conversion = tp builder.RefsExtractor(default_conversion, refs).visit_with_conv(tp, conversion) filtr = (lambda count: True) if all_refs else (lambda count: count > 1) return {ref: tp for ref, (tp, count) in refs.items() if filtr(count)} def _refs_schema( builder: Type[SchemaBuilder], default_conversion: DefaultConversion, refs: Mapping[str, AnyType], ref_factory: RefFactory, additional_properties: bool, ) -> Mapping[str, JsonSchema]: return { ref: builder( additional_properties, default_conversion, True, ref_factory, refs ).visit(tp) for ref, tp in refs.items() } def _schema( builder: Type[SchemaBuilder], tp: AnyType, schema: Optional[Schema], conversion: 
Optional[AnyConversion], default_conversion: DefaultConversion, version: Optional[JsonSchemaVersion], aliaser: Optional[Aliaser], ref_factory: Optional[RefFactory], all_refs: Optional[bool], with_schema: bool, additional_properties: Optional[bool], ) -> Mapping[str, Any]: from apischema import settings add_defs = ref_factory is None if aliaser is None: aliaser = settings.aliaser if additional_properties is None: additional_properties = settings.additional_properties version, ref_factory, all_refs = _default_version(version, ref_factory, all_refs) refs = _extract_refs([(tp, conversion)], default_conversion, builder, all_refs) json_schema = builder( additional_properties, default_conversion, False, ref_factory, refs ).visit_with_conv(tp, conversion) json_schema = full_schema(json_schema, schema) if add_defs and version.defs: defs = _refs_schema( builder, default_conversion, refs, ref_factory, additional_properties ) if defs: json_schema["$defs"] = defs result = serialize( JsonSchema, json_schema, aliaser=aliaser, check_type=True, conversion=version.conversion, default_conversion=converters.default_serialization, fall_back_on_any=True, ) if with_schema and version.schema is not None: result["$schema"] = version.schema return result def deserialization_schema( tp: AnyType, *, additional_properties: Optional[bool] = None, aliaser: Optional[Aliaser] = None, all_refs: Optional[bool] = None, conversion: Optional[AnyConversion] = None, default_conversion: Optional[DefaultConversion] = None, ref_factory: Optional[RefFactory] = None, schema: Optional[Schema] = None, version: Optional[JsonSchemaVersion] = None, with_schema: bool = True, ) -> Mapping[str, Any]: from apischema import settings return _schema( DeserializationSchemaBuilder, tp, schema, conversion, default_conversion or settings.deserialization.default_conversion, version, aliaser, ref_factory, all_refs, with_schema, additional_properties, ) def serialization_schema( tp: AnyType, *, additional_properties: Optional[bool] = None, all_refs: Optional[bool] = None, aliaser: Optional[Aliaser] = None, conversion: Optional[AnyConversion] = None, default_conversion: Optional[DefaultConversion] = None, ref_factory: Optional[RefFactory] = None, schema: Optional[Schema] = None, version: Optional[JsonSchemaVersion] = None, with_schema: bool = True, ) -> Mapping[str, Any]: from apischema import settings return _schema( SerializationSchemaBuilder, tp, schema, conversion, default_conversion or settings.serialization.default_conversion, version, aliaser, ref_factory, all_refs, with_schema, additional_properties, ) def _defs_schema( types: TypesWithConversion, default_conversion: DefaultConversion, builder: Type[SchemaBuilder], ref_factory: RefFactory, all_refs: bool, additional_properties: bool, ) -> Mapping[str, JsonSchema]: return _refs_schema( builder, default_conversion, _extract_refs(types, default_conversion, builder, all_refs), ref_factory, additional_properties, ) def _set_missing_properties( schema: JsonSchema, properties: Optional[Mapping[str, JsonSchema]], key: str ) -> JsonSchema: if properties is None: return schema missing = {name: prop for name, prop in properties.items() if prop.get(key, False)} schema.setdefault("properties", {}).update(missing) return schema def compare_schemas(write: Any, read: Any) -> Any: if isinstance(write, Mapping): if not isinstance(read, Mapping): raise ValueError merged: Dict[str, Any] = {} for key in write.keys() | read.keys(): if key in write and key in read: if key == "properties": merged[key] = {} for prop in 
write[key].keys() | read[key].keys(): if prop in write[key] and prop in read[key]: merged[key][prop] = compare_schemas( write[key][prop], read[key][prop] ) elif prop in write[key]: merged[key][prop] = {**write[key][prop], "writeOnly": True} else: merged[key][prop] = {**read[key][prop], "readOnly": True} elif key in { "required", "dependentRequired", "additionalProperties", "patternProperties", }: merged[key] = write[key] else: merged[key] = compare_schemas(write[key], read[key]) else: merged[key] = write.get(key, read.get(key)) return merged elif isinstance(read, Sequence) and not isinstance(read, str): if not isinstance(read, Sequence) or len(write) != len(read): raise ValueError return [compare_schemas(write[i], read[i]) for i in range(len(write))] else: if not write == read: raise ValueError return write def definitions_schema( *, deserialization: TypesWithConversion = (), serialization: TypesWithConversion = (), default_deserialization: Optional[DefaultConversion] = None, default_serialization: Optional[DefaultConversion] = None, aliaser: Optional[Aliaser] = None, version: Optional[JsonSchemaVersion] = None, ref_factory: Optional[RefFactory] = None, all_refs: Optional[bool] = None, additional_properties: Optional[bool] = None, ) -> Mapping[str, Mapping[str, Any]]: from apischema import settings if additional_properties is None: additional_properties = settings.additional_properties if aliaser is None: aliaser = settings.aliaser if default_deserialization is None: default_deserialization = settings.deserialization.default_conversion if default_serialization is None: default_serialization = settings.serialization.default_conversion version, ref_factory, all_refs = _default_version(version, ref_factory, all_refs) deserialization_schemas = _defs_schema( deserialization, default_deserialization, DeserializationSchemaBuilder, ref_factory, all_refs, additional_properties, ) serialization_schemas = _defs_schema( serialization, default_serialization, SerializationSchemaBuilder, ref_factory, all_refs, additional_properties, ) schemas = {} for ref in deserialization_schemas.keys() | serialization_schemas.keys(): if ref in deserialization_schemas and ref in serialization_schemas: try: schemas[ref] = compare_schemas( deserialization_schemas[ref], serialization_schemas[ref] ) except ValueError: raise TypeError( f"Reference {ref} has different schemas" f" for deserialization and serialization" ) else: schemas[ref] = deserialization_schemas.get( ref, serialization_schemas.get(ref) ) return { ref: serialize( JsonSchema, schema, aliaser=aliaser, fall_back_on_any=True, check_type=True, conversion=version.conversion, default_conversion=converters.default_serialization, ) for ref, schema in schemas.items() } apischema-0.18.3/apischema/json_schema/types.py000066400000000000000000000073111467672046000215140ustar00rootroot00000000000000from enum import Enum from functools import wraps from inspect import signature from typing import ( Any, Callable, Collection, Dict, Mapping, Optional, Pattern, Sequence, Type, TypeVar, Union, cast, ) from apischema.conversions import Conversion, serializer from apischema.types import NoneType, Number, Undefined from apischema.validation.errors import ValidationError class JsonType(str, Enum): NULL = "null" BOOLEAN = "boolean" STRING = "string" INTEGER = "integer" NUMBER = "number" ARRAY = "array" OBJECT = "object" @staticmethod def from_type(cls: Type) -> "JsonType": try: return TYPE_TO_JSON_TYPE[cls] except KeyError: # pragma: no cover raise TypeError(f"Invalid JSON type 
{cls}") def __repr__(self): return f"'{self.value}'" # pragma: no cover def __str__(self): return self.value TYPE_TO_JSON_TYPE = { NoneType: JsonType.NULL, bool: JsonType.BOOLEAN, str: JsonType.STRING, int: JsonType.INTEGER, float: JsonType.NUMBER, list: JsonType.ARRAY, dict: JsonType.OBJECT, } def bad_type(data: Any, *expected: type) -> ValidationError: msgs = [ f"expected type {JsonType.from_type(tp)}," f" found {JsonType.from_type(data.__class__)}" for tp in expected ] return ValidationError(msgs) class JsonSchema(Dict[str, Any]): pass serializer(Conversion(dict, source=JsonSchema)) Func = TypeVar("Func", bound=Callable) def json_schema_kwargs(func: Func) -> Func: @wraps(func) def wrapper(**kwargs): type_ = kwargs.get("type") if isinstance(type_, Sequence): if JsonType.INTEGER in type_ and JsonType.NUMBER in type_: kwargs["type"] = [t for t in type_ if t != JsonType.INTEGER] return JsonSchema( (k, v) for k, v in kwargs.items() if k not in _json_schema_params or ( v != _json_schema_params[k].default if _json_schema_params[k].default is not True else v not in (True, JsonSchema()) ) ) _json_schema_params = signature(func).parameters return cast(Func, wrapper) @json_schema_kwargs # type: ignore def json_schema( *, additionalProperties: Union[bool, JsonSchema] = True, allOf: Sequence[JsonSchema] = [], anyOf: Sequence[JsonSchema] = [], const: Any = Undefined, default: Any = Undefined, dependentRequired: Mapping[str, Collection[str]] = {}, deprecated: bool = False, description: Optional[str] = None, enum: Sequence[Any] = [], exclusiveMaximum: Optional[Number] = None, exclusiveMinimum: Optional[Number] = None, examples: Optional[Sequence[Any]] = None, format: Optional[str] = None, items: Union[bool, JsonSchema] = True, maximum: Optional[Number] = None, minimum: Optional[Number] = None, maxItems: Optional[int] = None, minItems: Optional[int] = None, maxLength: Optional[int] = None, minLength: Optional[int] = None, maxProperties: Optional[int] = None, minProperties: Optional[int] = None, multipleOf: Optional[Number] = None, oneOf: Sequence[JsonSchema] = [], pattern: Optional[Pattern] = None, patternProperties: Mapping[Pattern, JsonSchema] = {}, prefixItems: Sequence[JsonSchema] = [], properties: Mapping[str, JsonSchema] = {}, readOnly: bool = False, required: Sequence[str] = [], title: Optional[str] = None, type: Optional[Union[JsonType, Sequence[JsonType]]] = None, uniqueItems: bool = False, unevaluatedProperties: Union[bool, JsonSchema] = True, writeOnly: bool = False, ) -> JsonSchema: ... 
apischema-0.18.3/apischema/json_schema/versions.py000066400000000000000000000076541467672046000222320ustar00rootroot00000000000000from dataclasses import dataclass from typing import Any, Callable, ClassVar, Dict, Optional from apischema.conversions import Conversion, LazyConversion from apischema.json_schema.types import JsonSchema, JsonType RefFactory = Callable[[str], str] def ref_prefix(prefix: str) -> RefFactory: if not prefix.endswith("/"): prefix += "/" return lambda ref: prefix + ref def isolate_ref(schema: Dict[str, Any]): if "$ref" in schema and len(schema) > 1: schema.setdefault("allOf", []).append({"$ref": schema.pop("$ref")}) def to_json_schema_2019_09(schema: JsonSchema) -> Dict[str, Any]: result = schema.copy() if "prefixItems" in result: if "items" in result: result["additionalItems"] = result.pop("items") result["items"] = result["prefixItems"] return result def to_json_schema_7(schema: JsonSchema) -> Dict[str, Any]: result = to_json_schema_2019_09(schema) isolate_ref(result) if "$defs" in result: result["definitions"] = {**result.pop("$defs"), **result.get("definitions", {})} if "dependentRequired" in result: result["dependencies"] = { **result.pop("dependentRequired"), **result.get("dependencies", {}), } return result OPEN_API_3_0_UNSUPPORTED = [ "dependentRequired", "unevaluatedProperties", "additionalItems", ] def to_open_api_3_0(schema: JsonSchema) -> Dict[str, Any]: result = to_json_schema_2019_09(schema) for key in OPEN_API_3_0_UNSUPPORTED: result.pop(key, ...) isolate_ref(result) if {"type": "null"} in result.get("anyOf", ()): result.setdefault("nullable", True) result["anyOf"] = [a for a in result["anyOf"] if a != {"type": "null"}] if "type" in result and not isinstance(result["type"], (str, JsonType)): if "null" in result["type"]: result.setdefault("nullable", True) result["type"] = [t for t in result["type"] if t != "null"] if len(result["type"]) > 1: result.setdefault("anyOf", []).extend( {"type": t} for t in result.pop("type") ) else: result["type"] = result["type"][0] if "examples" in result: result.setdefault("example", result.pop("examples")[0]) if "const" in result: result.setdefault("enum", [result.pop("const")]) return result @dataclass class JsonSchemaVersion: schema: Optional[str] = None ref_prefix: str = "" serialization: Optional[Callable] = None all_refs: bool = True defs: bool = True @property def conversion(self) -> Optional[Conversion]: if self.serialization: # Recursive conversion pattern tmp = None conversion = Conversion( self.serialization, sub_conversion=LazyConversion(lambda: tmp) ) tmp = conversion return conversion else: return None @property def ref_factory(self) -> RefFactory: return ref_prefix(self.ref_prefix) DRAFT_2020_12: ClassVar["JsonSchemaVersion"] DRAFT_2019_09: ClassVar["JsonSchemaVersion"] DRAFT_7: ClassVar["JsonSchemaVersion"] OPEN_API_3_0: ClassVar["JsonSchemaVersion"] OPEN_API_3_1: ClassVar["JsonSchemaVersion"] JsonSchemaVersion.DRAFT_2020_12 = JsonSchemaVersion( "http://json-schema.org/draft/2020-12/schema#", "#/$defs/", None, False, True ) JsonSchemaVersion.DRAFT_2019_09 = JsonSchemaVersion( "http://json-schema.org/draft/2020-12/schema#", "#/$defs/", to_json_schema_2019_09, False, True, ) JsonSchemaVersion.DRAFT_7 = JsonSchemaVersion( "http://json-schema.org/draft-07/schema#", "#/definitions/", to_json_schema_7, False, True, ) JsonSchemaVersion.OPEN_API_3_0 = JsonSchemaVersion( None, "#/components/schemas/", to_open_api_3_0, True, False ) JsonSchemaVersion.OPEN_API_3_1 = JsonSchemaVersion( None, "#/components/schemas/", 
None, True, False ) apischema-0.18.3/apischema/metadata/000077500000000000000000000000001467672046000172635ustar00rootroot00000000000000apischema-0.18.3/apischema/metadata/__init__.py000066400000000000000000000011011467672046000213650ustar00rootroot00000000000000__all__ = [ "alias", "conversion", "default_as_set", "fall_back_on_default", "flatten", "init_var", "none_as_undefined", "order", "post_init", "properties", "required", "schema", "skip", "validators", ] from apischema.aliases import alias from apischema.ordering import order from apischema.schemas import schema from .implem import ( conversion, default_as_set, fall_back_on_default, flatten, init_var, none_as_undefined, post_init, properties, required, skip, validators, ) apischema-0.18.3/apischema/metadata/implem.py000066400000000000000000000054701467672046000211260ustar00rootroot00000000000000import re from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Callable, Optional, Pattern, Tuple, Union from apischema.metadata.keys import ( CONVERSION_METADATA, DEFAULT_AS_SET_METADATA, FALL_BACK_ON_DEFAULT_METADATA, FLATTEN_METADATA, INIT_VAR_METADATA, NONE_AS_UNDEFINED_METADATA, POST_INIT_METADATA, PROPERTIES_METADATA, REQUIRED_METADATA, SKIP_METADATA, VALIDATORS_METADATA, ) from apischema.types import AnyType, Metadata, MetadataImplem, MetadataMixin if TYPE_CHECKING: from apischema.conversions.conversions import AnyConversion from apischema.validation.validators import Validator def simple_metadata(key: str) -> Metadata: return MetadataImplem({key: ...}) @dataclass(frozen=True) class ConversionMetadata(MetadataMixin): key = CONVERSION_METADATA deserialization: Optional["AnyConversion"] = None serialization: Optional["AnyConversion"] = None conversion = ConversionMetadata default_as_set = simple_metadata(DEFAULT_AS_SET_METADATA) fall_back_on_default = simple_metadata(FALL_BACK_ON_DEFAULT_METADATA) flatten = simple_metadata(FLATTEN_METADATA) flattened = flatten merged = flatten def init_var(tp: AnyType) -> Metadata: return MetadataImplem({INIT_VAR_METADATA: tp}) none_as_undefined = simple_metadata(NONE_AS_UNDEFINED_METADATA) post_init = simple_metadata(POST_INIT_METADATA) class PropertiesMetadata(dict, Metadata): # type: ignore def __init__(self): super().__init__({PROPERTIES_METADATA: None}) def __call__( self, pattern: Union[str, Pattern, "ellipsis"] # noqa: F821 ) -> Metadata: if pattern is not ...: pattern = re.compile(pattern) return MetadataImplem({PROPERTIES_METADATA: pattern}) properties = PropertiesMetadata() required = simple_metadata(REQUIRED_METADATA) @dataclass(frozen=True) class SkipMetadata(MetadataMixin): key = SKIP_METADATA deserialization: bool = False serialization: bool = False serialization_default: bool = False serialization_if: Optional[Callable[[Any], Any]] = None def __call__( self, deserialization: bool = False, serialization: bool = False, serialization_default: bool = False, serialization_if: Optional[Callable[[Any], Any]] = None, ) -> "SkipMetadata": return SkipMetadata( deserialization, serialization, serialization_default, serialization_if ) skip = SkipMetadata(deserialization=True, serialization=True) @dataclass(frozen=True) class ValidatorsMetadata(MetadataMixin): key = VALIDATORS_METADATA validators: Tuple["Validator", ...] 
def validators(*validator: Callable) -> ValidatorsMetadata: from apischema.validation.validators import Validator return ValidatorsMetadata(tuple(map(Validator, validator))) apischema-0.18.3/apischema/metadata/keys.py000066400000000000000000000016001467672046000206050ustar00rootroot00000000000000from apischema.utils import PREFIX def metadata_key(key: str) -> str: return PREFIX + key ALIAS_METADATA = metadata_key("alias") ALIAS_NO_OVERRIDE_METADATA = metadata_key("alias_no_override") CONVERSION_METADATA = metadata_key("conversion") DEFAULT_AS_SET_METADATA = metadata_key("default_as_set") DISCRIMINATOR_METADATA = metadata_key("discriminator") FALL_BACK_ON_DEFAULT_METADATA = metadata_key("fall_back_on_default") FLATTEN_METADATA = metadata_key("flattened") INIT_VAR_METADATA = metadata_key("init_var") NONE_AS_UNDEFINED_METADATA = metadata_key("none_as_undefined") ORDERING_METADATA = metadata_key("ordering") POST_INIT_METADATA = metadata_key("post_init") PROPERTIES_METADATA = metadata_key("properties") REQUIRED_METADATA = metadata_key("required") SCHEMA_METADATA = metadata_key("schema") SKIP_METADATA = metadata_key("skip") VALIDATORS_METADATA = metadata_key("validators") apischema-0.18.3/apischema/methods.py000066400000000000000000000106251467672046000175240ustar00rootroot00000000000000import inspect from functools import wraps from inspect import signature from types import FunctionType from typing import Callable, Generic, Optional, Type, Union from apischema.typing import get_type_hints from apischema.utils import PREFIX, T, get_origin_or_type2 MethodOrProperty = Union[Callable, property] def _method_location(method: MethodOrProperty) -> Optional[Type]: if isinstance(method, property): assert method.fget is not None method = method.fget while hasattr(method, "__wrapped__"): method = method.__wrapped__ assert isinstance(method, FunctionType) global_name, *class_path = method.__qualname__.split(".")[:-1] if global_name not in method.__globals__: return None location = method.__globals__[global_name] for attr in class_path: if hasattr(location, attr): location = getattr(location, attr) else: break return location def is_method(method: MethodOrProperty) -> bool: """Return if the function is method/property declared in a class""" return ( isinstance(method, property) and method.fget is not None and is_method(method.fget) ) or ( isinstance(method, FunctionType) and method.__name__ != method.__qualname__ and isinstance(_method_location(method), (type, type(None))) and next(iter(inspect.signature(method).parameters), None) == "self" ) def method_class(method: MethodOrProperty) -> Optional[Type]: cls = _method_location(method) return cls if isinstance(cls, type) else None METHOD_WRAPPER_ATTR = f"{PREFIX}method_wrapper" def method_wrapper(method: MethodOrProperty, name: Optional[str] = None) -> Callable: if isinstance(method, property): assert method.fget is not None name = name or method.fget.__name__ @wraps(method.fget) def wrapper(self): assert name is not None return getattr(self, name) else: if hasattr(method, METHOD_WRAPPER_ATTR): return method name = name or method.__name__ if list(signature(method).parameters) == ["self"]: @wraps(method) def wrapper(self): assert name is not None return getattr(self, name)() else: @wraps(method) def wrapper(self, *args, **kwargs): assert name is not None return getattr(self, name)(*args, **kwargs) setattr(wrapper, METHOD_WRAPPER_ATTR, True) return wrapper class MethodWrapper(Generic[T]): def __init__(self, method: T): self._method = method def getter(self, 
func: Callable): self._method = self._method.getter(func) # type: ignore return self def setter(self, func: Callable): self._method = self._method.setter(func) # type: ignore return self def deleter(self, func: Callable): self._method = self._method.deleter(func) # type: ignore return self def __set_name__(self, owner, name): setattr(owner, name, self._method) def __call__(self, *args, **kwargs): raise RuntimeError("Method __set_name__ has not been called") def method_registerer( arg: Optional[Callable], owner: Optional[Type], register: Callable[[Callable, Type, str], None], ): def decorator(method: MethodOrProperty): if owner is None and is_method(method) and method_class(method) is None: class Descriptor(MethodWrapper[MethodOrProperty]): def __set_name__(self, owner, name): super().__set_name__(owner, name) register(method_wrapper(method), owner, name) return Descriptor(method) else: owner2 = owner if is_method(method): if owner2 is None: owner2 = method_class(method) method = method_wrapper(method) if owner2 is None: try: hints = get_type_hints(method) owner2 = get_origin_or_type2(hints[next(iter(hints))]) except (KeyError, StopIteration): raise TypeError("First parameter of method must be typed") from None assert not isinstance(method, property) register(method, owner2, method.__name__) return method return decorator if arg is None else decorator(arg) apischema-0.18.3/apischema/objects/000077500000000000000000000000001467672046000171345ustar00rootroot00000000000000apischema-0.18.3/apischema/objects/__init__.py000066400000000000000000000005731467672046000212520ustar00rootroot00000000000000__all__ = [ "AliasedStr", "ObjectField", "get_alias", "get_field", "object_deserialization", "object_fields", "object_serialization", "set_object_fields", ] from .conversions import object_deserialization, object_serialization from .fields import ObjectField, set_object_fields from .getters import AliasedStr, get_alias, get_field, object_fields apischema-0.18.3/apischema/objects/conversions.py000066400000000000000000000133701467672046000220620ustar00rootroot00000000000000import inspect from dataclasses import Field, replace from types import new_class from typing import ( Any, Callable, Dict, Generic, Iterable, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from apischema.methods import is_method, method_wrapper from apischema.objects.fields import MISSING_DEFAULT, ObjectField, set_object_fields from apischema.objects.getters import object_fields, parameters_as_fields from apischema.type_names import type_name from apischema.typing import get_type_hints from apischema.utils import ( empty_dict, substitute_type_vars, subtyping_substitution, to_pascal_case, with_parameters, ) T = TypeVar("T") def object_deserialization( func: Callable[..., T], *input_class_modifiers: Callable[[type], Any], parameters_metadata: Optional[Mapping[str, Mapping]] = None, ) -> Any: fields = parameters_as_fields(func, parameters_metadata) types = get_type_hints(func, include_extras=True) if "return" not in types: raise TypeError("Object deserialization must be typed") return_type = types["return"] bases = () if getattr(return_type, "__parameters__", ()): bases = (Generic[return_type.__parameters__],) # type: ignore elif func.__name__ != "": input_class_modifiers = ( type_name(to_pascal_case(func.__name__)), *input_class_modifiers, ) def __init__(self, **kwargs): self.kwargs = kwargs input_cls = new_class( to_pascal_case(func.__name__), bases, exec_body=lambda ns: ns.update({"__init__": __init__}), ) for modifier in 
input_class_modifiers: modifier(input_cls) set_object_fields(input_cls, fields) if any(f.additional_properties for f in fields): kwargs_param = next(f.name for f in fields if f.additional_properties) def wrapper(input): kwargs = input.kwargs.copy() kwargs.update(kwargs.pop(kwargs_param)) return func(**kwargs) else: def wrapper(input): return func(**input.kwargs) wrapper.__annotations__["input"] = with_parameters(input_cls) wrapper.__annotations__["return"] = return_type return wrapper def _fields_and_init( cls: type, fields_and_methods: Union[Iterable[Any], Callable[[], Iterable[Any]]] ) -> Tuple[Sequence[ObjectField], Callable[[Any, Any], None]]: fields = object_fields(cls, serialization=True) output_fields: Dict[str, ObjectField] = {} methods = [] if callable(fields_and_methods): fields_and_methods = fields_and_methods() for elt in fields_and_methods: if elt is ...: output_fields.update(fields) continue if isinstance(elt, tuple): elt, metadata = elt else: metadata = empty_dict if not isinstance(metadata, Mapping): raise TypeError(f"Invalid metadata {metadata}") if isinstance(elt, Field): elt = elt.name if isinstance(elt, str) and elt in fields: elt = fields[elt] if is_method(elt): elt = method_wrapper(elt) if isinstance(elt, ObjectField): if metadata: output_fields[elt.name] = replace( elt, metadata={**elt.metadata, **metadata}, default=MISSING_DEFAULT ) else: output_fields[elt.name] = elt continue elif callable(elt): types = get_type_hints(elt) first_param = next(iter(inspect.signature(elt).parameters)) substitution, _ = subtyping_substitution(types.get(first_param, cls), cls) ret = substitute_type_vars(types.get("return", Any), substitution) output_fields[elt.__name__] = ObjectField( elt.__name__, ret, metadata=metadata ) methods.append((elt, output_fields[elt.__name__])) else: raise TypeError(f"Invalid serialization member {elt} for class {cls}") serialized_methods = [m for m, f in methods if output_fields[f.name] is f] serialized_fields = list( output_fields.keys() - {m.__name__ for m in serialized_methods} ) def __init__(self, obj): for field in serialized_fields: setattr(self, field, getattr(obj, field)) for method in serialized_methods: setattr(self, method.__name__, method(obj)) return tuple(output_fields.values()), __init__ def object_serialization( cls: Type[T], fields_and_methods: Union[Iterable[Any], Callable[[], Iterable[Any]]], *output_class_modifiers: Callable[[type], Any], ) -> Callable[[T], Any]: generic, bases = cls, () if getattr(cls, "__parameters__", ()): generic = cls[cls.__parameters__] # type: ignore bases = Generic[cls.__parameters__] # type: ignore elif ( callable(fields_and_methods) and fields_and_methods.__name__ != "" and not getattr(cls, "__parameters__", ()) ): output_class_modifiers = ( type_name(to_pascal_case(fields_and_methods.__name__)), *output_class_modifiers, ) def __init__(self, obj): _, new_init = _fields_and_init(cls, fields_and_methods) new_init.__annotations__ = {"obj": generic} output_cls.__init__ = new_init # type: ignore new_init(self, obj) __init__.__annotations__ = {"obj": generic} output_cls = new_class( f"{cls.__name__}Serialization", bases, exec_body=lambda ns: ns.update({"__init__": __init__}), ) for modifier in output_class_modifiers: modifier(output_cls) set_object_fields(output_cls, lambda: _fields_and_init(cls, fields_and_methods)[0]) return output_cls apischema-0.18.3/apischema/objects/fields.py000066400000000000000000000174171467672046000207660ustar00rootroot00000000000000from collections import ChainMap from dataclasses import 
MISSING, Field, InitVar, dataclass, field from enum import Enum, auto from types import FunctionType from typing import ( TYPE_CHECKING, Any, Callable, Iterable, Mapping, MutableMapping, NoReturn, Optional, Pattern, Sequence, Union, cast, ) from apischema.cache import CacheAwareDict from apischema.conversions.conversions import AnyConversion from apischema.metadata.implem import ( ConversionMetadata, SkipMetadata, ValidatorsMetadata, ) from apischema.metadata.keys import ( ALIAS_METADATA, ALIAS_NO_OVERRIDE_METADATA, CONVERSION_METADATA, DEFAULT_AS_SET_METADATA, FALL_BACK_ON_DEFAULT_METADATA, FLATTEN_METADATA, NONE_AS_UNDEFINED_METADATA, ORDERING_METADATA, POST_INIT_METADATA, PROPERTIES_METADATA, REQUIRED_METADATA, SCHEMA_METADATA, SKIP_METADATA, VALIDATORS_METADATA, ) from apischema.types import AnyType, NoneType, UndefinedType from apischema.typing import get_args, is_annotated from apischema.utils import ( LazyValue, empty_dict, get_args2, is_union_of, keep_annotations, ) if TYPE_CHECKING: from apischema.ordering import Ordering from apischema.schemas import Schema from apischema.validation.validators import Validator class FieldKind(Enum): NORMAL = auto() READ_ONLY = auto() WRITE_ONLY = auto() # Cannot reuse MISSING for dataclass field because it would be interpreted as no default MISSING_DEFAULT = object() @dataclass(frozen=True) class ObjectField: name: str type: AnyType required: bool = True metadata: Mapping[str, Any] = field(default_factory=lambda: empty_dict) default: InitVar[Any] = MISSING_DEFAULT default_factory: Optional[Callable[[], Any]] = None kind: FieldKind = FieldKind.NORMAL def __post_init__(self, default: Any): if REQUIRED_METADATA in self.full_metadata: object.__setattr__(self, "required", True) if self.default_factory is MISSING: object.__setattr__(self, "default_factory", None) if not self.required and self.default_factory is None: if default is MISSING_DEFAULT or default is MISSING: raise ValueError("Missing default for non-required ObjectField") object.__setattr__(self, "default_factory", LazyValue(default)) if self.none_as_undefined and is_union_of(self.type, NoneType): new_type = Union[tuple(a for a in get_args2(self.type) if a != NoneType)] # type: ignore object.__setattr__(self, "type", keep_annotations(new_type, self.type)) @property def full_metadata(self) -> Mapping[str, Any]: if not is_annotated(self.type): return self.metadata return ChainMap( cast(MutableMapping, self.metadata), *( cast(MutableMapping, arg) for arg in reversed(get_args(self.type)[1:]) if isinstance(arg, Mapping) ), ) @property def additional_properties(self) -> bool: return self.full_metadata.get(PROPERTIES_METADATA, ...) 
is None @property def alias(self) -> str: return self.full_metadata.get(ALIAS_METADATA, self.name) @property def override_alias(self) -> bool: return ALIAS_NO_OVERRIDE_METADATA not in self.full_metadata @property def _conversion(self) -> Optional[ConversionMetadata]: return self.metadata.get(CONVERSION_METADATA) @property def default_as_set(self) -> bool: return DEFAULT_AS_SET_METADATA in self.full_metadata @property def deserialization(self) -> Optional[AnyConversion]: conversion = self._conversion return conversion.deserialization if conversion is not None else None @property def fall_back_on_default(self) -> bool: return ( FALL_BACK_ON_DEFAULT_METADATA in self.full_metadata and self.default_factory is not None ) @property def flattened(self) -> bool: return FLATTEN_METADATA in self.full_metadata def get_default(self) -> Any: if self.required: raise RuntimeError("Field is required") assert self.default_factory is not None return self.default_factory() @property def is_aggregate(self) -> bool: return ( self.flattened or self.additional_properties or self.pattern_properties is not None ) @property def none_as_undefined(self): return NONE_AS_UNDEFINED_METADATA in self.full_metadata @property def ordering(self) -> Optional["Ordering"]: return self.full_metadata.get(ORDERING_METADATA) @property def post_init(self) -> bool: return POST_INIT_METADATA in self.full_metadata @property def pattern_properties(self) -> Union[Pattern, "ellipsis", None]: # noqa: F821 return self.full_metadata.get(PROPERTIES_METADATA) @property def schema(self) -> Optional["Schema"]: return self.metadata.get(SCHEMA_METADATA) @property def serialization(self) -> Optional[AnyConversion]: conversion = self._conversion return conversion.serialization if conversion is not None else None @property def skip(self) -> SkipMetadata: return self.metadata.get(SKIP_METADATA, SkipMetadata()) def skippable(self, default: bool, none: bool) -> bool: return bool( self.skip.serialization_if or is_union_of(self.type, UndefinedType) or ( self.default_factory is not None and (self.skip.serialization_default or default) ) or self.none_as_undefined or (none and is_union_of(self.type, NoneType)) ) @property def undefined(self) -> bool: return is_union_of(self.type, UndefinedType) @property def validators(self) -> Sequence["Validator"]: if VALIDATORS_METADATA in self.metadata: return cast( ValidatorsMetadata, self.metadata[VALIDATORS_METADATA] ).validators else: return () FieldOrName = Union[str, ObjectField, Field] def _bad_field(obj: Any, methods: bool) -> NoReturn: method_types = "property/types.FunctionType" if methods else "" raise TypeError( f"Expected dataclasses.Field/apischema.ObjectField/str{method_types}, found {obj}" ) def check_field_or_name(field_or_name: Any, *, methods: bool = False): method_types = (property, FunctionType) if methods else () if not isinstance(field_or_name, (str, ObjectField, Field, *method_types)): _bad_field(field_or_name, methods) def get_field_name(field_or_name: Any, *, methods: bool = False) -> str: if isinstance(field_or_name, (Field, ObjectField)): return field_or_name.name elif isinstance(field_or_name, str): return field_or_name elif ( methods and isinstance(field_or_name, property) and field_or_name.fget is not None ): return field_or_name.fget.__name__ elif methods and isinstance(field_or_name, FunctionType): return field_or_name.__name__ else: _bad_field(field_or_name, methods) _class_fields: MutableMapping[ type, Callable[[], Sequence[ObjectField]] ] = CacheAwareDict({}) def set_object_fields( cls: 
type, fields: Union[Iterable[ObjectField], Callable[[], Sequence[ObjectField]], None], ): if fields is None: _class_fields.pop(cls, ...) elif callable(fields): _class_fields[cls] = fields else: _class_fields[cls] = lambda fields=tuple(fields): fields # type: ignore def default_object_fields(cls: type) -> Optional[Sequence[ObjectField]]: return _class_fields[cls]() if cls in _class_fields else None apischema-0.18.3/apischema/objects/getters.py000066400000000000000000000100511467672046000211600ustar00rootroot00000000000000import inspect from typing import ( Any, Callable, Mapping, Optional, Sequence, Type, TypeVar, Union, cast, overload, ) from apischema.cache import cache from apischema.metadata import properties from apischema.objects.fields import ObjectField from apischema.objects.visitor import ObjectVisitor from apischema.types import AnyType from apischema.typing import _GenericAlias, get_type_hints from apischema.utils import empty_dict from apischema.visitor import Unsupported @cache def object_fields( tp: AnyType, deserialization: bool = False, serialization: bool = False, default: Optional[ Callable[[type], Optional[Sequence[ObjectField]]] ] = ObjectVisitor._default_fields, ) -> Mapping[str, ObjectField]: class GetFields(ObjectVisitor[Sequence[ObjectField]]): def _skip_field(self, field: ObjectField) -> bool: return (field.skip.deserialization and serialization) or ( field.skip.serialization and deserialization ) @staticmethod def _default_fields(cls: type) -> Optional[Sequence[ObjectField]]: return None if default is None else default(cls) def object( self, cls: Type, fields: Sequence[ObjectField] ) -> Sequence[ObjectField]: return fields try: return {f.name: f for f in GetFields().visit(tp)} except (Unsupported, NotImplementedError): raise TypeError(f"{tp} doesn't have fields") def object_fields2(obj: Any) -> Mapping[str, ObjectField]: return object_fields( obj if isinstance(obj, (type, _GenericAlias)) else obj.__class__ ) T = TypeVar("T") class FieldGetter: def __init__(self, obj: Any): self.fields = object_fields2(obj) def __getattribute__(self, name: str) -> ObjectField: try: return object.__getattribute__(self, "fields")[name] except KeyError: raise AttributeError(name) @overload def get_field(obj: Type[T]) -> T: ... @overload def get_field(obj: T) -> T: ... # Overload because of Mypy issue # https://github.com/python/mypy/issues/9003#issuecomment-667418520 def get_field(obj: Union[Type[T], T]) -> T: return cast(T, FieldGetter(obj)) class AliasedStr(str): pass class AliasGetter: def __init__(self, obj: Any): self.fields = object_fields2(obj) def __getattribute__(self, name: str) -> str: try: return AliasedStr(object.__getattribute__(self, "fields")[name].alias) except KeyError: raise AttributeError(name) @overload def get_alias(obj: Type[T]) -> T: ... @overload def get_alias(obj: T) -> T: ... 
def get_alias(obj: Union[Type[T], T]) -> T: return cast(T, AliasGetter(obj)) def parameters_as_fields( func: Callable, parameters_metadata: Optional[Mapping[str, Mapping]] = None ) -> Sequence[ObjectField]: parameters_metadata = parameters_metadata or {} types = get_type_hints(func, include_extras=True) fields = [] for param_name, param in inspect.signature(func).parameters.items(): if param.kind is inspect.Parameter.POSITIONAL_ONLY: raise TypeError("Positional only parameters are not supported") param_type = types.get(param_name, Any) if param.kind in { inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY, }: field = ObjectField( param_name, param_type, param.default is inspect.Parameter.empty, parameters_metadata.get(param_name, empty_dict), default=param.default, ) fields.append(field) elif param.kind == inspect.Parameter.VAR_KEYWORD: field = ObjectField( param_name, Mapping[str, param_type], # type: ignore False, properties | parameters_metadata.get(param_name, empty_dict), default_factory=dict, ) fields.append(field) return fields apischema-0.18.3/apischema/objects/visitor.py000066400000000000000000000122201467672046000212020ustar00rootroot00000000000000from dataclasses import MISSING, Field from typing import Any, Collection, Mapping, Optional, Sequence from apischema.aliases import Aliaser, get_class_aliaser from apischema.conversions.conversions import AnyConversion from apischema.dataclasses import replace from apischema.metadata.keys import ALIAS_METADATA from apischema.objects.fields import MISSING_DEFAULT, FieldKind, ObjectField from apischema.types import AnyType, Undefined from apischema.typing import get_args from apischema.utils import get_origin_or_type, get_parameters, substitute_type_vars from apischema.visitor import Result, Visitor def object_field_from_field( field: Field, field_type: AnyType, init_var: bool ) -> ObjectField: required = field.default is MISSING and field.default_factory is MISSING if init_var: kind = FieldKind.WRITE_ONLY elif not field.init: kind = FieldKind.READ_ONLY else: kind = FieldKind.NORMAL return ObjectField( field.name, field_type, required, field.metadata, default=field.default, default_factory=field.default_factory, # type: ignore kind=kind, ) def _override_alias(field: ObjectField, aliaser: Aliaser) -> ObjectField: if field.override_alias: return replace( field, metadata={**field.metadata, ALIAS_METADATA: aliaser(field.alias)}, default=MISSING_DEFAULT, ) else: return field class ObjectVisitor(Visitor[Result]): _field_kind_filtered: Optional[FieldKind] = None def _field_conversion(self, field: ObjectField) -> Optional[AnyConversion]: raise NotImplementedError def _skip_field(self, field: ObjectField) -> bool: raise NotImplementedError @staticmethod def _default_fields(cls: type) -> Optional[Sequence[ObjectField]]: from apischema import settings return settings.default_object_fields(cls) def _override_fields( self, tp: AnyType, fields: Sequence[ObjectField] ) -> Sequence[ObjectField]: origin = get_origin_or_type(tp) if isinstance(origin, type): default_fields = self._default_fields(origin) if default_fields is not None: if get_args(tp): sub = dict(zip(get_parameters(origin), get_args(tp))) default_fields = [ replace(f, type=substitute_type_vars(f.type, sub)) for f in default_fields ] return default_fields return fields def _object(self, tp: AnyType, fields: Sequence[ObjectField]) -> Result: fields = [f for f in fields if not self._skip_field(f)] if aliaser := get_class_aliaser(get_origin_or_type(tp)): fields = 
[_override_alias(f, aliaser) for f in fields] return self.object(tp, fields) def dataclass( self, tp: AnyType, types: Mapping[str, AnyType], fields: Sequence[Field], init_vars: Sequence[Field], ) -> Result: by_name = { f.name: object_field_from_field(f, types[f.name], init_var) for field_group, init_var in [(fields, False), (init_vars, True)] for f in field_group } object_fields = [ by_name[name] for name in types if name in by_name and by_name[name].kind != self._field_kind_filtered ] return self._object(tp, self._override_fields(tp, object_fields)) def object(self, tp: AnyType, fields: Sequence[ObjectField]) -> Result: raise NotImplementedError def named_tuple( self, tp: AnyType, types: Mapping[str, AnyType], defaults: Mapping[str, Any] ) -> Result: fields = [ ObjectField(name, type_, name not in defaults, default=defaults.get(name)) for name, type_ in types.items() ] return self._object(tp, self._override_fields(tp, fields)) def typed_dict( self, tp: AnyType, types: Mapping[str, AnyType], required_keys: Collection[str] ) -> Result: fields = [ ObjectField(name, type_, name in required_keys, default=Undefined) for name, type_ in types.items() ] return self._object(tp, self._override_fields(tp, fields)) def unsupported(self, tp: AnyType) -> Result: dummy: list = [] fields = self._override_fields(tp, dummy) return super().unsupported(tp) if fields is dummy else self._object(tp, fields) class DeserializationObjectVisitor(ObjectVisitor[Result]): _field_kind_filtered = FieldKind.READ_ONLY @staticmethod def _field_conversion(field: ObjectField) -> Optional[AnyConversion]: return field.deserialization @staticmethod def _skip_field(field: ObjectField) -> bool: return field.skip.deserialization class SerializationObjectVisitor(ObjectVisitor[Result]): _field_kind_filtered = FieldKind.WRITE_ONLY @staticmethod def _field_conversion(field: ObjectField) -> Optional[AnyConversion]: return field.serialization @staticmethod def _skip_field(field: ObjectField) -> bool: return field.skip.serialization apischema-0.18.3/apischema/ordering.py000066400000000000000000000074051467672046000176740ustar00rootroot00000000000000from collections import defaultdict from dataclasses import dataclass from typing import ( Any, Callable, Collection, Dict, List, Mapping, MutableMapping, Optional, Sequence, TypeVar, overload, ) from apischema.cache import CacheAwareDict from apischema.metadata.keys import ORDERING_METADATA from apischema.types import MetadataMixin from apischema.utils import stop_signature_abuse Cls = TypeVar("Cls", bound=type) @dataclass(frozen=True) class Ordering(MetadataMixin): key = ORDERING_METADATA order: Optional[int] = None after: Optional[Any] = None before: Optional[Any] = None def __post_init__(self): from apischema.objects.fields import check_field_or_name if self.after is not None: check_field_or_name(self.after, methods=True) if self.before is not None: check_field_or_name(self.before, methods=True) _order_overriding: MutableMapping[type, Mapping[Any, Ordering]] = CacheAwareDict({}) @overload def order(__value: int) -> Ordering: ... @overload def order(*, after: Any) -> Ordering: ... @overload def order(*, before: Any) -> Ordering: ... @overload def order(__fields: Sequence[Any]) -> Callable[[Cls], Cls]: ... @overload def order(__override: Mapping[Any, Ordering]) -> Callable[[Cls], Cls]: ... 
def order(__arg=None, *, before=None, after=None): if len([arg for arg in (__arg, before, after) if arg is not None]) != 1: stop_signature_abuse() if isinstance(__arg, Sequence): __arg = {field: order(after=prev) for field, prev in zip(__arg[1:], __arg)} if isinstance(__arg, Mapping): if not all(isinstance(val, Ordering) for val in __arg.values()): stop_signature_abuse() def decorator(cls: Cls) -> Cls: _order_overriding[cls] = __arg return cls return decorator elif __arg is not None and not isinstance(__arg, int): stop_signature_abuse() else: return Ordering(__arg, after, before) def get_order_overriding(cls: type) -> Mapping[str, Ordering]: from apischema.objects.fields import get_field_name return { get_field_name(field, methods=True): ordering for sub_cls in reversed(cls.__mro__) if sub_cls in _order_overriding for field, ordering in _order_overriding[sub_cls].items() } T = TypeVar("T") def sort_by_order( cls: type, elts: Collection[T], name: Callable[[T], str], order: Callable[[T], Optional[Ordering]], ) -> Sequence[T]: from apischema.objects.fields import get_field_name order_overriding = get_order_overriding(cls) groups: Dict[int, List[T]] = defaultdict(list) after: Dict[str, List[T]] = defaultdict(list) before: Dict[str, List[T]] = defaultdict(list) for elt in elts: ordering = order_overriding.get(name(elt), order(elt)) if ordering is None: groups[0].append(elt) elif ordering.order is not None: groups[ordering.order].append(elt) elif ordering.after is not None: after[get_field_name(ordering.after, methods=True)].append(elt) elif ordering.before is not None: before[get_field_name(ordering.before, methods=True)].append(elt) else: raise NotImplementedError if not after and not before and len(groups) == 1: return next(iter(groups.values())) result = [] def add_to_result(elt: T): elt_name = name(elt) for before_elt in before[elt_name]: add_to_result(before_elt) result.append(elt) for after_elt in after[elt_name]: add_to_result(after_elt) for value in sorted(groups): for elt in groups[value]: add_to_result(elt) return result apischema-0.18.3/apischema/py.typed000066400000000000000000000000001467672046000171700ustar00rootroot00000000000000apischema-0.18.3/apischema/recursion.py000066400000000000000000000122551467672046000200730ustar00rootroot00000000000000from enum import Enum from typing import ( Any, Collection, Dict, List, Mapping, Optional, Sequence, Set, Tuple, Type, ) from apischema.cache import cache from apischema.conversions import AnyConversion from apischema.conversions.conversions import DefaultConversion from apischema.conversions.visitor import ( Conv, ConversionsVisitor, Deserialization, DeserializationVisitor, Serialization, SerializationVisitor, ) from apischema.objects import ObjectField from apischema.objects.visitor import ( DeserializationObjectVisitor, ObjectVisitor, SerializationObjectVisitor, ) from apischema.types import AnyType from apischema.utils import Lazy from apischema.visitor import Result RecursionKey = Tuple[AnyType, Optional[AnyConversion]] class RecursiveChecker(ConversionsVisitor[Conv, Any], ObjectVisitor[Any]): def __init__(self, default_conversion: DefaultConversion): super().__init__(default_conversion) self._cache = recursion_cache(self.__class__) self._recursive: Dict[RecursionKey, Set[RecursionKey]] = {} self._all_recursive: Set[RecursionKey] = set() self._guard: List[RecursionKey] = [] self._guard_indices: Dict[RecursionKey, int] = {} def any(self): pass def collection(self, cls: Type[Collection], value_type: AnyType): return 
self.visit(value_type) def enum(self, cls: Type[Enum]): pass def literal(self, values: Sequence[Any]): pass def mapping(self, cls: Type[Mapping], key_type: AnyType, value_type: AnyType): self.visit(key_type) self.visit(value_type) def object(self, tp: AnyType, fields: Sequence[ObjectField]): for field in fields: self.visit_with_conv(field.type, self._field_conversion(field)) def primitive(self, cls: Type): pass def tuple(self, types: Sequence[AnyType]): for tp in types: self.visit(tp) def _visited_union(self, results: Sequence): pass def unsupported(self, tp: AnyType): pass def visit(self, tp: AnyType): rec_key = (tp, self._conversion) if rec_key in self._cache: pass elif rec_key in self._guard_indices: recursive = self._guard[self._guard_indices[rec_key] :] self._recursive.setdefault(rec_key, set()).update(recursive) self._all_recursive.update(recursive) else: self._guard_indices[rec_key] = len(self._guard) self._guard.append(rec_key) try: super().visit(tp) finally: self._guard.pop() self._guard_indices.pop(rec_key) if rec_key in self._recursive: for key in self._recursive[rec_key]: self._cache[key] = True assert self._cache[rec_key] elif rec_key not in self._all_recursive: self._cache[rec_key] = False class DeserializationRecursiveChecker( DeserializationVisitor, DeserializationObjectVisitor, RecursiveChecker[Deserialization], ): pass class SerializationRecursiveChecker( SerializationVisitor, SerializationObjectVisitor, RecursiveChecker[Serialization] ): pass @cache # use @cache for reset def recursion_cache(checker_cls: Type[RecursiveChecker]) -> Dict[RecursionKey, bool]: return {} @cache def is_recursive( tp: AnyType, conversion: Optional[AnyConversion], default_conversion: DefaultConversion, checker_cls: Type[RecursiveChecker], ) -> bool: cache, rec_key = recursion_cache(checker_cls), (tp, conversion) if rec_key not in cache: checker_cls(default_conversion).visit_with_conv(tp, conversion) return cache[rec_key] class RecursiveConversionsVisitor(ConversionsVisitor[Conv, Result]): def __init__(self, default_conversion: DefaultConversion): super().__init__(default_conversion) self._cache: Dict[Tuple[AnyType, Optional[AnyConversion]], Result] = {} self._first_visit = True def _recursive_result(self, lazy: Lazy[Result]) -> Result: raise NotImplementedError def visit_not_recursive(self, tp: AnyType) -> Result: return super().visit(tp) def visit(self, tp: AnyType) -> Result: if is_recursive( tp, self._conversion, self.default_conversion, DeserializationRecursiveChecker # type: ignore if isinstance(self, DeserializationVisitor) else SerializationRecursiveChecker, ): cache_key = tp, self._conversion if cache_key in self._cache: return self._cache[cache_key] result = None def lazy_result(): assert result is not None return result self._cache[cache_key] = self._recursive_result(lazy_result) try: result = super().visit(tp) finally: del self._cache[cache_key] return result elif self._first_visit: self._first_visit = False return super().visit(tp) else: return self.visit_not_recursive(tp) apischema-0.18.3/apischema/schemas.py000066400000000000000000000111301467672046000174740ustar00rootroot00000000000000import re from dataclasses import dataclass, replace from typing import ( Any, Callable, Dict, Literal, Mapping, Optional, Pattern, Sequence, TypeVar, Union, ) from apischema.constraints import Constraints from apischema.metadata.keys import SCHEMA_METADATA from apischema.types import AnyType, MetadataMixin, Number, Undefined from apischema.typing import is_annotated from apischema.utils import 
merge_opts, replace_builtins, to_camel_case T = TypeVar("T") Extra = Union[Mapping[str, Any], Callable[[Dict[str, Any]], None]] Deprecated = Union[bool, str] ContentEncoding = Literal["7bit", "8bit", "binary", "quoted-printable", "base64"] @dataclass(frozen=True) class Schema(MetadataMixin): key = SCHEMA_METADATA title: Optional[str] = None description: Optional[str] = None # use a callable wrapper in order to be hashable default: Optional[Callable[[], Any]] = None examples: Optional[Sequence[Any]] = None format: Optional[str] = None deprecated: Optional[Deprecated] = None media_type: Optional[str] = None encoding: Optional[ContentEncoding] = None constraints: Optional[Constraints] = None extra: Optional[Callable[[Dict[str, Any]], None]] = None override: bool = False child: Optional["Schema"] = None def __call__(self, tp: T) -> T: if is_annotated(tp): raise TypeError("Cannot register schema on Annotated type") _schemas[replace_builtins(tp)] = self return tp def merge_into(self, base_schema: Dict[str, Any]): if self.override: base_schema.clear() elif self.child is not None: self.child.merge_into(base_schema) if self.constraints is not None: self.constraints.merge_into(base_schema) if self.deprecated: base_schema["deprecated"] = bool(self.deprecated) for k in ("title", "description", "examples", "format"): if getattr(self, k) is not None: base_schema[k] = getattr(self, k) for k in ("media_type", "encoding"): if getattr(self, k) is not None: base_schema[to_camel_case("content_" + k)] = getattr(self, k) if self.default is not None: base_schema["default"] = self.default() if self.extra is not None: self.extra(base_schema) def schema( *, # annotations title: Optional[str] = None, description: Optional[str] = None, default: Any = Undefined, examples: Optional[Sequence[Any]] = None, deprecated: Optional[Deprecated] = None, # number min: Optional[Number] = None, max: Optional[Number] = None, exc_min: Optional[Number] = None, exc_max: Optional[Number] = None, mult_of: Optional[Number] = None, # string format: Optional[str] = None, media_type: Optional[str] = None, encoding: Optional[ContentEncoding] = None, min_len: Optional[int] = None, max_len: Optional[int] = None, pattern: Optional[Union[str, Pattern]] = None, # array min_items: Optional[int] = None, max_items: Optional[int] = None, unique: Optional[bool] = None, # objects min_props: Optional[int] = None, max_props: Optional[int] = None, # extra extra: Optional[Extra] = None, override: bool = False, ) -> Schema: default = None if default is Undefined else (lambda d=default: d) if pattern is not None: pattern = re.compile(pattern) if isinstance(extra, Mapping): extra = lambda js, to_update=extra: js.update(to_update) # type: ignore constraints = Constraints( min=min, max=max, exc_min=exc_min, exc_max=exc_max, mult_of=mult_of, min_len=min_len, max_len=max_len, pattern=pattern, min_items=min_items, max_items=max_items, unique=unique, min_props=min_props, max_props=max_props, ) return Schema( title=title, description=description, default=default, examples=examples, format=format, deprecated=deprecated, media_type=media_type, encoding=encoding, constraints=constraints, extra=extra, override=override, ) _schemas: Dict[Any, Schema] = {} def get_schema(tp: AnyType) -> Optional[Schema]: tp = replace_builtins(tp) try: return _schemas.get(tp) except TypeError: return None @merge_opts def merge_schema(default: Schema, override: Schema) -> Schema: if override.override: return override return replace(override, child=merge_schema(default, override.child)) 
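# Hypothetical usage sketch (not part of the package sources): the schema()
# helper defined above returns a Schema that is both a Mapping usable as
# dataclass field metadata and a callable registering constraints on a type.
# The Resource class and the asserted output shape are assumptions made for
# illustration only.
from dataclasses import dataclass, field
from typing import List

from apischema import schema
from apischema.json_schema import deserialization_schema


@dataclass
class Resource:
    # constraints travel through field metadata because Schema is a MetadataMixin
    id: int = field(metadata=schema(min=1, description="primary key"))
    tags: List[str] = field(
        default_factory=list, metadata=schema(max_items=5, unique=True)
    )


resource_schema = deserialization_schema(Resource)
assert resource_schema["properties"]["id"]["minimum"] == 1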
apischema-0.18.3/apischema/serialization/000077500000000000000000000000001467672046000203605ustar00rootroot00000000000000apischema-0.18.3/apischema/serialization/__init__.py000066400000000000000000000601511467672046000224740ustar00rootroot00000000000000import collections.abc from contextlib import suppress from dataclasses import dataclass, is_dataclass from enum import Enum from functools import lru_cache from typing import ( Any, Callable, Collection, Mapping, Optional, Sequence, Type, TypeVar, Union, overload, ) from apischema.aliases import Aliaser from apischema.cache import cache from apischema.conversions.conversions import AnyConversion, DefaultConversion from apischema.conversions.visitor import ( Serialization, SerializationVisitor, sub_conversion, ) from apischema.discriminators import Discriminator, get_inherited_discriminator from apischema.fields import support_fields_set from apischema.metadata.keys import DISCRIMINATOR_METADATA from apischema.objects import AliasedStr, ObjectField, object_fields from apischema.objects.visitor import SerializationObjectVisitor from apischema.ordering import Ordering, sort_by_order from apischema.recursion import RecursiveConversionsVisitor from apischema.serialization.methods import ( AnyFallback, AnyMethod, BaseField, BoolMethod, CheckedTupleMethod, CollectionCheckOnlyMethod, CollectionMethod, ComplexField, ConversionMethod, DictMethod, DiscriminatedAlternative, DiscriminateTypedDict, EnumMethod, Fallback, FloatMethod, IdentityField, IdentityMethod, IntMethod, ListMethod, MappingCheckOnlyMethod, MappingMethod, NoFallback, NoneMethod, ObjectAdditionalMethod, ObjectMethod, OptionalMethod, RecMethod, SerializationMethod, SerializedField, SimpleField, SimpleObjectMethod, StrMethod, TupleCheckOnlyMethod, TupleMethod, TypeCheckIdentityMethod, TypeCheckMethod, UnionAlternative, UnionMethod, ValueMethod, WrapperMethod, ) from apischema.serialization.methods import identity as optimized_identity from apischema.serialization.serialized_methods import get_serialized_methods from apischema.types import AnyType, NoneType, Undefined, UndefinedType from apischema.typing import ( get_args, get_origin, is_new_type, is_type, is_type_var, is_typed_dict, is_union, ) from apischema.utils import ( CollectionOrPredicate, Lazy, as_predicate, get_origin_or_type, get_origin_or_type2, identity, is_union_of, opt_or, ) from apischema.visitor import Unsupported IDENTITY_METHOD = IdentityMethod() METHODS = { identity: IDENTITY_METHOD, list: ListMethod(), dict: DictMethod(), str: StrMethod(), int: IntMethod(), bool: BoolMethod(), float: FloatMethod(), NoneType: NoneMethod(), } SerializationMethodFactory = Callable[[AnyType], SerializationMethod] T = TypeVar("T") def expected_class(tp: AnyType) -> type: origin = get_origin_or_type2(tp) if origin is NoneType: return NoneType elif is_typed_dict(origin): return collections.abc.Mapping elif is_type(origin): return origin elif is_new_type(origin): return expected_class(origin.__supertype__) elif is_type_var(origin) or origin is Any: return object else: raise TypeError(f"{tp} is not supported in union serialization") @dataclass(frozen=True) class PassThroughOptions: any: bool = False collections: bool = False dataclasses: bool = False enums: bool = False tuple: bool = False types: CollectionOrPredicate[AnyType] = () def __post_init__(self): if isinstance(self.types, Collection) and not isinstance(self.types, tuple): object.__setattr__(self, "types", tuple(self.types)) if self.collections and not self.tuple: 
object.__setattr__(self, "tuple", True) @dataclass class FieldToOrder: name: str ordering: Optional[Ordering] field: BaseField CHECK_ONLY_METHODS = ( IdentityMethod, TypeCheckIdentityMethod, CollectionCheckOnlyMethod, MappingCheckOnlyMethod, ) def check_only(method: SerializationMethod) -> bool: """If the method transforms the data""" return ( isinstance(method, CHECK_ONLY_METHODS) or (isinstance(method, TypeCheckMethod) and check_only(method.method)) or (isinstance(method, OptionalMethod) and check_only(method.value_method)) or ( isinstance(method, UnionMethod) and all(check_only(alt.method) for alt in method.alternatives) ) ) class SerializationMethodVisitor( RecursiveConversionsVisitor[Serialization, SerializationMethod], SerializationVisitor[SerializationMethod], SerializationObjectVisitor[SerializationMethod], ): use_cache: bool = True def __init__( self, additional_properties: bool, aliaser: Aliaser, check_type: bool, default_conversion: DefaultConversion, exclude_defaults: bool, exclude_none: bool, exclude_unset: bool, fall_back_on_any: bool, no_copy: bool, pass_through_options: PassThroughOptions, ): super().__init__(default_conversion) self.additional_properties = additional_properties self.aliaser = aliaser self.check_type = check_type self.exclude_defaults = exclude_defaults self.exclude_none = exclude_none self.exclude_unset = exclude_unset self.fall_back_on_any = fall_back_on_any self.no_copy = no_copy self.pass_through_options = pass_through_options self.pass_through_type = as_predicate(self.pass_through_options.types) self._has_skipped_field = False @property def _factory(self) -> SerializationMethodFactory: return serialization_method_factory( self.additional_properties, self.aliaser, self.check_type, self._conversion, self.default_conversion, self.exclude_defaults, self.exclude_none, self.exclude_unset, self.fall_back_on_any, self.no_copy, self.pass_through_options, ) def visit_not_recursive(self, tp: AnyType): return self._factory(tp) if self.use_cache else super().visit_not_recursive(tp) def _recursive_result(self, lazy: Lazy[SerializationMethod]) -> SerializationMethod: return RecMethod(lazy) def discriminate(self, discriminator: Discriminator, types: Sequence[Type]): fallback = self._any_fallback(Union[types]) if all(map(is_typed_dict, types)): with suppress(Exception): field_names = set() for tp in types: for field in object_fields(tp, serialization=True).values(): if field.alias == discriminator.alias: field_names.add(field.name) (field_name,) = field_names return DiscriminateTypedDict( field_name, { key: self.visit(tp) for key, tp in discriminator.get_mapping(types).items() }, fallback, ) else: return UnionMethod( tuple( DiscriminatedAlternative( expected_class(tp), self.visit(tp), discriminator.alias, key ) for key, tp in discriminator.get_mapping(types).items() ), fallback, ) def annotated(self, tp: AnyType, annotations: Sequence[Any]) -> SerializationMethod: for annotation in reversed(annotations): if ( isinstance(annotation, Mapping) and DISCRIMINATOR_METADATA in annotation and is_union(get_origin(tp)) ): return self.discriminate( annotation[DISCRIMINATOR_METADATA], get_args(tp) ) return super().annotated(tp, annotations) def any(self) -> SerializationMethod: if self.pass_through_options.any: return IDENTITY_METHOD return AnyMethod(self._factory) def _any_fallback(self, tp: AnyType) -> Fallback: return AnyFallback(self.any()) if self.fall_back_on_any else NoFallback(tp) def _wrap(self, tp: AnyType, method: SerializationMethod) -> SerializationMethod: if not 
self.check_type: return method elif method is IDENTITY_METHOD: return TypeCheckIdentityMethod(expected_class(tp), self._any_fallback(tp)) else: return TypeCheckMethod(method, expected_class(tp), self._any_fallback(tp)) def collection( self, cls: Type[Collection], value_type: AnyType ) -> SerializationMethod: value_method = self.visit(value_type) method: SerializationMethod passthrough = ( (self.no_copy and issubclass(cls, list)) or (self.pass_through_options.tuple and issubclass(cls, tuple)) or ( self.pass_through_options.collections and not issubclass(cls, collections.abc.Set) ) ) if value_method is IDENTITY_METHOD: method = IDENTITY_METHOD if passthrough else METHODS[list] elif passthrough and check_only(value_method): method = CollectionCheckOnlyMethod(value_method) else: method = CollectionMethod(value_method) return self._wrap(cls, method) def enum(self, cls: Type[Enum]) -> SerializationMethod: method: SerializationMethod if self.pass_through_options.enums or issubclass(cls, (int, str)): method = IDENTITY_METHOD else: any_method = self.any() if any_method is IDENTITY_METHOD or all( m is IDENTITY_METHOD for m in map(self.visit, {elt.value.__class__ for elt in cls}) ): method = ValueMethod() else: assert isinstance(any_method, AnyMethod) method = EnumMethod(any_method) return self._wrap(cls, method) def literal(self, values: Sequence[Any]) -> SerializationMethod: if self.pass_through_options.enums or all( isinstance(v, (int, str)) for v in values ): return IDENTITY_METHOD else: return self.any() def mapping( self, cls: Type[Mapping], key_type: AnyType, value_type: AnyType ) -> SerializationMethod: key_method, value_method = self.visit(key_type), self.visit(value_type) method: SerializationMethod passthrough = ( issubclass(cls, dict) and self.no_copy ) or self.pass_through_options.collections if key_method is IDENTITY_METHOD and value_method is IDENTITY_METHOD: method = IDENTITY_METHOD if passthrough else METHODS[dict] elif passthrough and check_only(key_method) and check_only(value_method): method = MappingCheckOnlyMethod(key_method, value_method) else: method = MappingMethod(key_method, value_method) return self._wrap(cls, method) def _object( self, tp: AnyType, fields: Sequence[ObjectField] ) -> SerializationMethod: self._has_skipped_field = any(map(self._skip_field, fields)) return super()._object(tp, fields) def object(self, tp: AnyType, fields: Sequence[ObjectField]) -> SerializationMethod: cls = get_origin_or_type(tp) fields_to_order = [] exclude_unset = self.exclude_unset and support_fields_set(cls) typed_dict = is_typed_dict(cls) for field in fields: field_alias = self.aliaser(field.alias) if not field.is_aggregate else None field_method = self.visit_with_conv(field.type, field.serialization) field_default = ... 
if field.required else field.get_default() base_field: BaseField if ( typed_dict or exclude_unset or field_alias is None or field.skippable(self.exclude_defaults, self.exclude_none) ): base_field = ComplexField( field.name, field_alias, # type: ignore field_method, typed_dict, field.required, exclude_unset, field.skip.serialization_if, is_union_of(field.type, UndefinedType) or field_default is Undefined, (is_union_of(field.type, NoneType) and self.exclude_none) or field.none_as_undefined or (field_default is None and self.exclude_defaults), (field.skip.serialization_default or self.exclude_defaults) and field_default not in (None, Undefined), field_default, ) elif field_method is IDENTITY_METHOD: base_field = IdentityField(field.name, field_alias) else: base_field = SimpleField(field.name, field_alias, field_method) fields_to_order.append(FieldToOrder(field.name, field.ordering, base_field)) for serialized, types in get_serialized_methods(tp): ret_type = types["return"] fields_to_order.append( FieldToOrder( serialized.func.__name__, serialized.ordering, SerializedField( serialized.func.__name__, self.aliaser(serialized.alias), serialized.func, is_union_of(ret_type, UndefinedType), is_union_of(ret_type, NoneType) and self.exclude_none, self.visit_with_conv(ret_type, serialized.conversion), ), ) ) base_fields = tuple( f.field for f in sort_by_order( cls, fields_to_order, lambda f: f.name, lambda f: f.ordering ) ) method: SerializationMethod if is_typed_dict(cls) and self.additional_properties: method = ObjectAdditionalMethod( base_fields, {f.name for f in fields}, self.any() ) elif not all( isinstance(f, IdentityField) and f.alias == f.name for f in base_fields ): method = ObjectMethod(base_fields) elif ( is_dataclass(cls) and self.pass_through_options.dataclasses and all(f2.field for f, f2 in zip(base_fields, fields_to_order)) and not self._has_skipped_field ): method = IDENTITY_METHOD else: method = SimpleObjectMethod(tuple(f.name for f in base_fields)) return self._wrap(cls, method) def primitive(self, cls: Type) -> SerializationMethod: return self._wrap(cls, IDENTITY_METHOD) def subprimitive(self, cls: Type, superclass: Type) -> SerializationMethod: if cls is AliasedStr: return WrapperMethod(self.aliaser) else: return super().subprimitive(cls, superclass) def tuple(self, types: Sequence[AnyType]) -> SerializationMethod: elt_methods = tuple(map(self.visit, types)) method: SerializationMethod = TupleMethod(len(types), elt_methods) if self.pass_through_options.tuple: if all(m is IDENTITY_METHOD for m in elt_methods): method = IDENTITY_METHOD elif all(map(check_only, elt_methods)): method = TupleCheckOnlyMethod(len(types), elt_methods) if self.check_type: method = CheckedTupleMethod(len(types), method) return self._wrap(tuple, method) def union(self, types: Sequence[AnyType]) -> SerializationMethod: if discriminator := get_inherited_discriminator(types): return self.discriminate(discriminator, types) alternatives = [] for tp in types: with suppress(Unsupported): method = alt_method = self.visit(tp) if isinstance(method, TypeCheckMethod): alt_method = method.method elif isinstance(method, TypeCheckIdentityMethod): alt_method = IdentityMethod() if isinstance(alt_method, (TupleMethod, TupleCheckOnlyMethod)): alt_method = CheckedTupleMethod(alt_method.nb_elts, method) alt = UnionAlternative(expected_class(tp), alt_method) alternatives.append((method, alt)) if not alternatives: raise Unsupported(Union[tuple(types)]) elif len(alternatives) == 1: return alternatives[0][0] elif all(meth is 
IDENTITY_METHOD for meth, _ in alternatives): return IDENTITY_METHOD elif len(alternatives) == 2 and NoneType in types: return OptionalMethod( next(meth for meth, alt in alternatives if alt.cls is not NoneType) ) else: fallback = self._any_fallback(Union[types]) return UnionMethod(tuple(alt for _, alt in alternatives), fallback) def unsupported(self, tp: AnyType) -> SerializationMethod: try: return super().unsupported(tp) except Unsupported: if self.fall_back_on_any and is_type(tp): if issubclass(tp, Mapping): return self.visit(Mapping[Any, Any]) elif issubclass(tp, Collection): return self.visit(Collection[Any]) raise def _visit_conversion( self, tp: AnyType, conversion: Serialization, dynamic: bool, next_conversion: Optional[AnyConversion], ) -> SerializationMethod: conv_method = self.visit_with_conv( conversion.target, sub_conversion(conversion, next_conversion) ) converter = conversion.converter if converter is identity: method = conv_method elif conv_method is identity: method = METHODS.get(converter, WrapperMethod(converter)) else: method = ConversionMethod(converter, conv_method) return self._wrap(tp, method) def visit_conversion( self, tp: AnyType, conversion: Optional[Serialization], dynamic: bool, next_conversion: Optional[AnyConversion] = None, ) -> SerializationMethod: if not dynamic and self.pass_through_type(tp): return self._wrap(tp, IDENTITY_METHOD) else: return super().visit_conversion(tp, conversion, dynamic, next_conversion) @cache def serialization_method_factory( additional_properties: bool, aliaser: Aliaser, check_type: bool, conversion: Optional[AnyConversion], default_conversion: DefaultConversion, exclude_defaults: bool, exclude_none: bool, exclude_unset: bool, fall_back_on_any: bool, no_copy: bool, pass_through: PassThroughOptions, ) -> SerializationMethodFactory: @lru_cache() def factory(tp: AnyType) -> SerializationMethod: return SerializationMethodVisitor( additional_properties, aliaser, check_type, default_conversion, exclude_defaults, exclude_none, exclude_unset, fall_back_on_any, no_copy, pass_through, ).visit_with_conv(tp, conversion) return factory def serialization_method( type: AnyType, *, additional_properties: Optional[bool] = None, aliaser: Optional[Aliaser] = None, check_type: Optional[bool] = None, conversion: Optional[AnyConversion] = None, default_conversion: Optional[DefaultConversion] = None, exclude_defaults: Optional[bool] = None, exclude_none: Optional[bool] = None, exclude_unset: Optional[bool] = None, fall_back_on_any: Optional[bool] = None, no_copy: Optional[bool] = None, pass_through: Optional[PassThroughOptions] = None, ) -> Callable[[Any], Any]: from apischema import settings method = serialization_method_factory( opt_or(additional_properties, settings.additional_properties), opt_or(aliaser, settings.aliaser), opt_or(check_type, settings.serialization.check_type), conversion, opt_or(default_conversion, settings.serialization.default_conversion), opt_or(exclude_defaults, settings.serialization.exclude_defaults), opt_or(exclude_none, settings.serialization.exclude_none), opt_or(exclude_unset, settings.serialization.exclude_unset), opt_or(fall_back_on_any, settings.serialization.fall_back_on_any), opt_or(no_copy, settings.serialization.no_copy), opt_or(pass_through, settings.serialization.pass_through), )(type) return optimized_identity if method is IDENTITY_METHOD else method.serialize # type: ignore NO_OBJ = object() @overload def serialize( type: AnyType, obj: Any, *, additional_properties: Optional[bool] = None, aliaser: 
Optional[Aliaser] = None, check_type: Optional[bool] = None, conversion: Optional[AnyConversion] = None, default_conversion: Optional[DefaultConversion] = None, exclude_defaults: Optional[bool] = None, exclude_none: Optional[bool] = None, exclude_unset: Optional[bool] = None, fall_back_on_any: Optional[bool] = None, no_copy: Optional[bool] = None, pass_through: Optional[PassThroughOptions] = None, ) -> Any: ... @overload def serialize( obj: Any, *, additional_properties: Optional[bool] = None, aliaser: Optional[Aliaser] = None, check_type: Optional[bool] = None, conversion: Optional[AnyConversion] = None, default_conversion: Optional[DefaultConversion] = None, exclude_defaults: Optional[bool] = None, exclude_none: Optional[bool] = None, exclude_unset: Optional[bool] = None, fall_back_on_any: bool = True, no_copy: Optional[bool] = None, pass_through: Optional[PassThroughOptions] = None, ) -> Any: ... def serialize( # type: ignore type: AnyType = Any, obj: Any = NO_OBJ, *, additional_properties: Optional[bool] = None, aliaser: Optional[Aliaser] = None, check_type: Optional[bool] = None, conversion: Optional[AnyConversion] = None, default_conversion: Optional[DefaultConversion] = None, exclude_defaults: Optional[bool] = None, exclude_none: Optional[bool] = None, exclude_unset: Optional[bool] = None, fall_back_on_any: Optional[bool] = None, no_copy: Optional[bool] = None, pass_through: Optional[PassThroughOptions] = None, ) -> Any: # Handle overloaded signature without type if obj is NO_OBJ: type, obj = Any, type if fall_back_on_any is None: fall_back_on_any = True return serialization_method( type, additional_properties=additional_properties, aliaser=aliaser, check_type=check_type, conversion=conversion, default_conversion=default_conversion, exclude_defaults=exclude_defaults, exclude_none=exclude_none, exclude_unset=exclude_unset, fall_back_on_any=fall_back_on_any, no_copy=no_copy, pass_through=pass_through, )(obj) def serialization_default( *, additional_properties: Optional[bool] = None, aliaser: Optional[Aliaser] = None, default_conversion: Optional[DefaultConversion] = None, exclude_defaults: Optional[bool] = None, exclude_none: Optional[bool] = None, exclude_unset: Optional[bool] = None, ) -> Callable[[Any], Any]: from apischema import settings factory = serialization_method_factory( opt_or(additional_properties, settings.additional_properties), opt_or(aliaser, settings.aliaser), False, None, opt_or(default_conversion, settings.serialization.default_conversion), opt_or(exclude_defaults, settings.serialization.exclude_defaults), opt_or(exclude_none, settings.serialization.exclude_none), opt_or(exclude_unset, settings.serialization.exclude_unset), False, True, PassThroughOptions(any=True), ) def method(obj: Any) -> Any: return factory(obj.__class__).serialize(obj) return method apischema-0.18.3/apischema/serialization/errors.py000066400000000000000000000003741467672046000222520ustar00rootroot00000000000000from typing import Sequence, Union class TypeCheckError(TypeError): def __init__(self, msg: str, loc: Sequence[Union[int, str]]): self.msg = msg self.loc = loc def __str__(self): return f"{list(self.loc)} {self.msg}" apischema-0.18.3/apischema/serialization/methods.py000066400000000000000000000302021467672046000223720ustar00rootroot00000000000000from dataclasses import dataclass, field from typing import AbstractSet, Any, Callable, Dict, Optional, Tuple, Union from apischema.conversions.utils import Converter from apischema.fields import FIELDS_SET_ATTR from 
apischema.serialization.errors import TypeCheckError from apischema.types import AnyType, Undefined from apischema.utils import Lazy class SerializationMethod: def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: raise NotImplementedError class IdentityMethod(SerializationMethod): def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return obj class ListMethod(SerializationMethod): def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return list(obj) class DictMethod(SerializationMethod): def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return dict(obj) class StrMethod(SerializationMethod): def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return str(obj) class IntMethod(SerializationMethod): def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return int(obj) class BoolMethod(SerializationMethod): def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return bool(obj) class FloatMethod(SerializationMethod): def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return float(obj) class NoneMethod(SerializationMethod): def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return None @dataclass class RecMethod(SerializationMethod): lazy: Lazy[SerializationMethod] method: Optional[SerializationMethod] = field(init=False) def __post_init__(self): self.method = None def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: if self.method is None: self.method = self.lazy() return self.method.serialize(obj) @dataclass class AnyMethod(SerializationMethod): factory: Callable[[AnyType], SerializationMethod] def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: method: SerializationMethod = self.factory( obj.__class__ ) # tmp variable for substitution return method.serialize(obj, path) class Fallback: def fall_back(self, obj: Any, path: Union[int, str, None]) -> Any: raise NotImplementedError @dataclass class NoFallback(Fallback): tp: AnyType def fall_back(self, obj: Any, path: Union[int, str, None]) -> Any: raise TypeCheckError( f"Expected {self.tp}, found {obj.__class__}", [path] if path is not None else [], ) @dataclass class AnyFallback(Fallback): any_method: SerializationMethod def fall_back(self, obj: Any, key: Union[int, str, None]) -> Any: return self.any_method.serialize(obj, key) @dataclass class TypeCheckIdentityMethod(SerializationMethod): expected: AnyType # `type` would require exact match (i.e. no EnumMeta) fallback: Fallback def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return ( obj if isinstance(obj, self.expected) else self.fallback.fall_back(obj, path) ) @dataclass class TypeCheckMethod(SerializationMethod): method: SerializationMethod expected: AnyType # `type` would require exact match (i.e. 
no EnumMeta) fallback: Fallback def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: if isinstance(obj, self.expected): try: return self.method.serialize(obj) except TypeCheckError as err: if path is None: raise raise TypeCheckError(err.msg, [path, *err.loc]) else: return self.fallback.fall_back(obj, path) @dataclass class CollectionCheckOnlyMethod(SerializationMethod): value_method: SerializationMethod def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: for i, elt in enumerate(obj): self.value_method.serialize(elt, i) return obj @dataclass class CollectionMethod(SerializationMethod): value_method: SerializationMethod def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return [self.value_method.serialize(elt, i) for i, elt in enumerate(obj)] class ValueMethod(SerializationMethod): def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return obj.value @dataclass class EnumMethod(SerializationMethod): any_method: AnyMethod def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return self.any_method.serialize(obj.value) @dataclass class MappingCheckOnlyMethod(SerializationMethod): key_method: SerializationMethod value_method: SerializationMethod def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: for key, value in obj.items(): self.key_method.serialize(key, key) self.value_method.serialize(value, key) return obj @dataclass class MappingMethod(SerializationMethod): key_method: SerializationMethod value_method: SerializationMethod def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return { self.key_method.serialize(key, key): self.value_method.serialize(value, key) for key, value in obj.items() } @dataclass class BaseField: name: str alias: str def update_result(self, obj: Any, result: dict): raise NotImplementedError @dataclass class IdentityField(BaseField): def update_result(self, obj: Any, result: dict): result[self.alias] = getattr(obj, self.name) @dataclass class SimpleField(BaseField): method: SerializationMethod def update_result(self, obj: Any, result: dict): result[self.alias] = self.method.serialize(getattr(obj, self.name), self.alias) @dataclass class ComplexField(BaseField): method: SerializationMethod typed_dict: bool required: bool exclude_unset: bool skip_if: Optional[Callable] undefined: bool skip_none: bool skip_default: bool default_value: Any # https://github.com/cython/cython/issues/4383 skippable: bool = field(init=False) def __post_init__(self): self.skippable = bool( self.skip_if or self.undefined or self.skip_none or self.skip_default ) def update_result(self, obj: Any, result: dict): if ( (self.required or self.name in obj) if self.typed_dict else (not self.exclude_unset or self.name in getattr(obj, FIELDS_SET_ATTR)) ): value = obj[self.name] if self.typed_dict else getattr(obj, self.name) if not self.skippable or not ( (self.skip_if is not None and self.skip_if(value)) or (self.undefined and value is Undefined) or (self.skip_none and value is None) or (self.skip_default and value == self.default_value) ): if self.alias is not None: result[self.alias] = self.method.serialize(value, self.alias) else: result.update(self.method.serialize(value, self.alias)) @dataclass class SerializedField(BaseField): func: Callable[[Any], Any] undefined: bool skip_none: bool method: SerializationMethod def update_result(self, obj: Any, result: dict): value = self.func(obj) if not (self.undefined and value is Undefined) and not ( 
self.skip_none and value is None ): result[self.alias] = self.method.serialize(value, self.alias) @dataclass class SimpleObjectMethod(SerializationMethod): fields: Tuple[str, ...] def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return {name: getattr(obj, name) for name in self.fields} @dataclass class ObjectMethod(SerializationMethod): fields: Tuple[BaseField, ...] def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: result: dict = {} for field in self.fields: field.update_result(obj, result) return result @dataclass class ObjectAdditionalMethod(ObjectMethod): field_names: AbstractSet[str] any_method: SerializationMethod def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: result: dict = super().serialize(obj) for key, value in obj.items(): if isinstance(key, str) and not (key in self.field_names or key in result): result[key] = self.any_method.serialize(value, key) return result @dataclass class TupleCheckOnlyMethod(SerializationMethod): nb_elts: int elt_methods: Tuple[SerializationMethod, ...] def serialize(self, obj: tuple, path: Union[int, str, None] = None) -> Any: for i, method in enumerate(self.elt_methods): method.serialize(obj[i], i) return obj @dataclass class TupleMethod(SerializationMethod): nb_elts: int elt_methods: Tuple[SerializationMethod, ...] def serialize(self, obj: tuple, path: Union[int, str, None] = None) -> Any: elts: list = [None] * len(self.elt_methods) for i, method in enumerate(self.elt_methods): elts[i] = method.serialize(obj[i], i) return elts @dataclass class CheckedTupleMethod(SerializationMethod): nb_elts: int method: SerializationMethod def serialize(self, obj: tuple, path: Union[int, str, None] = None) -> Any: if not len(obj) == self.nb_elts: raise TypeError(f"Expected {self.nb_elts}-tuple, found {len(obj)}-tuple") return self.method.serialize(obj) # There is no need of an OptionalIdentityMethod because it would mean that all methods # are IdentityMethod, which gives IdentityMethod. @dataclass class OptionalMethod(SerializationMethod): value_method: SerializationMethod def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return self.value_method.serialize(obj, path) if obj is not None else None @dataclass class UnionAlternative(SerializationMethod): cls: AnyType # `type` would require exact match (i.e. no EnumMeta) method: SerializationMethod def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return self.method.serialize(obj, path) @dataclass class DiscriminatedAlternative(UnionAlternative): alias: str key: str def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: res = super().serialize(obj, path) if isinstance(res, dict) and self.alias not in res: res[self.alias] = self.key return res @dataclass class UnionMethod(SerializationMethod): alternatives: Tuple[UnionAlternative, ...] 
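    # fallback handles values matching none of the alternatives: NoFallback raises
    # a TypeCheckError, AnyFallback reserializes through the dynamic Any method.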
fallback: Fallback def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: for alternative in self.alternatives: if isinstance(obj, alternative.cls): try: return alternative.serialize(obj, path) except Exception: pass return self.fallback.fall_back(obj, path) @dataclass class WrapperMethod(SerializationMethod): wrapped: Callable[[Any], Any] def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return self.wrapped(obj) @dataclass class ConversionMethod(SerializationMethod): converter: Converter method: SerializationMethod def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: return self.method.serialize(self.converter(obj)) @dataclass class DiscriminateTypedDict(SerializationMethod): field_name: str mapping: Dict[str, SerializationMethod] fallback: Fallback def serialize(self, obj: Any, path: Union[int, str, None] = None) -> Any: try: method: SerializationMethod = self.mapping[obj[self.field_name]] except Exception: return self.fallback.fall_back(obj, path) return method.serialize(obj, path) def identity(arg: Any) -> Any: return arg apischema-0.18.3/apischema/serialization/serialized_methods.py000066400000000000000000000123321467672046000246110ustar00rootroot00000000000000from collections import defaultdict from dataclasses import dataclass from functools import wraps from inspect import Parameter, signature from typing import ( Any, Callable, Collection, Dict, Mapping, MutableMapping, NoReturn, Optional, Tuple, Type, TypeVar, Union, overload, ) from apischema.cache import CacheAwareDict from apischema.conversions.conversions import AnyConversion from apischema.methods import method_registerer from apischema.ordering import Ordering from apischema.schemas import Schema from apischema.types import AnyType, Undefined, UndefinedType from apischema.typing import generic_mro, get_type_hints, is_type from apischema.utils import ( get_args2, get_origin_or_type, get_origin_or_type2, substitute_type_vars, subtyping_substitution, ) @dataclass(frozen=True) class SerializedMethod: func: Callable alias: str conversion: Optional[AnyConversion] error_handler: Optional[Callable] ordering: Optional[Ordering] schema: Optional[Schema] def error_type(self) -> AnyType: assert self.error_handler is not None types = get_type_hints(self.error_handler, include_extras=True) if "return" not in types: raise TypeError("Error handler must be typed") return types["return"] def return_type(self, return_type: AnyType) -> AnyType: if self.error_handler is not None: error_type = self.error_type() if error_type is not NoReturn: return Union[return_type, error_type] return return_type def types(self, owner: AnyType = None) -> Mapping[str, AnyType]: types = get_type_hints(self.func, include_extras=True) if "return" not in types: if is_type(self.func): types["return"] = self.func else: raise TypeError("Function must be typed") types["return"] = self.return_type(types["return"]) if get_args2(owner): first_param = next(iter(signature(self.func).parameters)) substitution, _ = subtyping_substitution( types.get(first_param, get_origin_or_type2(owner)), owner ) types = { name: substitute_type_vars(tp, substitution) for name, tp in types.items() } return types _serialized_methods: MutableMapping[Type, Dict[str, SerializedMethod]] = CacheAwareDict( defaultdict(dict) ) S = TypeVar("S", bound=SerializedMethod) def _get_methods( tp: AnyType, all_methods: Mapping[Type, Mapping[str, S]] ) -> Collection[Tuple[S, Mapping[str, AnyType]]]: result = {} for base in 
reversed(generic_mro(tp)): for name, method in all_methods[get_origin_or_type(base)].items(): result[name] = (method, method.types(base)) return result.values() def get_serialized_methods( tp: AnyType, ) -> Collection[Tuple[SerializedMethod, Mapping[str, AnyType]]]: return _get_methods(tp, _serialized_methods) ErrorHandler = Union[Callable, None, UndefinedType] def none_error_handler(error: Exception, obj: Any, alias: str) -> None: return None MethodOrProp = TypeVar("MethodOrProp", Callable, property) @overload def serialized(__method_or_property: MethodOrProp) -> MethodOrProp: ... @overload def serialized( alias: Optional[str] = None, *, conversion: Optional[AnyConversion] = None, error_handler: ErrorHandler = Undefined, order: Optional[Ordering] = None, schema: Optional[Schema] = None, owner: Optional[Type] = None, ) -> Callable[[MethodOrProp], MethodOrProp]: ... def serialized( __arg=None, *, alias: Optional[str] = None, conversion: Optional[AnyConversion] = None, error_handler: ErrorHandler = Undefined, order: Optional[Ordering] = None, schema: Optional[Schema] = None, owner: Optional[Type] = None, ): def register(func: Callable, owner: Type, alias2: str): alias2 = alias or alias2 parameters = list(signature(func).parameters.values()) for param in parameters[1:]: if ( param.kind not in {Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD} and param.default is Parameter.empty ): raise TypeError("Serialized method cannot have required parameter") error_handler2 = error_handler if error_handler2 is None: error_handler2 = none_error_handler if error_handler2 is Undefined: error_handler2 = None else: wrapped = func @wraps(wrapped) def func(self): try: return wrapped(self) except Exception as error: assert ( error_handler2 is not None and error_handler2 is not Undefined ) return error_handler2(error, self, alias2) assert not isinstance(error_handler2, UndefinedType) _serialized_methods[owner][alias2] = SerializedMethod( func, alias2, conversion, error_handler2, order, schema ) if isinstance(__arg, str): alias = __arg __arg = None return method_registerer(__arg, owner, register) apischema-0.18.3/apischema/settings.py000066400000000000000000000101301467672046000177100ustar00rootroot00000000000000from inspect import Parameter from typing import Any, Callable, Optional, Sequence, Union from apischema import cache from apischema.aliases import Aliaser from apischema.conversions.conversions import DefaultConversion from apischema.conversions.converters import ( default_deserialization, default_serialization, ) from apischema.deserialization.coercion import Coercer from apischema.deserialization.coercion import coerce as coerce_ from apischema.json_schema import JsonSchemaVersion from apischema.objects import ObjectField from apischema.objects.fields import default_object_fields as default_object_fields_ from apischema.schemas import Schema from apischema.serialization import PassThroughOptions from apischema.type_names import TypeName from apischema.type_names import default_type_name as default_type_name_ from apischema.types import AnyType from apischema.utils import CollectionOrPredicate, to_camel_case class ResetCache(type): def __setattr__(self, name, value): super().__setattr__(name, value) cache.reset() class MetaSettings(ResetCache): @property def camel_case(cls) -> bool: return settings.aliaser is to_camel_case @camel_case.setter def camel_case(cls, value: bool): settings.aliaser = to_camel_case if value else lambda s: s ConstraintError = Union[str, Callable[[Any, Any], str]] class 
settings(metaclass=MetaSettings): additional_properties: bool = False aliaser: Aliaser = lambda s: s default_object_fields: Callable[ [type], Optional[Sequence[ObjectField]] ] = default_object_fields_ default_type_name: Callable[[AnyType], Optional[TypeName]] = default_type_name_ json_schema_version: JsonSchemaVersion = JsonSchemaVersion.DRAFT_2020_12 class base_schema: field: Callable[[AnyType, str, str], Optional[Schema]] = lambda *_: None method: Callable[[AnyType, Callable, str], Optional[Schema]] = lambda *_: None parameter: Callable[ [Callable, Parameter, str], Optional[Schema] ] = lambda *_: None type: Callable[[AnyType], Optional[Schema]] = lambda *_: None class errors: minimum: ConstraintError = "less than {} (minimum)" maximum: ConstraintError = "greater than {} (maximum)" exclusive_minimum: ConstraintError = ( "less than or equal to {} (exclusiveMinimum)" ) exclusive_maximum: ConstraintError = ( "greater than or equal to {} (exclusiveMinimum)" ) multiple_of: ConstraintError = "not a multiple of {} (multipleOf)" min_length: ConstraintError = "string length lower than {} (minLength)" max_length: ConstraintError = "string length greater than {} (maxLength)" pattern: ConstraintError = "not matching pattern {} (pattern)" min_items: ConstraintError = "item count lower than {} (minItems)" max_items: ConstraintError = "item count greater than {} (maxItems)" unique_items: ConstraintError = "duplicate items (uniqueItems)" min_properties: ConstraintError = "property count lower than {} (minProperties)" max_properties: ConstraintError = ( "property count greater than {} (maxProperties)" ) one_of: ConstraintError = "not one of {} (oneOf)" unexpected_property: str = "unexpected property" missing_property: str = "missing property" class deserialization(metaclass=ResetCache): coerce: bool = False coercer: Coercer = coerce_ default_conversion: DefaultConversion = default_deserialization fall_back_on_default: bool = False no_copy: bool = True override_dataclass_constructors = False pass_through: CollectionOrPredicate[type] = () class serialization(metaclass=ResetCache): check_type: bool = False fall_back_on_any: bool = False default_conversion: DefaultConversion = default_serialization exclude_defaults: bool = False exclude_none: bool = False exclude_unset: bool = True no_copy: bool = True pass_through: PassThroughOptions = PassThroughOptions() apischema-0.18.3/apischema/std_types.py000066400000000000000000000056111467672046000200760ustar00rootroot00000000000000import operator import re import sys from base64 import b64decode, b64encode from collections import deque from datetime import date, datetime, time from decimal import Decimal from ipaddress import ( IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network, ) from pathlib import ( Path, PosixPath, PurePath, PurePosixPath, PureWindowsPath, WindowsPath, ) from typing import TypeVar from uuid import UUID from apischema import ValidationError, deserializer, schema, serializer, type_name from apischema.conversions import Conversion, as_str, catch_value_error # =================== bytes ===================== deserializer(Conversion(b64decode, source=str, target=bytes)) @serializer def to_base64(b: bytes) -> str: return b64encode(b).decode() type_name(graphql="Bytes")(bytes) schema(encoding="base64")(bytes) # ================ collections ================== T = TypeVar("T") if sys.version_info >= (3, 10): deserializer(Conversion(deque, source=list[T], target=deque[T])) # type: ignore serializer(Conversion(list, 
source=deque[T], target=list[T])) # type: ignore else: from typing import Deque, List deserializer(Conversion(deque, source=List[T], target=Deque[T])) serializer(Conversion(list, source=Deque[T], target=List[T])) # ================== datetime =================== for cls, format in [(date, "date"), (datetime, "date-time"), (time, "time")]: fromisoformat = catch_value_error(cls.fromisoformat) # type: ignore deserializer(Conversion(fromisoformat, source=str, target=cls)) serializer(Conversion(cls.isoformat, source=cls, target=str)) # type: ignore type_name(graphql=cls.__name__.capitalize())(cls) schema(format=format)(cls) # ================== decimal ==================== deserializer(Conversion(catch_value_error(Decimal), source=float, target=Decimal)) serializer(Conversion(float, source=Decimal, target=float)) type_name(None)(Decimal) # ================= ipaddress =================== for classes, format in [ ((IPv4Address, IPv4Interface, IPv4Network), "ipv4"), ((IPv6Address, IPv6Interface, IPv6Network), "ipv6"), ]: for cls in classes: as_str(cls) type_name(graphql=cls.__name__)(cls) schema(format=format)(cls) # ==================== path ===================== for cls in (PurePath, PurePosixPath, PureWindowsPath, Path, PosixPath, WindowsPath): as_str(cls) type_name(None)(cls) # =================== pattern =================== @deserializer def _compile(pattern: str) -> re.Pattern: try: return re.compile(pattern) except re.error as err: raise ValidationError(str(err)) serializer(Conversion(operator.attrgetter("pattern"), source=re.Pattern, target=str)) type_name(None)(re.Pattern) # ==================== uuid ===================== as_str(UUID) type_name(graphql="UUID") schema(format="uuid")(UUID) apischema-0.18.3/apischema/tagged_unions.py000066400000000000000000000071761467672046000207160ustar00rootroot00000000000000__all__ = ["Tagged", "TaggedUnion", "get_tagged"] from dataclasses import dataclass, field from typing import Any, ClassVar, Generic, Tuple, Type, TypeVar, Union, overload from apischema.metadata.keys import ( DEFAULT_AS_SET_METADATA, FALL_BACK_ON_DEFAULT_METADATA, FLATTEN_METADATA, INIT_VAR_METADATA, POST_INIT_METADATA, PROPERTIES_METADATA, REQUIRED_METADATA, SKIP_METADATA, ) from apischema.schemas import schema from apischema.types import Metadata, MetadataImplem, Undefined, UndefinedType from apischema.typing import get_type_hints from apischema.utils import PREFIX, get_args2, get_origin2 TAGS_ATTR = f"{PREFIX}tags" T = TypeVar("T", bound="TaggedUnion") V = TypeVar("V") class Tag(str, Generic[T, V]): def __new__(cls, tag: str, type: Type[T]): return super().__new__(cls, tag) def __init__(self, tag: str, type: Type[T]): super().__init__() self.type = type def __call__(self, value: V) -> T: return self.type(**{self: value}) # type: ignore INVALID_METADATA = { DEFAULT_AS_SET_METADATA, FALL_BACK_ON_DEFAULT_METADATA, INIT_VAR_METADATA, FLATTEN_METADATA, POST_INIT_METADATA, PROPERTIES_METADATA, REQUIRED_METADATA, SKIP_METADATA, } @dataclass(frozen=True) class Tagged(Generic[V]): metadata: Metadata = field(default_factory=MetadataImplem) def __post_init__(self): if self.metadata.keys() & INVALID_METADATA: raise TypeError("Invalid metadata in a TaggedUnion field") @overload def __get__(self, instance: None, owner: Type[T]) -> Tag[T, V]: ... @overload def __get__(self, instance: Any, owner) -> Union[V, UndefinedType]: ... 
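    # Not reached at runtime: TaggedUnion.__init_subclass__ replaces each Tagged
    # attribute with a dataclass field and then a Tag, so the overloads above
    # exist only for static typing.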
def __get__(self, instance, owner): raise NotImplementedError class TaggedUnion: def __init__(self, **kwargs): if len(kwargs) != 1: raise ValueError("TaggedUnion constructor expects only one field") tags = getattr(self, TAGS_ATTR) for tag in tags: setattr(self, tag, Undefined) for tag, value in kwargs.items(): if tag not in tags: raise TypeError(f"{type(self)} has no tag {tag}") setattr(self, tag, value) def __repr__(self): tag, value = get_tagged(self) return f"{type(self).__name__}({tag}={value!r})" def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) tags = set(getattr(cls, TAGS_ATTR, ())) types = get_type_hints(cls, include_extras=True) for tag, tp in types.items(): if get_origin2(tp) == Tagged: tagged = cls.__dict__.get(tag, Tagged()) setattr(cls, tag, field(default=Undefined, metadata=tagged.metadata)) cls.__annotations__[tag] = Union[ get_args2(types[tag])[0], UndefinedType ] tags.add(tag) elif tag not in tags: if get_origin2(tp) != ClassVar: cls.__annotations__[tag] = ClassVar[tp] else: raise TypeError( "Only Tagged or ClassVar fields are allowed in TaggedUnion" ) setattr(cls, TAGS_ATTR, tags) schema(min_props=1, max_props=1)(dataclass(init=False, repr=False)(cls)) for tag in tags: setattr(cls, tag, Tag(tag, cls)) def get_tagged(tagged_union: TaggedUnion) -> Tuple[str, Any]: defined = { tag: getattr(tagged_union, tag) for tag in getattr(tagged_union, TAGS_ATTR) if getattr(tagged_union, tag) is not Undefined } return next(iter(defined.items())) apischema-0.18.3/apischema/type_names.py000066400000000000000000000062571467672046000202330ustar00rootroot00000000000000import collections.abc from contextlib import suppress from dataclasses import dataclass from typing import Any, Callable, MutableMapping, NamedTuple, Optional, TypeVar, Union from apischema.cache import CacheAwareDict from apischema.types import PRIMITIVE_TYPES, AnyType from apischema.typing import ( get_args, get_origin, is_named_tuple, is_type_var, is_typed_dict, ) from apischema.utils import has_type_vars, merge_opts, replace_builtins class TypeName(NamedTuple): json_schema: Optional[str] = None graphql: Optional[str] = None NameOrFactory = Union[str, None, Callable[..., Optional[str]]] def _apply_args(name_or_factory: NameOrFactory, *args) -> Optional[str]: return name_or_factory(*args) if callable(name_or_factory) else name_or_factory _type_names: MutableMapping[AnyType, "TypeNameFactory"] = CacheAwareDict({}) T = TypeVar("T") @dataclass(frozen=True) class TypeNameFactory: json_schema: NameOrFactory graphql: NameOrFactory def __call__(self, tp: T) -> T: self.check_type(tp) _type_names[replace_builtins(tp)] = self return tp def check_type(self, tp: AnyType): if is_type_var(tp): raise TypeError("TypeVar cannot have a type_name") if has_type_vars(tp): if get_args(tp): raise TypeError("Generic alias cannot have a type_name") elif isinstance(self.json_schema, str) or isinstance(self.graphql, str): raise TypeError( "Unspecialized generic type must used factory type_name" ) def to_type_name(self, tp: AnyType, *args) -> TypeName: self.check_type(tp) return TypeName( _apply_args(self.json_schema, tp, *args), _apply_args(self.graphql, tp, *args), ) def type_name( ref: Optional[NameOrFactory] = None, *, json_schema: Optional[NameOrFactory] = None, graphql: Optional[NameOrFactory] = None, ) -> TypeNameFactory: return TypeNameFactory(json_schema or ref, graphql or ref) no_type_name = {*PRIMITIVE_TYPES, Any} def default_type_name(tp: AnyType) -> Optional[TypeName]: if ( hasattr(tp, "__name__") and not 
get_args(tp) and not has_type_vars(tp) and tp not in no_type_name and ( not isinstance(tp, type) or not issubclass(tp, collections.abc.Collection) or is_named_tuple(tp) or is_typed_dict(tp) ) ): return TypeName(tp.__name__, tp.__name__) else: return None def get_type_name(tp: AnyType) -> TypeName: from apischema import settings tp = replace_builtins(tp) with suppress(KeyError, TypeError): return _type_names[tp].to_type_name(tp) origin, args = get_origin(tp), get_args(tp) if args and not has_type_vars(tp): with suppress(KeyError, TypeError): return _type_names[origin].to_type_name(origin, *args) return settings.default_type_name(tp) or TypeName() @merge_opts def merge_type_name(default: TypeName, override: TypeName) -> TypeName: return TypeName( override.json_schema or default.json_schema, override.graphql or default.graphql ) apischema-0.18.3/apischema/types.py000066400000000000000000000041041467672046000172200ustar00rootroot00000000000000import collections.abc from enum import Enum, auto from types import MappingProxyType from typing import ( TYPE_CHECKING, AbstractSet, Any, Collection, Dict, FrozenSet, List, Mapping, MutableMapping, MutableSequence, MutableSet, Sequence, Set, Tuple, Type, Union, ) AnyType = Any NoneType: Type[None] = type(None) Number = Union[int, float] PRIMITIVE_TYPES = (str, int, bool, float, NoneType) COLLECTION_TYPES = ( Collection, collections.abc.Collection, Sequence, collections.abc.Sequence, Tuple, tuple, MutableSequence, collections.abc.MutableSequence, List, list, AbstractSet, collections.abc.Set, FrozenSet, frozenset, MutableSet, collections.abc.MutableSet, Set, set, ) MAPPING_TYPES = ( Mapping, collections.abc.Mapping, MutableMapping, collections.abc.MutableMapping, Dict, dict, MappingProxyType, ) class Metadata(Mapping[str, Any]): def __or__(self, other: Mapping[str, Any]) -> "Metadata": return MetadataImplem({**self, **other}) def __ror__(self, other: Mapping[str, Any]) -> "Metadata": return MetadataImplem({**other, **self}) class MetadataMixin(Metadata): key: str def __getitem__(self, key): if key != self.key: raise KeyError(key) return self def __iter__(self): return iter((self.key,)) def __len__(self): return 1 class MetadataImplem(dict, Metadata): # type: ignore def __hash__(self): return hash(tuple(sorted(self.items()))) # Singleton type, see https://www.python.org/dev/peps/pep-0484/#id30 if TYPE_CHECKING: class UndefinedType(Enum): Undefined = auto() Undefined = UndefinedType.Undefined else: class UndefinedType: def __new__(cls): return Undefined def __repr__(self): return "Undefined" def __str__(self): return "Undefined" def __bool__(self): return False Undefined = object.__new__(UndefinedType) apischema-0.18.3/apischema/typing.py000066400000000000000000000161661467672046000174010ustar00rootroot00000000000000"""Kind of typing_extensions for this package""" __all__ = ["get_args", "get_origin", "get_type_hints"] import sys from types import ModuleType, new_class from typing import Any, Callable, Dict, Generic, Protocol, TypeVar, Union class _FakeType: pass if sys.version_info >= (3, 9): # pragma: no cover from typing import Annotated, get_args, get_origin, get_type_hints else: # pragma: no cover try: from typing_extensions import Annotated except ImportError: pass try: from typing_extensions import get_type_hints as gth except ImportError: from typing import get_type_hints as _gth def gth(obj, globalns=None, localns=None, include_extras=False): return _gth(obj, globalns, localns) def get_type_hints(obj, globalns=None, localns=None, 
include_extras=False): # TODO This has been fixed in recent 3.7 and 3.8 # fix https://bugs.python.org/issue37838 if not isinstance(obj, (type, ModuleType)) and globalns is None: nsobj = obj while hasattr(nsobj, "__wrapped__"): nsobj = nsobj.__wrapped__ globalns = getattr(nsobj, "__globals__", None) localns = {"unicode": str, **(localns or {})} return gth(obj, globalns, localns, include_extras) try: from typing_extensions import get_args, get_origin except ImportError: def get_origin(tp): if isinstance(tp, _AnnotatedAlias): return None if tp.__args__ is None else Annotated if tp is Generic: return Generic return getattr(tp, "__origin__", None) def get_args(tp): if isinstance(tp, _AnnotatedAlias): return () if tp.__args__ is None else (tp.__args__[0], *tp.__metadata__) res = getattr(tp, "__args__", ()) if get_origin(tp) is Callable and res[0] is not Ellipsis: res = (list(res[:-1]), res[-1]) return res if sys.version_info >= (3, 13): from typing import _collect_type_parameters elif sys.version_info >= (3, 11): from typing import _collect_parameters as _collect_type_parameters # type: ignore else: from typing import _collect_type_vars as _collect_type_parameters def _generic_mro(result, tp): origin = get_origin(tp) if origin is None: origin = tp result[origin] = tp if hasattr(origin, "__orig_bases__"): parameters = _collect_type_parameters(origin.__orig_bases__) substitution = dict(zip(parameters, get_args(tp))) for base in origin.__orig_bases__: if get_origin(base) in result: continue base_parameters = getattr(base, "__parameters__", ()) if base_parameters: base = base[tuple(substitution.get(p, p) for p in base_parameters)] _generic_mro(result, base) # sentinel value to avoid to subscript Generic and Protocol BASE_GENERIC_MRO = {Generic: Generic, Protocol: Protocol} def generic_mro(tp): origin = get_origin(tp) if origin is None and not hasattr(tp, "__orig_bases__"): if not isinstance(tp, type): raise TypeError(f"{tp!r} is not a type or a generic alias") return tp.__mro__ result = BASE_GENERIC_MRO.copy() _generic_mro(result, tp) cls = origin if origin is not None else tp return tuple(result.get(sub_cls, sub_cls) for sub_cls in cls.__mro__) def resolve_type_hints(obj: Any) -> Dict[str, Any]: """Wrap get_type_hints to resolve type vars in case of generic inheritance. 
`obj` can also be a parametrized generic class.""" origin_or_obj = get_origin(obj) or obj if isinstance(origin_or_obj, type): hints = {} for base in reversed(generic_mro(obj)): base_origin = get_origin(base) or base base_annotations = getattr(base_origin, "__dict__", {}).get( "__annotations__", {} ) substitution = dict( zip(getattr(base_origin, "__parameters__", ()), get_args(base)) ) for name, hint in get_type_hints(base_origin, include_extras=True).items(): if name not in base_annotations: continue if isinstance(hint, TypeVar): hints[name] = substitution.get(hint, hint) elif getattr(hint, "__parameters__", ()): hints[name] = (Union if is_union(hint) else hint)[ tuple(substitution.get(p, p) for p in hint.__parameters__) ] else: hints[name] = hint return hints else: return get_type_hints(obj, include_extras=True) _T = TypeVar("_T") _GenericAlias: Any = type(Generic[_T]) try: _AnnotatedAlias: Any = type(Annotated[_T, ...]) except NameError: _AnnotatedAlias = _FakeType def is_new_type(tp: Any) -> bool: return hasattr(tp, "__supertype__") def is_annotated(tp: Any) -> bool: try: from typing import Annotated return get_origin(tp) == Annotated except ImportError: try: from typing_extensions import Annotated # type: ignore return get_origin(tp) == Annotated except ImportError: return False def is_literal(tp: Any) -> bool: from typing import Literal origin = get_origin(tp) if origin is Literal: return True try: from typing_extensions import Literal as Literal2 return get_origin(tp) is Literal2 except ImportError: return False def is_literal_string(tp: Any) -> bool: try: from typing import LiteralString if tp is LiteralString: return True except ImportError: pass try: from typing_extensions import LiteralString return tp is LiteralString except ImportError: return False def is_named_tuple(tp: Any) -> bool: return issubclass(tp, tuple) and hasattr(tp, "_fields") def is_typed_dict(tp: Any) -> bool: # TODO use python 3.10 typing.is_typeddict from typing import TypedDict if isinstance(tp, type(new_class("_TypedDictImplem", (TypedDict,)))): return True try: from typing_extensions import TypedDict as TypedDict2 return isinstance(tp, type(new_class("_TypedDictImplem", (TypedDict2,)))) except ImportError: return False def is_type_var(tp: Any) -> bool: return isinstance(tp, TypeVar) # py38 get_origin of builtin wrapped generics return the unsubscriptable builtin # type. 
if sys.version_info < (3, 9): import typing TYPING_ALIASES = { getattr(elt, "__origin__", None): elt for elt in typing.__dict__.values() } def typing_origin(origin: Any) -> Any: return TYPING_ALIASES.get(origin, origin) else: typing_origin = lambda tp: tp def is_type(tp: Any) -> bool: """isinstance is not enough because in py39: isinstance(list[int], type) == True""" return isinstance(tp, type) and not get_args(tp) def is_union(tp: Any) -> bool: try: from types import UnionType return tp in (UnionType, Union) except ImportError: return tp is Union apischema-0.18.3/apischema/utils.py000066400000000000000000000205561467672046000172250ustar00rootroot00000000000000import collections.abc import inspect import re from contextlib import contextmanager, suppress from dataclasses import dataclass from enum import Enum from functools import wraps from types import MappingProxyType from typing import ( AbstractSet, Any, Awaitable, Callable, Collection, Container, Generic, Iterable, Mapping, NoReturn, Optional, Sequence, Tuple, Type, TypeVar, Union, cast, ) from apischema.types import COLLECTION_TYPES, MAPPING_TYPES, PRIMITIVE_TYPES, AnyType from apischema.typing import ( _collect_type_parameters, generic_mro, get_args, get_origin, get_type_hints, is_annotated, is_type_var, is_union, typing_origin, ) try: from apischema.typing import Annotated except ImportError: Annotated = ... # type: ignore PREFIX = "_apischema_" T = TypeVar("T") U = TypeVar("U") def identity(x: T) -> T: return x Lazy = Callable[[], T] @dataclass(frozen=True) # dataclass enable equality check class LazyValue(Generic[T]): default: T def __call__(self) -> T: return self.default def is_hashable(obj: Any) -> bool: return isinstance(obj, collections.abc.Hashable) def opt_or(opt: Optional[T], default: U) -> Union[T, U]: return opt if opt is not None else default SNAKE_CASE_REGEX = re.compile(r"_([a-z\d])") CAMEL_CASE_REGEX = re.compile(r"([a-z\d])([A-Z])") def to_camel_case(s: str) -> str: return SNAKE_CASE_REGEX.sub(lambda m: m.group(1).upper(), s) def to_snake_case(s: str) -> str: return CAMEL_CASE_REGEX.sub(lambda m: m.group(1) + "_" + m.group(2).lower(), s) def to_pascal_case(s: str) -> str: camel = to_camel_case(s) return camel[0].upper() + camel[1:] if camel else camel def merge_opts( func: Callable[[T, T], T] ) -> Callable[[Optional[T], Optional[T]], Optional[T]]: def wrapper(opt1, opt2): if opt1 is None: return opt2 if opt2 is None: return opt1 return func(opt1, opt2) return wrapper K = TypeVar("K") V = TypeVar("V") @merge_opts def merge_opts_mapping(m1: Mapping[K, V], m2: Mapping[K, V]) -> Mapping[K, V]: return {**m1, **m2} def has_type_vars(tp: AnyType) -> bool: return is_type_var(tp) or bool(getattr(tp, "__parameters__", ())) TV = AnyType # TypeVar is not supported as a type # 10 should be enough for all builtin types _type_vars = [TypeVar(f"T{i}") for i in range(10)] def get_parameters(tp: AnyType) -> Iterable[TV]: if hasattr(tp, "__parameters__"): return tp.__parameters__ elif hasattr(tp, "__orig_bases__"): return _collect_type_parameters(tp.__orig_bases__) elif is_type_var(tp): return (tp,) else: return _type_vars def substitute_type_vars(tp: AnyType, substitution: Mapping[TV, AnyType]) -> AnyType: if is_type_var(tp): try: return substitution[tp] except KeyError: return Union[tp.__constraints__] if tp.__constraints__ else Any elif getattr(tp, "__parameters__", ()): return (Union if is_union(tp) else tp)[ tuple(substitution.get(p, p) for p in tp.__parameters__) ] else: return tp Func = TypeVar("Func", bound=Callable) def 
typed_wraps(wrapped: Func) -> Callable[[Callable], Func]: return cast(Func, wraps(wrapped)) def is_subclass(tp: AnyType, base: AnyType) -> bool: tp, base = get_origin_or_type(tp), get_origin_or_type(base) return tp == base or ( isinstance(tp, type) and isinstance(base, type) and issubclass(tp, base) ) def no_annotated(tp: AnyType) -> AnyType: return get_args(tp)[0] if is_annotated(tp) else tp def get_origin_or_type(tp: AnyType) -> AnyType: origin = get_origin(tp) return origin if origin is not None else tp def get_origin2(tp: AnyType) -> Optional[Type]: return get_origin(no_annotated(tp)) # type: ignore def get_args2(tp: AnyType) -> Tuple[AnyType, ...]: return get_args(no_annotated(tp)) def get_origin_or_type2(tp: AnyType) -> AnyType: tp2 = no_annotated(tp) origin = get_origin(tp2) return origin if origin is not None else tp2 def keep_annotations(tp: AnyType, annotated: AnyType) -> AnyType: return Annotated[(tp, *get_args(annotated)[1:])] if is_annotated(annotated) else tp def with_parameters(tp: AnyType) -> AnyType: return tp[tp.__parameters__] if getattr(tp, "__parameters__", ()) else tp def is_union_of(tp: AnyType, of: AnyType) -> bool: return tp == of or (is_union(get_origin_or_type2(tp)) and of in get_args2(tp)) LIST_ORIGIN = typing_origin(list) SET_ORIGIN = typing_origin(set) TUPLE_ORIGIN = typing_origin(tuple) DICT_ORIGIN = typing_origin(dict) def replace_builtins(tp: AnyType) -> AnyType: origin = get_origin2(tp) if origin is None: return tp args = tuple(map(replace_builtins, get_args2(tp))) replacement: Any if origin in COLLECTION_TYPES: if issubclass(origin, collections.abc.Set): replacement = SET_ORIGIN elif not issubclass(origin, tuple): replacement = LIST_ORIGIN elif len(args) == 2 and args[1] is ...: args = args[:1] replacement = LIST_ORIGIN else: replacement = TUPLE_ORIGIN elif origin in MAPPING_TYPES: replacement = DICT_ORIGIN elif is_union(origin): replacement = Union else: replacement = typing_origin(origin) res = replacement[args] if args else replacement return keep_annotations(res, tp) def stop_signature_abuse() -> NoReturn: raise TypeError("Stop signature abuse") empty_dict: Mapping[str, Any] = MappingProxyType({}) ITERABLE_TYPES = { *COLLECTION_TYPES, *MAPPING_TYPES, Iterable, collections.abc.Iterable, Container, collections.abc.Container, } def subtyping_substitution( supertype: AnyType, subtype: AnyType ) -> Tuple[Mapping[AnyType, AnyType], Mapping[AnyType, AnyType]]: if not get_args(subtype) and not isinstance(subtype, type): return {}, {} supertype, subtype = with_parameters(supertype), with_parameters(subtype) supertype_to_subtype, subtype_to_supertype = {}, {} super_origin = get_origin_or_type2(supertype) for base in generic_mro(subtype): base_origin = get_origin_or_type2(base) if base_origin == super_origin or ( base_origin in ITERABLE_TYPES and super_origin in ITERABLE_TYPES ): for base_arg, super_arg in zip(get_args2(base), get_args2(supertype)): if is_type_var(super_arg): supertype_to_subtype[super_arg] = base_arg if is_type_var(base_arg): subtype_to_supertype[base_arg] = super_arg break return supertype_to_subtype, subtype_to_supertype def literal_values(values: Sequence[Any]) -> Sequence[Any]: primitive_values = [v.value if isinstance(v, Enum) else v for v in values] if any(not isinstance(v, PRIMITIVE_TYPES) for v in primitive_values): raise TypeError("Only primitive types are supported for Literal/Enum") return primitive_values awaitable_origin = get_origin(Awaitable[Any]) def is_async(func: Callable, types: Optional[Mapping[str, AnyType]] = None) -> 
bool: wrapped_func = func while hasattr(wrapped_func, "__wrapped__"): wrapped_func = wrapped_func.__wrapped__ if inspect.iscoroutinefunction(wrapped_func): return True if types is None: try: types = get_type_hints(func) except Exception: types = {} return get_origin_or_type2(types.get("return")) == awaitable_origin @contextmanager def context_setter(obj: Any): dict_copy = obj.__dict__.copy() try: yield finally: obj.__dict__.clear() obj.__dict__.update(dict_copy) CollectionOrPredicate = Union[Collection[T], Callable[[T], bool]] def as_predicate( collection_or_predicate: CollectionOrPredicate[T], ) -> Callable[[T], bool]: if not isinstance(collection_or_predicate, Collection): return collection_or_predicate elif not collection_or_predicate: return lambda _: False collection = collection_or_predicate if not isinstance(collection, AbstractSet): with suppress(Exception): collection = set(collection) def wrapper(elt: T) -> bool: try: return elt in collection except Exception: return False return wrapper apischema-0.18.3/apischema/validation/000077500000000000000000000000001467672046000176355ustar00rootroot00000000000000apischema-0.18.3/apischema/validation/__init__.py000066400000000000000000000004431467672046000217470ustar00rootroot00000000000000__all__ = [ "Discard", "LocalizedError", "ValidationError", "ValidatorResult", "get_validators", "validate", "validator", ] from .errors import LocalizedError, ValidationError, ValidatorResult from .validators import Discard, get_validators, validate, validator apischema-0.18.3/apischema/validation/dependencies.py000066400000000000000000000037361467672046000226460ustar00rootroot00000000000000import ast import inspect import textwrap from typing import AbstractSet, Callable, Collection, Dict, Set Dependencies = AbstractSet[str] class DependencyFinder(ast.NodeVisitor): def __init__(self, param: str): self.param = param self.dependencies: Set[str] = set() def visit_Attribute(self, node): self.generic_visit(node) if isinstance(node.value, ast.Name) and node.value.id == self.param: self.dependencies.add(node.attr) # TODO Add warning in case of function call with self in parameter # or better, follow the call, but it would be too hard (local import, etc.) 
def first_parameter(func: Callable) -> str: try: return next(iter(inspect.signature(func).parameters)) except StopIteration: raise TypeError("Cannot compute dependencies if no parameter") def find_dependencies(func: Callable) -> Dependencies: try: finder = DependencyFinder(first_parameter(func)) finder.visit(ast.parse(textwrap.dedent(inspect.getsource(func)))) except ValueError: return set() return finder.dependencies cache: Dict[Callable, Dependencies] = {} def find_all_dependencies( cls: type, func: Callable, rec_guard: Collection[str] = () ) -> Dependencies: """Dependencies contains class variables (because they can be "fake" ones as in dataclasses)""" if func not in cache: dependencies = set(find_dependencies(func)) for attr in list(dependencies): if not hasattr(cls, attr): continue member = getattr(cls, attr) if isinstance(member, property): member = member.fget if callable(member): dependencies.remove(attr) if member in rec_guard: continue rec_deps = find_all_dependencies(cls, member, {*rec_guard, member}) dependencies.update(rec_deps) cache[func] = dependencies return cache[func] apischema-0.18.3/apischema/validation/errors.py000066400000000000000000000110071467672046000215220ustar00rootroot00000000000000from functools import reduce from typing import ( Any, Collection, Dict, Generator, Iterable, Iterator, List, Mapping, Optional, Sequence, Tuple, TypedDict, TypeVar, Union, overload, ) from apischema.aliases import Aliaser from apischema.objects import AliasedStr from apischema.utils import merge_opts try: from apischema.typing import Annotated except ImportError: Annotated = ... # type: ignore ErrorMsg = str Error = Union[ErrorMsg, Tuple[Any, ErrorMsg]] # where Any = Union[Field, int, str, Iterable[Union[Field, int, str,]]] # but Field being kind of magic not understood by type checkers, it's hidden behind Any ErrorKey = Union[str, int] T = TypeVar("T") ValidatorResult = Generator[Error, None, T] class LocalizedError(TypedDict): loc: Sequence[ErrorKey] err: ErrorMsg class ValidationError(Exception): @overload def __init__(self, __message: str): ... @overload def __init__( self, messages: Optional[Sequence[ErrorMsg]] = None, children: Optional[Mapping[ErrorKey, "ValidationError"]] = None, ): ... def __init__( self, messages: Optional[Union[ErrorMsg, Sequence[ErrorMsg]]] = None, children: Optional[Mapping[ErrorKey, "ValidationError"]] = None, ): if isinstance(messages, str): messages = [messages] self.messages: Sequence[str] = messages or [] self.children: Mapping[ErrorKey, "ValidationError"] = children or {} def __str__(self): return f"{ValidationError.__name__}: {self.errors}" def _errors(self) -> Iterator[Tuple[List[ErrorKey], ErrorMsg]]: for msg in self.messages: yield [], msg for child_key in sorted(self.children): for path, error in self.children[child_key]._errors(): yield [child_key, *path], error @property def errors(self) -> List[LocalizedError]: return [{"loc": path, "err": error} for path, error in self._errors()] @staticmethod def from_errors(errors: Sequence[LocalizedError]) -> "ValidationError": return reduce( merge_errors, [_rec_build_error(err["loc"], err["err"]) for err in errors], ValidationError(), ) @overload def merge_errors( err1: Optional[ValidationError], err2: ValidationError ) -> ValidationError: ... @overload def merge_errors( err1: ValidationError, err2: Optional[ValidationError] ) -> ValidationError: ... @overload def merge_errors( err1: Optional[ValidationError], err2: Optional[ValidationError] ) -> Optional[ValidationError]: ... 
@merge_opts # type: ignore def merge_errors(err1: ValidationError, err2: ValidationError) -> ValidationError: if err1 is None: return err2 if err2 is None: return err1 return ValidationError( [*err1.messages, *err2.messages], { key: merge_errors( # type: ignore err1.children.get(key), err2.children.get(key) ) for key in err1.children.keys() | err2.children.keys() }, ) def apply_aliaser(error: ValidationError, aliaser: Aliaser) -> ValidationError: aliased, aliased_children = False, {} for key, child in error.children.items(): if isinstance(key, AliasedStr): key = str(aliaser(key)) # str because it could be a str subclass aliased = True child2 = apply_aliaser(child, aliaser) aliased |= child2 is not child aliased_children[key] = child2 return ValidationError(error.messages, aliased_children) if aliased else error def _rec_build_error(path: Sequence[ErrorKey], msg: ErrorMsg) -> ValidationError: if not path: return ValidationError([msg]) else: return ValidationError(children={path[0]: _rec_build_error(path[1:], msg)}) def build_validation_error(errors: Iterable[Error]) -> ValidationError: messages: List[ErrorMsg] = [] children: Dict[ErrorKey, ValidationError] = {} for error in errors: if isinstance(error, ErrorMsg): messages.append(error) continue path, msg = error if not path: messages.append(msg) else: if isinstance(path, str) or not isinstance(path, Collection): path = (path,) key, *remain = path children[key] = merge_errors( children.get(key), _rec_build_error(remain, msg) ) return ValidationError(messages, children) apischema-0.18.3/apischema/validation/mock.py000066400000000000000000000035051467672046000211430ustar00rootroot00000000000000from dataclasses import dataclass from functools import partial from types import FunctionType, MethodType from typing import TYPE_CHECKING, Any, Mapping, Optional, Type, TypeVar from apischema.fields import FIELDS_SET_ATTR from apischema.objects import object_fields if TYPE_CHECKING: from apischema.validation.validators import Validator MOCK_FIELDS_FIELD = "__mock_fields__" MOCK_CLS_FIELD = "__mock_cls__" class NonTrivialDependency(Exception): def __init__(self, attr: str): self.attr = attr self.validator: Optional["Validator"] = None @dataclass(init=False) class ValidatorMock: def __init__(self, cls: Type, values: Mapping[str, Any]): self.cls = cls self.values = values def __getattribute__(self, name: str) -> Any: values = super().__getattribute__("values") if name in values: return values[name] cls = super().__getattribute__("cls") fields = object_fields(cls, deserialization=True) if name in fields: if fields[name].required: raise NonTrivialDependency(name) return fields[name].get_default() if name == "__class__": return cls if name == "__dict__": return {**values, FIELDS_SET_ATTR: set(values)} if name == FIELDS_SET_ATTR: return set(values) if hasattr(cls, name): member = getattr(cls, name) # for classmethod (staticmethod are not handled) if isinstance(member, MethodType): return member if isinstance(member, FunctionType): return partial(member, self) if isinstance(member, property): return member.fget(self) # type: ignore return member raise NonTrivialDependency(name) T = TypeVar("T") apischema-0.18.3/apischema/validation/validators.py000066400000000000000000000150161467672046000223620ustar00rootroot00000000000000from collections import defaultdict from functools import wraps from inspect import Parameter, isgeneratorfunction, signature from itertools import chain from types import MethodType from typing import ( AbstractSet, Any, Callable, 
Collection, Iterable, List, Mapping, MutableMapping, Optional, Sequence, Type, TypeVar, overload, ) from apischema.aliases import Aliaser from apischema.cache import CacheAwareDict from apischema.methods import is_method, method_class from apischema.objects import get_alias from apischema.objects.fields import FieldOrName, check_field_or_name, get_field_name from apischema.types import AnyType from apischema.typing import get_type_hints from apischema.utils import get_origin_or_type2 from apischema.validation.dependencies import find_all_dependencies from apischema.validation.errors import ( ValidationError, apply_aliaser, build_validation_error, merge_errors, ) from apischema.validation.mock import NonTrivialDependency _validators: MutableMapping[Type, List["Validator"]] = CacheAwareDict(defaultdict(list)) def get_validators(tp: AnyType) -> Sequence["Validator"]: return list( chain.from_iterable(_validators[cls] for cls in getattr(tp, "__mro__", [tp])) ) class Discard(Exception): def __init__(self, fields: Optional[AbstractSet[str]], error: ValidationError): self.fields = fields self.error = error class Validator: def __init__( self, func: Callable, field: Optional[FieldOrName] = None, discard: Optional[Collection[FieldOrName]] = None, ): wraps(func)(self) self.func = func self.field = field # Cannot use field.name because fields are not yet initialized with __set_name__ if field is not None and discard is None: self.discard: Optional[Collection[FieldOrName]] = (field,) else: self.discard = discard self.dependencies: AbstractSet[str] = set() try: parameters = signature(func).parameters except ValueError: self.params: AbstractSet[str] = set() else: if not parameters: raise TypeError("Validator must have at least one parameter") if any(p.kind == Parameter.VAR_KEYWORD for p in parameters.values()): raise TypeError("Validator cannot have variadic keyword parameter") if any(p.kind == Parameter.VAR_POSITIONAL for p in parameters.values()): raise TypeError("Validator cannot have variadic positional parameter") self.params = set(list(parameters)[1:]) if isgeneratorfunction(func): def validate(*args, **kwargs): errors = list(func(*args, **kwargs)) if errors: raise build_validation_error(errors) self.validate = validate else: self.validate = func def __get__(self, instance, owner): return self if instance is None else MethodType(self.func, instance) def __call__(self, *args, **kwargs): raise RuntimeError("Method __set_name__ has not been called") def _register(self, owner: Type): self.owner = owner self.dependencies = find_all_dependencies(owner, self.func) | self.params _validators[owner].append(self) def __set_name__(self, owner, name): self._register(owner) setattr(owner, name, self.func) T = TypeVar("T") def validate( obj: T, validators: Optional[Iterable[Validator]] = None, kwargs: Optional[Mapping[str, Any]] = None, *, aliaser: Aliaser = lambda s: s, ) -> T: if validators is None: validators = get_validators(obj.__class__) else: validators = list(validators) error: Optional[ValidationError] = None for i, validator in enumerate(validators): try: if not kwargs: validator.validate(obj) elif validator.params == kwargs.keys(): validator.validate(obj, **kwargs) else: validator.validate(obj, **{k: kwargs[k] for k in validator.params}) except ValidationError as e: err = apply_aliaser(e, aliaser) except NonTrivialDependency as exc: exc.validator = validator raise else: continue if validator.field is not None: alias = getattr(get_alias(validator.owner), get_field_name(validator.field)) err = 
ValidationError(children={aliaser(alias): err}) error = merge_errors(error, err) if validator.discard: try: discarded = set(map(get_field_name, validator.discard)) next_validators = ( v for v in validators[i:] if v.dependencies.isdisjoint(discarded) ) validate(obj, next_validators, kwargs, aliaser=aliaser) except ValidationError as err: raise merge_errors(error, err) else: raise error if error is not None: raise error return obj V = TypeVar("V", bound=Callable) @overload def validator(func: V) -> V: ... @overload def validator( field: Any = None, *, discard: Any = None, owner: Optional[Type] = None ) -> Callable[[V], V]: ... def validator(arg=None, *, field=None, discard=None, owner=None): if callable(arg): validator_ = Validator(arg, field, discard) if is_method(arg): cls = method_class(arg) if cls is None: if owner is not None: raise TypeError("Validator owner cannot be set for class validator") return validator_ elif owner is None: owner = cls if owner is None: try: first_param = next(iter(signature(arg).parameters)) owner = get_origin_or_type2(get_type_hints(arg)[first_param]) except Exception: raise ValueError("Validator first parameter must be typed") validator_._register(owner) return arg else: field = field or arg if field is not None: check_field_or_name(field) if discard is not None: if not isinstance(discard, Collection) or isinstance(discard, str): discard = [discard] for discarded in discard: check_field_or_name(discarded) return lambda func: validator(func, field=field, discard=discard, owner=owner) # type: ignore apischema-0.18.3/apischema/visitor.py000066400000000000000000000153141467672046000175600ustar00rootroot00000000000000from dataclasses import ( # type: ignore _FIELD_CLASSVAR, _FIELDS, Field, InitVar, is_dataclass, make_dataclass, ) from enum import Enum from types import MappingProxyType from typing import ( Any, Collection, Generic, Mapping, Sequence, Tuple, Type, TypeVar, Union, ) from apischema.types import COLLECTION_TYPES, MAPPING_TYPES, PRIMITIVE_TYPES, AnyType from apischema.typing import ( get_args, get_origin, get_type_hints, is_annotated, is_literal, is_literal_string, is_named_tuple, is_type_var, is_typed_dict, is_union, resolve_type_hints, ) from apischema.utils import PREFIX, get_origin_or_type, has_type_vars try: from apischema.typing import Annotated except ImportError: Annotated = ... # type: ignore TUPLE_TYPE = get_origin(Tuple[Any]) def dataclass_types_and_fields( tp: AnyType, ) -> Tuple[Mapping[str, AnyType], Sequence[Field], Sequence[Field]]: from apischema.metadata.keys import INIT_VAR_METADATA cls = get_origin_or_type(tp) assert is_dataclass(cls) types = resolve_type_hints(tp) fields, init_fields = [], [] for field in getattr(cls, _FIELDS).values(): assert isinstance(field, Field) if field._field_type == _FIELD_CLASSVAR: # type: ignore continue field_type = types[field.name] if isinstance(field_type, InitVar): types[field.name] = field_type.type init_fields.append(field) elif field_type is InitVar: metadata = getattr(cls, _FIELDS)[field.name].metadata if INIT_VAR_METADATA not in metadata: raise TypeError("Before 3.8, InitVar requires init_var metadata") init_field = (PREFIX, metadata[INIT_VAR_METADATA], ...) 
tmp_cls = make_dataclass("Tmp", [init_field], bases=(cls,)) types[field.name] = get_type_hints(tmp_cls, include_extras=True)[PREFIX] if has_type_vars(types[field.name]): raise TypeError("Generic InitVar are not supported before 3.8") init_fields.append(field) else: fields.append(field) # Use immutable return because of cache return MappingProxyType(types), tuple(fields), tuple(init_fields) class Unsupported(TypeError): def __init__(self, tp: AnyType): self.type = tp Result = TypeVar("Result", covariant=True) class Visitor(Generic[Result]): def annotated(self, tp: AnyType, annotations: Sequence[Any]) -> Result: if Unsupported in annotations: raise Unsupported(Annotated[(tp, *annotations)]) return self.visit(tp) def any(self) -> Result: raise NotImplementedError def collection(self, cls: Type[Collection], value_type: AnyType) -> Result: raise NotImplementedError def dataclass( self, tp: AnyType, types: Mapping[str, AnyType], fields: Sequence[Field], init_vars: Sequence[Field], ) -> Result: raise NotImplementedError def enum(self, cls: Type[Enum]) -> Result: raise NotImplementedError def literal(self, values: Sequence[Any]) -> Result: raise NotImplementedError def mapping( self, cls: Type[Mapping], key_type: AnyType, value_type: AnyType ) -> Result: raise NotImplementedError def named_tuple( self, tp: AnyType, types: Mapping[str, AnyType], defaults: Mapping[str, Any] ) -> Result: raise NotImplementedError def new_type(self, tp: AnyType, super_type: AnyType) -> Result: return self.visit(super_type) def primitive(self, cls: Type) -> Result: raise NotImplementedError def subprimitive(self, cls: Type, superclass: Type) -> Result: return self.primitive(superclass) def tuple(self, types: Sequence[AnyType]) -> Result: raise NotImplementedError def typed_dict( self, tp: AnyType, types: Mapping[str, AnyType], required_keys: Collection[str] ) -> Result: raise NotImplementedError def union(self, types: Sequence[AnyType]) -> Result: raise NotImplementedError def unsupported(self, tp: AnyType) -> Result: raise Unsupported(tp) def visit(self, tp: AnyType) -> Result: origin, args = get_origin_or_type(tp), get_args(tp) if args: if is_annotated(tp): return self.annotated(args[0], args[1:]) if is_union(origin): return self.union(args[0]) if len(args) == 1 else self.union(args) if origin is TUPLE_TYPE: if len(args) < 2 or args[1] is not ...: return self.tuple(args) if origin in COLLECTION_TYPES: return self.collection(origin, args[0]) if origin in MAPPING_TYPES: return self.mapping(origin, args[0], args[1]) if is_literal(tp): return self.literal(args) if origin in PRIMITIVE_TYPES: return self.primitive(origin) if is_dataclass(origin): return self.dataclass(tp, *dataclass_types_and_fields(tp)) if hasattr(origin, "__supertype__"): return self.new_type(origin, origin.__supertype__) if origin is Any: return self.any() if origin in COLLECTION_TYPES: return self.collection(origin, Any) if origin in MAPPING_TYPES: return self.mapping(origin, Any, Any) if isinstance(origin, type): if issubclass(origin, Enum): return self.enum(origin) for primitive in PRIMITIVE_TYPES: if issubclass(origin, primitive): return self.subprimitive(origin, primitive) # NamedTuple if is_named_tuple(origin): if hasattr(origin, "__annotations__"): types = resolve_type_hints(origin) # TODO is __field_types for python 3.6 only? 
elif hasattr(origin, "__field_types"): # pragma: no cover types = origin.__field_types else: # pragma: no cover types = {f: Any for f in origin._fields} # noqa: E501 return self.named_tuple(origin, types, origin._field_defaults) if is_typed_dict(origin): required_keys = getattr(origin, "__required_keys__", ()) # py38 return self.typed_dict(origin, resolve_type_hints(origin), required_keys) if is_literal_string(origin): return self.primitive(str) if is_type_var(origin): if origin.__constraints__: return self.visit(Union[origin.__constraints__]) else: return self.any() return self.unsupported(tp) apischema-0.18.3/benchmark/000077500000000000000000000000001467672046000155035ustar00rootroot00000000000000apischema-0.18.3/benchmark/benchmarks/000077500000000000000000000000001467672046000176205ustar00rootroot00000000000000apischema-0.18.3/benchmark/benchmarks/__init__.py000066400000000000000000000000001467672046000217170ustar00rootroot00000000000000apischema-0.18.3/benchmark/benchmarks/apischema.py000066400000000000000000000022751467672046000221320ustar00rootroot00000000000000from dataclasses import dataclass, field from datetime import datetime from typing import NewType from common import Benchmark, Methods, Payment import apischema apischema.settings.camel_case = True apischema.settings.deserialization.override_dataclass_constructors = True @dataclass(frozen=True) class Message: title: str body: str addresses: list[str] | None = None persistence: int | None = None PositiveFloat = NewType("PositiveFloat", float) apischema.schema(min=0)(PositiveFloat) @dataclass(frozen=True) class Client: id: int = field(metadata=apischema.schema(min=0)) first_name: str last_name: str @dataclass(frozen=True) class Item: name: str price: PositiveFloat quantity: int = field(default=1, metadata=apischema.schema(min=1)) @dataclass(frozen=True) class Receipt: store: str address: str date: datetime items: list[Item] payment: Payment client: Client | None = None special_offers: PositiveFloat | None = None def methods(cls: type) -> Methods: return Methods( apischema.deserialization_method(cls), apischema.serialization_method(cls) ) benchmarks = Benchmark(methods(Message), methods(Receipt)) apischema-0.18.3/benchmark/benchmarks/cattr.py000066400000000000000000000023541467672046000213130ustar00rootroot00000000000000from datetime import datetime import attrs import cattr from common import Benchmark, Methods, Payment @attrs.frozen class Message: title: str body: str addresses: list[str] | None = None persistence: int | None = None @attrs.frozen class Client: id: int = attrs.field(validator=attrs.validators.ge(0)) firstName: str # Cattrs recommand using camelCase attributes lastName: str @attrs.frozen class Item: name: str price: float = attrs.field(validator=attrs.validators.ge(0)) quantity: int = attrs.field(default=1, validator=attrs.validators.ge(1)) @attrs.frozen class Receipt: store: str address: str date: datetime items: list[Item] payment: Payment client: Client | None = None specialOffers: float | None = attrs.field( default=None, validator=attrs.validators.optional(attrs.validators.ge(0)) ) cattr.register_unstructure_hook(datetime, lambda v: v.isoformat()) cattr.register_structure_hook(datetime, lambda v, _: datetime.fromisoformat(v)) def methods(cls: type) -> Methods: return Methods( lambda data: cattr.structure(data, cls), lambda obj: cattr.unstructure(obj, cls) ) benchmark = Benchmark(methods(Message), methods(Receipt), "cattrs") 
apischema-0.18.3/benchmark/benchmarks/marshmallow.py000066400000000000000000000041261467672046000225230ustar00rootroot00000000000000import marshmallow.validate from common import Benchmark, Methods, Payment, to_camel_case # https://marshmallow.readthedocs.io/en/latest/examples.html#inflection-camel-casing-keys class CamelCaseSchema(marshmallow.Schema): """Schema that uses camel-case for its external representation and snake-case for its internal representation. """ def on_bind_field(self, field_name, field_obj): field_obj.data_key = to_camel_case(field_obj.data_key or field_name) class Message(CamelCaseSchema): title = marshmallow.fields.Str(required=True) body = marshmallow.fields.Str(required=True) addresses = marshmallow.fields.List(marshmallow.fields.Str) persistence = marshmallow.fields.Int() class Client(CamelCaseSchema): id = marshmallow.fields.Int( required=True, validate=marshmallow.validate.Range(min=0) ) first_name = marshmallow.fields.Str(required=True) last_name = marshmallow.fields.Str(required=True) class Item(CamelCaseSchema): name = marshmallow.fields.Str(required=True) price = marshmallow.fields.Float( required=True, validate=marshmallow.validate.Range(min=0) ) quantity = marshmallow.fields.Int( default=1, validate=marshmallow.validate.Range(min=1) ) class Receipt(CamelCaseSchema): store = marshmallow.fields.Str(required=True) address = marshmallow.fields.Str(required=True) date = marshmallow.fields.DateTime(required=True) items = marshmallow.fields.List(marshmallow.fields.Nested(Item), required=True) payment = marshmallow.fields.Str( required=True, validate=marshmallow.validate.OneOf({e.value for e in Payment}) ) client = marshmallow.fields.Nested(Client) special_offers = marshmallow.fields.Float( validate=marshmallow.validate.Range(min=0) ) @marshmallow.post_load def convert_payment_to_enum(self, data, **kwargs): data["payment"] = Payment(data["payment"]) return data def methods(cls: type[CamelCaseSchema]) -> Methods: return Methods(cls().load, cls().dump) benchmarks = Benchmark(methods(Message), methods(Receipt)) apischema-0.18.3/benchmark/benchmarks/mashumaro.py000066400000000000000000000031211467672046000221630ustar00rootroot00000000000000from dataclasses import dataclass, field from datetime import datetime from typing import Optional import mashumaro from common import Benchmark, Methods, Payment @dataclass(frozen=True) class Message(mashumaro.DataClassDictMixin): title: str body: str addresses: Optional[list[str]] = None # no Python 3.10 support in 2.9.1... 
persistence: Optional[int] = None @dataclass(frozen=True) class Client(mashumaro.DataClassDictMixin): id: int first_name: str = field(metadata=mashumaro.field_options(alias="firstName")) last_name: str = field(metadata=mashumaro.field_options(alias="lastName")) def __post_init__(self): # The only way I've found to add constraints if self.id < 0: raise ValueError @dataclass(frozen=True) class Item(mashumaro.DataClassDictMixin): name: str price: float quantity: int = 1 def __post_init__(self): if self.price < 0 or self.quantity < 1: raise ValueError @dataclass(frozen=True) class Receipt(mashumaro.DataClassDictMixin): store: str address: str date: datetime items: list[Item] payment: Payment client: Optional[Client] = None special_offers: Optional[float] = field( default=None, metadata=mashumaro.field_options(alias="specialOffers") ) def __post_init__(self): if self.special_offers is not None and self.special_offers < 0: raise ValueError def methods(cls: type[mashumaro.DataClassDictMixin]) -> Methods: return Methods(cls.from_dict, cls.to_dict) benchmarks = Benchmark(methods(Message), methods(Receipt)) apischema-0.18.3/benchmark/benchmarks/pydantic.py000066400000000000000000000022431467672046000220060ustar00rootroot00000000000000from datetime import datetime from typing import Optional, Type import pydantic from common import Benchmark, Methods, Payment, to_camel_case class CamelModel(pydantic.BaseModel): model_config = pydantic.ConfigDict(alias_generator=to_camel_case) class Message(CamelModel): title: str body: str addresses: Optional[list[str]] = None persistence: Optional[int] = None class Client(CamelModel): id: int = pydantic.Field(ge=0) first_name: str last_name: str class Item(CamelModel): name: str price: float = pydantic.Field(ge=0) number: int = pydantic.Field(1, ge=1) class Receipt(CamelModel): store: str address: str date: datetime items: list[Item] payment: Payment client: Optional[Client] = None special_offers: Optional[float] = pydantic.Field(None, ge=0) def methods(model: Type[CamelModel]) -> Methods: def serialize_receipts(obj: Receipt): obj.date.isoformat() return obj.model_dump() return Methods( lambda data: model(**data), model.model_dump if model is Message else serialize_receipts, ) benchmarks = Benchmark(methods(Message), methods(Receipt)) apischema-0.18.3/benchmark/benchmarks/serde.py000066400000000000000000000025131467672046000212750ustar00rootroot00000000000000from dataclasses import dataclass from datetime import datetime from typing import Optional import serde from common import Benchmark, Methods, Payment from serde.core import SETTINGS SETTINGS["debug"] = True @serde.serde @dataclass(frozen=True) class Message: title: str body: str addresses: Optional[list[str]] = None persistence: Optional[int] = None @serde.serde @dataclass(frozen=True) class Client: id: int firstName: str lastName: str def __post_init__(self): # The only way I've found to add constraints if self.id < 0: raise ValueError @serde.serde @dataclass(frozen=True) class Item: name: str price: float quantity: int = 1 def __post_init__(self): if self.price < 0 or self.quantity < 1: raise ValueError @serde.serde @dataclass(frozen=True) class Receipt: store: str address: str date: datetime items: list[Item] payment: Payment client: Optional[Client] = None specialOffers: Optional[float] = None def __post_init__(self): if self.specialOffers is not None and self.specialOffers < 0: raise ValueError def methods(cls: type) -> Methods: return Methods(lambda data: serde.from_dict(cls, data), serde.to_dict) 
benchmarks = Benchmark(methods(Message), methods(Receipt), "pyserde") apischema-0.18.3/benchmark/benchmarks/typedload.py000066400000000000000000000034651467672046000221670ustar00rootroot00000000000000from dataclasses import dataclass, field from datetime import datetime from typing import Optional import typedload.datadumper # noqa import typedload.dataloader # noqa from common import Benchmark, Methods, Payment @dataclass(frozen=True) class Message: title: str body: str addresses: Optional[list[str]] = None # no Python 3.10 support in 2.14 persistence: Optional[int] = None @dataclass(frozen=True) class Client: id: int first_name: str = field(metadata={"name": "firstName"}) last_name: str = field(metadata={"name": "lastName"}) def __post_init__(self): # The only way I've found to add constraints if self.id < 0: raise ValueError @dataclass(frozen=True) class Item: name: str price: float quantity: int = 1 def __post_init__(self): if self.price < 0 or self.quantity < 1: raise ValueError @dataclass(frozen=True) class Receipt: store: str address: str date: datetime items: list[Item] payment: Payment client: Optional[Client] = None special_offers: Optional[float] = field( default=None, metadata={"name": "specialOffers"} ) def __post_init__(self): if self.special_offers is not None and self.special_offers < 0: raise ValueError loader = typedload.dataloader.Loader() loader.handlers.insert( loader.index(datetime), (lambda tp: tp is datetime, lambda _, value, tp: datetime.fromisoformat(value)), ) dumper = typedload.datadumper.Dumper() dumper.handlers.insert( dumper.index(datetime.now()), (lambda tp: tp is datetime, lambda _, value, tp: value.isoformat()), ) def methods(cls: type) -> Methods: load = loader.load return Methods(lambda data: load(data, cls), dumper.dump) benchmarks = Benchmark(methods(Message), methods(Receipt)) apischema-0.18.3/benchmark/common.py000066400000000000000000000007071467672046000173510ustar00rootroot00000000000000from collections.abc import Callable from enum import Enum from typing import Any, NamedTuple from apischema.utils import to_camel_case class Methods(NamedTuple): deserializer: Callable[[Any], Any] serializer: Callable[[Any], Any] class Benchmark(NamedTuple): simple: Methods complex: Methods library: str | None = None class Payment(str, Enum): CASH = "CASH" CREDIT_CARD = "CREDIT_CARD" to_camel_case = to_camel_case apischema-0.18.3/benchmark/data.json000066400000000000000000000022051467672046000173060ustar00rootroot00000000000000{ "simple": [ { "title": "Benchmark running", "body": "A benchmark is running against competitor libraries", "addresses": [ "https://github.com/wyfo/" ], "persistence": 20 }, { "title": "Benchmark result", "body": "apischema is faster." 
} ], "complex": [ { "store": "BigMarket", "address": "In a galaxy far far away", "date": "2021-12-29T15:10:00", "items": [ { "name": "Lorem ipsum", "price": 1.25 }, { "name": "Dolor sit amet", "price": 7.99, "quantity": 2 }, { "name": "Consectetur", "price": 26.70 } ], "payment": "CASH", "specialOffers": 1.07 }, { "store": "BigMarket", "address": "In a galaxy far far away", "date": "2021-12-29T15:10:00", "items": [ { "name": "Lorem ipsum", "price": 1.25 } ], "payment": "CREDIT_CARD", "client": { "id": 42, "firstName": "Joseph", "lastName": "Perez" } } ] } apischema-0.18.3/benchmark/main.py000066400000000000000000000136651467672046000170140ustar00rootroot00000000000000import importlib.metadata import json import pathlib import time import timeit from collections.abc import Callable, Collection, Mapping, Sequence from dataclasses import dataclass, replace from typing import Any, NamedTuple import benchmarks import matplotlib.pyplot as plt import pandas from common import Benchmark, Methods ROOT_DIR = pathlib.Path(__file__).parent.parent DATA_PATH = ROOT_DIR / "benchmark" / "data.json" TABLE_PATH = ROOT_DIR / "examples" / "benchmark_table.md" LIGHT_CHART_PATH = ROOT_DIR / "docs" / "benchmark_chart_light.svg" DARK_CHART_PATH = ROOT_DIR / "docs" / "benchmark_chart_dark.svg" CHART_TRUNCATE = 20 packages = [ path.stem for path in pathlib.Path(benchmarks.__file__).parent.iterdir() if not path.name.startswith("_") ] packages = ["pydantic"] def time_it(func: Callable, arg: Any) -> float: timer = timeit.Timer(lambda: func(arg)) number, _ = timer.autorange() return min(timer.repeat(number=number)) / number def time_it_mean(func: Callable, args: Collection) -> float: return sum(time_it(func, arg) for arg in args) / len(args) class BenchmarkResult(NamedTuple): first_run: float deserialization: float serialization: float def run_benchmark( methods: Methods, data: Mapping[str, Any], key: str ) -> BenchmarkResult: print(f"\t{key}") deserializer, serializer = methods first_run_start = time.perf_counter_ns() deserialized = [deserializer(elt) for elt in data[key]] for obj in deserialized: serializer(obj) first_run_end = time.perf_counter_ns() first_run = (first_run_end - first_run_start) * 1e-9 print(f"\t\tfirst run: {first_run}") deserialization = time_it_mean(deserializer, data[key]) print(f"\t\tdeserialization: {deserialization}") serialization = time_it_mean(serializer, deserialized) print(f"\t\tserialization: {serialization}") return BenchmarkResult(first_run, deserialization, serialization) class FullBenchmarkResult(NamedTuple): simple_deserialization: float complex_deserialization: float simple_serialization: float complex_serialization: float @dataclass(frozen=True) class LibraryBenchmarkResult: library: str version: str result: FullBenchmarkResult def total(self) -> float: return sum(self.result) def relative(self, ref: "LibraryBenchmarkResult") -> "LibraryBenchmarkResult": result = FullBenchmarkResult(*(a / b for a, b in zip(self.result, ref.result))) return replace(self, result=result) def run_library_benchmark( package: str, data: Mapping[str, Any] ) -> LibraryBenchmarkResult: print("====================") print(package) # import module before importing benchmark module in order to remove it # from startup_time importlib.import_module(package) start_import = time.perf_counter_ns() module = importlib.import_module(f"{benchmarks.__name__}.{package}") end_import = time.perf_counter_ns() startup_time = (end_import - start_import) * 1e-9 simple_methods, complex_methods, library = next( val for val in 
module.__dict__.values() if isinstance(val, Benchmark) ) library = library or package simple_results, complex_results = [ run_benchmark(methods, data, key) for methods, key in [(simple_methods, "simple"), (complex_methods, "complex")] ] print( f"startup time: {startup_time + simple_results.first_run + complex_results.first_run}" ) return LibraryBenchmarkResult( library, importlib.metadata.version(library), FullBenchmarkResult( simple_results.deserialization, complex_results.deserialization, simple_results.serialization, complex_results.serialization, ), ) def export_table(results: Sequence[LibraryBenchmarkResult]): with open(TABLE_PATH, "w") as table: table.write("|library|version|deserialization|serialization|\n") table.write("|-|-|-:|-:|\n") for res in results: if all(r == 1.0 for r in res.result): deserialization, serialization = "/", "/" else: deserialization, serialization = [ f"x{round(sum(res.result[index:index + 2]) / 2, 1)}" f" ({round(res.result[index], 1)}/{round(res.result[index + 1], 1)})" for index in (0, 2) ] table.write( f"|{res.library}|{res.version}|{deserialization}|{serialization}|\n" ) def export_chart( results: Sequence[LibraryBenchmarkResult], path: pathlib.Path, style: str ): plt.style.use(style) columns = [ f"{op} ({bench})" for op in ("deserialization", "serialization") for bench in ("simple", "complex") ] # I've used pandas because I was not able to do what I wanted with matplotlib alone df = pandas.DataFrame( [ [res.library] + [min(r, CHART_TRUNCATE) for r in res.result] for res in results ], columns=["library"] + columns, ) ax = df.plot.bar(x="library", title="Benchmark (lower is better)", rot=45) ax.legend(framealpha=0, loc="upper left") plt.xlabel("") plt.tight_layout() for container in ax.containers: ax.bar_label( container, labels=["" if v < CHART_TRUNCATE else "··" for v in container.datavalues], padding=2, rotation=90, ) plt.savefig(str(path), transparent=True) def main(): with open(DATA_PATH) as json_file: data = json.load(json_file) results = sorted( (run_library_benchmark(p, data) for p in packages), key=LibraryBenchmarkResult.total, ) relative_results = [res.relative(results[0]) for res in results] export_table(relative_results) export_chart(relative_results, LIGHT_CHART_PATH, "default") export_chart(relative_results, DARK_CHART_PATH, "dark_background") if __name__ == "__main__": main() apischema-0.18.3/benchmark/requirements.txt000066400000000000000000000002031467672046000207620ustar00rootroot00000000000000cattrs==23.1.2 marshmallow==3.20.1 mashumaro==3.10 matplotlib==3.8.0 pandas==2.1.1 pydantic==2.4.2 typedload==2.26 pyserde==0.12.3 apischema-0.18.3/docs/000077500000000000000000000000001467672046000145015ustar00rootroot00000000000000apischema-0.18.3/docs/conversions.md000066400000000000000000000305271467672046000174020ustar00rootroot00000000000000# Conversions – (de)serialization customization *apischema* covers the majority of standard data types, but of course that's not enough, which is why it enables you to add support for all your classes and the libraries you use. Actually, *apischema* itself uses this conversion feature to provide a basic support for standard library data types like UUID/datetime/etc. (see [std_types.py](https://github.com/wyfo/apischema/blob/master/apischema/std_types.py)) ORM support can easily be achieved with this feature (see [SQLAlchemy example](examples/sqlalchemy_support.md)). 
In fact, you can even add support for competitor libraries like *Pydantic* (see [*Pydantic* compatibility example](examples/pydantic_support.md)). ## Principle - apischema conversions An *apischema* conversion is composed of a source type, let's call it `Source`, a target type `Target` and a converter function with signature `(Source) -> Target`. When a class (actually, a non-builtin class, so not `int`/`list`/etc.) is deserialized, *apischema* will check if there is a conversion where this type is the target. If found, the source type of conversion will be deserialized, then the converter will be applied to get an object of the expected type. Serialization works the same way but inverted: look for a conversion with the type as source, apply the converter, and get the target type. Conversions are also handled in schema generation: for a deserialization schema, the source schema is merged into the target schema, while the target schema is merged into the source schema for a serialization schema. ## Register a conversion A conversion is registered using `apischema.deserializer`/`apischema.serializer` for deserialization/serialization respectively. When used as a function decorator, the `Source`/`Target` types are directly extracted from the conversion function signature. `serializer` can be called on methods/properties, in which case the `Source` type is inferred to be the owning type. ```python {!conversions.py!} ``` !!! warning (De)serializer methods cannot be used with `typing.NamedTuple`; in fact, *apischema* uses the `__set_name__` magic method but it is not called on `NamedTuple` subclass fields. ### Multiple deserializers Sometimes, you want several ways to deserialize a type. While it's possible to register a deserializer with a `Union` parameter, it's not very practical. That's why *apischema* makes it possible to register several deserializers for the same type. They will be handled with a `Union` source type (ordered by deserializer registration), with the right deserializer selected according to the matching alternative. ```python {!multiple_deserializers.py!} ``` On the other hand, serializer registration overwrites the previous registration if any. `apischema.conversions.reset_deserializers`/`apischema.conversions.reset_serializers` can be used to reset (de)serializers (even those of the standard types embedded in *apischema*). ### Inheritance All serializers are naturally inherited. In fact, with a conversion function `(Source) -> Target`, you can always pass a subtype of `Source` and get a `Target` in return. Moreover, when the serializer is a method/property, overriding this method/property in a subclass will override the inherited serializer. ```python {!serializer_inheritance.py!} ``` !!! note Inheritance can also be toggled off in specific cases, like in the [Class as union of its subclasses](examples/subclass_union.md) example. On the other hand, deserializers cannot be inherited, because the same `Source` passed to a conversion function `(Source) -> Target` will always give the same `Target` (not ensured to be the desired subtype). !!! note Pseudo-inheritance could be achieved by registering a conversion (using for example a `classmethod`) for each subclass in the `__init_subclass__` method (or a metaclass), or by using `__subclasses__`; see [example](examples/inherited_deserializer.md). ## Generic conversions `Generic` conversions are supported out of the box. ```python {!generic_conversions.py!} ``` However, you're not allowed to register a conversion of a specialized generic type, like `Foo[int]`.
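To illustrate both points, here is a minimal sketch — the `Wrapper` class and `unwrap` converter are hypothetical, not part of the documented examples — of a serializer registered for a generic class; registering the same converter for a specialized alias like `Wrapper[int]` would instead be rejected:

```python
from dataclasses import dataclass
from typing import Generic, TypeVar

from apischema import serialize, serializer

T = TypeVar("T")


@dataclass
class Wrapper(Generic[T]):
    wrapped: T


# Registered for the unparametrized Wrapper, so it applies to any Wrapper[...]
@serializer
def unwrap(wrapper: Wrapper[T]) -> T:
    return wrapper.wrapped


assert serialize(Wrapper[int], Wrapper(0)) == 0
# Registering the converter for Wrapper[int] specifically would raise an error,
# because conversions cannot be registered for a specialized generic type.
```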
## Conversion object In the previous example, conversions were registered using only converter functions. However, it can also be done by passing an `apischema.conversions.Conversion` instance. It allows specifying additional conversion metadata (see [next sections](#sub-conversions) for examples) and making the converter source/target explicit when annotations are not available. ```python {!conversion_object.py!} ``` ## Dynamic conversions — select conversions at runtime Whether or not a conversion is registered for a given type, conversions can also be provided at runtime, using the `conversion` parameter of `deserialize`/`serialize`/`deserialization_schema`/`serialization_schema`. ```python {!dynamic_conversions.py!} ``` !!! note For `definitions_schema`, conversions can be added with types by using a tuple instead, for example `definitions_schema(serializations=[(list[Foo], foo_to_bar)])`. The `conversion` parameter can also take a tuple of conversions, when you have a `Union`, a `tuple`, or when you want to have several deserializations for the same type. ### Dynamic conversions are local Dynamic conversions are discarded after having been applied (or after a class without conversion has been encountered). For example, you can't directly apply a dynamic conversion to a dataclass field when calling `serialize` on an instance of this dataclass. Reasons for this design are detailed in the [FAQ](#whats-the-difference-between-conversion-and-default_conversion-parameters). ```python {!local_conversions.py!} ``` !!! note A dynamic conversion is not discarded when the encountered type is a container (`list`, `dict`, `Collection`, etc. or `Union`) or a registered conversion from/to a container; the dynamic conversion can then apply to the container elements. ### Dynamic conversions interact with `type_name` Dynamic conversions are applied before looking for a ref registered with `type_name`. ```python {!dynamic_type_name.py!} ``` ### Bypass registered conversion Using `apischema.identity` as a dynamic conversion allows you to bypass a registered conversion, i.e. to (de)serialize the given type as it would be without any registered conversion. ```python {!bypass_conversions.py!} ``` !!! note For a more precise selection of the bypassed conversion, for a `tuple` or `Union` member for example, it's possible to pass the concerned class as the source *and* the target of a conversion *with* the `identity` converter, as shown in the example. ### Liskov substitution principle LSP is taken into account when applying dynamic conversions: the serializer source can be a subclass of the actual class and the deserializer target can be a superclass of the actual class. ```python {!dynamic_conversions_lsp.py!} ``` ### Generic dynamic conversions `Generic` dynamic conversions are supported out of the box. Also, contrary to registered conversions, partially specialized generics are allowed. ```python {!dynamic_generic_conversions.py!} ``` ## Field conversions It is possible to register a conversion for a particular dataclass field using the `conversion` metadata. ```python {!field_conversions.py!} ``` !!!
note It's possible to pass a conversion only for deserialization or only for serialization. ## Serialized method conversions Serialized methods can also have dedicated conversions for their return value. ```python {!serialized_conversions.py!} ``` ## Default conversions As with almost every default behavior in *apischema*, default conversions can be configured using `apischema.settings.deserialization.default_conversion`/`apischema.settings.serialization.default_conversion`. The initial values of these settings are the functions which retrieve conversions registered with `deserializer`/`serializer`. You can for example [support *attrs*](examples/attrs_support.md) classes with this feature: ```python {!examples/attrs_support.py!} ``` *apischema* functions (`deserialize`/`serialize`/`deserialization_schema`/`serialization_schema`/`definitions_schema`) also have a `default_conversion` parameter to dynamically modify default conversions. See the [FAQ](#whats-the-difference-between-conversion-and-default_conversion-parameters) for the difference between the `conversion` and `default_conversion` parameters. ## Sub-conversions Sub-conversions are [dynamic conversions](#dynamic-conversions--select-conversions-at-runtime) applied on the result of a conversion. ```python {!sub_conversions.py!} ``` Sub-conversions can also be used to [bypass registered conversions](#bypass-registered-conversion) or to define [recursive conversions](#lazyrecursive-conversions). ## Lazy/recursive conversions Conversions can be defined lazily, i.e. using a function returning `Conversion` (a single one, or a tuple of them); this function must be wrapped into an `apischema.conversions.LazyConversion` instance. It allows creating recursive conversions or using a conversion object which can be modified after its definition (for example, a conversion for a base class modified by `__init_subclass__`). It is used by *apischema* itself for the generated JSON schema. It is indeed a recursive data structure, and the [different versions](json_schema.md#json-schema--openapi-version) are handled by a conversion with a lazy recursive sub-conversion. ```python {!recursive_conversions.py!} ``` ### Lazy registered conversions Lazy conversions can also be registered, but the deserialization target/serialization source has to be passed too. ```python {!lazy_registered_conversion.py!} ``` ## Conversion helpers ### String conversions A common pattern of conversion concerns classes that have a string constructor and a `__str__` method, for example the standard types `uuid.UUID`, `pathlib.Path`, or `ipaddress.IPv4Address`. Using `apischema.conversions.as_str` will register a string-deserializer from the constructor and a string-serializer from the `__str__` method. A `ValueError` raised by the constructor is caught and converted to a `ValidationError`. ```python {!as_str.py!} ``` !!! note The previously mentioned standard types are handled by *apischema* using `as_str`. ### ValueError catching Converters can be wrapped with `apischema.conversions.catch_value_error` in order to catch `ValueError` and reraise it as a `ValidationError`. It's notably used by `as_str` and other standard types. !!! note This wrapper is in fact inlined in deserialization, so it has better performance than writing the *try-catch* in the code. ### Use `Enum` names `Enum` subclasses are (de)serialized using values. However, you may want to use enumeration names instead; that's why *apischema* provides `apischema.conversions.as_names` to decorate `Enum` subclasses.
```python {!as_names.py!} ``` ### Class as union of its subclasses See the [Class as union of its subclasses](examples/subclass_union.md) example. ### Object deserialization — transform function into a dataclass deserializer `apischema.objects.object_deserialization` can convert a function into a new function taking a single parameter, a dataclass whose fields are mapped from the original function parameters. It can be used, for example, to build a deserialization conversion from an alternative constructor. ```python {!object_deserialization.py!} ``` !!! note Parameter metadata can be specified using `typing.Annotated`, or be passed with the `parameters_metadata` parameter, which is a mapping of parameter names as keys and metadata as values. ### Object serialization — select only a subset of fields `apischema.objects.object_serialization` can be used to serialize only a subset of an object's fields and methods. ```python {!object_serialization.py!} ``` ## FAQ #### What's the difference between `conversion` and `default_conversion` parameters? Dynamic conversions (`conversion` parameter) exist to ensure consistency and reuse of subschemas referenced (with a `$ref`) in the JSON/OpenAPI schema. In fact, different global conversions (`default_conversion` parameter) could lead to having a field with different schemas depending on global conversions, so a class would not be able to be referenced consistently. Because dynamic conversions are local, they cannot mess with an object field schema. Schema generation uses the same default conversions for all definitions (which can have an associated dynamic conversion). The `default_conversion` parameter allows having different (de)serialization contexts, for example to map dates to strings between frontend and backend, and to timestamps between backend services. apischema-0.18.3/docs/data_model.md000066400000000000000000000304431467672046000171200ustar00rootroot00000000000000# Data model *apischema* handles every class/type you need. Moreover, it does so in an additive way, meaning that it doesn't affect your types. ### PEP 585 With Python 3.9 and [PEP 585](https://www.python.org/dev/peps/pep-0585/), typing is substantially shaken up; all container types of the `typing` module are now deprecated. apischema fully supports 3.9 and PEP 585, as shown in the different examples. However, `typing` containers can still be used, especially (and necessarily) when using an older version. ## Dataclasses Because the library aims to bring the minimum boilerplate, it's built on top of the standard library. [Dataclasses](https://docs.python.org/3/library/dataclasses.html) are thus the core structure of the data model. Dataclasses bring the possibility of field customization, with more than just a default value. In addition to the common parameters of [`dataclasses.field`](https://docs.python.org/3/library/dataclasses.html#dataclasses.field), customization is done with the `metadata` parameter; metadata can also be passed using PEP 593 `typing.Annotated`. With some teasing of features presented later: ```python {!field_metadata.py!} ``` !!! note A field's metadata is just an ordinary `dict`; *apischema* provides some functions to enrich these metadata with its own keys (`alias("foo_bar")` is roughly equivalent to `{"_apischema_alias": "foo_bar"}`) and use them when the time comes, but metadata are not reserved to *apischema* and other keys can be added.
Because [PEP 584](https://www.python.org/dev/peps/pep-0584/) is painfully missing before Python 3.9, *apischema* metadata use their own subclass of `dict` just to add `|` operator for convenience in all Python versions. Dataclasses `__post_init__` and `field(init=False)` are fully supported. Implications of this feature usage are documented in the relative sections. !!! warning Before 3.8, `InitVar` is doing [type erasure](https://bugs.python.org/issue33569), which is why it's not possible for *apischema* to retrieve type information of init variables. To fix this behavior, a field metadata `init_var` can be used to put back the type of the field (`init_var` also accepts stringified type annotations). Dataclass-like types (*attrs*/*SQLAlchemy*/etc.) can also be supported with a few lines of code, see [next section](#dataclass-like-types) ## Standard library types *apischema* natively handles most of the types provided by the standard library. They are sorted in the following categories: #### Primitive `str`, `int`, `float`, `bool`, `None`, subclasses of them They correspond to JSON primitive types. #### Collection - `collection.abc.Collection` (*`typing.Collection`*) - `collection.abc.Sequence` (*`typing.Sequence`*) - `tuple` (*`typing.Tuple`*) - `collection.abc.MutableSequence` (*`typing.MutableSequence`*) - `list` (*`typing.List`*) - `collection.abc.Set` (*`typing.AbstractSet`*) - `collection.abc.MutableSet` (*`typing.MutableSet`*) - `frozenset` (*`typing.FrozenSet`*) - `set` (*`typing.Set`*) They correspond to JSON *array* and are serialized to `list`. #### Mapping - `collection.abc.Mapping` (*`typing.Mapping`*) - `collection.abc.MutableMapping` (*`typing.MutableMapping`*) - `dict` (*`typing.Dict`*) They correpond to JSON *object* and are serialized to `dict`. #### Enumeration `enum.Enum` subclasses, `typing.Literal` !!! warning `Enum` subclasses are (de)serialized using **values**, not names. *apischema* also provides a [conversion](conversions.md#using-enum-names) to use names instead. #### Typing facilities - `typing.Optional`/`typing.Union` (`Optional[T]` is strictly equivalent to `Union[T, None]`) : Deserialization select the first matching alternative; unsupported alternatives are ignored - `tuple` (*`typing.Tuple`*) : Can be used as collection as well as true tuple, like `tuple[str, int]` - `typing.NewType` : Serialized according to its base type - `typing.NamedTuple` : Handled as an object type, roughly like a dataclass; fields metadata can be passed using `Annotated` - `typing.TypedDict` : Handled as an object type, but with a dictionary shape; fields metadata can be passed using `Annotated` - `typing.Any` : Untouched by deserialization, serialized according to the object runtime class - `typing.LiteralString` : Handled as `str` #### Other standard library types - `bytes` : with `str` (de)serialization using base64 encoding - `datetime.datetime` - `datetime.date` - `datetime.time` : Supported only in 3.7+ with `fromisoformat`/`isoformat` - `Decimal` : With `float` (de)serialization - `ipaddress.IPv4Address` - `ipaddress.IPv4Interface` - `ipaddress.IPv4Network` - `ipaddress.IPv6Address` - `ipaddress.IPv6Interface` - `ipaddress.IPv6Network` - `pathlib.Path` - `re.Pattern` (*`typing.Pattern`*) - `uuid.UUID` : With `str` (de)serialization ## Generic `typing.Generic` can be used out of the box like in the following example: ```python {!generic.py!} ``` !!! 
warning Generic types don't have default *type name* (used in JSON/GraphQL schema) — should `Group[Foo]` be named `GroupFoo`/`FooGroup`/something else? — so they require by-class or default [`type_name` assignment](json_schema.md#set-reference-name). ## Recursive types, string annotations and PEP 563 Recursive classes can be typed as they usually do, with or without [PEP 563](https://www.python.org/dev/peps/pep-0563/). Here with string annotations: ```python {!recursive.py!} ``` Here with PEP 563 (requires 3.7+) ```python {!recursive_postponned.py!} ``` !!! warning To resolve annotations, *apischema* uses `typing.get_type_hints`; this doesn't work really well when used on objects defined outside of global scope. !!! warning "Warning (minor)" Currently, PEP 585 can have surprising behavior when used outside the box, see [bpo-41370](https://bugs.python.org/issue41370) ## `null` vs. `undefined` Contrary to Javascript, Python doesn't have an `undefined` equivalent (if we consider `None` to be the equivalent of `null`). But it can be useful to distinguish (especially when thinking about HTTP `PATCH` method) between a `null` field and an `undefined`/absent field. That's why *apischema* provides an `Undefined` constant (a single instance of `UndefinedType` class) which can be used as a default value everywhere where this distinction is needed. In fact, default values are used when field are absent, thus a default `Undefined` will *mark* the field as absent. Dataclass/`NamedTuple` fields are ignored by serialization when `Undefined`. ```python {!undefined.py!} ``` !!! note `UndefinedType` must only be used inside an `Union`, as it has no sense as a standalone type. By the way, no suitable name was found to shorten `Union[T, UndefinedType]` but propositions are welcomed. !!! note `Undefined` is a falsy constant, i.e. `bool(Undefined) is False`. ### Use `None` as if it was `Undefined` Using `None` can be more convenient than `Undefined` as a placeholder for missing value, but `Optional` types are translated to nullable fields. That's why *apischema* provides `none_as_undefined` metadata, allowing `None` to be handled as if it was `Undefined`: type will not be nullable and field not serialized if its value is `None`. ```python {!none_as_undefined.py!} ``` ## Annotated - PEP 593 [PEP 593](https://www.python.org/dev/peps/pep-0593/) is fully supported; annotations stranger to *apischema* are simply ignored. ## Custom types *apischema* can support almost all of your custom types in a few lines of code, using the [conversion feature](conversions.md). However, it also provides a simple and direct way to support dataclass-like types, as presented [below](#dataclass-like-types-aka-object-types). Otherwise, when *apischema* encounters a type that it doesn't support, `apischema.Unsupported` exception will be raised. !!! note In the rare case when a union member should be ignored by apischema, it's possible to use mark it as unsupported using `Union[Foo, Annotated[Bar, Unsupported]]`. ### Dataclass-like types, aka object types Internally, *apischema* handle standard object types — dataclasses, named tuple and typed dictionary — the same way by mapping them to a set of `apischema.objects.ObjectField`, which has the following definition: ```python @dataclass(frozen=True) class ObjectField: name: str # field's name type: Any # field's type required: bool = True # if the field is required metadata: Mapping[str, Any] = field(default_factory=dict) # field's metadata default: InitVar[Any] = ... 
# field's default value default_factory: Optional[Callable[[], Any]] = None # field's default factory kind: FieldKind = FieldKind.NORMAL # NORMAL/READ_ONLY/WRITE_ONLY ``` Thus, support of dataclass-like types (*attrs*, *SQLAlchemy* traditional mappers, etc.) can be achieved by mapping the concerned class to its own list of `ObjectField`s; this is done using `apischema.objects.set_object_fields`. ```python {!set_object_fields.py!} ``` Another way to set object fields is to directly modify *apischema* default behavior, using `apischema.settings.default_object_fields`. !!! note `set_object_fields`/`settings.default_object_fields` can be used to override existing fields. Current fields can be retrieved using `apischema.objects.object_fields`. ```python from collections.abc import Sequence from typing import Optional from apischema import settings from apischema.objects import ObjectField previous_default_object_fields = settings.default_object_field def default_object_fields(cls) -> Optional[Sequence[ObjectField]]: return [...] if ... else previous_default_object_fields(cls) settings.default_object_fields = default_object_fields ``` !!! note Almost every default behavior of apischema can be customized using `apischema.settings`. Examples of [*SQLAlchemy* support](examples/sqlalchemy_support.md) and [attrs support](examples/attrs_support.md) illustrate both methods (which could also be combined). ## Skip field Dataclass fields can be excluded from *apischema* processing by using `apischema.metadata.skip` in the field metadata. It can be parametrized with `deserialization`/`serialization` boolean parameters to skip a field only for the given operations. ```python {!skip.py!} ``` !!! note Fields skipped in deserialization should have a default value if deserialized, because deserialization of the class could raise otherwise. ### Skip field serialization depending on condition Field can also be skipped when serializing, depending on the condition given by `serialization_if`, or when the field value is equal to its default value with `serialization_default=True`. ```python {!skip_if.py!} ``` ## Composition over inheritance - composed dataclasses flattening Dataclass fields which are themselves dataclass can be "flattened" into the owning one by using `flatten` metadata. Then, when the class is (de)serialized, "flattened" fields will be (de)serialized at the same level as the owning class. ```python {!flattened.py!} ``` !!! note Generated JSON schema use [`unevaluatedProperties` keyword](https://json-schema.org/understanding-json-schema/reference/object.html?highlight=unevaluated#unevaluated-properties). This feature is very convenient for building model by composing smaller components. If some kind of reuse could also be achieved with inheritance, it can be less practical when it comes to use it in code, because there is no easy way to build an inherited class when you have an instance of the super class; you have to copy all the fields by hand. On the other hand, using composition (of flattened fields), it's easy to instantiate the class when the smaller component is just a field of it. ## FAQ #### Why isn't `Iterable` handled with other collection types? Iterable could be handled (actually, it was at the beginning), however, this doesn't really make sense from a data point of view. Iterables are computation objects, they can be infinite, etc. They don't correspond to a serialized data; `Collection` is way more appropriate in this context. #### What happens if I override dataclass `__init__`? 
*apischema* always assumes that dataclass `__init__` can be called with all its fields as kwargs parameters. If that's no longer the case after a modification of `__init__` (what means if an exception is thrown when the constructor is called because of bad parameters), *apischema* treats then the class as [not supported](#unsupported-types). apischema-0.18.3/docs/de_serialization.md000066400000000000000000000343351467672046000203600ustar00rootroot00000000000000# (De)serialization *apischema* aims to help with deserialization/serialization of API data, mostly JSON. Let's start again with the [overview example](index.md#example) ```python {!quickstart.py!} ``` ## Deserialization `apischema.deserialize` deserializes Python types from JSON-like data: `dict`/`list`/`str`/`int`/`float`/`bool`/`None` — in short, what you get when you execute `json.loads`. Types can be dataclasses as well as `list[int]`, `NewType`s, or whatever you want (see [conversions](conversions.md) to extend deserialization support to every type you want). ```python {!deserialization.py!} ``` Deserialization performs a validation of data, based on typing annotations and other information (see [schema](json_schema.md) and [validation](validation.md)). ### Deserialization passthrough In some case, e.g. MessagePack loading with raw bytes inside, some data will have other type than JSON primitive ones. These types can be allowed using `pass_through` parameter; it must be collection of classes, or a predicate. Behavior can also be set globally using `apischema.settings.deserialization.pass_through`. Only non JSON primitive classes can be allowed, because *apischema* relies on a type check with `isinstance` to skip deserialization. That exclude `NewType` but also `TypeDict`. ```python {!deserialization_pass_through.py!} ``` !!! note Equivalent serialization feature is presented in [optimizations documentation](optimizations_and_benchmark.md#serialization-passthrough). ### Strictness #### Coercion *apischema* is strict by default. You ask for an integer, you have to receive an integer. However, in some cases, data has to be be coerced, for example when parsing a configuration file. That can be done using `coerce` parameter; when set to `True`, all primitive types will be coerced to the expected type of the data model like the following: ```python {!coercion.py!} ``` `bool` can be coerced from `str` with the following case-insensitive mapping: | False | True | | --- | --- | | 0 | 1 | | f | t | | n | y | | no | yes | | false | true | | off | on | | ko | ok | The `coerce` parameter can also receive a coercion function which will then be used instead of default one. ```python {!coercion_function.py!} ``` !!! note If coercer result is not an instance of class passed in argument, a ValidationError will be raised with an appropriate error message !!! warning Coercer first argument is a primitive json type `str`/`bool`/`int`/`float`/`list`/`dict`/`type(None)`; it can be `type(None)`, so returning `cls(data)` will fail in this case. #### Additional properties *apischema* is strict too about the number of fields received for an *object*. In JSON schema terms, *apischema* put `"additionalProperties": false` by default (this can be configured by class with [properties field](#additional-and-pattern-properties)). This behavior can be controlled by `additional_properties` parameter. When set to `True`, it prevents the rejection of unexpected properties. 
```python {!additional_properties.py!} ``` #### Fall back on default Validation errors can happen when deserializing an ill-formed field. However, if this field has a default value/factory, deserialization can fall back on this default; this is enabled by `fall_back_on_default` parameter. This behavior can also be configured for each field using metadata. ```python {!fall_back_on_default.py!} ``` #### Strictness configuration *apischema* global configuration is managed through `apischema.settings` object. It has, among other, three global variables `settings.additional_properties`, `settings.deserialization.coerce` and `settings.deserialization.fall_back_on_default` whose values are used as default parameter values for the `deserialize`; by default, `additional_properties=False`, `coerce=False` and `fall_back_on_default=False`. !!! note `additional_properties` settings is in `settings.deserialization` because it's also used in [serialization](). Global coercion function can be set with `settings.coercer` following this example: ```python import json from apischema import ValidationError, settings prev_coercer = settings.coercer def coercer(cls, data): """In case of coercion failures, try to deserialize json data""" try: return prev_coercer(cls, data) except ValidationError as err: if not isinstance(data, str): raise try: return json.loads(data) except json.JSONDecodeError: raise err settings.coercer = coercer ``` ## Fields set Sometimes, it can be useful to know which field has been set by the deserialization, for example in the case of *PATCH* requests, to know which field has been updated. Moreover, it is also used in serialization to limit the fields serialized (see [next section](#exclude-unset-fields)) Because *apischema* use vanilla dataclasses, this feature is not enabled by default and must be set explicitly on a per-class basis. *apischema* provides a simple API to get/set this metadata. ```python {!fields_set.py!} ``` !!! warning The `with_fields_set` decorator MUST be put above `dataclass` one. This is because both of them modify `__init__` method, but only the first is built to take the second in account. !!! warning `dataclasses.replace` works by setting all the fields of the replaced object. Because of this issue, *apischema* provides a little wrapper `apischema.dataclasses.replace`. ## Serialization `apischema.serialize` is used to serialize Python objects to JSON-like data. Contrary to `apischema.deserialize`, Python type can be omitted; in this case, the object will be serialized with an `typing.Any` type, i.e. the class of the serialized object will be used. ```python {!serialization.py!} ``` !!! note Omitting type with `serialize` can have unwanted side effects, as it makes loose any type annotations of the serialized object. In fact, generic specialization as well as PEP 593 annotations cannot be retrieved from an object instance; [conversions](conversions.md) can also be impacted That's why it's advisable to pass the type when it is available. ### Type checking Serialization can be configured using `check_type` (default to `False`) and `fall_back_on_any` (default to `False`) parameters. If `check_type` is `True`, the serialized object type will be checked to match the serialized type. If it doesn't, `fall_back_on_any` allows bypassing the serialized type to use `typing.Any` instead, i.e. to use the serialized object class. 
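Here is a minimal sketch of these two parameters in action (the `Foo` class is hypothetical, and the second assertion assumes the fallback behaves exactly as described above):

```python
from dataclasses import dataclass

from apischema import serialize


@dataclass
class Foo:
    bar: int


# The object matches the declared type, so type checking passes.
assert serialize(Foo, Foo(0), check_type=True) == {"bar": 0}
# A dict is not a Foo: with fall_back_on_any=True, serialization falls back
# on the runtime class of the object (here dict) instead of failing.
assert serialize(Foo, {"bar": 0}, check_type=True, fall_back_on_any=True) == {"bar": 0}
```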
The default values of these parameters can be modified through `apischema.settings.serialization.check_type` and `apischema.settings.serialization.fall_back_on_any`. !!! note *apischema* relies on typing annotations, and assumes that the code is well statically type-checked. That's why it doesn't add the overhead of type checking by default (it's more than 10% performance impact). ### Serialized methods/properties *apischema* can execute methods/properties during serialization and add the computed values with the other fields values; just put `apischema.serialized` decorator on top of methods/properties you want to be serialized. The function name is used unless an alias is given in decorator argument. ```python {!serialized.py!} ``` !!! note Serialized methods must not have parameters without default, as *apischema* needs to execute them without arguments !!! note Overriding of a serialized method in a subclass will also override the serialization of the subclass. #### Error handling Errors occurring in serialized methods can be caught in a dedicated error handler registered with `error_handler` parameter. This function takes in parameters the exception, the object and the alias of the serialized method; it can return a new value or raise the current or another exception — it can for example be used to log errors without throwing the complete serialization. The resulting serialization type will be a `Union` of the normal type and the error handling type; if the error handler always raises, use [`typing.NoReturn`](https://docs.python.org/3/library/typing.html#typing.NoReturn) annotation. `error_handler=None` correspond to a default handler which only return `None` — exception is thus discarded and serialization type becomes `Optional`. The error handler is only executed by *apischema* serialization process, it's not added to the function, so this one can be executed normally and raise an exception in the rest of your code. ```python {!serialized_error.py!} ``` #### Non-required serialized methods Serialized methods (or their error handler) can return `apischema.Undefined`, in which case the property will not be included into the serialization; accordingly, the property loses the *required* qualification in the JSON schema. ```python {!serialized_undefined.py!} ``` #### Generic serialized methods Serialized methods of generic classes get the right type when their owning class is specialized. ```python {!serialized_generic.py!} ``` ### Exclude unset fields When a class has a lot of optional fields, it can be convenient to not include all of them, to avoid a bunch of useless fields in your serialized data. Using the previous feature of [fields set tracking](#fields-set), `serialize` can exclude unset fields using its `exclude_unset` parameter or `settings.serialization.exclude_unset` (default is `True`). ```python {!exclude_unset.py!} ``` !!! note As written in comment in the example, `with_fields_set` is necessary to benefit from the feature. If the dataclass don't use it, the feature will have no effect. Sometimes, some fields must be serialized, even with their default value; this behavior can be enforced using field metadata. With it, a field will be marked as set even if its default value is used at initialization. ```python {!default_as_set.py!} ``` !!! note This metadata has effect only in combination with `with_fields_set` decorator. 
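As a recap of this section, here is a minimal sketch (with a hypothetical `Data` class) of unset fields being excluded from serialization:

```python
from dataclasses import dataclass

from apischema import serialize
from apischema.fields import with_fields_set


# with_fields_set must be put above the dataclass decorator
@with_fields_set
@dataclass
class Data:
    required: int
    optional: str = "default"


# Only the fields set at initialization are serialized
assert serialize(Data, Data(0), exclude_unset=True) == {"required": 0}
# Assigning a field afterwards marks it as set
data = Data(0)
data.optional = "assigned"
assert serialize(Data, data, exclude_unset=True) == {"required": 0, "optional": "assigned"}
```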
### Exclude fields with default value or `None` Fields metadata [`apischema.skip`](data_model.md#skip-field-serialization-depending-on-condition) already allows skipping fields serialization depending on a condition, for example if the field is `None` or equal to its default value. However, it must be added on each concerned fields, and that can be tedious when you want to set that behavior globally. That's why *apischema* provides the two following settings: - `settings.serialization.exclude_defaults`: whether fields which are equal to their default values should be excluded from serialization; default `False` - `settings.serialization.exclude_none`: whether fields which are equal to `None` should be excluded from serialization; default `False` These settings can also be set directly using `serialize` parameters, like in the following example: ```python {!exclude_defaults_none.py!} ``` ### Field ordering Usually, JSON object properties are unordered, but sometimes, order does matter. By default, fields, are ordered according to their declaration; serialized methods are appended after the fields. However, it's possible to change the ordering using `apischema.order`. #### Class-level ordering `order` can be used to decorate a class with the field ordered as expected: ```python {!class_ordering.py!} ``` #### Field-level ordering Each field has an order "value" (0 by default), and ordering is done by sorting fields using this value; if several fields have the same order value, they are sorted by their declaration order. For instance, assigning `-1` to a field will put it before every other fields, and `999` will surely put it at the end. This order value is set using `order`, this time as a field metadata (or passed to `order` argument of [serialized methods/properties](#serialized-methodsproperties)). It has the following overloaded signature: - `order(value: int, /)`: set the order value of the field - `order(*, after)`: ignore the order value and put the field after the given field/method/property - `order(*, before)`: ignore the order value and put the field before the given field/method/property !!! note `after` and `before` can be raw strings, but also dataclass fields, methods or properties. Also, `order` can again be used as class decorator to override ordering metadata, by passing this time a mapping of field with their overridden order. ```python {!ordering.py!} ``` ### TypedDict additional properties `TypedDict` can contain additional keys, which are not serialized by default. Setting `additional_properties` parameter to `True` (or `apischema.settings.additional_properties`) will toggle on their serialization (without aliasing). ## FAQ #### Why isn't coercion the default behavior? Because ill-formed data can be symptomatic of deeper issues, it has been decided that highlighting them would be better than hiding them. By the way, this is easily globally configurable. #### Why isn't `with_fields_set` enabled by default? It's true that this feature has the little cost of adding a decorator everywhere. However, keeping dataclass decorator allows IDEs/linters/type checkers/etc. to handle the class as such, so there is no need to develop a plugin for them. Standard compliance can be worth the additional decorator. (And little overhead can be avoided when not useful) #### Why isn't serialization type checking enabled by default? Type checking has a runtime cost, which means poorer performance. 
Moreover, as explained in [performances section](optimizations_and_benchmark.md#serialization-passthrough), it prevents "passthrough" optimization. At last, code is supposed to be statically verified, and thus types already checked. (If some silly things are done and leads to have unsupported types passed to the JSON library, an error will be raised anyway). Runtime type checking is more a development feature, which could for example be with `apischema.settings.serialization.check_type = __debug__`. #### Why not use json library `default` fallback parameter for serialization? Some *apischema* features like [conversions](conversions.md) can simply not be implemented with `default` fallback. By the way, *apischema* can perform [surprisingly better](optimizations_and_benchmark.md#passing-through-is-not-always-faster) than using `default`. However, `default` can be used in combination with [passthrough optimization](optimizations_and_benchmark.md#serialization-passthrough) when needed to improve performance. apischema-0.18.3/docs/difference_with_pydantic.md000066400000000000000000000170061467672046000220470ustar00rootroot00000000000000# Difference with pydantic As the question is often asked, it is answered in a dedicated section. Here are some the key differences between *apischema* and *pydantic*: ### *apischema* is (a lot) faster According to [benchmark](optimizations_and_benchmark.md), *apischema* is a lot faster than *pydantic*, especially for serialization. Both use Cython to optimize the code, but even without compilation (running only Python modules), *apischema* is still faster than Cythonized *pydantic*. Better performance, but not at the cost of fewer functionalities; that's rather the opposite: [dynamic aliasing](json_schema.md#dynamic-aliasing-and-default-aliaser), [conversions](conversions.md), [flattened fields](data_model.md#composition-over-inheritance---composed-dataclasses-flattening), etc. ### *apischema* can generate [GraphQL schema](graphql/overview.md) from your resolvers Not just a simple printable schema but a complete `graphql.GraphQLSchema` (using [*graphql-core*](https://github.com/graphql-python/graphql-core/) library) which can be used to execute your queries/mutations/subscriptions through your resolvers, powered by *apischema* (de)serialization and conversions features. Types and resolvers can be used both in traditional JSON-oriented API and GraphQL API. ### *apischema* uses standard dataclasses and types *pydantic* uses its own `BaseModel` class, or its own pseudo-`dataclass`, so you are forced to tie all your code to the library, and you cannot easily reuse code written in a more standard way or in external libraries. By the way, Pydantic use expressions in typing annotations (`conint`, etc.), while it's not recommended and treated as an error by tools like *Mypy* ### *apischema* doesn't require external plugins for editors, linters, etc. *pydantic* requires a plugin to allow *Mypy* to type check `BaseModel` and other *pydantic* singularities (and to not raise errors on it); plugins are also needed for editors. ### *apischema* doesn't mix up (de)serialization with your code While *pydantic* mixes up model constructor with deserializer, *apischema* uses dedicated functions for its features, meaning your dataclasses are instantiated normally with type checking. In your code, you manipulate objects; (de)serialization is for input/output. 
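A short sketch of that separation, using a hypothetical `Foo` dataclass — instantiation stays plain Python, while (de)serialization is an explicit operation at the boundary:

```python
from dataclasses import dataclass

from apischema import deserialize, serialize


@dataclass
class Foo:
    bar: int


# Regular instantiation, statically type-checked, with no hidden validation
foo = Foo(bar=0)
# Validation happens only when external data enters or leaves the program
assert deserialize(Foo, {"bar": 0}) == foo
assert serialize(Foo, foo) == {"bar": 0}
```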
*apischema* also doesn't mix up validation of external data with your statically checked code; there is no runtime validation in constructors. ### *apischema* truly works out-of-the-box with forward type references (especially for recursive model) *pydantic* requires calling `update_forward_refs` method on recursive types, while *apischema* "just works". ### *apischema* supports `Generic` without requiring additional stuff *pydantic* `BaseModel` cannot be used with generic model, you have to use `GenericModel`. With *apischema*, you just write your generic classes normally. ### *apischema* [conversions](conversions.md) feature allows to support any type defined in your code, but also in external libraries *pydantic* doesn't make it easy to support external types, like `bson.ObjectId`; see this [issue](https://github.com/tiangolo/fastapi/issues/68) on the subject. You could dynamically add a `__get_validators__` method to foreign classes, but that doesn't work with builtin types like `collection.deque` and other types written in C. Serialization customization is harder, with definition of encoding function by model; it cannot be done at the same place as deserialization. There is also no correlation done between (de)serialization customization and model JSON schema; you could have to overwrite the generated schema if you don't want to get an inconsistency. *apischema* only requires a few lines of code to support any type you want, from `bson.ObjectId` to *SQLAlchemy* models by way of builtin and generic like `collection.deque`, and even [*pydantic*](#apischema-supports-pydantic). Conversions are also integrated in JSON schema this one is generated according to the source/target of the conversion Here is a comparison of a custom type support: ```python {!pydantic_conversion.py!} ``` ### *apischema* can also customize serialization with computed fields [Serialized methods/properties](de_serialization.md#serialized-methodsproperties) are regular methods/properties which are included in serialization effortlessly. ### *apischema* allows you to use composition over inheritance [Flattened fields](data_model.md#composition-over-inheritance---composed-dataclasses-flattening) is a distinctive *apischema* feature that is very handy to build complex model from smaller fragments; you don't have to merge the fields of your fragments in a complex class with a lot of fields yourself, *apischema* deal with it for you, and your code is kept simple. ### *apischema* has a functional approach, *pydantic* has an object one *pydantic* features are based on `BaseModel` methods. You have to have a `BaseModel` instance to do anything, even if you manipulate only an integer. Complex *pydantic* stuff like `__root__` model or deserialization customization come from this approach. *apischema* is functional, it doesn't use method but simple functions, which works with all types. You can also register conversions for any types similarly you would implement a type class in a functional language. And your class namespace don't mix up with a mandatory base class' one. ### *apischema* can use both *camelCase* and *snake_case* with the same types While *pydantic* field aliases are fixed at model creation, *apischema* [lets you choose](json_schema.md#dynamic-aliasing-and-default-aliaser) which aliasing you want at (de)serialization time. It can be convenient if you need to juggle with cases for the same models between frontends and other backend services for example. 
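For example, the following sketch (with a hypothetical `User` class and a hand-written camel-case aliaser) serializes the same model with two different cases:

```python
from dataclasses import dataclass

from apischema import serialize


def to_camel_case(name: str) -> str:
    first, *rest = name.split("_")
    return first + "".join(part.capitalize() for part in rest)


@dataclass
class User:
    first_name: str


user = User("Ada")
# The aliasing is chosen at serialization time, not fixed on the model
assert serialize(User, user, aliaser=to_camel_case) == {"firstName": "Ada"}
assert serialize(User, user) == {"first_name": "Ada"}
```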
### *apischema* doesn't coerce by default Your API respects its schema. It can also coerce, for example to parse configuration file, and coercion can be adjusted (for example coercing list from comma-separated string). ### *apischema* has a better integration of JSON schema/OpenAPI With *pydantic*, if you want to have a `nullable` field in the generated schema, you have to put `nullable` into schema extra keywords. *apischema* is bound to the last JSON schema version but offers conversion to other version like OpenAPI 3.0 and `nullable` is added for `Optional` types. *apischema* also supports more advanced features like `dependentRequired` or `unevaluatedProperties`. Reference handling is also more [flexible](json_schema.md#complexrecursive-types---json-schema-definitionsopenapi-components) ### *apischema* can add validators and JSON schema to `NewType` So it will be used in deserialization validation. You can use `NewType` everywhere, to gain a better type checking, self-documented code. ### *apischema* validators are regular methods with [automatic dependencies management](validation.md#automatic-dependency-management) Using regular methods allows benefiting of type checking of fields, where *pydantic* validators use dynamic stuff (name of the fields as strings) and are not type-checked or have to get redundant type annotations. *apischema* validators also have automatic dependency management. And *apischema* directly supports JSON schema [property dependencies](json_schema.md#property-dependencies). Comparison is simple with an example (validator is taken from [*pydantic* documentation](https://pydantic-docs.helpmanual.io/usage/validators/#root-validators): ```python {!pydantic_validator.py!} ``` ### *apischema* supports *pydantic* It's not a feature, is just the result of [30 lines of code](examples/pydantic_support.md). apischema-0.18.3/docs/examples/000077500000000000000000000000001467672046000163175ustar00rootroot00000000000000apischema-0.18.3/docs/examples/attrs_support.md000066400000000000000000000000741467672046000215730ustar00rootroot00000000000000# Attrs support ```python {!examples/attrs_support.py!} ```apischema-0.18.3/docs/examples/inherited_deserializer.md000066400000000000000000000001161467672046000233540ustar00rootroot00000000000000# Inherited deserializer ```python {!examples/inherited_deserializer.py!} ```apischema-0.18.3/docs/examples/pydantic_support.md000066400000000000000000000013411467672046000222470ustar00rootroot00000000000000# Pydantic support It takes only 30 lines of code to support `pydantic.BaseModel` and all of its subclasses. You could add these lines to your project using *pydantic* and start to benefit from *apischema* features. This example deliberately doesn't use `set_object_fields` but instead the [conversions feature](../conversions.md) in order to roughly include *pydantic* "as is": it will reuse *pydantic* coercion, error messages, JSON schema, etc. This makes a full retro-compatible support. As a result, lot of *apischema* features like GraphQL schema generation or `NewType` validation cannot be supported using this method — but they could be by using `set_object_fields` instead. 
```python {!examples/pydantic_support.py!} ```apischema-0.18.3/docs/examples/recoverable_fields.md000066400000000000000000000002761467672046000224650ustar00rootroot00000000000000# Recoverable fields Inspired by [https://github.com/samuelcolvin/pydantic/issues/800](https://github.com/samuelcolvin/pydantic/issues/800) ```python {!examples/recoverable_fields.py!} ```apischema-0.18.3/docs/examples/sqlalchemy_support.md000066400000000000000000000001731467672046000226000ustar00rootroot00000000000000# SQLAlchemy support This example shows simple support for *SQLAlchemy*. ```python {!examples/sqlalchemy_support.py!} ```apischema-0.18.3/docs/examples/subclass_tagged_union.md000066400000000000000000000004271467672046000232060ustar00rootroot00000000000000# Class as tagged union of its subclasses *From [https://github.com/wyfo/apischema/discussions/56](https://github.com/wyfo/apischema/discussions/56)* Tagged unions are useful when it comes to GraphQL input (or even output). ```python {!examples/subclass_tagged_union.py!} ```apischema-0.18.3/docs/examples/subclass_union.md000066400000000000000000000005611467672046000216720ustar00rootroot00000000000000# Class as union of its subclasses *Inspired by [https://github.com/samuelcolvin/pydantic/issues/2036](https://github.com/samuelcolvin/pydantic/issues/2036)* A class can easily be deserialized as a union of its subclasses using deserializers. Indeed, when more than one deserializer is registered, it results in a union. ```python {!examples/subclass_union.py!} ```apischema-0.18.3/docs/graphql/000077500000000000000000000000001467672046000161375ustar00rootroot00000000000000apischema-0.18.3/docs/graphql/data_model_and_resolvers.md000066400000000000000000000203711467672046000235030ustar00rootroot00000000000000# Data model and resolvers Almost everything in the [Data model section](../data_model.md) remains valid in GraphQL integration, with a few differences. ## GraphQL specific data model ### `Enum` `Enum` members are represented in the schema using their **name** instead of their value. This is more consistent with the way GraphQL represents enumerations. ### `TypedDict` `TypedDict` is not supported as an output type. (see [FAQ](#why-typeddict-is-not-supported-as-an-output-type)) ### `Union` Unions are only supported between **output** object type, which means `dataclass` and `NamedTuple` (and [conversions](../conversions.md)/[dataclass model](../conversions.md#dataclass-model---automatic-conversion-fromto-dataclass)). There are 2 exceptions which can be always be used in `Union`: - `None`/`Optional`: Types are non-null (marked with an exclamation mark `!` in GraphQL schema) by default; `Optional` types however results in normal GraphQL types (without `!`). - `apischema.UndefinedType`: it is simply ignored. It is useful in resolvers, see [following section](#undefined_param_default) ## Non-null Types are assumed to be non-null by default, as in Python typing. Nullable types are obtained using `typing.Optional` (or `typing.Union` with a `None` argument). !!! note There is one exception, when resolver parameter default value is not serializable (and thus cannot be included in the schema), the parameter type is then set as nullable to make the parameter non-required. For example parameters not `Optional` but with `Undefined` default value will be marked as nullable. This is only for the schema, the default value is still used at execution. ## Undefined In output, `Undefined` is converted to `None`; so in the schema, `Union[T, UndefinedType]` will be nullable. 
In input, fields become nullable when `Undefined` is their default value. ## Interfaces Interfaces are simply classes marked with `apischema.graphql.interface` decorator. An object type implements an interface when its class inherits from an interface-marked class, or when it has [flattened fields](../data_model.md#composition-over-inheritance---composed-dataclasses-flattening) of interface-marked dataclass. ```python {!interface.py!} ``` ## Resolvers All `dataclass`/`NamedTuple` fields (excepted [skipped](../data_model.md#skip-dataclass-field)) are resolved with their [alias](../json_schema.md#field-alias) in the GraphQL schema. Custom resolvers can also be added by marking methods with `apischema.graphql.resolver` decorator — resolvers share a common interface with [`apischema.serialized`](../de_serialization.md#serialized-methodsproperties), with a few differences. Methods can be synchronous or asynchronous (defined with `async def` or annotated with an `typing.Awaitable` return type). Resolvers parameters are included in the schema with their type, and their default value. ```python {!resolver.py!} ``` ### `GraphQLResolveInfo` parameter Resolvers can have an additional parameter of type [`graphql.GraphQLResolveInfo`](https://graphql-core-3.readthedocs.io/en/latest/modules/type.html?highlight=GraphQLResolveInfo#graphql.type.GraphQLResolveInfo) (or `Optional[graphql.GraphQLResolveInfo]`), which is automatically injected when the resolver is executed in the context of a GraphQL request. This parameter contains the info about the current GraphQL request being executed. ### Undefined parameter default — `null` vs. `undefined` `Undefined` can be used as default value of resolver parameters. It can be to distinguish a `null` input from an absent/`undefined` input. In fact, `null` value will result in a `None` argument where no value will use the default value, `Undefined` so. ```python {!undefined_default.py!} ``` ### Error handling Errors occurring in resolvers can be caught in a dedicated error handler registered with `error_handler` parameter. This function takes in parameters the exception, the object, the [info](#graphqlresolveinfo-parameter) and the *kwargs* of the failing resolver; it can return a new value or raise the current or another exception — it can for example be used to log errors without throwing the complete serialization. The resulting serialization type will be a `Union` of the normal type and the error handling type; if the error handler always raises, use [`typing.NoReturn`](https://docs.python.org/3/library/typing.html#typing.NoReturn) annotation. `error_handler=None` correspond to a default handler which only return `None` — exception is thus discarded and the resolver type becomes `Optional`. The error handler is only executed by *apischema* serialization process, it's not added to the function, so this one can be executed normally and raise an exception in the rest of your code. Error handler can be synchronous or asynchronous. ```python {!resolver_error.py!} ``` ### Parameters metadata Resolvers parameters can have metadata like dataclass fields. They can be passed using `typing.Annotated`. ```python {!resolver_metadata.py!} ``` !!! note Metadata can also be passed with `parameters_metadata` parameter; it takes a mapping of parameter names as key and mapped metadata as value. 
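As an illustration, here is a minimal sketch (with a hypothetical `Bag` class) of a resolver parameter constrained through `Annotated` metadata:

```python
from dataclasses import dataclass
from typing import Annotated

from apischema import schema
from apischema.graphql import resolver


@dataclass
class Bag:
    content: str

    @resolver
    def repeated(self, count: Annotated[int, schema(min=1, max=10)] = 1) -> str:
        # count is deserialized and validated against the schema constraints
        return self.content * count
```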
### Parameters base schema Following the example of [type/field/method base schema](../json_schema.md#base-schema), resolver parameters also support a base schema definition ```python {!base_schema_parameter.py!} ``` ## Scalars `NewType` or non-object types annotated with `type_name` will be translated in the GraphQL schema by a `scalar`. By the way, `Any` will automatically be translated to a `JSON` scalar, as it is deserialized from and serialized to JSON. ```python {!scalar.py!} ``` ## ID type GraphQL `ID` has no precise specification and is defined according API needs; it can be a UUID or/and ObjectId, etc. `apischema.graphql_schema` has a parameter `id_types` which can be used to define which types will be marked as `ID` in the generated schema. Parameter value can be either a collection of types (each type will then be mapped to `ID` scalar), or a predicate returning if the given type must be marked as `ID`. ```python {!id_type.py!} ``` !!! note `ID` type could also be identified using `typing.Annotated` and a predicate looking into annotations. *apischema* also provides a simple `ID` type with `apischema.graphql.ID`. It is just defined as a `NewType` of string, so you can use it when you want to manipulate raw `ID` strings in your resolvers. ### ID encoding `ID` encoding can directly be controlled the `id_encoding` parameters of `graphql_schema`. A current practice is to use *base64* encoding for `ID`. ```python {!id_conversion.py!} ``` !!! note You can also use `relay.base64_encoding` (see [next section](relay.md#id-encoding)) !!! note `ID` serialization (respectively deserialization) is applied **after** *apischema* conversions (respectively before *apischema* conversion): in the example, uuid is already converted into string before being passed to `id_serializer`. If you use base64 encodeing and an ID type which is converted by *apischema* to a base64 str, you will get a double encoded base64 string ## Tagged unions !!! important This feature has a provisional status, as the concerned [GraphQL RFC](https://github.com/graphql/graphql-spec/pull/733) is not finalized. *apischema* provides a `apischema.tagged_unions.TaggedUnion` base class which helps to implement the *tagged union* pattern. It's fields **must** be typed using `apischema.tagged_unions.Tagged` generic type. ```python {!tagged_union.py!} ``` ### JSON schema Tagged unions JSON schema uses `minProperties: 1` and `maxProperties: 1`. ```python {!tagged_union_json_schema.py!} ``` ### GraphQL schema As tagged unions are not (yet?) part of the GraphQL spec, they are just implemented as normal (input) object type with nullable fields. An error is raised if several tags are passed in input. ```python {!tagged_union_graphql_schema.py!} ``` ## FAQ #### Why `TypedDict` is not supported as an output type? At first, `TypedDict` subclasses are not real classes, so they cannot be used to check types at runtime. Runtime check is however requried to disambiguate unions/interfaces. A hack could be done to solve this issue, but there is another one which cannot be hacked: `TypedDict` inheritance hierarchy is lost at runtime, so they don't play nicely with the interface concept. apischema-0.18.3/docs/graphql/overview.md000066400000000000000000000026511467672046000203330ustar00rootroot00000000000000# GraphQL Overview *apischema* supports GraphQL through the [*graphql-core*](https://github.com/graphql-python/graphql-core) library. 
You can install this dependency directly with *apischema* using the following extra requirement: ```shell pip install apischema[graphql] ``` GraphQL supports consists of generating a GraphQL schema `graphql.GraphQLSchema` from your data model and endpoints (queries/mutations/subscribtions), in a similar way than the JSON schema generation. This schema can then be used through *graphql-core* library to query/mutate/subscribe. ```python {!graphql_overview.py!} ``` GraphQL is fully integrated with the rest of *apischema* features, especially [conversions](../conversions.md), so it's easy to integrate ORM and other custom types in the generated schema; this concerns query results but also arguments. By the way, while GraphQL doesn't support constraints, *apischema* still offers you all the power of its [validation feature](../validation.md). In fact, *apischema* deserialize and validate all the arguments passed to resolvers. ## FAQ #### Is it possible to use the same classes to do both GraphQL and REST-API? Yes it is. GraphQL has some restrictions in comparison to JSON schema (see [next section](data_model_and_resolvers.md)), but this taken in account, all of your code can be reused. In fact, GraphQL endpoints can also be used both by a GraphQL API and a more traditional REST or RPC API. apischema-0.18.3/docs/graphql/relay.md000066400000000000000000000127371467672046000176070ustar00rootroot00000000000000# Relay *apischema* provides some facilities to implement a GraphQL server following [*Relay* GraphQL server specification](https://relay.dev/docs/en/graphql-server-specification). They are included in the module `apischema.graphql.relay`. !!! note These facilities are independent of each others — you could keep only the mutations part and use your own identification and connection system for example. ## (Global) Object Identification *apischema* defines a generic `relay.Node[Id]` interface which can be used which can be used as base class of all identified resources. This class contains a unique generic field of type `Id`, which will be automatically converted into an `ID!` in the schema. The `Id` type chosen has to be serializable into a string-convertible value (it can register [conversions](../conversions.md) if needed). Each node has to implement the `classmethod` `get_by_id(cls: type[T], id: Id, info: graphql.GraphQLResolveInfo=None) -> T`. All nodes defined can be retrieved using `relay.nodes`, while the `node` query is defined as `relay.node`. `relay.nodes()` can be passed to `graphql_schema` [`types` parameter](schema.md#additional-types) in order to add them in the schema even if they don't appear in any resolvers. ```python {!relay_node.py!} ``` !!! warning For now, even if its result is not used, `relay.nodes` must be called before generating the schema. ### Global ID *apischema* defines a `relay.GlobalId` type with the following signature : ```python @dataclass class GlobalId(Generic[Node]): id: str node_class: type[Node] ``` In fact, it is `GlobalId` type which is serialized and deserialized as an `ID!`, not the `Id` parameter of the `Node` class; *apischema* automatically add a [field converter](../conversions.md#field-conversions) to make the conversion between the `Id` (for example an `UUID`) of a given node and the corresponding `GlobalId`. Node instance global id can be retrieved with `global_id` property. 
```python {!relay_global_id.py!} ``` ### Id encoding *Relay* specifications encourage the use of base64 encoding, so *apischema* defines a `relay.base64_encoding` that you can pass to `graphql_schema` `id_encoding` parameter. ## Connections *apischema* provides a generic `relay.Connection[Node, Cursor, Edge]` type, which can be used directly without subclassing it; it's also possible to subclass it to add fields to a given connection (or to all the connection which will subclass the subclass). `relay.Edge[Node, Cursor]` can also be subclassed to add fields to the edges. `Connection` dataclass has the following declaration: ```python @dataclass class Connection(Generic[Node, Cursor, Edge]): edges: Optional[Sequence[Optional[Edge]]] has_previous_page: bool = field(default=False, metadata=skip) has_next_page: bool = field(default=False, metadata=skip) start_cursor: Optional[Cursor] = field(default=None, metadata=skip) end_cursor: Optional[Cursor] = field(default=None, metadata=skip) @resolver def page_info(self) -> PageInfo[Cursor]: ... ``` The `pageInfo` field is computed by a resolver; it uses the cursors of the first and the last edge when they are not provided. Here is an example of `Connection` use: ```python {!relay_connection.py!} ``` ### Custom connections/edges Connections can be customized by simply subclassing `relay.Connection` class and adding the additional fields. For the edges, `relay.Edge` can be subclassed too, and the subclass has then to be passed as type argument to the generic connection. ```python {!relay_connection_subclass.py!} ``` ## Mutations *Relay* compliant mutations can be declared with a dataclass subclassing the `relay.Mutation` class; its fields will be put in the payload type of the mutation. This class must implement a `classmethod`/`staticmethod` name `mutate`; it can be synchronous or asynchronous. The arguments of the method will correspond to the input type fields. The mutation will be named after the name of the mutation class. All the mutations declared can be retrieved with `relay.mutations`, in order to be passed to `graphql_schema`. ```python {!relay_mutation.py!} ``` ### ClientMutationId As you can see in the previous example, the field named `clientMutationId` is automatically added to the input and the payload types. The forward of the mutation id from the input to the payload is automatically handled. It's value can be accessed by declaring a parameter of type `relay.ClientMutationId` — even if the parameter is not named `client_mutation_id`, it will be renamed internally. This feature is controlled by a `Mutation` class variable `_client_mutation_id`, with 3 possible values: - `None` (automatic, the default): `clientMutationId` field will be nullable unless it's declared as a required parameter (without default value) in the `mutate` method. - `False`: their will be no `clientMutationId` field added (having a dedicated parameter will raise an error) - `True`: `clientMutationId` is added and forced to be non-null. ```python {!relay_client_mutation_id.py!} ``` ### Error handling and other resolver arguments *Relay* mutation are [operations](schema.md#operations), so they can be configured with the same parameters. As they are declared as classes, parameters will be passed as class variables, prefixed by `_` (`error_handler` becomes `_error_handler`) !!! note Because parameters are class variables, you can reuse them by setting their value in a base class; for example, to share a same `error_handler` in a group of mutations. 
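For instance, here is a minimal, hypothetical sketch of a mutation overriding one of these class-variable parameters — `_client_mutation_id` is set to `False` to drop the `clientMutationId` field:

```python
from dataclasses import dataclass

from apischema.graphql import relay


@dataclass
class Ship:
    name: str


@dataclass
class AddShip(relay.Mutation):
    # parameters are class variables prefixed with "_"
    _client_mutation_id = False

    ship: Ship

    @staticmethod
    def mutate(name: str) -> "AddShip":
        return AddShip(Ship(name))
```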
apischema-0.18.3/docs/graphql/schema.md000066400000000000000000000071071467672046000177260ustar00rootroot00000000000000# GraphQL schema GraphQL schema is generated by passing all the operations (query/mutation/subscription) functions to `apischema.graphql.graphql_schema`. Functions parameters and return types are then processed by *apischema* to generate the `Query`/`Mutation`/`Subscription` types with their resolvers/subscribers, which are then passed to `graphql.GraphQLSchema`. In fact, `graphql_schema` is just a wrapper around `graphql.GraphQLSchema` (same parameters plus a few extras); it just uses *apischema* abstraction to build `GraphQL` object types directly from your code. ## Operations metadata GraphQL operations can be passed to `graphql_schema` either using simple functions or wrapping it into `apischema.graphql.Query`/`apischema.graphql.Mutation`/`apischema.graphql.Subscription`. These wrappers have the same parameters as `apischema.graphql.resolver`: `alias`, `conversions`, `error_handler`, `order` and `schema` (`Subscription` has an [additional parameter](#subscriptions)). ```python {!operation.py!} ``` ## *camelCase* GraphQL use *camelCase* as a convention for resolvers; *apischema* follows this convention by automatically convert all resolver names (and their parameters) to *camelCase*. `graphql_schema` has an `aliaser` parameter if you want to use another case. ## Type names Schema types are named the same way they are in generated JSON schema: type name is used by default, and it can be overridden using [`apischema.type_name`](../json_schema.md#customize-ref) ```python {!graphql_type_name.py!} ``` !!! note Type names can be distinguished between JSON schema and GraphQL schema using `type_name` named parameter. Indeed, `type_name("foo")` is equivalent to `type_name(json_schema="foo", graphql="foo")`. However, in GraphQL schema, unions must be named, so `typing.Union` used should be annotated with `apischema.type_name`. `graphql_schema` also provides a `union_ref` parameter which can be passed as a function to generate a type name from the union argument. Default `union_ref` is `"Or".join` meaning `typing.Union[Foo, Bar]` will result in `union FooOrBar = Foo | Bar` ```python {!union_type_name.py!} ``` ## `Enum` metadata Contrary to dataclasses, `Enum` doesn't provide a way to set metadata for its members, especially description, but also deprecation reason. They can however be passed using `enum_schemas` parameter of `graphql_schema`. ```python {!enum_schemas.py!} ``` ## Additional types *apischema* will only include in the schema the types annotating resolvers. However, it is possible to add other types by using the `types` parameter of `graphql_schema`. This is especially useful to add interface implementations where only interface is used in resolver types. ```python {!additional_types.py!} ``` ## Subscriptions Subscriptions are particular operations which must return an `AsyncIterable`; this event generator can come with a dedicated resolver to post-process the event. ### Event generator only ```python {!subscription.py!} ``` !!! note Because there is no post-processing of generated event in a dedicated resolver, `error_handler` cannot be called, but it will still modify the type of the event. ### Event generator + resolver A resolver can be added by using the `resolver` parameter of `Subscription`. In this case, *apischema* will map subscription name, parameters and return type on the resolver instead of the event generator. 
It allows using the same event generator with several resolvers to create different subscriptions. The first resolver argument will be the event yielded by the event generator. ```python {!subscription_resolve.py!} ``` apischema-0.18.3/docs/index.md000066400000000000000000000065111467672046000161350ustar00rootroot00000000000000# Overview ## apischema JSON (de)serialization, GraphQL and JSON schema generation using Python typing. *apischema* makes your life easier when dealing with API data. ## Install ```shell pip install apischema ``` It requires only Python 3.8+. *PyPy3* is also fully supported. ## Why another library? This library fulfills the following goals: - stay as close as possible to the standard library (dataclasses, typing, etc.) — as a consequence we do not need plugins for editors/linters/etc.; - avoid object-oriented limitations — do not require a base class — thus handle easily every type (`Foo`, `list[Bar]`, `NewType(Id, int)`, etc.) the same way. - be adaptable, provide tools to support any types (ORM, etc.); - avoid dynamic things like using raw strings for attributes name - play nicely with your IDE. No known alternative achieves all of this, and apischema is also [(a lot) faster](optimizations_and_benchmark.md#benchmark) than all of them. On top of that, because APIs are not only JSON, *apischema* is also a complete GraphQL library !!! note Actually, *apischema* is even adaptable enough to enable support of competitor libraries in a few dozens of line of code ([pydantic support example](examples/pydantic_support.md) using [conversions feature](conversions.md)) ## Example ```python {!quickstart.py!} ``` *apischema* works out of the box with your data model. !!! note This example and further ones are using *pytest* API because they are in fact run as tests in the library CI ### Run the documentation examples All documentation examples are written using the last Python minor version — currently 3.10 — in order to provide up-to-date documentation. Because Python 3.10 specificities (like [PEP 585](https://www.python.org/dev/peps/pep-0604/)) are used, this version is "mandatory" to execute the examples as-is. In addition to *pytest*, some examples use third-party libraries like *SQLAlchemy* or *attrs*. All of this dependencies can be downloaded using the `examples` extra with ```shell pip install apischema[examples] ``` Once dependencies are installed, you can simply copy-paste examples and execute them, using the proper Python version. ## FAQ #### What is the difference between *apischema* and *pydantic*? See the [dedicated section](difference_with_pydantic.md) — there are many differences. #### I already have my data model with my *SQLAlchemy*/ORM tables, will I have to duplicate my code, making one dataclass per table? No, `apischema` works with user-defined types as well as types from foreign libraries. Using the [conversion](conversions.md) feature, you can add default serialization for all your tables, or register a different serializer that you can select according to your API endpoint, or both. #### I need more accurate validation than "ensure this is an integer and not a string ", can I do that? See the [validation](validation.md) section. You can use standard JSON schema validation (`maxItems`, `pattern`, etc.) 
that will be embedded in your schema or add custom Python validators for each class/field/`NewType` you want.apischema-0.18.3/docs/json_schema.md000066400000000000000000000246451467672046000173260ustar00rootroot00000000000000# JSON schema ## JSON schema generation JSON schema can be generated from the data model. However, because of all possible [customizations](conversions.md), the schema can differ between deserialization and serialization. In common cases, `deserialization_schema` and `serialization_schema` will give the same result. ```python {!json_schema.py!} ``` ## Field alias Sometimes dataclass field names can clash with a language keyword, and sometimes the property name is simply not convenient. Fortunately, a field can define an `alias` which will be used in the schema and in deserialization/serialization. ```python {!alias.py!} ``` ### Alias all fields Field aliasing can also be done at class level by specifying an aliasing function. This aliaser is applied to the field alias if defined, or to the field name; it is not applied if `override=False` is specified. ```python {!aliaser.py!} ``` Class-level aliasing can be used to define a *camelCase* API. ### Dynamic aliasing and default aliaser *apischema* operations `deserialize`/`serialize`/`deserialization_schema`/`serialization_schema` provide an `aliaser` parameter which will be applied to every field processed in this operation. Similar to [`strictness configuration`](de_serialization.md#strictness-configuration), this parameter has a default value controlled by `apischema.settings.aliaser`. It can be used for example to make a whole application use *camelCase*. Actually, there is a shortcut for that: ```python from apischema import settings settings.camel_case = True ``` Otherwise, it's used the same way as [`settings.coercer`](de_serialization.md#strictness-configuration). !!! note Dynamic aliaser ignores `override=False`
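For instance, the `aliaser` parameter can also be passed to a single operation only. Here is a minimal sketch assuming a hand-written `to_camel_case` helper (it is not an *apischema* function):

```python
from dataclasses import dataclass

from apischema import serialize


def to_camel_case(s: str) -> str:
    first, *others = s.split("_")
    return first + "".join(part.capitalize() for part in others)


@dataclass
class Foo:
    my_field: int


# the aliaser only affects this call, not the global settings
assert serialize(Foo, Foo(0), aliaser=to_camel_case) == {"myField": 0}
```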
## Schema annotations Type annotations are not enough to express a complete schema, but *apischema* has a function for that; `schema` can be used both as a type decorator and as field metadata. ```python {!schema.py!} ``` !!! note Schemas are particularly useful with `NewType`. For example, if you use prefixed ids, you can use a `NewType` with a `pattern` schema to validate them, and benefit from more precise type checking. The following keys are available (they are sometimes shortened compared to the original JSON schema keywords, for code concision and *snake_case* consistency): Key | JSON schema keyword | type restriction --- | --- | --- title | / | / description | / | / default | / | / examples | / | / min | minimum | `int` max | maximum | `int` exc_min | exclusiveMinimum | `int` exc_max | exclusiveMaximum | `int` mult_of | multipleOf | `int` format | / | `str` media_type | contentMediaType | `str` encoding | contentEncoding | `str` min_len | minLength | `str` max_len | maxLength | `str` pattern | / | `str` min_items | minItems | `list` max_items | maxItems | `list` unique | / | `list` min_props | minProperties | `dict` max_props | maxProperties | `dict` !!! note In case of a field schema, the field default value will be serialized (if possible) to add the `default` keyword to the schema. ### Constraints validation JSON schema constrains the deserialized data; these constraints are naturally used for validation. ```python {!validation_error.py!} ``` !!! note Error messages are fully [customizable](validation.md#constraint-errors-customization) ### Extra schema `schema` has two other arguments, `extra` and `override`, which give finer control of the generated JSON schema. It can be used for example to build "strict" unions (using `oneOf` instead of `anyOf`) ```python {!strict_union.py!} ``` ### Base `schema` `apischema.settings.base_schema` can be used to define a "base schema" for the different kinds of objects: types, object fields or (serialized) methods. ```python {!base_schema.py!} ``` Base schema will be merged with `schema` defined at type/field/method level. ## Required field with default value By default, a dataclass/namedtuple field will be tagged `required` if it doesn't have a default value. However, you may want to have a default value for a field in order to be more convenient in your code, but still make the field required. One could think of a schema model where the version is fixed but still required, for example JSON-RPC with `"jsonrpc": "2.0"`. That's done with the field metadata `required`. ```python {!required.py!} ``` ## Additional properties / pattern properties ### With `Mapping` The schema of a `Mapping`/`dict` type is naturally translated to `"additionalProperties": <value schema>`. However, when the schema of the key has a `pattern`, it will give `"patternProperties": {<key pattern>: <value schema>}` instead.
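As an illustration, here is a minimal sketch of both cases (the `Prefixed` key type and the asserted fragments below are our assumptions, not taken from the documentation examples):

```python
from collections.abc import Mapping
from typing import NewType

from apischema import schema
from apischema.json_schema import deserialization_schema

# plain mapping keys -> additionalProperties
assert deserialization_schema(Mapping[str, int])["additionalProperties"] == {
    "type": "integer"
}

# key type with a pattern -> patternProperties
Prefixed = schema(pattern=r"^foo_")(NewType("Prefixed", str))
assert "patternProperties" in deserialization_schema(Mapping[Prefixed, int])
```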
### With dataclass `additionalProperties`/`patternProperties` can be added to dataclasses by using fields annotated with `properties` metadata. Properties not mapped on regular fields will be deserialized into these fields; they must have a `Mapping` type, or be [deserializable](conversions.md) from a `Mapping`, because they are instantiated with a mapping. ```python {!properties.py!} ``` !!! note Of course, a dataclass can only have a single `properties` field without pattern, because it makes no sense to have several `additionalProperties`. ## Property dependencies *apischema* supports [property dependencies](https://json-schema.org/understanding-json-schema/reference/conditionals.html#dependentrequired) for dataclasses through a class member. Dependencies are also used in validation. ```python {!dependent_required.py!} ``` Because bidirectional dependencies are a common idiom, *apischema* provides a shortcut notation; it's indeed possible to write `dependent_required([credit_card, billing_address])`. ## JSON schema reference For complex schemas with type reuse, it's convenient to extract definitions of schema components in order to reuse them — it's even mandatory for recursive types; JSON schema uses JSON pointers "$ref" to refer to the definitions. *apischema* handles this feature natively. ```python {!complex_schema.py!} ``` ### Use reference only for reused types *apischema* can control the reference use through the boolean `all_refs` parameter of `deserialization_schema`/`serialization_schema`: - `all_refs=True` -> all types with a reference will be put in the definitions and referenced with `$ref`; - `all_refs=False` -> only types which are reused in the schema are put in definitions. The `all_refs` default value depends on the [JSON schema version](#json-schemaopenapi-version): it's `False` for JSON schema drafts but `True` for OpenAPI. ```python {!all_refs.py!} ``` ### Set reference name In the previous examples, types were referenced using their name. This is indeed the default behavior for every class/`NewType` (except primitive `int`/`str`/`bool`/`float`). It's possible to override the default reference name using `apischema.type_name`; passing `None` instead of a string will remove the reference, making the type unable to be referenced as a separate definition in the schema. ```python {!type_name.py!} ``` !!! note Builtin collections are interchangeable when a `type_name` is registered. For example, if a name is registered for `list[Foo]`, this name will also be used for `Sequence[Foo]` or `Collection[Foo]`. Generic aliases can have a type name, but they need to be specialized; `Foo[T, int]` cannot have a type name but `Foo[str, int]` can. However, generic classes can get a dynamic type name depending on their generic argument, by passing a name factory to `type_name`: ```python {!generic_type_name.py!} ``` The default behavior can also be customized using `apischema.settings.default_type_name`. ### Reference factory In JSON schema, `$ref` looks like `#/$defs/Foo`, not just `Foo`. In fact, schema generation uses the ref given by `type_name`/`default_type_name` and passes it to a `ref_factory` function (a parameter of schema generation functions) which will convert it to its final form. Each [JSON schema version](#json-schemaopenapi-version) comes with its default `ref_factory`: for draft 2020-12, it prefixes the ref with `#/$defs/`, while it prefixes it with `#/components/schemas/` in case of OpenAPI. ```python {!ref_factory.py!} ``` !!! note When `ref_factory` is passed as an argument, definitions are not added to the generated schema. That's because `ref_factory` would surely change the definitions location, so there would be no point in adding them at a wrong location. These definitions can of course be generated separately with `definitions_schema`. ### Definitions schema Definitions schemas can also be extracted using `apischema.json_schema.definitions_schema`. It takes two lists of types, `deserialization`/`serialization` (or tuples of type + [dynamic conversion](conversions.md)), and returns a dictionary of all referenced schemas. !!! note This is especially useful when it comes to OpenAPI schema to generate the components section. ```python {!definitions_schema.py!} ``` ## JSON schema / OpenAPI version JSON schema has several versions — OpenAPI is treated as a JSON schema version. While *apischema* natively uses the latest one, draft 2020-12, it is possible to specify a schema version which will be used for the generation. ```python {!schema_versions.py!} ``` ## OpenAPI Discriminator OpenAPI defines a [discriminator object](https://spec.openapis.org/oas/v3.1.0#discriminator-object) which can be used to shortcut deserialization of unions of object types. *apischema* provides two different ways to declare a discriminator: - as an `Annotated` metadata of a union; ```python {!union_discriminator.py!} ``` - as a decorator of the base class. ```python {!inherited_discriminator.py!} ``` !!! note Using a discriminator doesn't require having a dedicated field (except for `TypedDict`) Performance of union deserialization can be [improved](optimizations_and_benchmark.md#discriminator) using a discriminator. ## `readOnly` / `writeOnly` Dataclasses `InitVar` and `field(init=False)` fields will be flagged respectively with `"writeOnly": true` and `"readOnly": true` in the generated schema. In [definitions schema](#definitions-schema), if a type appears both in deserialization and serialization, properties are merged and the resulting schema then contains both `readOnly` and `writeOnly` properties.
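Here is a minimal sketch of these flags, adapted from the `dataclass_init` example shipped in the repository (the `Foo` fields are illustrative):

```python
from dataclasses import InitVar, dataclass, field

from apischema.json_schema import definitions_schema
from apischema.metadata import init_var


@dataclass
class Foo:
    bar: int
    init_only: InitVar[int] = field(metadata=init_var(int))
    no_init: int = field(init=False)

    def __post_init__(self, init_only: int):
        self.no_init = init_only


# init-only fields are flagged writeOnly, init=False fields readOnly
props = definitions_schema(deserialization=[Foo], serialization=[Foo], all_refs=True)[
    "Foo"
]["properties"]
assert props["init_only"] == {"writeOnly": True, "type": "integer"}
assert props["no_init"] == {"readOnly": True, "type": "integer"}
```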
By the way, the `required` is not merged because it can't be (it would mess up validation if some not-init field was required), so the deserialization `required` is kept because it's more important, as it can be used in validation (the OpenAPI 3.0 semantic which allows the merge [has been dropped](https://www.openapis.org/blog/2020/06/18/openapi-3-1-0-rc0-its-here) in 3.1, so it has not been judged useful to be supported) apischema-0.18.3/docs/optimizations_and_benchmark.md000066400000000000000000000166611467672046000226020ustar00rootroot00000000000000# Optimizations and benchmark *apischema* is (a lot) [faster](#benchmark) than its known alternatives, thanks to advanced optimizations. ![benchmark chart](benchmark_chart_light.svg#only-light) ![benchmark chart](benchmark_chart_dark.svg#only-dark) !!! note The chart is truncated to a relative performance of 20x slower. Benchmark results are detailed in the [results table](#relative-execution-time-lower-is-better). ## Precomputed (de)serialization methods *apischema* precomputes (de)serialization methods depending on the (de)serialized type (and other parameters); type annotations processing is done in the precomputation. Methods are then cached using `functools.lru_cache`, so `deserialize` and `serialize` don't recompute them every time. !!! note The cache is automatically reset when global settings are modified, because it impacts the generated methods. However, even if `lru_cache` is fast, using the methods directly is faster still, so *apischema* provides `apischema.deserialization_method` and `apischema.serialization_method`. These functions share the same parameters as `deserialize`/`serialize`, except the data/object parameter to (de)serialize. Using the computed methods directly can increase performance by about 10%. ```python {!de_serialization_methods.py!} ``` !!! warning Methods computed before a settings modification will not be updated and will use the old settings. Be careful to set your settings first. ## Avoid unnecessary copies As an example, when a list of integers is deserialized, `json.load` already returns a list of integers. The loaded data can thus be "reused", and the deserialization just becomes a validation step. The same principle applies to serialization. It's controlled by the settings `apischema.settings.deserialization.no_copy`/`apischema.settings.serialization.no_copy`, or the `no_copy` parameter of the `deserialize`/`serialize` methods. The default behavior is to avoid these unnecessary copies, i.e. `no_copy=True`. ```python {!no_copy.py!} ``` ## Serialization passthrough JSON serialization libraries expect primitive data types (`dict`/`list`/`str`/etc.). A non-negligible part of the objects to be serialized are primitive. When [type checking](#type-checking) is disabled (this is the default), objects annotated with primitive types don't need to be transformed or checked; *apischema* can simply "pass through" them, and this results in an identity serialization method, just returning its argument. Container types like `list` or `dict` are passed through only when the contained types are passed through too (and when `no_copy=True`) ```python {!pass_through_primitives.py!} ``` !!! note `Enum` subclasses which also inherit `str`/`int` are also passed through ### Passthrough options Some JSON serialization libraries natively support types like `UUID` or `datetime`, sometimes with a faster implementation than the *apischema* one — [orjson](https://github.com/ijl/orjson), written in Rust, is a good example.
To take advantage of that, *apischema* provides the `apischema.PassThroughOptions` class to specify which types should be passed through, whether they are supported natively by JSON libraries (or handled in a `default` fallback). `apischema.serialization_default` can be used as the `default` fallback in combination with `PassThroughOptions`. It has to be instantiated with the same kwargs parameters (`aliaser`, etc.) as `serialization_method`. ```python {!pass_through.py!} ``` !!! important Passthrough optimization is a lot diminished with `check_type=True`. `PassThroughOptions` has the following parameters: #### `any` — pass through `Any` #### `collections` — pass through collections Standard collections `list`, `tuple` and `dict` are natively handled by JSON libraries, but `set`, for example, isn't. Moreover, standard abstract collections like `Collection` or `Mapping`, which are used a lot, are not guaranteed to have their runtime type supported (having a `set` annotated with `Collection` for instance). But, most of the time, collection runtime types are `list`/`dict`, so others can be handled in the `default` fallback. !!! note Set-like types will not be passed through. #### `dataclasses` - pass through dataclasses Some JSON libraries, like [orjson](https://github.com/ijl/orjson), support dataclasses natively. However, because *apischema* has a lot of specific features ([aliasing](json_schema.md#field-alias), [flatten fields](data_model.md#composition-over-inheritance---composed-dataclasses-flattening), [conditional skipping](data_model.md#skip-field-serialization-depending-on-condition), [fields ordering](de_serialization.md#field-ordering), etc.), only dataclasses using none of these features, and whose fields are all passed through, will be passed through too. #### `enums` — pass through `enum.Enum` subclasses #### `tuple` — pass through `tuple` Even if `tuple` is often supported by JSON serializers, if this option is not enabled, tuples will be serialized as lists. It also allows easier test writing, for example. !!! note `collections=True` implies `tuple=True`. #### `types` — pass through arbitrary types Either a collection of types, or a predicate to determine if a type has to be passed through. ## Binary compilation using Cython *apischema* uses Cython in order to compile critical parts of the code, i.e. the (de)serialization methods. However, *apischema* remains a pure Python library — it can work without binary modules. Cython source files (`.pyx`) are in fact generated from Python modules. This notably allows keeping the code simple, by adding *switch-case* optimization to replace dynamic dispatch, avoiding big chains of `elif` in Python code. !!! note Compilation is disabled when using PyPy, because it's even faster with the bare Python code. That's another benefit of generating `.pyx` files: keeping the Python source for PyPy. ## Override dataclass constructors !!! warning This feature is still experimental and disabled by default. Carefully test its impact on your code before enabling it in production. Dataclass constructor calls are the slowest part of deserialization, about 50% of its runtime! They are indeed pure Python functions and cannot be compiled. In case of a "normal" dataclass (no `__slots__`, `__post_init__`, or `__init__`/`__new__`/`__setattr__` overriding), *apischema* can override the constructor with compilable code. This feature can be toggled on/off globally using `apischema.settings.deserialization.override_dataclass_constructors`.
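Here is a minimal sketch of enabling it (the `Point` class is just an illustration; keep in mind the warning above about testing the impact first):

```python
from dataclasses import dataclass

from apischema import deserialize, settings

# enable the experimental constructor override before any method is computed
settings.deserialization.override_dataclass_constructors = True


@dataclass
class Point:
    x: int
    y: int


assert deserialize(Point, {"x": 0, "y": 1}) == Point(0, 1)
```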
## Discriminator The [OpenAPI discriminator](json_schema.md#openapi-discriminator) allows making union deserialization time more homogeneous. ```python {!discriminator_perf.py!} ``` !!! note As you can notice in the example, the discriminator brings its own additional cost, but it's completely worth it. ## Benchmark Benchmark code is located in the [benchmark directory](https://github.com/wyfo/apischema/tree/master/benchmark) of the *apischema* repository. Performance is measured on two datasets: a simple one and a more complex one. The benchmark is run by a GitHub Actions workflow on `ubuntu-latest` with Python 3.10. Results are given relative to the fastest library, i.e. *apischema*; simple and complex results are detailed in the table, and the displayed result is the mean of both. #### Relative execution time (lower is better) {!benchmark_table.md!} !!! note The benchmark uses [binary optimization](#binary-compilation-using-cython), but even when running as a pure Python library, *apischema* still performs better than almost all of the competitors.apischema-0.18.3/docs/requirements.txt000066400000000000000000000001201467672046000177600ustar00rootroot00000000000000markdown-include==0.8.1 mike==1.1.2 mkdocs-exclude==1.0.2 mkdocs-material==9.4.6apischema-0.18.3/docs/validation.md000066400000000000000000000216521467672046000171630ustar00rootroot00000000000000# Validation Validation is an important part of deserialization. By default, *apischema* validates types of data according to typing annotations and [`schema`](json_schema.md#constraints-validation) constraints. But custom validators can also be added for more precise validation. ## Deserialization and validation error `ValidationError` is raised when validation fails. This exception contains all the information about the ill-formed part of the data. It can be formatted/serialized using its `errors` property. ```python {!validation_error.py!} ``` As shown in the example, *apischema* will not stop at the first error met but tries to validate all parts of the data. !!! note `ValidationError` can also be serialized using `apischema.serialize` (this will use `errors` internally). ## Constraint errors customization Constraints are validated at deserialization, with *apischema* providing default error messages. Messages can be customized by setting the corresponding attribute of `apischema.settings.errors`. They can be either a string which will be formatted with the constraint value (using `str.format`), e.g. `less than {} (minimum)`, or a function with 2 parameters: the constraint value and the invalid data. ```python {!settings_errors.py!} ``` !!! note Default error messages don't include the invalid data for security reasons (the data could for example be a password that is too short). !!! note Other error messages can be customized, for example `missing property` for missing required properties, etc. ## Dataclass validators Dataclass validation can be completed by custom validators. These are simple decorated methods which will be executed during validation, after all fields have been deserialized. !!! note Prior to v0.17, validators could raise arbitrary exceptions (except AssertionError of course); see [FAQ](#why-validators-cannot-raise-arbitrary-exception) for the reason of this change. ```python {!validator.py!} ``` !!! warning **DO NOT use the `assert`** statement to validate external data, ever.
In fact, this statement is made to be disabled when executed in optimized mode (see [documentation](https://docs.python.org/3/reference/simple_stmts.html#the-assert-statement)), so validation would be disabled too. This warning doesn't concern only *apischema*; `assert` is only for internal assertion in debug/development environment. That's why *apischema* will not catch `AssertionError` as a validation error but reraises it, making `deserialize` fail. !!! note Validators are always executed in order of declaration. ### Automatic dependency management It makes no sense to execute a validator using a field that is ill-formed. Hopefully, *apischema* is able to compute validator dependencies — the fields used in validator; validator is executed only if the all its dependencies are ok. ```python {!computed_dependencies.py!} ``` !!! note Despite the fact that validator uses the `self` argument, it can be called during validation even if all the fields of the class are not ok and the class not really instantiated. In fact, instance is kind of mocked for validation with only the needed field. ### Raise more than one error with `yield` Validation of a list field can require raising several exceptions, one for each bad element. With `raise`, this is not possible, because you can raise only once. However, *apischema* provides a way of raising as many errors as needed by using `yield`. Moreover, with this syntax, it is possible to add a "path" (see [below](#error-path)) to the error to precise its location in the validated data. This path will be added to the `loc` key of the error. ```python {!validator_yield.py!} ``` #### Error path In the example, the validator yields a tuple of an "error path" and the error message. Error path can be: - a field alias (obtained with `apischema.objects.get_alias`); - an integer, for list indices; - a raw string, for dict key (or field); - an `apischema.objects.AliasedStr`, a string subclass which will be aliased by deserialization aliaser; - an iterable, e.g. a tuple, of this 4 components. `yield` can also be used with only an error message. !!! note For dataclass field error path, it's advised to use `apischema.objects.get_alias` instead of raw string, because it will take into account potential aliasing and it will be better handled by IDE (refactoring, cross-referencing, etc.) ### Discard If one of your validators fails because a field is corrupted, maybe you don't want subsequent validators to be executed. `validator` decorator provides a `discard` parameter to discard fields of the remaining validation. All the remaining validators having discarded fields in [dependencies](#automatic-dependencies-management) will not be executed. ```python {!discard.py!} ``` You can notice in this example that *apischema* tries to avoid using raw strings to identify fields. In every function of the library using fields identifier (`apischema.validator`, `apischema.dependent_required`, `apischema.fields.set_fields`, etc.), you have always three ways to pass them: - using field object, preferred in dataclass definition; - using `apischema.objects.get_field`, to be used outside of class definition; it works with `NamedTuple` too — the object returned is the *apischema* internal field representation, common to `dataclass`, `NamedTuple` and `TypedDict`; - using raw strings, thus not handled by static tools like refactoring, but it works; ### Field validators #### At field level Fields are validated according to their types and schema. 
But it's also possible to add validators to fields. ```python {!field_validator.py!} ``` When validation fails for a field, it is discarded and cannot be used in class validators, as is the case when field schema validation fails. !!! note `field_validator` allows reusing the the same validator for several fields. However in this case, using a custom type (for example a `NewType`) with validators (see [next section](#validators-for-every-new-types)) could often be a better solution. #### Using other fields A common pattern can be to validate a field using other fields values. This is achieved with dataclass validators seen above. However, there is a shortcut for this use case: ```python {!validator_field.py!} ``` ### Validators inheritance Validators are inherited just like other class fields. ```python {!validator_inheritance.py!} ``` ### Validator with `InitVar` Dataclasses `InitVar` are accessible in validators by using parameters the same way `__post_init__` does. Only the needed fields have to be put in parameters, they are then added to validator dependencies. ```python {!validator_post_init.py!} ``` ### Validators are not run on default values If all validator dependencies are initialized with their default values, they are not run. ```python {!validator_default.py!} ``` ## Validators for every type Validators can also be declared as regular functions, in which case annotation of the first param is used to associate it to the validated type (you can also use the `owner` parameter); this allows adding a validator to every type. Last but not least, validators can be embedded directly into `Annotated` arguments using `validators` metadata. ```python {!validator_function.py!} ``` ## FAQ #### How are validator dependencies computed? `ast.NodeVisitor` and the Python black magic begins... #### Why only validate at deserialization and not at instantiation? *apischema* uses type annotations, so every objects used can already be statically type-checked (with *Mypy*/*Pycharm*/etc.) at instantiation but also at modification. #### Why use validators for dataclasses instead of doing validation in `__post_init__`? Actually, validation can completely be done in `__post_init__`, there is no problem with that. However, validators offers one thing that cannot be achieved with `__post_init__`: they are run before `__init__`, so they can validate incomplete data. Moreover, they are only run during deserialization, so they don't add overhead to normal class instantiation. #### Why validators cannot raise arbitrary exception? Allowing arbitrary exception is in fact a security issue, because unwanted exception could be raised, and their message displayed in validation error. It could either contain sensitive data, or give information about the implementation which could be used to hack it. 
By the way, it's possible to define a decorator to convert precise exceptions to `ValidationError`: ```python from collections.abc import Callable from functools import wraps from typing import TypeVar from apischema import ValidationError Func = TypeVar("Func", bound=Callable) def catch(*exceptions) -> Callable[[Func], Func]: def decorator(func: Func) -> Func: @wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception as err: raise ValidationError(str(err)) if isinstance(err, exceptions) else err return wrapper return decorator ```apischema-0.18.3/examples/000077500000000000000000000000001467672046000153675ustar00rootroot00000000000000apischema-0.18.3/examples/__init__.py000066400000000000000000000000001467672046000174660ustar00rootroot00000000000000apischema-0.18.3/examples/additional_properties.py000066400000000000000000000004671467672046000223340ustar00rootroot00000000000000from dataclasses import dataclass import pytest from apischema import ValidationError, deserialize @dataclass class Foo: bar: str data = {"bar": "bar", "other": 42} with pytest.raises(ValidationError): deserialize(Foo, data) assert deserialize(Foo, data, additional_properties=True) == Foo("bar") apischema-0.18.3/examples/additional_types.py000066400000000000000000000010331467672046000212720ustar00rootroot00000000000000from dataclasses import dataclass from graphql import print_schema from apischema.graphql import graphql_schema, interface @interface @dataclass class Bar: bar: int @dataclass class Foo(Bar): baz: str def bar() -> Bar: ... schema = graphql_schema(query=[bar], types=[Foo]) # type Foo would have not been present if Foo was not put in types schema_str = """\ type Foo implements Bar { bar: Int! baz: String! } interface Bar { bar: Int! } type Query { bar: Bar! 
}""" assert print_schema(schema) == schema_str apischema-0.18.3/examples/alias.py000066400000000000000000000010761467672046000170360ustar00rootroot00000000000000from dataclasses import dataclass, field from apischema import alias, deserialize, serialize from apischema.json_schema import deserialization_schema @dataclass class Foo: class_: str = field(metadata=alias("class")) assert deserialization_schema(Foo) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "additionalProperties": False, "properties": {"class": {"type": "string"}}, "required": ["class"], "type": "object", } assert deserialize(Foo, {"class": "bar"}) == Foo("bar") assert serialize(Foo, Foo("bar")) == {"class": "bar"} apischema-0.18.3/examples/aliaser.py000066400000000000000000000013071467672046000173620ustar00rootroot00000000000000from dataclasses import dataclass, field from typing import Any from apischema import alias from apischema.json_schema import deserialization_schema @alias(lambda s: f"foo_{s}") @dataclass class Foo: field1: Any field2: Any = field(metadata=alias(override=False)) field3: Any = field(metadata=alias("field03")) field4: Any = field(metadata=alias("field04", override=False)) assert deserialization_schema(Foo) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "additionalProperties": False, "properties": {"foo_field1": {}, "field2": {}, "foo_field03": {}, "field04": {}}, "required": ["foo_field1", "field2", "foo_field03", "field04"], "type": "object", } apischema-0.18.3/examples/all_refs.py000066400000000000000000000024651467672046000175370ustar00rootroot00000000000000from dataclasses import dataclass from apischema.json_schema import deserialization_schema @dataclass class Bar: baz: str @dataclass class Foo: bar1: Bar bar2: Bar assert deserialization_schema(Foo, all_refs=False) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "$defs": { "Bar": { "additionalProperties": False, "properties": {"baz": {"type": "string"}}, "required": ["baz"], "type": "object", } }, "additionalProperties": False, "properties": {"bar1": {"$ref": "#/$defs/Bar"}, "bar2": {"$ref": "#/$defs/Bar"}}, "required": ["bar1", "bar2"], "type": "object", } assert deserialization_schema(Foo, all_refs=True) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "$defs": { "Bar": { "additionalProperties": False, "properties": {"baz": {"type": "string"}}, "required": ["baz"], "type": "object", }, "Foo": { "additionalProperties": False, "properties": { "bar1": {"$ref": "#/$defs/Bar"}, "bar2": {"$ref": "#/$defs/Bar"}, }, "required": ["bar1", "bar2"], "type": "object", }, }, "$ref": "#/$defs/Foo", } apischema-0.18.3/examples/as_names.py000066400000000000000000000011021467672046000175210ustar00rootroot00000000000000from enum import Enum from apischema import deserialize, serialize from apischema.conversions import as_names from apischema.json_schema import deserialization_schema, serialization_schema @as_names class MyEnum(Enum): FOO = object() BAR = object() assert deserialize(MyEnum, "FOO") == MyEnum.FOO assert serialize(MyEnum, MyEnum.FOO) == "FOO" assert ( deserialization_schema(MyEnum) == serialization_schema(MyEnum) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "string", "enum": ["FOO", "BAR"], } ) apischema-0.18.3/examples/as_str.py000066400000000000000000000011021467672046000172260ustar00rootroot00000000000000import bson import pytest from apischema import Unsupported, deserialize, serialize from apischema.conversions import as_str with 
pytest.raises(Unsupported): deserialize(bson.ObjectId, "0123456789ab0123456789ab") with pytest.raises(Unsupported): serialize(bson.ObjectId, bson.ObjectId("0123456789ab0123456789ab")) as_str(bson.ObjectId) assert deserialize(bson.ObjectId, "0123456789ab0123456789ab") == bson.ObjectId( "0123456789ab0123456789ab" ) assert ( serialize(bson.ObjectId, bson.ObjectId("0123456789ab0123456789ab")) == "0123456789ab0123456789ab" ) apischema-0.18.3/examples/base_schema.py000066400000000000000000000037571467672046000202070ustar00rootroot00000000000000from dataclasses import dataclass, field from typing import Any, Callable, get_origin import docstring_parser from apischema import schema, serialized, settings from apischema.json_schema import serialization_schema from apischema.schemas import Schema from apischema.type_names import get_type_name @dataclass class Foo: """Foo class :var bar: bar attribute""" bar: str = field(metadata=schema(max_len=10)) @serialized @property def baz(self) -> int: """baz method""" ... def type_base_schema(tp: Any) -> Schema | None: if not hasattr(tp, "__doc__"): return None return schema( title=get_type_name(tp).json_schema, description=docstring_parser.parse(tp.__doc__).short_description, ) def field_base_schema(tp: Any, name: str, alias: str) -> Schema | None: title = alias.replace("_", " ").capitalize() tp = get_origin(tp) or tp # tp can be generic for meta in docstring_parser.parse(tp.__doc__).meta: if meta.args == ["var", name]: return schema(title=title, description=meta.description) return schema(title=title) def method_base_schema(tp: Any, method: Callable, alias: str) -> Schema | None: return schema( title=alias.replace("_", " ").capitalize(), description=docstring_parser.parse(method.__doc__).short_description, ) settings.base_schema.type = type_base_schema settings.base_schema.field = field_base_schema settings.base_schema.method = method_base_schema assert serialization_schema(Foo) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "additionalProperties": False, "title": "Foo", "description": "Foo class", "properties": { "bar": { "description": "bar attribute", "title": "Bar", "type": "string", "maxLength": 10, }, "baz": {"description": "baz method", "title": "Baz", "type": "integer"}, }, "required": ["bar", "baz"], "type": "object", } apischema-0.18.3/examples/base_schema_parameter.py000066400000000000000000000023651467672046000222410ustar00rootroot00000000000000import inspect from dataclasses import dataclass from typing import Any, Callable import docstring_parser from graphql.utilities import print_schema from apischema import schema, settings from apischema.graphql import graphql_schema, resolver from apischema.schemas import Schema @dataclass class Foo: @resolver def bar(self, arg: str) -> int: """bar method :param arg: arg parameter """ ... def method_base_schema(tp: Any, method: Callable, alias: str) -> Schema | None: return schema(description=docstring_parser.parse(method.__doc__).short_description) def parameter_base_schema( method: Callable, parameter: inspect.Parameter, alias: str ) -> Schema | None: for doc_param in docstring_parser.parse(method.__doc__).params: if doc_param.arg_name == parameter.name: return schema(description=doc_param.description) return None settings.base_schema.method = method_base_schema settings.base_schema.parameter = parameter_base_schema def foo() -> Foo: ... schema_ = graphql_schema(query=[foo]) schema_str = '''\ type Query { foo: Foo! } type Foo { """bar method""" bar( """arg parameter""" arg: String! 
): Int! }''' assert print_schema(schema_) == schema_str apischema-0.18.3/examples/bypass_conversions.py000066400000000000000000000012711467672046000216730ustar00rootroot00000000000000from dataclasses import dataclass from apischema import identity, serialize, serializer from apischema.conversions import Conversion @dataclass class RGB: red: int green: int blue: int @serializer @property def hexa(self) -> str: return f"#{self.red:02x}{self.green:02x}{self.blue:02x}" assert serialize(RGB, RGB(0, 0, 0)) == "#000000" # dynamic conversion used to bypass the registered one assert serialize(RGB, RGB(0, 0, 0), conversion=identity) == { "red": 0, "green": 0, "blue": 0, } # Expended bypass form assert serialize( RGB, RGB(0, 0, 0), conversion=Conversion(identity, source=RGB, target=RGB) ) == {"red": 0, "green": 0, "blue": 0} apischema-0.18.3/examples/class_ordering.py000066400000000000000000000004131467672046000207350ustar00rootroot00000000000000import json from dataclasses import dataclass from apischema import order, serialize @order(["baz", "bar", "biz"]) @dataclass class Foo: bar: int baz: int biz: str assert json.dumps(serialize(Foo, Foo(0, 0, ""))) == '{"baz": 0, "bar": 0, "biz": ""}' apischema-0.18.3/examples/coercion.py000066400000000000000000000002601467672046000175400ustar00rootroot00000000000000import pytest from apischema import ValidationError, deserialize with pytest.raises(ValidationError): deserialize(bool, "ok") assert deserialize(bool, "ok", coerce=True) apischema-0.18.3/examples/coercion_function.py000066400000000000000000000007601467672046000214520ustar00rootroot00000000000000from typing import TypeVar, cast import pytest from apischema import ValidationError, deserialize T = TypeVar("T") def coerce(cls: type[T], data) -> T: """Only coerce int to bool""" if cls is bool and isinstance(data, int): return cast(T, bool(data)) else: return data with pytest.raises(ValidationError): deserialize(bool, 0) with pytest.raises(ValidationError): assert deserialize(bool, "ok", coerce=coerce) assert deserialize(bool, 1, coerce=coerce) apischema-0.18.3/examples/complex_schema.py000066400000000000000000000013401467672046000207260ustar00rootroot00000000000000from dataclasses import dataclass from typing import Optional from apischema.json_schema import deserialization_schema @dataclass class Node: value: int child: Optional["Node"] = None assert deserialization_schema(Node) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "$ref": "#/$defs/Node", "$defs": { "Node": { "type": "object", "properties": { "value": {"type": "integer"}, "child": { "anyOf": [{"$ref": "#/$defs/Node"}, {"type": "null"}], "default": None, }, }, "required": ["value"], "additionalProperties": False, } }, } apischema-0.18.3/examples/computed_dependencies.py000066400000000000000000000011341467672046000222660ustar00rootroot00000000000000from dataclasses import dataclass import pytest from apischema import ValidationError, deserialize, validator @dataclass class PasswordForm: password: str confirmation: str @validator def password_match(self): if self.password != self.confirmation: raise ValueError("password doesn't match its confirmation") with pytest.raises(ValidationError) as err: deserialize(PasswordForm, {"password": "p455w0rd"}) assert err.value.errors == [ # validator is not executed because confirmation is missing {"loc": ["confirmation"], "err": "missing property"} ] apischema-0.18.3/examples/conversion_object.py000066400000000000000000000005461467672046000214610ustar00rootroot00000000000000from base64 import 
b64decode from apischema import deserialize, deserializer from apischema.conversions import Conversion deserializer(Conversion(b64decode, source=str, target=bytes)) # Roughly equivalent to: # def decode_bytes(source: str) -> bytes: # return b64decode(source) # but saving a function call assert deserialize(bytes, "Zm9v") == b"foo" apischema-0.18.3/examples/conversions.py000066400000000000000000000020571467672046000203150ustar00rootroot00000000000000from dataclasses import dataclass from apischema import deserialize, schema, serialize from apischema.conversions import deserializer, serializer from apischema.json_schema import deserialization_schema, serialization_schema @schema(pattern=r"^#[0-9a-fA-F]{6}$") @dataclass class RGB: red: int green: int blue: int @serializer @property def hexa(self) -> str: return f"#{self.red:02x}{self.green:02x}{self.blue:02x}" # serializer can also be called with methods/properties outside of the class # For example, `serializer(RGB.hexa)` would have the same effect as the decorator above @deserializer def from_hexa(hexa: str) -> RGB: return RGB(int(hexa[1:3], 16), int(hexa[3:5], 16), int(hexa[5:7], 16)) assert deserialize(RGB, "#000000") == RGB(0, 0, 0) assert serialize(RGB, RGB(0, 0, 42)) == "#00002a" assert ( deserialization_schema(RGB) == serialization_schema(RGB) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "string", "pattern": "^#[0-9a-fA-F]{6}$", } ) apischema-0.18.3/examples/dataclass_init.py000066400000000000000000000023561467672046000207310ustar00rootroot00000000000000from dataclasses import InitVar, dataclass, field import pytest from apischema import ValidationError, deserialize, serialize, validator from apischema.json_schema import definitions_schema from apischema.metadata import init_var @dataclass class Foo: bar: int init_only: InitVar[int] = field(metadata=init_var(int)) no_init: int = field(init=False) def __post_init__(self, init_only: int): self.no_init = init_only # InitVar are passed as kwargs, like in __post_init__ @validator def validate(self, init_only: int): if self.bar == init_only: raise ValueError("Error") assert deserialize(Foo, {"bar": 0, "init_only": 1}) == Foo(0, 1) assert serialize(Foo, Foo(0, 1)) == {"bar": 0, "no_init": 1} with pytest.raises(ValidationError) as err: deserialize(Foo, {"bar": 0}) assert definitions_schema( deserialization=[Foo], serialization=[Foo], all_refs=True ) == { "Foo": { "type": "object", "properties": { "bar": {"type": "integer"}, "no_init": {"readOnly": True, "type": "integer"}, "init_only": {"writeOnly": True, "type": "integer"}, }, "additionalProperties": False, "required": ["bar", "init_only"], } } apischema-0.18.3/examples/de_serialization_methods.py000066400000000000000000000004761467672046000230200ustar00rootroot00000000000000from dataclasses import dataclass from apischema import deserialization_method, serialization_method @dataclass class Foo: bar: int deserialize_foo = deserialization_method(Foo) serialize_foo = serialization_method(Foo) assert deserialize_foo({"bar": 0}) == Foo(0) assert serialize_foo(Foo(0)) == {"bar": 0} apischema-0.18.3/examples/default_as_set.py000066400000000000000000000006341467672046000207260ustar00rootroot00000000000000from dataclasses import dataclass, field from apischema import serialize from apischema.fields import with_fields_set from apischema.metadata import default_as_set # Decorator needed to benefit from the feature @with_fields_set @dataclass class Foo: bar: int | None = field(default=None, metadata=default_as_set) assert 
serialize(Foo, Foo()) == {"bar": None} assert serialize(Foo, Foo(0)) == {"bar": 0} apischema-0.18.3/examples/definitions_schema.py000066400000000000000000000010661467672046000215770ustar00rootroot00000000000000from dataclasses import dataclass from apischema.json_schema import definitions_schema @dataclass class Bar: baz: int = 0 @dataclass class Foo: bar: Bar assert definitions_schema(deserialization=[list[Foo]], all_refs=True) == { "Foo": { "type": "object", "properties": {"bar": {"$ref": "#/$defs/Bar"}}, "required": ["bar"], "additionalProperties": False, }, "Bar": { "type": "object", "properties": {"baz": {"type": "integer", "default": 0}}, "additionalProperties": False, }, } apischema-0.18.3/examples/dependent_required.py000066400000000000000000000025101467672046000216050ustar00rootroot00000000000000from dataclasses import dataclass, field import pytest from apischema import ( Undefined, UndefinedType, ValidationError, dependent_required, deserialize, ) from apischema.json_schema import deserialization_schema @dataclass class Billing: name: str # Fields used in dependencies MUST be declared with `field` credit_card: int | UndefinedType = field(default=Undefined) billing_address: str | UndefinedType = field(default=Undefined) dependencies = dependent_required({credit_card: [billing_address]}) # it can also be done outside the class with # dependent_required({"credit_card": ["billing_address"]}, owner=Billing) assert deserialization_schema(Billing) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "additionalProperties": False, "dependentRequired": {"credit_card": ["billing_address"]}, "properties": { "name": {"type": "string"}, "credit_card": {"type": "integer"}, "billing_address": {"type": "string"}, }, "required": ["name"], "type": "object", } with pytest.raises(ValidationError) as err: deserialize(Billing, {"name": "Anonymous", "credit_card": 1234_5678_9012_3456}) assert err.value.errors == [ { "loc": ["billing_address"], "err": "missing property (required by ['credit_card'])", } ] apischema-0.18.3/examples/deserialization.py000066400000000000000000000006441467672046000211330ustar00rootroot00000000000000from collections.abc import Collection, Mapping from dataclasses import dataclass from typing import NewType from apischema import deserialize @dataclass class Foo: bar: str MyInt = NewType("MyInt", int) assert deserialize(Foo, {"bar": "bar"}) == Foo("bar") assert deserialize(MyInt, 0) == MyInt(0) == 0 assert deserialize(Mapping[str, Collection[Foo]], {"key": [{"bar": "42"}]}) == { "key": [Foo("42")] } apischema-0.18.3/examples/deserialization_pass_through.py000066400000000000000000000007111467672046000237140ustar00rootroot00000000000000from datetime import datetime, timedelta from apischema import deserialize start, end = datetime.now(), datetime.now() + timedelta(1) assert deserialize( tuple[datetime, datetime], [start, end], pass_through={datetime} ) == (start, end) # Passing through types can also be deserialized normally from JSON types assert deserialize( tuple[datetime, datetime], [start.isoformat(), end.isoformat()], pass_through={datetime}, ) == (start, end) apischema-0.18.3/examples/discard.py000066400000000000000000000030451467672046000173540ustar00rootroot00000000000000from dataclasses import dataclass, field import pytest from apischema import ValidationError, deserialize, validator from apischema.objects import get_alias, get_field @dataclass class BoundedValues: # field must be assign to be used, even with empty `field()` bounds: tuple[int, int] = field() 
values: list[int] # validator("bounds") would also work, but it's not handled by IDE refactoring, etc. @validator(discard=bounds) def bounds_are_sorted(self): min_bound, max_bound = self.bounds if min_bound > max_bound: yield get_alias(self).bounds, "bounds are not sorted" @validator def values_dont_exceed_bounds(self): min_bound, max_bound = self.bounds for index, value in enumerate(self.values): if not min_bound <= value <= max_bound: yield (get_alias(self).values, index), "value exceeds bounds" # Outside class, fields can still be accessed in a "static" way, to avoid use raw string @validator(discard=get_field(BoundedValues).bounds) def bounds_are_sorted_equivalent(bounded: BoundedValues): min_bound, max_bound = bounded.bounds if min_bound > max_bound: yield get_alias(bounded).bounds, "bounds are not sorted" with pytest.raises(ValidationError) as err: deserialize(BoundedValues, {"bounds": [10, 0], "values": [-1, 2, 4]}) assert err.value.errors == [ {"loc": ["bounds"], "err": "bounds are not sorted"} # Without discard, there would have been an other error # {"loc": ["values", 1], "err": "value exceeds bounds"} ] apischema-0.18.3/examples/discriminator_perf.py000066400000000000000000000015411467672046000216250ustar00rootroot00000000000000from dataclasses import dataclass from timeit import timeit from typing import Annotated from apischema import deserialization_method, discriminator @dataclass class Cat: love_dog: bool = False @dataclass class Dog: love_cat: bool = False deserialize_union = deserialization_method(Cat | Dog) deserialize_discriminated = deserialization_method( Annotated[Cat | Dog, discriminator("type")] ) ##### Without discrimininator print(timeit('deserialize_union({"love_dog": False})', globals=globals())) # Cat: 0.760085788 print(timeit('deserialize_union({"love_cat": False})', globals=globals())) # Dog: 3.078876515 ≈ x4 ##### With discriminator print(timeit('deserialize_discriminated({"type": "Cat"})', globals=globals())) # Cat: 1.244204702 print(timeit('deserialize_discriminated({"type": "Dog"})', globals=globals())) # Dog: 1.234058598 ≈ same apischema-0.18.3/examples/dynamic_conversions.py000066400000000000000000000017421467672046000220210ustar00rootroot00000000000000import os import time from dataclasses import dataclass from datetime import datetime from typing import Annotated from apischema import deserialize, serialize from apischema.metadata import conversion # Set UTC timezone for example os.environ["TZ"] = "UTC" time.tzset() def datetime_from_timestamp(timestamp: int) -> datetime: return datetime.fromtimestamp(timestamp) date = datetime(2017, 9, 2) assert deserialize(datetime, 1504310400, conversion=datetime_from_timestamp) == date @dataclass class Foo: bar: int baz: int def sum(self) -> int: return self.bar + self.baz @property def diff(self) -> int: return int(self.bar - self.baz) assert serialize(Foo, Foo(0, 1)) == {"bar": 0, "baz": 1} assert serialize(Foo, Foo(0, 1), conversion=Foo.sum) == 1 assert serialize(Foo, Foo(0, 1), conversion=Foo.diff) == -1 # conversions can be specified using Annotated assert serialize(Annotated[Foo, conversion(serialization=Foo.sum)], Foo(0, 1)) == 1 apischema-0.18.3/examples/dynamic_conversions_lsp.py000066400000000000000000000006331467672046000226750ustar00rootroot00000000000000from dataclasses import dataclass from apischema import deserialize, serialize @dataclass class Foo: field: int @dataclass class Bar(Foo): other: str def foo_to_int(foo: Foo) -> int: return foo.field def bar_from_int(i: int) -> Bar: return Bar(i, 
str(i)) assert serialize(Bar, Bar(0, ""), conversion=foo_to_int) == 0 assert deserialize(Foo, 0, conversion=bar_from_int) == Bar(0, "0") apischema-0.18.3/examples/dynamic_generic_conversions.py000066400000000000000000000012751467672046000235160ustar00rootroot00000000000000from collections.abc import Mapping, Sequence from operator import itemgetter from typing import TypeVar from apischema import serialize from apischema.json_schema import serialization_schema T = TypeVar("T") Priority = int def sort_by_priority(values_with_priority: Mapping[T, Priority]) -> Sequence[T]: return [k for k, _ in sorted(values_with_priority.items(), key=itemgetter(1))] assert serialize( dict[str, Priority], {"a": 1, "b": 0}, conversion=sort_by_priority ) == ["b", "a"] assert serialization_schema(dict[str, Priority], conversion=sort_by_priority) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "array", "items": {"type": "string"}, } apischema-0.18.3/examples/dynamic_type_name.py000066400000000000000000000012601467672046000214250ustar00rootroot00000000000000from dataclasses import dataclass from apischema import type_name from apischema.json_schema import serialization_schema @dataclass class Foo: pass @dataclass class Bar: pass def foo_to_bar(_: Foo) -> Bar: return Bar() type_name("Bars")(list[Bar]) assert serialization_schema(list[Foo], conversion=foo_to_bar, all_refs=True) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "$ref": "#/$defs/Bars", "$defs": { # Bars is present because `list[Foo]` is dynamically converted to `list[Bar]` "Bars": {"type": "array", "items": {"$ref": "#/$defs/Bar"}}, "Bar": {"type": "object", "additionalProperties": False}, }, } apischema-0.18.3/examples/enum_schemas.py000066400000000000000000000011361467672046000204110ustar00rootroot00000000000000from enum import Enum from graphql import graphql_sync from graphql.utilities import print_schema from apischema import schema from apischema.graphql import graphql_schema class MyEnum(Enum): FOO = "FOO" BAR = "BAR" def echo(enum: MyEnum) -> MyEnum: return enum schema_ = graphql_schema( query=[echo], enum_schemas={MyEnum.FOO: schema(description="foo")} ) schema_str = '''\ type Query { echo(enum: MyEnum!): MyEnum! 
} enum MyEnum { """foo""" FOO BAR }''' assert print_schema(schema_) == schema_str assert graphql_sync(schema_, "{echo(enum: FOO)}").data == {"echo": "FOO"} apischema-0.18.3/examples/examples/000077500000000000000000000000001467672046000172055ustar00rootroot00000000000000apischema-0.18.3/examples/examples/__init__.py000066400000000000000000000000001467672046000213040ustar00rootroot00000000000000apischema-0.18.3/examples/examples/attrs_support.py000066400000000000000000000020171467672046000225100ustar00rootroot00000000000000from typing import Sequence import attrs from apischema import deserialize, serialize, settings from apischema.json_schema import deserialization_schema from apischema.objects import ObjectField prev_default_object_fields = settings.default_object_fields def attrs_fields(cls: type) -> Sequence[ObjectField] | None: if hasattr(cls, "__attrs_attrs__"): return [ ObjectField( a.name, a.type, required=a.default == attrs.NOTHING, default=a.default ) for a in getattr(cls, "__attrs_attrs__") ] else: return prev_default_object_fields(cls) settings.default_object_fields = attrs_fields @attrs.define class Foo: bar: int assert deserialize(Foo, {"bar": 0}) == Foo(0) assert serialize(Foo, Foo(0)) == {"bar": 0} assert deserialization_schema(Foo) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": {"bar": {"type": "integer"}}, "required": ["bar"], "additionalProperties": False, } apischema-0.18.3/examples/examples/inherited_deserializer.py000066400000000000000000000031071467672046000242750ustar00rootroot00000000000000from collections.abc import Iterator from dataclasses import dataclass from typing import TypeVar from apischema import deserialize, deserializer from apischema.conversions import Conversion Foo_ = TypeVar("Foo_", bound="Foo") # Use a dataclass in order to be easily testable with == @dataclass class Foo: value: int @classmethod def deserialize(cls: type[Foo_], value: int) -> Foo_: return cls(value) def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) # Register subclasses' conversion in __init_subclass__ deserializer(Conversion(cls.deserialize, target=cls)) # Register main conversion after the class definition deserializer(Conversion(Foo.deserialize, target=Foo)) class Bar(Foo): pass assert deserialize(Foo, 0) == Foo(0) assert deserialize(Bar, 0) == Bar(0) # For external types (defines in imported library) @dataclass class ForeignType: value: int class ForeignSubtype(ForeignType): pass T = TypeVar("T") # Recursive implementation of type.__subclasses__ def rec_subclasses(cls: type[T]) -> Iterator[type[T]]: for sub_cls in cls.__subclasses__(): yield sub_cls yield from rec_subclasses(sub_cls) # Register deserializers for all subclasses for cls in (ForeignType, *rec_subclasses(ForeignType)): # cls=cls is an lambda idiom to capture variable by value inside loop deserializer(Conversion(lambda value, cls=cls: cls(value), source=int, target=cls)) assert deserialize(ForeignType, 0) == ForeignType(0) assert deserialize(ForeignSubtype, 0) == ForeignSubtype(0) apischema-0.18.3/examples/examples/pydantic_support.py000066400000000000000000000050121467672046000231640ustar00rootroot00000000000000import inspect from collections.abc import Mapping from typing import Any import pydantic import pytest from apischema import ( ValidationError, deserialize, schema, serialize, serializer, settings, ) from apischema.conversions import AnyConversion, Conversion from apischema.json_schema import deserialization_schema from 
apischema.schemas import Schema # ---------- Pydantic support code starts here ---------- prev_deserialization = settings.deserialization.default_conversion def default_deserialization(tp: Any) -> AnyConversion | None: if inspect.isclass(tp) and issubclass(tp, pydantic.BaseModel): def deserialize_pydantic(data): try: return tp.parse_obj(data) except pydantic.ValidationError as error: raise ValidationError.from_errors( [{"loc": err["loc"], "err": err["msg"]} for err in error.errors()] ) return Conversion( deserialize_pydantic, source=tp.__annotations__.get("__root__", Mapping[str, Any]), target=tp, ) else: return prev_deserialization(tp) settings.deserialization.default_conversion = default_deserialization prev_schema = settings.base_schema.type def default_schema(tp: Any) -> Schema | None: if inspect.isclass(tp) and issubclass(tp, pydantic.BaseModel): return schema(extra=tp.schema(), override=True) else: return prev_schema(tp) settings.base_schema.type = default_schema # No need to use settings.serialization because serializer is inherited @serializer def serialize_pydantic(obj: pydantic.BaseModel) -> Any: # There is currently no way to retrieve `serialize` parameters inside converters, # so exclude_unset is set to True as it's the default apischema setting return getattr(obj, "__root__", obj.dict(exclude_unset=True)) # ---------- Pydantic support code ends here ---------- class Foo(pydantic.BaseModel): bar: int assert deserialize(Foo, {"bar": 0}) == Foo(bar=0) assert serialize(Foo, Foo(bar=0)) == {"bar": 0} assert deserialization_schema(Foo) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "title": "Foo", # pydantic title "type": "object", "properties": {"bar": {"title": "Bar", "type": "integer"}}, "required": ["bar"], } with pytest.raises(ValidationError) as err: deserialize(Foo, {"bar": "not an int"}) assert err.value.errors == [ {"loc": ["bar"], "err": "value is not a valid integer"} # pydantic error message ] apischema-0.18.3/examples/examples/recoverable_fields.py000066400000000000000000000034771467672046000234110ustar00rootroot00000000000000from typing import Any, Dict, Generic, TypeVar import pytest from apischema import deserialize, deserializer, schema, serialize, serializer from apischema.json_schema import deserialization_schema, serialization_schema # Add a dummy placeholder comment in order to not have an empty schema # (because Union member with empty schema would "contaminate" whole Union schema) @schema(extra={"$comment": "recoverable"}) class RecoverableRaw(Exception): def __init__(self, raw: Any): self.raw = raw deserializer(RecoverableRaw) T = TypeVar("T") def remove_recoverable_schema(json_schema: Dict[str, Any]): if "anyOf" in json_schema: # deserialization schema value_schema, recoverable_comment = json_schema.pop("anyOf") assert recoverable_comment == {"$comment": "recoverable"} json_schema.update(value_schema) @schema(extra=remove_recoverable_schema) class Recoverable(Generic[T]): def __init__(self, value: T | RecoverableRaw): self._value = value @property def value(self) -> T: if isinstance(self._value, RecoverableRaw): raise self._value return self._value @value.setter def value(self, value: T): self._value = value deserializer(Recoverable) serializer(Recoverable.value) assert deserialize(Recoverable[int], 0).value == 0 with pytest.raises(RecoverableRaw) as err: _ = deserialize(Recoverable[int], "bad").value assert err.value.raw == "bad" assert serialize(Recoverable[int], Recoverable(0)) == 0 with pytest.raises(RecoverableRaw) as err: 
serialize(Recoverable[int], Recoverable(RecoverableRaw("bad"))) assert err.value.raw == "bad" assert ( deserialization_schema(Recoverable[int]) == serialization_schema(Recoverable[int]) == {"$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "integer"} ) apischema-0.18.3/examples/examples/sqlalchemy_support.py000066400000000000000000000040571467672046000235230ustar00rootroot00000000000000from collections.abc import Collection from inspect import getmembers from itertools import starmap from typing import Any from graphql import print_schema from sqlalchemy import Column, Integer, String from sqlalchemy.ext.declarative import as_declarative from apischema import Undefined, deserialize, serialize from apischema.graphql import graphql_schema from apischema.json_schema import deserialization_schema from apischema.objects import ObjectField, set_object_fields def column_field(name: str, column: Column) -> ObjectField: required = False default: Any = ... if column.default is not None: default = column.default elif column.server_default is not None: default = Undefined elif column.nullable: default = None else: required = True col_type = column.type.python_type if column.nullable: col_type = col_type | None return ObjectField(column.name or name, col_type, required, default=default) # Very basic SQLAlchemy support @as_declarative() class Base: def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) columns = getmembers(cls, lambda m: isinstance(m, Column)) if not columns: return set_object_fields(cls, starmap(column_field, columns)) class Foo(Base): __tablename__ = "foo" bar = Column(Integer, primary_key=True) baz = Column(String) foo = deserialize(Foo, {"bar": 0}) assert isinstance(foo, Foo) assert foo.bar == 0 assert serialize(Foo, foo) == {"bar": 0, "baz": None} assert deserialization_schema(Foo) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": { "bar": {"type": "integer"}, "baz": {"type": ["string", "null"], "default": None}, }, "required": ["bar"], "additionalProperties": False, } def foos() -> Collection[Foo] | None: ... schema = graphql_schema(query=[foos]) schema_str = """\ type Query { foos: [Foo!] } type Foo { bar: Int! 
baz: String }""" assert print_schema(schema) == schema_str apischema-0.18.3/examples/examples/subclass_tagged_union.py000066400000000000000000000130051467672046000241200ustar00rootroot00000000000000from collections import defaultdict from collections.abc import AsyncIterable, Callable, Iterator from dataclasses import dataclass, field from types import new_class from typing import Annotated, Any, TypeVar, get_type_hints import graphql from apischema import deserializer, schema, serializer, type_name from apischema.conversions import Conversion from apischema.graphql import graphql_schema from apischema.metadata import conversion from apischema.objects import object_deserialization from apischema.tagged_unions import Tagged, TaggedUnion, get_tagged from apischema.utils import to_pascal_case _alternative_constructors: dict[type, list[Callable]] = defaultdict(list) Func = TypeVar("Func", bound=Callable) def alternative_constructor(func: Func) -> Func: _alternative_constructors[get_type_hints(func)["return"]].append(func) return func def rec_subclasses(cls: type) -> Iterator[type]: """Recursive implementation of type.__subclasses__""" for sub_cls in cls.__subclasses__(): yield sub_cls yield from rec_subclasses(sub_cls) Cls = TypeVar("Cls", bound=type) def as_tagged_union(cls: Cls) -> Cls: def serialization() -> Conversion: annotations = {sub.__name__: Tagged[sub] for sub in rec_subclasses(cls)} namespace = {"__annotations__": annotations} tagged_union = new_class( cls.__name__, (TaggedUnion,), exec_body=lambda ns: ns.update(namespace) ) return Conversion( lambda obj: tagged_union(**{obj.__class__.__name__: obj}), source=cls, target=tagged_union, # Conversion must not be inherited because it would lead to # infinite recursion otherwise inherited=False, ) def deserialization() -> Conversion: annotations: dict[str, Any] = {} namespace: dict[str, Any] = {"__annotations__": annotations} for sub in rec_subclasses(cls): annotations[sub.__name__] = Tagged[sub] # Add tagged fields for all its alternative constructors for constructor in _alternative_constructors.get(sub, ()): # Build the alias of the field alias = to_pascal_case(constructor.__name__) # object_deserialization uses get_type_hints, but the constructor # return type is stringified and the class not defined yet, # so it must be assigned manually constructor.__annotations__["return"] = sub # Use object_deserialization to wrap constructor as deserializer deserialization = object_deserialization(constructor, type_name(alias)) # Add constructor tagged field with its conversion annotations[alias] = Tagged[sub] namespace[alias] = Tagged(conversion(deserialization=deserialization)) # Create the deserialization tagged union class tagged_union = new_class( cls.__name__, (TaggedUnion,), exec_body=lambda ns: ns.update(namespace) ) return Conversion( lambda obj: get_tagged(obj)[1], source=tagged_union, target=cls ) deserializer(lazy=deserialization, target=cls) serializer(lazy=serialization, source=cls) return cls @as_tagged_union class Drawing: def points(self) -> AsyncIterable[float]: raise NotImplementedError @dataclass class Line(Drawing): start: float stop: float step: float = field(default=1, metadata=schema(exc_min=0)) async def points(self) -> AsyncIterable[float]: point = self.start while point <= self.stop: yield point point += self.step @alternative_constructor def sized_line( start: float, stop: float, size: Annotated[float, schema(min=1)] ) -> "Line": return Line(start=start, stop=stop, step=(stop - start) / (size - 1)) @dataclass class 
Concat(Drawing): left: Drawing right: Drawing async def points(self) -> AsyncIterable[float]: async for point in self.left.points(): yield point async for point in self.right.points(): yield point def echo(drawing: Drawing = None) -> Drawing | None: return drawing drawing_schema = graphql_schema(query=[echo]) assert ( graphql.utilities.print_schema(drawing_schema) == """\ type Query { echo(drawing: DrawingInput): Drawing } type Drawing { Line: Line Concat: Concat } type Line { start: Float! stop: Float! step: Float! } type Concat { left: Drawing! right: Drawing! } input DrawingInput { Line: LineInput SizedLine: SizedLineInput Concat: ConcatInput } input LineInput { start: Float! stop: Float! step: Float! = 1 } input SizedLineInput { start: Float! stop: Float! size: Float! } input ConcatInput { left: DrawingInput! right: DrawingInput! }""" ) query = """\ { echo(drawing: { Concat: { left: { SizedLine: { start: 0, stop: 12, size: 3 }, }, right: { Line: { start: 12, stop: 13 }, } } }) { Concat { left { Line { start stop step } } right { Line { start stop step } } } } }""" assert graphql.graphql_sync(drawing_schema, query).data == { "echo": { "Concat": { "left": {"Line": {"start": 0.0, "stop": 12.0, "step": 6.0}}, "right": {"Line": {"start": 12.0, "stop": 13.0, "step": 1.0}}, } } } apischema-0.18.3/examples/examples/subclass_union.py000066400000000000000000000031361467672046000226110ustar00rootroot00000000000000from dataclasses import dataclass from typing import Any, Union from apischema import deserialize, deserializer, identity, serializer from apischema.conversions import Conversion from apischema.json_schema import deserialization_schema, serialization_schema class Base: _union: Any = None # You can use __init_subclass__ to register new subclass automatically def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) # Deserializers stack directly as a Union deserializer(Conversion(identity, source=cls, target=Base)) # Only Base serializer must be registered (and updated for each subclass) as # a Union, and not be inherited Base._union = cls if Base._union is None else Union[Base._union, cls] serializer( Conversion(identity, source=Base, target=Base._union, inherited=False) ) @dataclass class Foo(Base): foo: int @dataclass class Bar(Base): bar: str assert ( deserialization_schema(Base) == serialization_schema(Base) == { "anyOf": [ { "type": "object", "properties": {"foo": {"type": "integer"}}, "required": ["foo"], "additionalProperties": False, }, { "type": "object", "properties": {"bar": {"type": "string"}}, "required": ["bar"], "additionalProperties": False, }, ], "$schema": "http://json-schema.org/draft/2020-12/schema#", } ) assert deserialize(Base, {"foo": 0}) == Foo(0) apischema-0.18.3/examples/exclude_defaults_none.py000066400000000000000000000004011467672046000222730ustar00rootroot00000000000000from dataclasses import dataclass from apischema import serialize @dataclass class Foo: bar: int = 0 baz: str | None = None assert serialize(Foo, Foo(), exclude_defaults=True) == {} assert serialize(Foo, Foo(), exclude_none=True) == {"bar": 0} apischema-0.18.3/examples/exclude_unset.py000066400000000000000000000005541467672046000206140ustar00rootroot00000000000000from dataclasses import dataclass from apischema import serialize from apischema.fields import with_fields_set # Decorator needed to benefit from the feature @with_fields_set @dataclass class Foo: bar: int baz: str | None = None assert serialize(Foo, Foo(0)) == {"bar": 0} assert serialize(Foo, Foo(0), 
exclude_unset=False) == {"bar": 0, "baz": None} apischema-0.18.3/examples/fall_back_on_default.py000066400000000000000000000007151467672046000220420ustar00rootroot00000000000000from dataclasses import dataclass, field import pytest from apischema import ValidationError, deserialize from apischema.metadata import fall_back_on_default @dataclass class Foo: bar: str = "bar" baz: str = field(default="baz", metadata=fall_back_on_default) with pytest.raises(ValidationError): deserialize(Foo, {"bar": 0}) assert deserialize(Foo, {"bar": 0}, fall_back_on_default=True) == Foo() assert deserialize(Foo, {"baz": 0}) == Foo() apischema-0.18.3/examples/field_conversions.py000066400000000000000000000015241467672046000214560ustar00rootroot00000000000000import os import time from dataclasses import dataclass, field from datetime import datetime from apischema import deserialize, serialize from apischema.conversions import Conversion from apischema.metadata import conversion # Set UTC timezone for example os.environ["TZ"] = "UTC" time.tzset() from_timestamp = Conversion(datetime.fromtimestamp, source=int, target=datetime) def to_timestamp(d: datetime) -> int: return int(d.timestamp()) @dataclass class Foo: some_date: datetime = field(metadata=conversion(from_timestamp, to_timestamp)) other_date: datetime assert deserialize(Foo, {"some_date": 0, "other_date": "2019-10-13"}) == Foo( datetime(1970, 1, 1), datetime(2019, 10, 13) ) assert serialize(Foo, Foo(datetime(1970, 1, 1), datetime(2019, 10, 13))) == { "some_date": 0, "other_date": "2019-10-13T00:00:00", } apischema-0.18.3/examples/field_metadata.py000066400000000000000000000007311467672046000206650ustar00rootroot00000000000000from dataclasses import dataclass, field from typing import Annotated from apischema import alias, schema from apischema.metadata import required @dataclass class Foo: bar: int = field( default=0, metadata=alias("foo_bar") | schema(title="foo! bar!", min=0, max=42) | required, ) baz: Annotated[ int, alias("foo_baz"), schema(title="foo! 
baz!", min=0, max=32), required
    ] = 0  # pipe `|` operator can also be used in Annotated

apischema-0.18.3/examples/field_validator.py
from dataclasses import dataclass, field

import pytest

from apischema import ValidationError, deserialize
from apischema.metadata import validators


def check_no_duplicate_digits(n: int):
    if len(str(n)) != len(set(str(n))):
        raise ValidationError("number has duplicate digits")


@dataclass
class Foo:
    bar: str = field(metadata=validators(check_no_duplicate_digits))


with pytest.raises(ValidationError) as err:
    deserialize(Foo, {"bar": "11"})
assert err.value.errors == [{"loc": ["bar"], "err": "number has duplicate digits"}]

apischema-0.18.3/examples/fields_set.py
from dataclasses import dataclass

from apischema import deserialize
from apischema.fields import (
    fields_set,
    is_set,
    set_fields,
    unset_fields,
    with_fields_set,
)


# This decorator enables the feature
@with_fields_set
@dataclass
class Foo:
    bar: int
    baz: str | None = None


# Retrieve fields set
foo1 = Foo(0, None)
assert fields_set(foo1) == {"bar", "baz"}
foo2 = Foo(0)
assert fields_set(foo2) == {"bar"}

# Test fields individually (with autocompletion and refactoring)
assert is_set(foo1).baz
assert not is_set(foo2).baz

# Mark fields as set/unset
set_fields(foo2, "baz")
assert fields_set(foo2) == {"bar", "baz"}
unset_fields(foo2, "baz")
assert fields_set(foo2) == {"bar"}
set_fields(foo2, "baz", overwrite=True)
assert fields_set(foo2) == {"baz"}

# Field modifications are taken into account
foo2.bar = 0
assert fields_set(foo2) == {"bar", "baz"}

# Because deserialization uses the normal constructor, it works with the feature
foo3 = deserialize(Foo, {"bar": 0})
assert fields_set(foo3) == {"bar"}

apischema-0.18.3/examples/flattened.py
from dataclasses import dataclass, field

from apischema import Undefined, UndefinedType, alias, deserialize, serialize
from apischema.fields import with_fields_set
from apischema.json_schema import deserialization_schema
from apischema.metadata import flatten


@dataclass
class JsonSchema:
    title: str | UndefinedType = Undefined
    description: str | UndefinedType = Undefined
    format: str | UndefinedType = Undefined
    ...
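# --- Editor's note (illustrative sketch, not part of the original example) ---
# Because every JsonSchema field above defaults to Undefined, unset fields are
# simply omitted from the serialized output; under that assumption one would
# expect something like:
# assert serialize(JsonSchema, JsonSchema()) == {}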
@with_fields_set
@dataclass
class RootJsonSchema:
    schema: str | UndefinedType = field(default=Undefined, metadata=alias("$schema"))
    defs: list[JsonSchema] = field(default_factory=list, metadata=alias("$defs"))
    # This field schema is flattened inside the owning one
    json_schema: JsonSchema = field(default_factory=JsonSchema, metadata=flatten)


data = {
    "$schema": "http://json-schema.org/draft/2020-12/schema#",
    "title": "flattened example",
}
root_schema = RootJsonSchema(
    schema="http://json-schema.org/draft/2020-12/schema#",
    json_schema=JsonSchema(title="flattened example"),
)
assert deserialize(RootJsonSchema, data) == root_schema
assert serialize(RootJsonSchema, root_schema) == data
assert deserialization_schema(RootJsonSchema) == {
    "$schema": "http://json-schema.org/draft/2020-12/schema#",
    "$defs": {
        "JsonSchema": {
            "type": "object",
            "properties": {
                "title": {"type": "string"},
                "description": {"type": "string"},
                "format": {"type": "string"},
            },
            "additionalProperties": False,
        }
    },
    # It results in allOf + unevaluatedProperties=False
    "allOf": [
        # RootJsonSchema (without JsonSchema)
        {
            "type": "object",
            "properties": {
                "$schema": {"type": "string"},
                "$defs": {
                    "type": "array",
                    "items": {"$ref": "#/$defs/JsonSchema"},
                    "default": [],
                },
            },
            "additionalProperties": False,
        },
        # JsonSchema
        {"$ref": "#/$defs/JsonSchema"},
    ],
    "unevaluatedProperties": False,
}

apischema-0.18.3/examples/generic.py
from dataclasses import dataclass
from typing import Generic, TypeVar

import pytest

from apischema import ValidationError, deserialize

T = TypeVar("T")


@dataclass
class Box(Generic[T]):
    content: T


assert deserialize(Box[str], {"content": "void"}) == Box("void")
with pytest.raises(ValidationError):
    deserialize(Box[str], {"content": 42})

apischema-0.18.3/examples/generic_conversions.py
from typing import Generic, TypeVar

import pytest

from apischema import ValidationError, deserialize, serialize
from apischema.conversions import deserializer, serializer
from apischema.json_schema import deserialization_schema, serialization_schema

T = TypeVar("T")


class Wrapper(Generic[T]):
    def __init__(self, wrapped: T):
        self.wrapped = wrapped

    @serializer
    def unwrap(self) -> T:
        return self.wrapped


# Wrapper constructor can be used as a function too (so deserializer could work as decorator)
deserializer(Wrapper)

assert deserialize(Wrapper[list[int]], [0, 1]).wrapped == [0, 1]
with pytest.raises(ValidationError):
    deserialize(Wrapper[int], "wrapped")
assert serialize(Wrapper[str], Wrapper("wrapped")) == "wrapped"
assert (
    deserialization_schema(Wrapper[int])
    == {"$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "integer"}
    == serialization_schema(Wrapper[int])
)

apischema-0.18.3/examples/generic_type_name.py
from dataclasses import dataclass, field
from typing import Generic, TypeVar

from apischema import type_name
from apischema.json_schema import deserialization_schema
from apischema.metadata import flatten

T = TypeVar("T")


# Type name factory takes the type and its arguments as (positional) parameters
@type_name(lambda tp, arg: f"{arg.__name__}Resource")
@dataclass
class Resource(Generic[T]):
    id: int
    content: T = field(metadata=flatten)
    ...
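# --- Editor's note (illustrative sketch, not part of the original example) ---
# With the factory above, a parametrization such as Resource[Foo] (Foo is defined
# just below) is named "FooResource" in the generated JSON schema; another
# parameter, e.g. Resource[Bar], would be named "BarResource" the same way.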
@dataclass class Foo: bar: str assert deserialization_schema(Resource[Foo], all_refs=True) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "$ref": "#/$defs/FooResource", "$defs": { "FooResource": { "allOf": [ { "type": "object", "properties": {"id": {"type": "integer"}}, "required": ["id"], "additionalProperties": False, }, {"$ref": "#/$defs/Foo"}, ], "unevaluatedProperties": False, }, "Foo": { "type": "object", "properties": {"bar": {"type": "string"}}, "required": ["bar"], "additionalProperties": False, }, }, } apischema-0.18.3/examples/graphql_overview.py000066400000000000000000000030651467672046000213310ustar00rootroot00000000000000from dataclasses import dataclass from datetime import date, datetime from typing import Collection from uuid import UUID, uuid4 from graphql import graphql_sync, print_schema from apischema.graphql import graphql_schema, resolver @dataclass class User: id: UUID username: str birthday: date | None = None @resolver def posts(self) -> Collection["Post"]: return [post for post in POSTS if post.author.id == self.id] @dataclass class Post: id: UUID author: User date: datetime content: str USERS = [User(uuid4(), "foo"), User(uuid4(), "bar")] POSTS = [Post(uuid4(), USERS[0], datetime.now(), "Hello world!")] def users() -> Collection[User]: return USERS def posts() -> Collection[Post]: return POSTS def user(username: str) -> User | None: for user in users(): if user.username == username: return user else: return None schema = graphql_schema(query=[users, user, posts], id_types={UUID}) schema_str = """\ type Query { users: [User!]! user(username: String!): User posts: [Post!]! } type User { id: ID! username: String! birthday: Date posts: [Post!]! } scalar Date type Post { id: ID! author: User! date: Datetime! content: String! } scalar Datetime""" assert print_schema(schema) == schema_str query = """ { users { username posts { content } } }""" assert graphql_sync(schema, query).data == { "users": [ {"username": "foo", "posts": [{"content": "Hello world!"}]}, {"username": "bar", "posts": []}, ] } apischema-0.18.3/examples/graphql_type_name.py000066400000000000000000000006141467672046000214410ustar00rootroot00000000000000from dataclasses import dataclass from graphql import print_schema from apischema import type_name from apischema.graphql import graphql_schema @type_name("Foo") @dataclass class FooFoo: bar: int def foo() -> FooFoo | None: ... schema = graphql_schema(query=[foo]) schema_str = """\ type Query { foo: Foo } type Foo { bar: Int! 
}""" assert print_schema(schema) == schema_str apischema-0.18.3/examples/id_conversion.py000066400000000000000000000011401467672046000205760ustar00rootroot00000000000000from base64 import b64decode, b64encode from dataclasses import dataclass from uuid import UUID from graphql import graphql_sync from apischema.graphql import graphql_schema @dataclass class Foo: id: UUID def foo() -> Foo | None: return Foo(UUID("58c88e87-5769-4723-8974-f9ec5007a38b")) schema = graphql_schema( query=[foo], id_types={UUID}, id_encoding=( lambda s: b64decode(s).decode(), lambda s: b64encode(s.encode()).decode(), ), ) assert graphql_sync(schema, "{foo{id}}").data == { "foo": {"id": "NThjODhlODctNTc2OS00NzIzLTg5NzQtZjllYzUwMDdhMzhi"} } apischema-0.18.3/examples/id_type.py000066400000000000000000000006751467672046000174060ustar00rootroot00000000000000from dataclasses import dataclass from uuid import UUID from graphql import print_schema from apischema.graphql import graphql_schema @dataclass class Foo: bar: UUID def foo() -> Foo | None: ... # id_types={UUID} is equivalent to id_types=lambda t: t in {UUID} schema = graphql_schema(query=[foo], id_types={UUID}) schema_str = """\ type Query { foo: Foo } type Foo { bar: ID! }""" assert print_schema(schema) == schema_str apischema-0.18.3/examples/inherited_discriminator.py000066400000000000000000000020721467672046000226440ustar00rootroot00000000000000from dataclasses import dataclass from apischema import deserialize, discriminator, serialize from apischema.json_schema import deserialization_schema @discriminator("type") class Pet: pass @dataclass class Cat(Pet): pass @dataclass class Dog(Pet): pass data = {"type": "Dog"} assert deserialize(Pet, data) == deserialize(Cat | Dog, data) == Dog() assert serialize(Pet, Dog()), serialize(Cat | Dog, Dog()) == data assert ( deserialization_schema(Pet) == deserialization_schema(Cat | Dog) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "oneOf": [{"$ref": "#/$defs/Cat"}, {"$ref": "#/$defs/Dog"}], "$defs": { "Pet": { "type": "object", "required": ["type"], "properties": {"type": {"type": "string"}}, "discriminator": {"propertyName": "type"}, }, "Cat": {"allOf": [{"$ref": "#/$defs/Pet"}, {"type": "object"}]}, "Dog": {"allOf": [{"$ref": "#/$defs/Pet"}, {"type": "object"}]}, }, } ) apischema-0.18.3/examples/interface.py000066400000000000000000000007211467672046000177010ustar00rootroot00000000000000from dataclasses import dataclass from graphql import print_schema from apischema.graphql import graphql_schema, interface @interface @dataclass class Bar: bar: int @dataclass class Foo(Bar): baz: str def foo() -> Foo | None: ... schema = graphql_schema(query=[foo]) schema_str = """\ type Query { foo: Foo } type Foo implements Bar { bar: Int! baz: String! } interface Bar { bar: Int! 
}""" assert print_schema(schema) == schema_str apischema-0.18.3/examples/json_schema.py000066400000000000000000000007031467672046000202320ustar00rootroot00000000000000from dataclasses import dataclass from apischema.json_schema import deserialization_schema, serialization_schema @dataclass class Foo: bar: str assert deserialization_schema(Foo) == serialization_schema(Foo) assert deserialization_schema(Foo) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "additionalProperties": False, "properties": {"bar": {"type": "string"}}, "required": ["bar"], "type": "object", } apischema-0.18.3/examples/lazy_as_object.py000066400000000000000000000005641467672046000207360ustar00rootroot00000000000000from apischema import deserialize, serialize from apischema.objects import ObjectField, set_object_fields class Foo: def __init__(self, bar: int): self.bar = bar set_object_fields(Foo, lambda: [ObjectField("bar", int, required=True)]) foo = deserialize(Foo, {"bar": 0}) assert type(foo) == Foo and foo.bar == 0 assert serialize(Foo, Foo(0)) == {"bar": 0} apischema-0.18.3/examples/lazy_registered_conversion.py000066400000000000000000000007201467672046000234010ustar00rootroot00000000000000from dataclasses import dataclass from apischema import deserialize, deserializer, serialize, serializer from apischema.conversions import Conversion @dataclass class Foo: bar: int deserializer( lazy=lambda: Conversion(lambda bar: Foo(bar), source=int, target=Foo), target=Foo ) serializer( lazy=lambda: Conversion(lambda foo: foo.bar, source=Foo, target=int), source=Foo ) assert deserialize(Foo, 0) == Foo(0) assert serialize(Foo, Foo(0)) == 0 apischema-0.18.3/examples/local_conversions.py000066400000000000000000000012311467672046000214600ustar00rootroot00000000000000import os import time from dataclasses import dataclass from datetime import datetime from apischema import serialize # Set UTC timezone for example os.environ["TZ"] = "UTC" time.tzset() def to_timestamp(d: datetime) -> int: return int(d.timestamp()) @dataclass class Foo: bar: datetime # timestamp conversion is not applied on Foo field because it's discarded # when encountering Foo assert serialize(Foo, Foo(datetime(2019, 10, 13)), conversion=to_timestamp) == { "bar": "2019-10-13T00:00:00" } # timestamp conversion is applied on every member of list assert serialize(list[datetime], [datetime(1970, 1, 1)], conversion=to_timestamp) == [0] apischema-0.18.3/examples/multiple_deserializers.py000066400000000000000000000013011467672046000225140ustar00rootroot00000000000000from dataclasses import dataclass from apischema import deserialize, deserializer from apischema.json_schema import deserialization_schema @dataclass class Expression: value: int @deserializer def evaluate_expression(expr: str) -> Expression: return Expression(int(eval(expr))) # Could be shorten into deserializer(Expression), because class is callable too @deserializer def expression_from_value(value: int) -> Expression: return Expression(value) assert deserialization_schema(Expression) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": ["string", "integer"], } assert deserialize(Expression, 0) == deserialize(Expression, "1 - 1") == Expression(0) apischema-0.18.3/examples/no_copy.py000066400000000000000000000006431467672046000174120ustar00rootroot00000000000000from timeit import timeit from apischema import deserialize ints = list(range(100)) assert deserialize(list[int], ints, no_copy=True) is ints # default assert deserialize(list[int], ints, no_copy=False) is not 
ints
print(timeit("deserialize(list[int], ints, no_copy=True)", globals=globals()))
# 8.596703557006549
print(timeit("deserialize(list[int], ints, no_copy=False)", globals=globals()))
# 9.365363762015477

apischema-0.18.3/examples/none_as_undefined.py
from dataclasses import dataclass, field

import pytest

from apischema import ValidationError, deserialize, serialize
from apischema.json_schema import deserialization_schema, serialization_schema
from apischema.metadata import none_as_undefined


@dataclass
class Foo:
    bar: str | None = field(default=None, metadata=none_as_undefined)


assert (
    deserialization_schema(Foo)
    == serialization_schema(Foo)
    == {
        "$schema": "http://json-schema.org/draft/2020-12/schema#",
        "type": "object",
        "properties": {"bar": {"type": "string"}},
        "additionalProperties": False,
    }
)
with pytest.raises(ValidationError):
    deserialize(Foo, {"bar": None})
assert serialize(Foo, Foo(None)) == {}

apischema-0.18.3/examples/object_deserialization.py
from apischema import deserialize, deserializer, type_name
from apischema.json_schema import deserialization_schema
from apischema.objects import object_deserialization


def create_range(start: int, stop: int, step: int = 1) -> range:
    return range(start, stop, step)


range_conv = object_deserialization(create_range, type_name("Range"))
# Conversion can be registered
deserializer(range_conv)

assert deserialize(range, {"start": 0, "stop": 10}) == range(0, 10)
assert deserialization_schema(range) == {
    "$schema": "http://json-schema.org/draft/2020-12/schema#",
    "type": "object",
    "properties": {
        "start": {"type": "integer"},
        "stop": {"type": "integer"},
        "step": {"type": "integer", "default": 1},
    },
    "required": ["start", "stop"],
    "additionalProperties": False,
}

apischema-0.18.3/examples/object_serialization.py
from dataclasses import dataclass
from typing import Any

from apischema import alias, serialize, type_name
from apischema.json_schema import JsonSchemaVersion, definitions_schema
from apischema.objects import get_field, object_serialization


@dataclass
class Data:
    id: int
    content: str

    @property
    def size(self) -> int:
        return len(self.content)

    def get_details(self) -> Any:
        ...
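# --- Editor's note (illustrative sketch, not part of the original example) ---
# Field references can also be given as plain strings; assuming the Data class
# above, a minimal conversion keeping only "id" could look like:
# id_only = object_serialization(Data, ["id"], type_name("DataId"))
# serialize(Data, Data(0, "data"), conversion=id_only) would then give {"id": 0}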
# Serialization fields can be a str/field or a function/method/property size_only = object_serialization( Data, [get_field(Data).id, Data.size], type_name("DataSize") ) # ["id", Data.size] would also work def complete_data(): return [ ..., # shortcut to include all the fields Data.size, (Data.get_details, alias("details")), # add/override metadata using tuple ] # Serialization fields computation can be deferred in a function # The serialization name will then be defaulted to the function name complete = object_serialization(Data, complete_data) data = Data(0, "data") assert serialize(Data, data, conversion=size_only) == {"id": 0, "size": 4} assert serialize(Data, data, conversion=complete) == { "id": 0, "content": "data", "size": 4, "details": None, # because get_details return None in this example } assert definitions_schema( serialization=[(Data, size_only), (Data, complete)], version=JsonSchemaVersion.OPEN_API_3_0, ) == { "DataSize": { "type": "object", "properties": {"id": {"type": "integer"}, "size": {"type": "integer"}}, "required": ["id", "size"], "additionalProperties": False, }, "CompleteData": { "type": "object", "properties": { "id": {"type": "integer"}, "content": {"type": "string"}, "size": {"type": "integer"}, "details": {}, }, "required": ["id", "content", "size", "details"], "additionalProperties": False, }, } apischema-0.18.3/examples/operation.py000066400000000000000000000007411467672046000177430ustar00rootroot00000000000000from dataclasses import dataclass from graphql import print_schema from apischema.graphql import Query, graphql_schema, resolver @dataclass class Foo: @resolver async def bar(self, arg: int = 0) -> str: ... async def get_foo() -> Foo: ... schema = graphql_schema(query=[Query(get_foo, alias="foo", error_handler=None)]) schema_str = """\ type Query { foo: Foo } type Foo { bar(arg: Int! = 0): String! 
}""" assert print_schema(schema) == schema_str apischema-0.18.3/examples/ordering.py000066400000000000000000000020631467672046000175530ustar00rootroot00000000000000import json from dataclasses import dataclass, field from datetime import date from apischema import order, serialize, serialized @order({"trigram": order(-1)}) @dataclass class User: firstname: str lastname: str address: str = field(metadata=order(after="birthdate")) birthdate: date = field() @serialized @property def trigram(self) -> str: return (self.firstname[0] + self.lastname[0] + self.lastname[-1]).lower() @serialized(order=order(before=birthdate)) @property def age(self) -> int: age = date.today().year - self.birthdate.year if age > 0 and (date.today().month, date.today().day) < ( self.birthdate.month, self.birthdate.day, ): age -= 1 return age user = User("Harry", "Potter", "London", date(1980, 7, 31)) dump = f"""{{ "trigram": "hpr", "firstname": "Harry", "lastname": "Potter", "age": {user.age}, "birthdate": "1980-07-31", "address": "London" }}""" assert json.dumps(serialize(User, user), indent=4) == dump apischema-0.18.3/examples/pass_through.py000066400000000000000000000005121467672046000204450ustar00rootroot00000000000000from collections.abc import Collection from uuid import UUID, uuid4 from apischema import PassThroughOptions, serialization_method uuids_method = serialization_method( Collection[UUID], pass_through=PassThroughOptions(collections=True, types={UUID}) ) uuids = [uuid4() for _ in range(5)] assert uuids_method(uuids) is uuids apischema-0.18.3/examples/pass_through_primitives.py000066400000000000000000000001411467672046000227160ustar00rootroot00000000000000from apischema import serialize ints = list(range(5)) assert serialize(list[int], ints) is ints apischema-0.18.3/examples/properties.py000066400000000000000000000023341467672046000201370ustar00rootroot00000000000000from collections.abc import Mapping from dataclasses import dataclass, field from typing import Annotated from apischema import deserialize, properties, schema from apischema.json_schema import deserialization_schema @dataclass class Config: active: bool = True server_options: Mapping[str, bool] = field( default_factory=dict, metadata=properties(pattern=r"^server_") ) client_options: Mapping[ Annotated[str, schema(pattern=r"^client_")], bool # noqa: F722 ] = field(default_factory=dict, metadata=properties(...)) options: Mapping[str, bool] = field(default_factory=dict, metadata=properties) assert deserialize( Config, {"use_lightsaber": True, "server_auto_restart": False, "client_timeout": False}, ) == Config( True, {"server_auto_restart": False}, {"client_timeout": False}, {"use_lightsaber": True}, ) assert deserialization_schema(Config) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": {"active": {"type": "boolean", "default": True}}, "additionalProperties": {"type": "boolean"}, "patternProperties": { "^server_": {"type": "boolean"}, "^client_": {"type": "boolean"}, }, } apischema-0.18.3/examples/pydantic_conversion.py000066400000000000000000000040541467672046000220240ustar00rootroot00000000000000import re from typing import NamedTuple, NewType import pydantic.validators import apischema # Serialization can only be customized into the enclosing models class RGB(NamedTuple): red: int green: int blue: int # If you don't put this method, RGB schema will be: # {'title': 'Rgb', 'type': 'array', 'items': {}} @classmethod def __modify_schema__(cls, field_schema) -> None: field_schema.update({"type": 
"string", "pattern": r"#[0-9A-Fa-f]{6}"}) field_schema.pop("items", ...) @classmethod def __get_validators__(cls): yield pydantic.validators.str_validator yield cls.validate @classmethod def validate(cls, value) -> "RGB": if ( not isinstance(value, str) or re.fullmatch(r"#[0-9A-Fa-f]{6}", value) is None ): raise ValueError("Invalid RGB") return RGB( red=int(value[1:3], 16), green=int(value[3:5], 16), blue=int(value[5:7], 16) ) # Simpler with apischema class RGB(NamedTuple): red: int green: int blue: int # NewType can be used to add schema to conversion source/target # but Annotated[str, apischema.schema(pattern=r"#[0-9A-Fa-f]{6}")] would have worked too HexaRGB = NewType("HexaRGB", str) # pattern is used in JSON schema and in deserialization validation apischema.schema(pattern=r"#[0-9A-Fa-f]{6}")(HexaRGB) @apischema.deserializer # could be declared as a staticmethod of RGB class def from_hexa(hexa: HexaRGB) -> RGB: return RGB(int(hexa[1:3], 16), int(hexa[3:5], 16), int(hexa[5:7], 16)) @apischema.serializer # could be declared as a method/property of RGB class def to_hexa(rgb: RGB) -> HexaRGB: return HexaRGB(f"#{rgb.red:02x}{rgb.green:02x}{rgb.blue:02x}") assert ( # schema is inherited from deserialized type apischema.json_schema.deserialization_schema(RGB) == apischema.json_schema.deserialization_schema(HexaRGB) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "string", "pattern": "#[0-9A-Fa-f]{6}", } ) apischema-0.18.3/examples/pydantic_validator.py000066400000000000000000000021051467672046000216170ustar00rootroot00000000000000from dataclasses import dataclass import pydantic import apischema class UserModel(pydantic.BaseModel): username: str password1: str password2: str @pydantic.root_validator def check_passwords_match(cls, values): # This is a classmethod (it needs a plugin to not raise a warning in your IDE) # What is the type of of values? of values['password1']? 
# If you rename password1 field, validator will hardly be updated # You also have to test yourself that values are provided pw1, pw2 = values.get("password1"), values.get("password2") if pw1 is not None and pw2 is not None and pw1 != pw2: raise ValueError("passwords do not match") return values @dataclass class LoginForm: username: str password1: str password2: str @apischema.validator def check_password_match(self): # Typed checked, simpler, and not executed if password1 or password2 # are missing/invalid if self.password1 != self.password2: raise ValueError("passwords do not match") apischema-0.18.3/examples/quickstart.py000066400000000000000000000035571467672046000201450ustar00rootroot00000000000000from collections.abc import Collection from dataclasses import dataclass, field from uuid import UUID, uuid4 import pytest from graphql import print_schema from apischema import ValidationError, deserialize, serialize from apischema.graphql import graphql_schema from apischema.json_schema import deserialization_schema # Define a schema with standard dataclasses @dataclass class Resource: id: UUID name: str tags: set[str] = field(default_factory=set) # Get some data uuid = uuid4() data = {"id": str(uuid), "name": "wyfo", "tags": ["some_tag"]} # Deserialize data resource = deserialize(Resource, data) assert resource == Resource(uuid, "wyfo", {"some_tag"}) # Serialize objects assert serialize(Resource, resource) == data # Validate during deserialization with pytest.raises(ValidationError) as err: # pytest checks exception is raised deserialize(Resource, {"id": "42", "name": "wyfo"}) assert err.value.errors == [ {"loc": ["id"], "err": "badly formed hexadecimal UUID string"} ] # Generate JSON Schema assert deserialization_schema(Resource) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": { "id": {"type": "string", "format": "uuid"}, "name": {"type": "string"}, "tags": { "type": "array", "items": {"type": "string"}, "uniqueItems": True, "default": [], }, }, "required": ["id", "name"], "additionalProperties": False, } # Define GraphQL operations def resources(tags: Collection[str] | None = None) -> Collection[Resource] | None: ... # Generate GraphQL schema schema = graphql_schema(query=[resources], id_types={UUID}) schema_str = """\ type Query { resources(tags: [String!]): [Resource!] } type Resource { id: ID! name: String! tags: [String!]! 
}""" assert print_schema(schema) == schema_str apischema-0.18.3/examples/recursive.py000066400000000000000000000004001467672046000177420ustar00rootroot00000000000000from dataclasses import dataclass from typing import Optional from apischema import deserialize @dataclass class Node: value: int child: Optional["Node"] = None assert deserialize(Node, {"value": 0, "child": {"value": 1}}) == Node(0, Node(1)) apischema-0.18.3/examples/recursive_conversions.py000066400000000000000000000012021467672046000223730ustar00rootroot00000000000000from dataclasses import dataclass from apischema import serialize from apischema.conversions import Conversion, LazyConversion @dataclass class Foo: elements: list["int | Foo"] def foo_elements(foo: Foo) -> list[int | Foo]: return foo.elements # Recursive conversion pattern tmp = None conversion = Conversion(foo_elements, sub_conversion=LazyConversion(lambda: tmp)) tmp = conversion assert serialize(Foo, Foo([0, Foo([1])]), conversion=conversion) == [0, [1]] # Without the recursive sub-conversion, it would have been: assert serialize(Foo, Foo([0, Foo([1])]), conversion=foo_elements) == [ 0, {"elements": [1]}, ] apischema-0.18.3/examples/recursive_postponned.py000066400000000000000000000004031467672046000222160ustar00rootroot00000000000000from __future__ import annotations from dataclasses import dataclass from apischema import deserialize @dataclass class Node: value: int child: Node | None = None assert deserialize(Node, {"value": 0, "child": {"value": 1}}) == Node(0, Node(1)) apischema-0.18.3/examples/ref_factory.py000066400000000000000000000006511467672046000202460ustar00rootroot00000000000000from dataclasses import dataclass from apischema.json_schema import deserialization_schema @dataclass class Foo: bar: int def ref_factory(ref: str) -> str: return f"http://some-domain.org/path/to/{ref}.json#" assert deserialization_schema(Foo, all_refs=True, ref_factory=ref_factory) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "$ref": "http://some-domain.org/path/to/Foo.json#", } apischema-0.18.3/examples/relay_client_mutation_id.py000066400000000000000000000020671467672046000230140ustar00rootroot00000000000000from dataclasses import dataclass from graphql.utilities import print_schema from apischema.graphql import graphql_schema, relay @dataclass class Ship: ... @dataclass class Faction: ... @dataclass class IntroduceShip(relay.Mutation): ship: Ship faction: Faction @staticmethod def mutate( # mut_id is required because no default value faction_id: str, ship_name: str, mut_id: relay.ClientMutationId, ) -> "IntroduceShip": ... def hello() -> str: return "world" schema = graphql_schema(query=[hello], mutation=relay.mutations()) # clientMutationId field becomes non nullable in introduceShip types schema_str = """\ type Query { hello: String! } type Mutation { introduceShip(input: IntroduceShipInput!): IntroduceShipPayload! } type IntroduceShipPayload { ship: Ship! faction: Faction! clientMutationId: String! } type Ship type Faction input IntroduceShipInput { factionId: String! shipName: String! clientMutationId: String! 
}""" assert print_schema(schema) == schema_str apischema-0.18.3/examples/relay_connection.py000066400000000000000000000035531467672046000213020ustar00rootroot00000000000000from dataclasses import dataclass from typing import Optional, TypeVar import graphql from graphql.utilities import print_schema from apischema.graphql import graphql_schema, relay, resolver Cursor = int # let's use an integer cursor in all our connection Node = TypeVar("Node") Connection = relay.Connection[Node, Cursor, relay.Edge[Node, Cursor]] # Connection can now be used just like Connection[Ship] or Connection[Faction | None] @dataclass class Ship: name: str @dataclass class Faction: @resolver def ships( self, first: int | None, after: Cursor | None ) -> Connection[Optional[Ship]] | None: edges = [relay.Edge(Ship("X-Wing"), 0), relay.Edge(Ship("B-Wing"), 1)] return Connection(edges, relay.PageInfo.from_edges(edges)) def faction() -> Faction | None: return Faction() schema = graphql_schema(query=[faction]) schema_str = """\ type Query { faction: Faction } type Faction { ships(first: Int, after: Int): ShipConnection } type ShipConnection { edges: [ShipEdge] pageInfo: PageInfo! } type ShipEdge { node: Ship cursor: Int! } type Ship { name: String! } type PageInfo { hasPreviousPage: Boolean! hasNextPage: Boolean! startCursor: Int endCursor: Int }""" assert print_schema(schema) == schema_str query = """ { faction { ships { pageInfo { endCursor hasNextPage } edges { cursor node { name } } } } }""" assert graphql.graphql_sync(schema, query).data == { "faction": { "ships": { "pageInfo": {"endCursor": 1, "hasNextPage": False}, "edges": [ {"cursor": 0, "node": {"name": "X-Wing"}}, {"cursor": 1, "node": {"name": "B-Wing"}}, ], } } } apischema-0.18.3/examples/relay_connection_subclass.py000066400000000000000000000023251467672046000231750ustar00rootroot00000000000000from dataclasses import dataclass from typing import Optional, TypeVar from graphql import print_schema from apischema.graphql import graphql_schema, relay, resolver Cursor = int Node = TypeVar("Node") Edge = TypeVar("Edge", bound=relay.Edge) @dataclass class MyConnection(relay.Connection[Node, Cursor, Edge]): connection_field: bool @dataclass class MyEdge(relay.Edge[Node, Cursor]): edge_field: int | None Connection = MyConnection[Node, MyEdge[Node]] @dataclass class Ship: name: str @dataclass class Faction: @resolver def ships( self, first: int | None, after: Cursor | None ) -> Connection[Optional[Ship]] | None: ... def faction() -> Faction | None: return Faction() schema = graphql_schema(query=[faction]) schema_str = """\ type Query { faction: Faction } type Faction { ships(first: Int, after: Int): ShipConnection } type ShipConnection { edges: [ShipEdge] pageInfo: PageInfo! connectionField: Boolean! } type ShipEdge { node: Ship cursor: Int! edgeField: Int } type Ship { name: String! } type PageInfo { hasPreviousPage: Boolean! hasNextPage: Boolean! 
startCursor: Int endCursor: Int }""" assert print_schema(schema) == schema_str apischema-0.18.3/examples/relay_global_id.py000066400000000000000000000015451467672046000210560ustar00rootroot00000000000000from dataclasses import dataclass import graphql from apischema import serialize from apischema.graphql import graphql_schema, relay @dataclass class Faction(relay.Node[int]): name: str @classmethod def get_by_id(cls, id: int, info: graphql.GraphQLResolveInfo = None) -> "Faction": return [Faction(0, "Empire"), Faction(1, "Rebels")][id] schema = graphql_schema(query=[relay.node], types=relay.nodes()) some_global_id = Faction.get_by_id(0).global_id # Let's pick a global id ... assert some_global_id == relay.GlobalId("0", Faction) query = """ query factionName($id: ID!) { node(id: $id) { ... on Faction { name } } }""" assert graphql.graphql_sync( # ... and use it in a query schema, query, variable_values={"id": serialize(relay.GlobalId, some_global_id)} ).data == {"node": {"name": "Empire"}} apischema-0.18.3/examples/relay_mutation.py000066400000000000000000000015731467672046000210030ustar00rootroot00000000000000from dataclasses import dataclass from graphql.utilities import print_schema from apischema.graphql import graphql_schema, relay @dataclass class Ship: ... @dataclass class Faction: ... @dataclass class IntroduceShip(relay.Mutation): ship: Ship faction: Faction @staticmethod def mutate(faction_id: str, ship_name: str) -> "IntroduceShip": ... def hello() -> str: return "world" schema = graphql_schema(query=[hello], mutation=relay.mutations()) schema_str = """\ type Query { hello: String! } type Mutation { introduceShip(input: IntroduceShipInput!): IntroduceShipPayload! } type IntroduceShipPayload { ship: Ship! faction: Faction! clientMutationId: String } type Ship type Faction input IntroduceShipInput { factionId: String! shipName: String! clientMutationId: String }""" assert print_schema(schema) == schema_str apischema-0.18.3/examples/relay_node.py000066400000000000000000000016011467672046000200600ustar00rootroot00000000000000from dataclasses import dataclass from uuid import UUID import graphql from graphql.utilities import print_schema from apischema.graphql import graphql_schema, relay @dataclass class Ship(relay.Node[UUID]): # Let's use an UUID for Ship id name: str @classmethod async def get_by_id(cls, id: UUID, info: graphql.GraphQLResolveInfo = None): ... @dataclass class Faction(relay.Node[int]): # Nodes can have different id types name: str @classmethod def get_by_id(cls, id: int, info: graphql.GraphQLResolveInfo = None) -> "Faction": ... schema = graphql_schema(query=[relay.node], types=relay.nodes()) schema_str = """\ type Ship implements Node { id: ID! name: String! } interface Node { id: ID! } type Faction implements Node { id: ID! name: String! } type Query { node(id: ID!): Node! 
}""" assert print_schema(schema) == schema_str apischema-0.18.3/examples/required.py000066400000000000000000000005711467672046000175640ustar00rootroot00000000000000from dataclasses import dataclass, field import pytest from apischema import ValidationError, deserialize from apischema.metadata import required @dataclass class Foo: bar: int | None = field(default=None, metadata=required) with pytest.raises(ValidationError) as err: deserialize(Foo, {}) assert err.value.errors == [{"loc": ["bar"], "err": "missing property"}] apischema-0.18.3/examples/resolver.py000066400000000000000000000007551467672046000176110ustar00rootroot00000000000000from dataclasses import dataclass from graphql import print_schema from apischema.graphql import graphql_schema, resolver @dataclass class Bar: baz: int @dataclass class Foo: @resolver async def bar(self, arg: int = 0) -> Bar: ... async def foo() -> Foo | None: ... schema = graphql_schema(query=[foo]) schema_str = """\ type Query { foo: Foo } type Foo { bar(arg: Int! = 0): Bar! } type Bar { baz: Int! }""" assert print_schema(schema) == schema_str apischema-0.18.3/examples/resolver_error.py000066400000000000000000000022371467672046000210170ustar00rootroot00000000000000from dataclasses import dataclass from logging import getLogger from typing import Any import graphql from graphql.utilities import print_schema from apischema.graphql import graphql_schema, resolver logger = getLogger(__name__) def log_error( error: Exception, obj: Any, info: graphql.GraphQLResolveInfo, **kwargs ) -> None: logger.error( "Resolve error in %s", ".".join(map(str, info.path.as_list())), exc_info=error ) return None @dataclass class Foo: @resolver(error_handler=log_error) def bar(self) -> int: raise RuntimeError("Bar error") @resolver def baz(self) -> int: raise RuntimeError("Baz error") def foo(info: graphql.GraphQLResolveInfo) -> Foo: return Foo() schema = graphql_schema(query=[foo]) # Notice that bar is Int while baz is Int! schema_str = """\ type Query { foo: Foo! } type Foo { bar: Int baz: Int! }""" assert print_schema(schema) == schema_str # Logs "Resolve error in foo.bar", no error raised assert graphql.graphql_sync(schema, "{foo{bar}}").data == {"foo": {"bar": None}} # Error is raised assert graphql.graphql_sync(schema, "{foo{baz}}").errors[0].message == "Baz error" apischema-0.18.3/examples/resolver_metadata.py000066400000000000000000000012031467672046000214360ustar00rootroot00000000000000from dataclasses import dataclass from typing import Annotated from graphql.utilities import print_schema from apischema import alias, schema from apischema.graphql import graphql_schema, resolver @dataclass class Foo: @resolver def bar( self, param: Annotated[int, alias("arg") | schema(description="argument")] ) -> int: return param def foo() -> Foo: return Foo() schema_ = graphql_schema(query=[foo]) # Notice that bar is Int while baz is Int! schema_str = '''\ type Query { foo: Foo! } type Foo { bar( """argument""" arg: Int! ): Int! }''' assert print_schema(schema_) == schema_str apischema-0.18.3/examples/scalar.py000066400000000000000000000007061467672046000172110ustar00rootroot00000000000000from dataclasses import dataclass from typing import Any from uuid import UUID from graphql.utilities import print_schema from apischema.graphql import graphql_schema @dataclass class Foo: id: UUID content: Any def foo() -> Foo | None: ... schema = graphql_schema(query=[foo]) schema_str = """\ type Query { foo: Foo } type Foo { id: UUID! 
content: JSON } scalar UUID scalar JSON""" assert print_schema(schema) == schema_str apischema-0.18.3/examples/schema.py000066400000000000000000000021451467672046000172030ustar00rootroot00000000000000from dataclasses import dataclass, field from typing import NewType from apischema import schema from apischema.json_schema import deserialization_schema Tag = NewType("Tag", str) schema(min_len=3, pattern=r"^\w*$", examples=["available", "EMEA"])(Tag) @dataclass class Resource: id: int tags: list[Tag] = field( default_factory=list, metadata=schema( description="regroup multiple resources", max_items=3, unique=True ), ) assert deserialization_schema(Resource) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "additionalProperties": False, "properties": { "id": {"type": "integer"}, "tags": { "description": "regroup multiple resources", "items": { "examples": ["available", "EMEA"], "minLength": 3, "pattern": "^\\w*$", "type": "string", }, "maxItems": 3, "type": "array", "uniqueItems": True, "default": [], }, }, "required": ["id"], "type": "object", } apischema-0.18.3/examples/schema_versions.py000066400000000000000000000060651467672046000211400ustar00rootroot00000000000000from dataclasses import dataclass from typing import Literal from apischema.json_schema import ( JsonSchemaVersion, definitions_schema, deserialization_schema, ) @dataclass class Bar: baz: int | None constant: Literal[0] = 0 @dataclass class Foo: bar: Bar assert deserialization_schema(Foo, all_refs=True) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "$ref": "#/$defs/Foo", "$defs": { "Foo": { "type": "object", "properties": {"bar": {"$ref": "#/$defs/Bar"}}, "required": ["bar"], "additionalProperties": False, }, "Bar": { "type": "object", "properties": { "baz": {"type": ["integer", "null"]}, "constant": {"type": "integer", "const": 0, "default": 0}, }, "required": ["baz"], "additionalProperties": False, }, }, } assert deserialization_schema( Foo, all_refs=True, version=JsonSchemaVersion.DRAFT_7 ) == { "$schema": "http://json-schema.org/draft-07/schema#", # $ref is isolated in allOf + draft 7 prefix "allOf": [{"$ref": "#/definitions/Foo"}], "definitions": { # not "$defs" "Foo": { "type": "object", "properties": {"bar": {"$ref": "#/definitions/Bar"}}, "required": ["bar"], "additionalProperties": False, }, "Bar": { "type": "object", "properties": { "baz": {"type": ["integer", "null"]}, "constant": {"type": "integer", "const": 0, "default": 0}, }, "required": ["baz"], "additionalProperties": False, }, }, } assert deserialization_schema(Foo, version=JsonSchemaVersion.OPEN_API_3_1) == { # No definitions for OpenAPI, use definitions_schema for it "$ref": "#/components/schemas/Foo" # OpenAPI prefix } assert definitions_schema( deserialization=[Foo], version=JsonSchemaVersion.OPEN_API_3_1 ) == { "Foo": { "type": "object", "properties": {"bar": {"$ref": "#/components/schemas/Bar"}}, "required": ["bar"], "additionalProperties": False, }, "Bar": { "type": "object", "properties": { "baz": {"type": ["integer", "null"]}, "constant": {"type": "integer", "const": 0, "default": 0}, }, "required": ["baz"], "additionalProperties": False, }, } assert definitions_schema( deserialization=[Foo], version=JsonSchemaVersion.OPEN_API_3_0 ) == { "Foo": { "type": "object", "properties": {"bar": {"$ref": "#/components/schemas/Bar"}}, "required": ["bar"], "additionalProperties": False, }, "Bar": { "type": "object", # "nullable" instead of "type": "null" "properties": { "baz": {"type": "integer", "nullable": True}, "constant": 
{"type": "integer", "enum": [0], "default": 0}, }, "required": ["baz"], "additionalProperties": False, }, } apischema-0.18.3/examples/serialization.py000066400000000000000000000006251467672046000206210ustar00rootroot00000000000000from dataclasses import dataclass from typing import Any from apischema import serialize @dataclass class Foo: bar: str assert serialize(Foo, Foo("baz")) == {"bar": "baz"} assert serialize(tuple[int, int], (0, 1)) == [0, 1] assert ( serialize(Any, {"key": ("value", 42)}) == serialize({"key": ("value", 42)}) == {"key": ["value", 42]} ) assert serialize(Foo("baz")) == {"bar": "baz"} apischema-0.18.3/examples/serialized.py000066400000000000000000000021351467672046000200750ustar00rootroot00000000000000from dataclasses import dataclass from apischema import serialize, serialized from apischema.json_schema import serialization_schema @dataclass class Foo: @serialized @property def bar(self) -> int: return 0 # Serialized method can have default argument @serialized def baz(self, some_arg_with_default: int = 1) -> int: return some_arg_with_default @serialized("aliased") @property def with_alias(self) -> int: return 2 # Serialized method can also be defined outside class, # but first parameter must be annotated @serialized def function(foo: Foo) -> int: return 3 assert serialize(Foo, Foo()) == {"bar": 0, "baz": 1, "aliased": 2, "function": 3} assert serialization_schema(Foo) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": { "aliased": {"type": "integer"}, "bar": {"type": "integer"}, "baz": {"type": "integer"}, "function": {"type": "integer"}, }, "required": ["bar", "baz", "aliased", "function"], "additionalProperties": False, } apischema-0.18.3/examples/serialized_conversions.py000066400000000000000000000007141467672046000225260ustar00rootroot00000000000000import os import time from dataclasses import dataclass from datetime import datetime from apischema import serialize, serialized # Set UTC timezone for example os.environ["TZ"] = "UTC" time.tzset() def to_timestamp(d: datetime) -> int: return int(d.timestamp()) @dataclass class Foo: @serialized(conversion=to_timestamp) def some_date(self) -> datetime: return datetime(1970, 1, 1) assert serialize(Foo, Foo()) == {"some_date": 0} apischema-0.18.3/examples/serialized_error.py000066400000000000000000000015441467672046000213110ustar00rootroot00000000000000from dataclasses import dataclass from logging import getLogger from typing import Any from apischema import serialize, serialized from apischema.json_schema import serialization_schema logger = getLogger(__name__) def log_error(error: Exception, obj: Any, alias: str) -> None: logger.error( "Serialization error in %s.%s", type(obj).__name__, alias, exc_info=error ) return None @dataclass class Foo: @serialized(error_handler=log_error) def bar(self) -> int: raise RuntimeError("Some error") assert serialize(Foo, Foo()) == {"bar": None} # Logs "Serialization error in Foo.bar" assert serialization_schema(Foo) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": {"bar": {"type": ["integer", "null"]}}, "required": ["bar"], "additionalProperties": False, } apischema-0.18.3/examples/serialized_generic.py000066400000000000000000000013231467672046000215670ustar00rootroot00000000000000from dataclasses import dataclass from typing import Generic, TypeVar from apischema import serialized from apischema.json_schema import serialization_schema T = TypeVar("T") U = TypeVar("U") @dataclass class 
Foo(Generic[T]): @serialized def bar(self) -> T: ... @serialized def baz(foo: Foo[U]) -> U: ... @dataclass class FooInt(Foo[int]): ... assert ( serialization_schema(Foo[int]) == serialization_schema(FooInt) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": {"bar": {"type": "integer"}, "baz": {"type": "integer"}}, "required": ["bar", "baz"], "additionalProperties": False, } ) apischema-0.18.3/examples/serialized_undefined.py000066400000000000000000000010001467672046000221040ustar00rootroot00000000000000from dataclasses import dataclass from apischema import Undefined, UndefinedType, serialize, serialized from apischema.json_schema import serialization_schema @dataclass class Foo: @serialized def bar(self) -> int | UndefinedType: return Undefined assert serialize(Foo, Foo()) == {} assert serialization_schema(Foo) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": {"bar": {"type": "integer"}}, "additionalProperties": False, } apischema-0.18.3/examples/serializer_inheritance.py000066400000000000000000000007711467672046000224700ustar00rootroot00000000000000from apischema import serialize, serializer class Foo: pass @serializer def serialize_foo(foo: Foo) -> int: return 0 class Foo2(Foo): pass # Serializer is inherited assert serialize(Foo, Foo()) == serialize(Foo2, Foo2()) == 0 class Bar: @serializer def serialize(self) -> int: return 0 class Bar2(Bar): def serialize(self) -> int: return 1 # Serializer is inherited and overridden assert serialize(Bar, Bar()) == 0 != serialize(Bar2, Bar2()) == 1 apischema-0.18.3/examples/set_object_fields.py000066400000000000000000000013431467672046000214110ustar00rootroot00000000000000from apischema import deserialize, serialize from apischema.json_schema import deserialization_schema from apischema.objects import ObjectField, set_object_fields class Foo: def __init__(self, bar): self.bar = bar set_object_fields(Foo, [ObjectField("bar", int)]) # Fields can also be passed in a factory set_object_fields(Foo, lambda: [ObjectField("bar", int)]) foo = deserialize(Foo, {"bar": 0}) assert isinstance(foo, Foo) and foo.bar == 0 assert serialize(Foo, Foo(0)) == {"bar": 0} assert deserialization_schema(Foo) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": {"bar": {"type": "integer"}}, "required": ["bar"], "additionalProperties": False, } apischema-0.18.3/examples/settings_errors.py000066400000000000000000000005741467672046000212030ustar00rootroot00000000000000import pytest from apischema import ValidationError, deserialize, schema, settings settings.errors.max_items = ( lambda constraint, data: f"too-many-items: {len(data)} > {constraint}" ) with pytest.raises(ValidationError) as err: deserialize(list[int], [0, 1, 2, 3], schema=schema(max_items=3)) assert err.value.errors == [{"loc": [], "err": "too-many-items: 4 > 3"}] apischema-0.18.3/examples/skip.py000066400000000000000000000017031467672046000167100ustar00rootroot00000000000000from dataclasses import dataclass, field from typing import Any from apischema.json_schema import deserialization_schema, serialization_schema from apischema.metadata import skip @dataclass class Foo: bar: Any deserialization_only: Any = field(metadata=skip(serialization=True)) serialization_only: Any = field(default=None, metadata=skip(deserialization=True)) baz: Any = field(default=None, metadata=skip) assert deserialization_schema(Foo) == { "$schema": "http://json-schema.org/draft/2020-12/schema#",
"type": "object", "properties": {"bar": {}, "deserialization_only": {}}, "required": ["bar", "deserialization_only"], "additionalProperties": False, } assert serialization_schema(Foo) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": {"bar": {}, "serialization_only": {}}, "required": ["bar", "serialization_only"], "additionalProperties": False, } apischema-0.18.3/examples/skip_if.py000066400000000000000000000005421467672046000173660ustar00rootroot00000000000000from dataclasses import dataclass, field from typing import Any from apischema import serialize from apischema.metadata import skip @dataclass class Foo: bar: Any = field(metadata=skip(serialization_if=lambda x: not x)) baz: Any = field(default_factory=list, metadata=skip(serialization_default=True)) assert serialize(Foo(False, [])) == {} apischema-0.18.3/examples/strict_union.py000066400000000000000000000016771467672046000204740ustar00rootroot00000000000000from dataclasses import dataclass from typing import Annotated, Any from apischema import schema from apischema.json_schema import deserialization_schema # schema extra can be callable to modify the schema in place def to_one_of(schema: dict[str, Any]): if "anyOf" in schema: schema["oneOf"] = schema.pop("anyOf") OneOf = schema(extra=to_one_of) # or extra can be a dictionary which will update the schema @schema( extra={"$ref": "http://some-domain.org/path/to/schema.json#/$defs/Foo"}, override=True, # override apischema generated schema, using only extra ) @dataclass class Foo: bar: int # Use Annotated with OneOf to make a "strict" Union assert deserialization_schema(Annotated[Foo | int, OneOf]) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "oneOf": [ # oneOf instead of anyOf {"$ref": "http://some-domain.org/path/to/schema.json#/$defs/Foo"}, {"type": "integer"}, ], } apischema-0.18.3/examples/sub_conversions.py000066400000000000000000000015111467672046000211600ustar00rootroot00000000000000from dataclasses import dataclass from typing import Generic, TypeVar from apischema.conversions import Conversion from apischema.json_schema import serialization_schema T = TypeVar("T") class Query(Generic[T]): ... def query_to_list(q: Query[T]) -> list[T]: ... def query_to_scalar(q: Query[T]) -> T | None: ... @dataclass class FooModel: bar: int class Foo: def serialize(self) -> FooModel: ... assert serialization_schema( Query[Foo], conversion=Conversion(query_to_list, sub_conversion=Foo.serialize) ) == { # We get an array of Foo "type": "array", "items": { "type": "object", "properties": {"bar": {"type": "integer"}}, "required": ["bar"], "additionalProperties": False, }, "$schema": "http://json-schema.org/draft/2020-12/schema#", } apischema-0.18.3/examples/subscription.py000066400000000000000000000013541467672046000204700ustar00rootroot00000000000000import asyncio from typing import AsyncIterable import graphql from graphql import print_schema from apischema.graphql import graphql_schema def hello() -> str: return "world" async def events() -> AsyncIterable[str]: yield "bonjour" yield "au revoir" schema = graphql_schema(query=[hello], subscription=[events]) schema_str = """\ type Query { hello: String! } type Subscription { events: String! 
}""" assert print_schema(schema) == schema_str async def test(): subscription = await graphql.subscribe( schema, graphql.parse("subscription {events}") ) assert [event.data async for event in subscription] == [ {"events": "bonjour"}, {"events": "au revoir"}, ] asyncio.run(test()) apischema-0.18.3/examples/subscription_resolve.py000066400000000000000000000020361467672046000222250ustar00rootroot00000000000000import asyncio from dataclasses import dataclass from typing import AsyncIterable import graphql from graphql import print_schema from apischema.graphql import Subscription, graphql_schema def hello() -> str: return "world" async def events() -> AsyncIterable[str]: yield "bonjour" yield "au revoir" @dataclass class Message: body: str # Message can also be used directly as a function schema = graphql_schema( query=[hello], subscription=[Subscription(events, alias="messageReceived", resolver=Message)], ) schema_str = """\ type Query { hello: String! } type Subscription { messageReceived: Message! } type Message { body: String! }""" assert print_schema(schema) == schema_str async def test(): subscription = await graphql.subscribe( schema, graphql.parse("subscription {messageReceived {body}}") ) assert [event.data async for event in subscription] == [ {"messageReceived": {"body": "bonjour"}}, {"messageReceived": {"body": "au revoir"}}, ] asyncio.run(test()) apischema-0.18.3/examples/tagged_union.py000066400000000000000000000025611467672046000204100ustar00rootroot00000000000000from dataclasses import dataclass import pytest from apischema import Undefined, ValidationError, alias, deserialize, schema, serialize from apischema.tagged_unions import Tagged, TaggedUnion, get_tagged @dataclass class Bar: field: str class Foo(TaggedUnion): bar: Tagged[Bar] # Tagged can have metadata like dataclass fields i: Tagged[int] = Tagged(alias("baz") | schema(min=0)) # Instantiate using class fields tagged_bar = Foo.bar(Bar("value")) # you can also use the default constructor, but it's not type-checked assert tagged_bar == Foo(bar=Bar("value")) # All fields that are not tagged are Undefined assert tagged_bar.bar is not Undefined and tagged_bar.i is Undefined # get_tagged allows retrieving the tag and its value # (but the value is not type-checked) assert get_tagged(tagged_bar) == ("bar", Bar("value")) # (De)serialization works as expected assert deserialize(Foo, {"bar": {"field": "value"}}) == tagged_bar assert serialize(Foo, tagged_bar) == {"bar": {"field": "value"}} with pytest.raises(ValidationError) as err: deserialize(Foo, {"unknown": 42}) assert err.value.errors == [{"loc": ["unknown"], "err": "unexpected property"}] with pytest.raises(ValidationError) as err: deserialize(Foo, {"bar": {"field": "value"}, "baz": 0}) assert err.value.errors == [ {"loc": [], "err": "property count greater than 1 (maxProperties)"} ] apischema-0.18.3/examples/tagged_union_graphql_schema.py000066400000000000000000000016001467672046000234470ustar00rootroot00000000000000from dataclasses import dataclass from graphql import graphql_sync from graphql.utilities import print_schema from apischema.graphql import graphql_schema from apischema.tagged_unions import Tagged, TaggedUnion @dataclass class Bar: field: str class Foo(TaggedUnion): bar: Tagged[Bar] baz: Tagged[int] def query(foo: Foo) -> Foo: return foo schema = graphql_schema(query=[query]) schema_str = """\ type Query { query(foo: FooInput!): Foo! } type Foo { bar: Bar baz: Int } type Bar { field: String!
} input FooInput { bar: BarInput baz: Int } input BarInput { field: String! }""" assert print_schema(schema) == schema_str query_str = """ { query(foo: {bar: {field: "value"}}) { bar { field } baz } }""" assert graphql_sync(schema, query_str).data == { "query": {"bar": {"field": "value"}, "baz": None} } apischema-0.18.3/examples/tagged_union_json_schema.py000066400000000000000000000015221467672046000227550ustar00rootroot00000000000000from dataclasses import dataclass from apischema.json_schema import deserialization_schema, serialization_schema from apischema.tagged_unions import Tagged, TaggedUnion @dataclass class Bar: field: str class Foo(TaggedUnion): bar: Tagged[Bar] baz: Tagged[int] assert ( deserialization_schema(Foo) == serialization_schema(Foo) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": { "bar": { "type": "object", "properties": {"field": {"type": "string"}}, "required": ["field"], "additionalProperties": False, }, "baz": {"type": "integer"}, }, "additionalProperties": False, "minProperties": 1, "maxProperties": 1, } ) apischema-0.18.3/examples/type_name.py000066400000000000000000000017051467672046000177250ustar00rootroot00000000000000from dataclasses import dataclass from typing import Annotated from apischema import type_name from apischema.json_schema import deserialization_schema # Type name can be added as a decorator @type_name("Resource") @dataclass class BaseResource: id: int # or using typing.Annotated tags: Annotated[set[str], type_name("ResourceTags")] assert deserialization_schema(BaseResource, all_refs=True) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "$defs": { "Resource": { "type": "object", "properties": { "id": {"type": "integer"}, "tags": {"$ref": "#/$defs/ResourceTags"}, }, "required": ["id", "tags"], "additionalProperties": False, }, "ResourceTags": { "type": "array", "items": {"type": "string"}, "uniqueItems": True, }, }, "$ref": "#/$defs/Resource", } apischema-0.18.3/examples/undefined.py000066400000000000000000000013501467672046000177010ustar00rootroot00000000000000from dataclasses import dataclass from apischema import Undefined, UndefinedType, deserialize, serialize from apischema.json_schema import deserialization_schema @dataclass class Foo: bar: int | UndefinedType = Undefined baz: int | UndefinedType | None = Undefined assert deserialize(Foo, {"bar": 0, "baz": None}) == Foo(0, None) assert deserialize(Foo, {}) == Foo(Undefined, Undefined) assert serialize(Foo, Foo(Undefined, 42)) == {"baz": 42} # Foo.bar and Foo.baz are not required assert deserialization_schema(Foo) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": {"bar": {"type": "integer"}, "baz": {"type": ["integer", "null"]}}, "additionalProperties": False, } apischema-0.18.3/examples/undefined_default.py000066400000000000000000000006671467672046000214170ustar00rootroot00000000000000from graphql import graphql_sync from apischema import Undefined, UndefinedType from apischema.graphql import graphql_schema def arg_is_absent(arg: int | UndefinedType | None = Undefined) -> bool: return arg is Undefined schema = graphql_schema(query=[arg_is_absent]) assert graphql_sync(schema, "{argIsAbsent(arg: null)}").data == {"argIsAbsent": False} assert graphql_sync(schema, "{argIsAbsent}").data == {"argIsAbsent": True} apischema-0.18.3/examples/union_discriminator.py000066400000000000000000000023741467672046000220260ustar00rootroot00000000000000from dataclasses import dataclass from typing 
import Annotated import pytest from apischema import ValidationError, deserialize, discriminator, serialize from apischema.json_schema import deserialization_schema @dataclass class Cat: pass @dataclass class Dog: pass @dataclass class Lizard: pass Pet = Annotated[Cat | Dog | Lizard, discriminator("type", {"dog": Dog})] assert deserialize(Pet, {"type": "dog"}) == Dog() assert deserialize(Pet, {"type": "Cat"}) == Cat() assert serialize(Pet, Dog()) == {"type": "dog"} with pytest.raises(ValidationError) as err: assert deserialize(Pet, {"type": "not a pet"}) assert err.value.errors == [ {"loc": ["type"], "err": "not one of ['dog', 'Cat', 'Lizard'] (oneOf)"} ] assert deserialization_schema(Pet) == { "oneOf": [ {"$ref": "#/$defs/Cat"}, {"$ref": "#/$defs/Dog"}, {"$ref": "#/$defs/Lizard"}, ], "discriminator": {"propertyName": "type", "mapping": {"dog": "#/$defs/Dog"}}, "$defs": { "Dog": {"type": "object", "additionalProperties": False}, "Cat": {"type": "object", "additionalProperties": False}, "Lizard": {"type": "object", "additionalProperties": False}, }, "$schema": "http://json-schema.org/draft/2020-12/schema#", } apischema-0.18.3/examples/union_type_name.py000066400000000000000000000010151467672046000211300ustar00rootroot00000000000000from dataclasses import dataclass from graphql import print_schema from apischema.graphql import graphql_schema @dataclass class Foo: foo: int @dataclass class Bar: bar: int def foo_or_bar() -> Foo | Bar: ... # union_name default value is made explicit here schema = graphql_schema(query=[foo_or_bar], union_name="Or".join) schema_str = """\ type Query { fooOrBar: FooOrBar! } union FooOrBar = Foo | Bar type Foo { foo: Int! } type Bar { bar: Int! }""" assert print_schema(schema) == schema_str apischema-0.18.3/examples/validate.py000066400000000000000000000011241467672046000175300ustar00rootroot00000000000000from dataclasses import dataclass, field import pytest from apischema import ValidationError, schema, validator from apischema.validation import validate @dataclass class Foo: bar: int = field(metadata=schema(min=0, max=10)) baz: int @validator def not_equal(self): if self.bar == self.baz: yield "bar cannot be equal to baz" # validate doesn't validate constraints, only validators validate(Foo(-1, 0)) with pytest.raises(ValidationError) as err: validate(Foo(2, 2)) assert err.value.errors == [{"loc": [], "err": "bar cannot be equal to baz"}] apischema-0.18.3/examples/validation_error.py000066400000000000000000000017071467672046000213110ustar00rootroot00000000000000from dataclasses import dataclass, field from typing import NewType import pytest from apischema import ValidationError, deserialize, schema Tag = NewType("Tag", str) schema(min_len=3, pattern=r"^\w*$", examples=["available", "EMEA"])(Tag) @dataclass class Resource: id: int tags: list[Tag] = field( default_factory=list, metadata=schema( description="regroup multiple resources", max_items=3, unique=True ), ) with pytest.raises(ValidationError) as err: # pytest checks that the exception is raised deserialize( Resource, {"id": 42, "tags": ["tag", "duplicate", "duplicate", "bad&", "_"]} ) assert err.value.errors == [ {"loc": ["tags"], "err": "item count greater than 3 (maxItems)"}, {"loc": ["tags"], "err": "duplicate items (uniqueItems)"}, {"loc": ["tags", 3], "err": "not matching pattern ^\\w*$ (pattern)"}, {"loc": ["tags", 4], "err": "string length lower than 3 (minLength)"}, ] apischema-0.18.3/examples/validator.py000066400000000000000000000011351467672046000177260ustar00rootroot00000000000000from dataclasses import
dataclass import pytest from apischema import ValidationError, deserialize, validator @dataclass class PasswordForm: password: str confirmation: str @validator def password_match(self): # DO NOT use assert if self.password != self.confirmation: raise ValidationError("password doesn't match its confirmation") with pytest.raises(ValidationError) as err: deserialize(PasswordForm, {"password": "p455w0rd", "confirmation": "..."}) assert err.value.errors == [ {"loc": [], "err": "password doesn't match its confirmation"} ] apischema-0.18.3/examples/validator_default.py000066400000000000000000000006051467672046000214330ustar00rootroot00000000000000from dataclasses import dataclass, field from apischema import deserialize, validator validator_run = False @dataclass class Foo: bar: int = field(default=0) @validator(bar) def password_match(self): global validator_run validator_run = True if self.bar < 0: raise ValueError("negative") deserialize(Foo, {}) assert not validator_run apischema-0.18.3/examples/validator_field.py000066400000000000000000000023471467672046000210770ustar00rootroot00000000000000from dataclasses import dataclass, field from enum import Enum import pytest from apischema import ValidationError, deserialize, validator from apischema.objects import get_alias, get_field class Parity(Enum): EVEN = "even" ODD = "odd" @dataclass class NumberWithParity: parity: Parity number: int = field() @validator(number) def check_parity(self): if (self.parity == Parity.EVEN) != (self.number % 2 == 0): yield "number doesn't respect parity" # A field validator is equivalent to a discard argument and all error paths prefixed # with the field alias @validator(discard=number) def check_parity_equivalent(self): if (self.parity == Parity.EVEN) != (self.number % 2 == 0): yield get_alias(self).number, "number doesn't respect parity" @validator(get_field(NumberWithParity).number) def check_parity_other_equivalent(number2: NumberWithParity): if (number2.parity == Parity.EVEN) != (number2.number % 2 == 0): yield "number doesn't respect parity" with pytest.raises(ValidationError) as err: deserialize(NumberWithParity, {"parity": "even", "number": 1}) assert err.value.errors == [{"loc": ["number"], "err": "number doesn't respect parity"}] apischema-0.18.3/examples/validator_function.py000066400000000000000000000014761467672046000216430ustar00rootroot00000000000000from typing import Annotated, NewType import pytest from apischema import ValidationError, deserialize, validator from apischema.metadata import validators Palindrome = NewType("Palindrome", str) @validator # could also use @validator(owner=Palindrome) def check_palindrome(s: Palindrome): for i in range(len(s) // 2): if s[i] != s[-1 - i]: raise ValidationError("Not a palindrome") assert deserialize(Palindrome, "tacocat") == "tacocat" with pytest.raises(ValidationError) as err: deserialize(Palindrome, "palindrome") assert err.value.errors == [{"loc": [], "err": "Not a palindrome"}] # Using Annotated with pytest.raises(ValidationError) as err: deserialize(Annotated[str, validators(check_palindrome)], "palindrom") assert err.value.errors == [{"loc": [], "err": "Not a palindrome"}] apischema-0.18.3/examples/validator_inheritance.py000066400000000000000000000012551467672046000223020ustar00rootroot00000000000000from dataclasses import dataclass import pytest from apischema import ValidationError, deserialize, validator @dataclass class PasswordForm: password: str confirmation: str @validator def password_match(self): if self.password != self.confirmation: raise 
ValidationError("password doesn't match its confirmation") @dataclass class CompleteForm(PasswordForm): username: str with pytest.raises(ValidationError) as err: deserialize( CompleteForm, {"username": "wyfo", "password": "p455w0rd", "confirmation": "..."}, ) assert err.value.errors == [ {"loc": [], "err": "password doesn't match its confirmation"} ] apischema-0.18.3/examples/validator_post_init.py000066400000000000000000000007571467672046000220270ustar00rootroot00000000000000from dataclasses import InitVar, dataclass, field import pytest from apischema import ValidationError, deserialize, validator from apischema.metadata import init_var @dataclass class Foo: bar: InitVar[int] = field(metadata=init_var(int)) @validator(bar) def validate(self, bar: int): if bar < 0: yield "negative" with pytest.raises(ValidationError) as err: deserialize(Foo, {"bar": -1}) assert err.value.errors == [{"loc": ["bar"], "err": "negative"}] apischema-0.18.3/examples/validator_yield.py000066400000000000000000000015231467672046000211150ustar00rootroot00000000000000from dataclasses import dataclass from ipaddress import IPv4Address, IPv4Network import pytest from apischema import ValidationError, deserialize, validator from apischema.objects import get_alias @dataclass class SubnetIps: subnet: IPv4Network ips: list[IPv4Address] @validator def check_ips_in_subnet(self): for index, ip in enumerate(self.ips): if ip not in self.subnet: # yield <error_path>, <error_message> yield (get_alias(self).ips, index), "ip not in subnet" with pytest.raises(ValidationError) as err: deserialize( SubnetIps, {"subnet": "126.42.18.0/24", "ips": ["126.42.18.1", "126.42.19.0", "0.0.0.0"]}, ) assert err.value.errors == [ {"loc": ["ips", 1], "err": "ip not in subnet"}, {"loc": ["ips", 2], "err": "ip not in subnet"}, ] apischema-0.18.3/mkdocs.yml000066400000000000000000000026551467672046000155640ustar00rootroot00000000000000site_name: apischema repo_name: wyfo/apischema repo_url: https://github.com/wyfo/apischema theme: name: material palette: - media: "(prefers-color-scheme: light)" scheme: default primary: green accent: indigo toggle: icon: material/brightness-4 name: "Switch to dark mode" - media: "(prefers-color-scheme: dark)" scheme: slate primary: green accent: indigo toggle: icon: material/brightness-7 name: "Switch to light mode" nav: - index.md - data_model.md - de_serialization.md - json_schema.md - validation.md - Conversions: conversions.md - GraphQL: - Overview: graphql/overview.md - graphql/data_model_and_resolvers.md - graphql/schema.md - graphql/relay.md - Examples: - examples/sqlalchemy_support.md - examples/pydantic_support.md - examples/attrs_support.md - examples/subclass_union.md - examples/subclass_tagged_union.md - examples/recoverable_fields.md - examples/inherited_deserializer.md - optimizations_and_benchmark.md - difference_with_pydantic.md - Releases: https://github.com/wyfo/apischema/releases markdown_extensions: - pymdownx.highlight: anchor_linenums: true - pymdownx.inlinehilite - pymdownx.snippets - pymdownx.superfences - markdown_include.include: base_path: examples - toc: permalink: True # toc_depth: 3 - admonition plugins: - search - exclude: glob: - requirements.txt extra: version: provider: mike apischema-0.18.3/pyproject.toml000066400000000000000000000025301467672046000164650ustar00rootroot00000000000000[build-system] requires = ["setuptools==75.1.0", "wheel~=0.44.0"] build-backend = "setuptools.build_meta" [project] name = "apischema" version = "0.19.0" authors = [{ name = "Joseph Perez", email = "joperez@hotmail.fr" }] license =
{ text = "MIT" } description = "JSON (de)serialization, GraphQL and JSON schema generation using Python typing." readme = "README.md" requires-python = ">=3.9" classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Topic :: Software Development :: Libraries :: Python Modules", ] [project.urls] Repository = "https://github.com/wyfo/apischema" Documentation = "https://wyfo.github.io/apischema" [project.optional-dependencies] graphql = ["graphql-core>=3.0.0"] examples = [ "graphql-core>=3.0.0", "attrs", "docstring_parser", "bson", "orjson", "pydantic", "pytest", "sqlalchemy", ] [tool.setuptools.packages.find] include = ["apischema*"] [tool.setuptools.package-data] apischema = ["py.typed"] "apischema.deserialization" = ["methods.c"] "apischema.serialization" = ["methods.c"] apischema-0.18.3/scripts/000077500000000000000000000000001467672046000152405ustar00rootroot00000000000000apischema-0.18.3/scripts/cythonize.py000077500000000000000000000274761467672046000176510ustar00rootroot00000000000000#!/usr/bin/env python3 import collections.abc import dataclasses import importlib import inspect import re import sys from contextlib import contextmanager from dataclasses import dataclass from functools import lru_cache from pathlib import Path from types import FunctionType from typing import ( AbstractSet, Any, Callable, Iterable, List, Mapping, Match, NamedTuple, Optional, Pattern, TextIO, Tuple, Type, TypeVar, Union, get_type_hints, ) from Cython.Build import cythonize try: from typing import Literal CythonDef = Literal["cdef", "cpdef", "cdef inline", "cpdef inline"] except ImportError: CythonDef = str # type: ignore ROOT_DIR = Path(__file__).parent.parent DISPATCH_FIELD = "_dispatch" CYTHON_TYPES = { type: "type", bytes: "bytes", bytearray: "bytearray", bool: "bint", str: "str", tuple: "tuple", Tuple: "tuple", list: "list", int: "long", dict: "dict", Mapping: "dict", collections.abc.Mapping: "dict", set: "set", AbstractSet: "set", collections.abc.Set: "set", } Elt = TypeVar("Elt", type, FunctionType) @lru_cache() def module_elements(module: str, cls: Type[Elt]) -> Iterable[Elt]: return [ obj for obj in importlib.import_module(module).__dict__.values() if isinstance(obj, cls) and obj.__module__ == module ] @lru_cache() def module_type_mapping(module: str) -> Mapping[type, str]: mapping = CYTHON_TYPES.copy() for cls in module_elements(module, type): mapping[cls] = cls.__name__ mapping[Optional[cls]] = cls.__name__ if sys.version_info >= (3, 10): mapping[cls | None] = cls.__name__ # type: ignore return mapping # type: ignore def method_name(cls: type, method: str) -> str: return f"{cls.__name__}_{method}" def cython_type(tp: Any, module: str) -> str: return module_type_mapping(module).get(getattr(tp, "__origin__", tp), "object") def cython_signature( def_type: CythonDef, func: FunctionType, self_type: Optional[type] = None ) -> str: parameters = list(inspect.signature(func).parameters.values()) types = get_type_hints(func) param_with_types = [] if parameters[0].name == "self": if self_type is not None: types["self"] = self_type else: param_with_types.append("self") parameters.pop(0) for param in parameters: param_type = cython_type(types[param.name], func.__module__) assert param.default is inspect.Parameter.empty or 
param.default is None param_with_types.append( f"{param_type} {param.name}" + (" = None" if param.default is None else "") ) func_name = method_name(self_type, func.__name__) if self_type else func.__name__ return f"{def_type} {func_name}(" + ", ".join(param_with_types) + "):" class IndentedWriter: def __init__(self, file: TextIO): self.file = file self.indentation = "" def write(self, txt: str): self.file.write(txt) def writelines(self, lines: Iterable[str]): self.file.writelines(lines) def writeln(self, txt: str = ""): self.write((self.indentation + txt + "\n") if txt else "\n") @contextmanager def indent(self): self.indentation += 4 * " " yield self.indentation = self.indentation[:-4] @contextmanager def write_block(self, txt: str): self.writeln(txt) with self.indent(): yield def rec_subclasses(cls: type) -> Iterable[type]: for sub_cls in cls.__subclasses__(): yield sub_cls yield from rec_subclasses(sub_cls) @lru_cache() def get_dispatch(base_class: type) -> Mapping[type, int]: return {cls: i for i, cls in enumerate(rec_subclasses(base_class))} class Method(NamedTuple): base_class: type function: FunctionType @property def name(self) -> str: return self.function.__name__ @lru_cache() def module_methods(module: str) -> Mapping[str, Method]: all_methods = [ Method(cls, func) # type: ignore for cls in module_elements(module, type) if cls.__bases__ == (object,) and cls.__subclasses__() for func in cls.__dict__.values() if isinstance(func, FunctionType) and not func.__name__.startswith("_") ] methods_by_name = {method.name: method for method in all_methods} assert len(methods_by_name) == len( all_methods ), "method substitution requires unique method names" return methods_by_name ReRepl = Callable[[Match], str] @dataclass class LineSubstitutor: lines: Iterable[str] def __call__(self, pattern: Pattern) -> Callable[[ReRepl], ReRepl]: def decorator(repl: ReRepl) -> ReRepl: self.lines = (re.sub(pattern, repl, l) for l in self.lines) return repl return decorator def get_body(func: FunctionType, cls: Optional[type] = None) -> Iterable[str]: lines, _ = inspect.getsourcelines(func) line_iter = iter(lines) for line in line_iter: if line.split("#")[0].rstrip().endswith(":"): break else: raise NotImplementedError substitutor = LineSubstitutor(line_iter) if cls is not None: @substitutor(re.compile(r"super\(\)\.(\w+)\(")) def replace_super(match: Match) -> str: assert cls is not None super_cls = cls.__bases__[0].__name__ return f"{super_cls}_{match.group(1)}(<{super_cls}>self, " @substitutor( re.compile( r"(\s+)for ((\w+) in self\.(\w+)|(\w+), (\w+) in enumerate\(self\.(\w+)\)):" ) ) def replace_for_loop(match: Match) -> str: assert cls is not None tab = match.group(1) index = match.group(5) or "__i" elt = match.group(3) or match.group(6) field = match.group(4) or match.group(7) field_type = get_type_hints(cls)[field] assert ( field_type.__origin__ in (Tuple, tuple) and field_type.__args__[1] is ... 
) elt_type = cython_type(field_type.__args__[0], func.__module__) return f"{tab}for {index} in range(len(self.{field})):\n{tab} {elt}: {elt_type} = self.{field}[{index}]" @substitutor(re.compile(r"^(\s+\w+:)([^#=]*)(?==)")) def replace_variable_annotations(match: Match) -> str: tp = eval(match.group(2), func.__globals__) return match.group(1) + cython_type(tp, func.__module__) methods = module_methods(func.__module__) method_names = "|".join(methods) @substitutor(re.compile(rf"([\w.]+)\.({method_names})\(")) def replace_method(match: Match) -> str: self, name = match.groups() cls, _ = methods[name] return f"{cls.__name__}_{name}({self}, " return substitutor.lines def import_lines(path: Union[str, Path]) -> Iterable[str]: # could also be retrieved with ast with open(path) as field: for line in field: if not line.strip() or any( # " " and ")" because of multiline imports map(line.startswith, ("from ", "import ", " ", ")")) ): yield line else: break def write_class(pyx: IndentedWriter, cls: type): bases = ", ".join(b.__name__ for b in cls.__bases__ if b is not object) with pyx.write_block(f"cdef class {cls.__name__}({bases}):"): annotations = cls.__dict__.get("__annotations__", {}) for name, tp in get_type_hints(cls).items(): if name in annotations: pyx.writeln(f"cdef readonly {cython_type(tp, cls.__module__)} {name}") dispatch = None if cls.__bases__ == (object,): if cls.__subclasses__(): pyx.writeln(f"cdef int {DISPATCH_FIELD}") else: base_class = cls.__mro__[-2] dispatch = get_dispatch(base_class)[cls] for name, obj in cls.__dict__.items(): if ( not name.startswith("_") and name not in annotations and isinstance(obj, FunctionType) ): pyx.writeln() base_method = getattr(base_class, name) with pyx.write_block(cython_signature("cpdef", base_method)): args = ", ".join(inspect.signature(base_method).parameters) pyx.writeln(f"return {cls.__name__}_{name}({args})") if annotations or dispatch is not None: pyx.writeln() init_fields: List[str] = [] if dataclasses.is_dataclass(cls): init_fields.extend( field.name for field in dataclasses.fields(cls) if field.init ) with pyx.write_block( "def __init__(" + ", ".join(["self"] + init_fields) + "):" ): for name in init_fields: pyx.writeln(f"self.{name} = {name}") if hasattr(cls, "__post_init__"): lines, _ = inspect.getsourcelines(cls.__post_init__) pyx.writelines(lines[1:]) if dispatch is not None: pyx.writeln(f"self.{DISPATCH_FIELD} = {dispatch}") def write_function(pyx: IndentedWriter, func: FunctionType): pyx.writeln(cython_signature("cpdef inline", func)) pyx.writelines(get_body(func)) def write_methods(pyx: IndentedWriter, method: Method): for cls, dispatch in get_dispatch(method.base_class).items(): if method.name in cls.__dict__: sub_method = cls.__dict__[method.name] with pyx.write_block(cython_signature("cdef inline", sub_method, cls)): pyx.writelines(get_body(sub_method, cls)) pyx.writeln() def write_dispatch(pyx: IndentedWriter, method: Method): with pyx.write_block(cython_signature("cdef inline", method.function, method.base_class)): # type: ignore pyx.writeln(f"cdef int {DISPATCH_FIELD} = self.{DISPATCH_FIELD}") for cls, dispatch in get_dispatch(method.base_class).items(): if method.name in cls.__dict__: if_ = "if" if dispatch == 0 else "elif" with pyx.write_block(f"{if_} {DISPATCH_FIELD} == {dispatch}:"): self, *params = inspect.signature(method.function).parameters args = ", ".join([f"<{cls.__name__}>{self}", *params]) pyx.writeln(f"return {method_name(cls, method.name)}({args})") def generate(package: str) -> str: module = 
f"apischema.{package}.methods" pyx_file_name = ROOT_DIR / "apischema" / package / "methods.pyx" with open(pyx_file_name, "w") as pyx_file: pyx = IndentedWriter(pyx_file) pyx.writeln("cimport cython") pyx.writeln("from cpython cimport *") pyx.writelines(import_lines(ROOT_DIR / "apischema" / package / "methods.py")) for cls in module_elements(module, type): write_class(pyx, cls) # type: ignore pyx.writeln() for func in module_elements(module, FunctionType): if not func.__name__.startswith("Py"): write_function(pyx, func) # type: ignore pyx.writeln() methods = module_methods(module) for method in methods.values(): write_methods(pyx, method) for method in methods.values(): write_dispatch(pyx, method) pyx.writeln() return str(pyx_file_name) packages = ["deserialization", "serialization"] def main(): # remove compiled before generate, because .so would be imported otherwise for ext in ["so", "pyd"]: for file in (ROOT_DIR / "apischema").glob(f"**/*.{ext}"): file.unlink() sys.path.append(str(ROOT_DIR)) cythonize(list(map(generate, packages)), language_level=3) if __name__ == "__main__": main() apischema-0.18.3/scripts/cythonize.sh000077500000000000000000000001471467672046000176150ustar00rootroot00000000000000#!/usr/bin/env bash python3 -m pip install -r $(dirname $0)/requirements.txt $(dirname $0)/cythonize.pyapischema-0.18.3/scripts/generate_readme.py000077500000000000000000000032761467672046000207340ustar00rootroot00000000000000#!/usr/bin/env python3 import pathlib import re import sys ROOT_DIR = pathlib.Path(__file__).parent.parent README = ROOT_DIR / "README.md" INDEX = ROOT_DIR / "docs" / "index.md" QUICKSTART = ROOT_DIR / "examples" / "quickstart.py" USED_FILES = {str(path.relative_to(ROOT_DIR)) for path in (INDEX, QUICKSTART)} def main(): content = INDEX.read_text() # Set title content = re.sub(r"# Overview\s*## apischema", "# apischema", content) # Remove FAQ content = content[: content.index("## FAQ")] # Remove admonitions content = re.sub(r"!!! note\n\s*(.*)\n", lambda m: f"> {m.group(1)}\n", content) # Add chart # TODO remove this unused part? content = content.replace( r"", "\n".join( "![benchmark chart](https://wyfo.github.io/apischema/dev/" f"benchmark_chart_{theme}#gh-{theme}-mode-only)" for theme in ("light", "dark") ), ) # Uncomment content = re.sub(r"", lambda m: m.group(1), content) # TODO remove this unused part? 
content = re.sub( r"(\d+\.\d+)/benchmark_chart\.svg", "dev/benchmark_chart.svg", content ) # Rewrite links content = re.sub( r"\(([\w/]+)\.(md|svg)(#[\w-]+)?\)", lambda m: f"(https://wyfo.github.io/apischema/dev/{m.group(1)}" + (".svg" if m.group(2) == "svg" else "") + (m.group(3) or "") + ")", content, ) # Add quickstart content = re.sub( "```python(.|\n)*?```", f"```python\n{QUICKSTART.read_text()}```", content ) README.write_text(content) if __name__ == "__main__": if not set(sys.argv).isdisjoint(USED_FILES): main() apischema-0.18.3/scripts/generate_tests_from_examples.py000077500000000000000000000076111467672046000235570ustar00rootroot00000000000000#!/usr/bin/env python3 import os import re import sys from itertools import takewhile from pathlib import Path from shutil import rmtree from typing import Iterable, Iterator, Tuple ROOT_DIR = Path(__file__).parent.parent EXAMPLES_PATH = ROOT_DIR / "examples" GENERATED_PATH = ROOT_DIR / "tests" / "__generated__" with open(ROOT_DIR / "scripts" / "test_wrapper.py") as wrapper_file: before_lines = [*takewhile(lambda l: not l.startswith("##"), wrapper_file), "##\n"] after_lines = ["##\n", *wrapper_file] def iter_paths() -> Iterator[Tuple[Path, Path]]: for example_path in EXAMPLES_PATH.glob("**/*.py"): if example_path.name == "__init__.py": continue relative_path = example_path.relative_to(EXAMPLES_PATH) test_dir = GENERATED_PATH / relative_path.parent test_dir.mkdir(parents=True, exist_ok=True) yield example_path, test_dir / f"test_{relative_path.name}" INDENTATION = 4 * " " union_regex = re.compile(r"..(\w+(\[.+?\])? \| )+(\w+)") # regex is not recursive and thus cannot catch things like Connection[Ship | None] | None try: from re import Match except ImportError: Match = ... # type: ignore def replace_union(match: Match) -> str: args = list(map(str.strip, match.group(0)[2:].split("|"))) if match.group(0)[0] == "=" and args[-1] != "None": # graphql types return match.group(0) joined = ", ".join(args) return match.group(0)[:2] + f"Union[{joined}]" def handle_union(line: str) -> str: return union_regex.sub(replace_union, line) def main(): if GENERATED_PATH.exists(): rmtree(GENERATED_PATH) GENERATED_PATH.mkdir(parents=True) for example_path, test_path in iter_paths(): example: Iterable[str] with open(example_path) as example: with open(test_path, "w") as test: if ( sys.version_info < (3, 10) or os.getenv("TOXENV", None) != "py310" or True ): example = map(handle_union, example) # 3.9 compatibility is added after __future__ import # However, Annotated/Literal/etc. can be an issue first_line = next(example) if first_line.startswith("from __future__ import"): test.write(first_line) test.writelines(before_lines) else: test.writelines(before_lines) test.write(first_line) test_count = 0 while example: # Classes must be declared in global namespace in order to get # get_type_hints and is_method to work # Test function begin at the first assertion. 
for line in example: if line.startswith("assert ") or line.startswith( "with raises(" ): test.write(f"def {test_path.stem}{test_count}():\n") test.write(INDENTATION + line) break test.write(line) else: break cur_indent = INDENTATION for line in example: if any(line.startswith(s) for s in ("class ", "@")): test.write(line) test_count += 1 break test.write(cur_indent + line) if '"""' in line: cur_indent = "" if cur_indent else INDENTATION else: break test.writelines(after_lines) for path in GENERATED_PATH.glob("**"): if path.is_dir(): open(path / "__init__.py", "w").close() if __name__ == "__main__": main() apischema-0.18.3/scripts/requirements.txt000066400000000000000000000000711467672046000205220ustar00rootroot00000000000000Cython==3.0.11 setuptools==75.1.0;python_version>="3.12" apischema-0.18.3/scripts/sort_all.py000077500000000000000000000015551467672046000174420ustar00rootroot00000000000000#!/usr/bin/env python3 import pathlib import re import sys PATH = pathlib.Path(__file__) ROOT_DIR = PATH.parent.parent ALL_REGEX = re.compile(r"__all__ = \[(.|\n)*?\]") WORD_REGEX = re.compile(r"\"\w+\"") def sort_all(match: re.Match) -> str: s = match.group() assert s.startswith("__all__ = [") words = sorted(WORD_REGEX.findall(s)) if len("__all__ = []") + sum(map(len, words)) + 2 * (len(words) - 1) > 88: return "__all__ = [\n " + ",\n ".join(words) + ",\n]" else: return "__all__ = [" + ", ".join(words) + "]" def main(): for filename in sys.argv[1:]: path = ROOT_DIR / filename if path == PATH: continue text = path.read_text() new_text = ALL_REGEX.sub(sort_all, text) if new_text != text: path.write_text(new_text) if __name__ == "__main__": main() apischema-0.18.3/scripts/test_wrapper.py000066400000000000000000000044461467672046000203410ustar00rootroot00000000000000# flake8: noqa # type: ignore import inspect import json import sys import timeit import typing from typing import * from unittest.mock import MagicMock import pytest from apischema import settings from apischema.typing import Annotated, get_args, get_origin, is_type typing.get_origin, typing.get_args = get_origin, get_args typing.Annotated = Annotated if "include_extras" not in inspect.signature(typing.get_type_hints).parameters: gth = typing.get_type_hints def get_type_hints(*args, include_extras=False, **kwargs): return gth(*args, **kwargs) typing.get_type_hints = get_type_hints inspect.isclass = is_type if sys.version_info < (3, 9): class CollectionABC: def __getattribute__(self, name): return globals()[name] if name in globals() else MagicMock() sys.modules["collections.abc"] = CollectionABC() del CollectionABC class Wrapper: def __init__(self, cls): self.cls = cls self.implem = cls.__origin__ def __getitem__(self, item): return self.cls[item] def __call__(self, *args, **kwargs): return self.implem(*args, **kwargs) def __instancecheck__(self, instance): return isinstance(instance, self.implem) def __subclasscheck__(self, subclass): return issubclass(subclass, self.implem) for cls in (Dict, List, Set, FrozenSet, Tuple, Type): # type: ignore # noqa wrapper = Wrapper(cls) globals()[wrapper.implem.__name__] = wrapper Set = AbstractSet del Wrapper __timeit = timeit.timeit timeit.timeit = lambda stmt, number=None, **kwargs: __timeit(stmt, number=1, **kwargs) sys.modules["orjson"] = json settings_classes = ( settings, settings.errors, settings.base_schema, settings.deserialization, settings.serialization, ) settings_dicts = {cls: dict(cls.__dict__) for cls in settings_classes} ## test body def set_settings(dicts: Mapping[type, Mapping[str, 
Any]]): for cls, dict_ in dicts.items(): for key, value in dict_.items(): if not key.startswith("_"): setattr(cls, key, value) test_dicts = {cls: dict(cls.__dict__) for cls in settings_classes} set_settings(settings_dicts) @pytest.fixture(autouse=True) def test_settings(monkeypatch): set_settings(test_dicts) yield set_settings(settings_dicts) apischema-0.18.3/setup.cfg000066400000000000000000000006251467672046000153750ustar00rootroot00000000000000[flake8] max-line-length = 88 ignore = E203, E302, E501, W503, E731, E741, F402 [isort] profile = black [tool:pytest] asyncio_mode = auto [coverage:report] ;fail_under = 100 precision = 2 exclude_lines = py36 pragma: no cover ^\s*\.\.\.$ raise NotImplementedError except ImportError if TYPE_CHECKING stop_signature_abuse() if sys.version_info def __getattr__(name): apischema-0.18.3/setup.py000066400000000000000000000007021467672046000152620ustar00rootroot00000000000000import os import platform from setuptools import Extension, setup ext_modules = [ Extension( f"apischema.{package}.methods", sources=[f"apischema/{package}/methods.c"], optional=True, ) for package in ("deserialization", "serialization") # Cythonization makes apischema slower using PyPy if platform.python_implementation() != "PyPy" and "NO_EXTENSION" not in os.environ ] setup(ext_modules=ext_modules) apischema-0.18.3/tests/000077500000000000000000000000001467672046000147135ustar00rootroot00000000000000apischema-0.18.3/tests/__init__.py000066400000000000000000000000001467672046000170120ustar00rootroot00000000000000apischema-0.18.3/tests/conftest.py000066400000000000000000000011141467672046000171070ustar00rootroot00000000000000import sys from apischema.graphql import relay from apischema.graphql.relay import global_identification relay.Node._node_key = classmethod( # type: ignore lambda cls: f"{cls.__module__}.{cls.__name__}" ) nodes_wrapped = relay.nodes def nodes(): exclude = set() for node_cls in global_identification._tmp_nodes: # The module currently imported should not have schema defined if hasattr(sys.modules[node_cls.__module__], "schema"): exclude.add(node_cls) return [cls for cls in nodes_wrapped() if cls not in exclude] relay.nodes = nodes apischema-0.18.3/tests/integration/000077500000000000000000000000001467672046000172365ustar00rootroot00000000000000apischema-0.18.3/tests/integration/__init__.py000066400000000000000000000000001467672046000213350ustar00rootroot00000000000000apischema-0.18.3/tests/integration/test_aliased_resolvers.py000066400000000000000000000014061467672046000243560ustar00rootroot00000000000000from typing import Optional from graphql import graphql_sync from apischema.graphql import graphql_schema def foo(test: int) -> int: return test def bar(my_test: int) -> int: return my_test def baz(my_test: Optional[int]) -> int: return my_test or 1 schema = graphql_schema(query=[foo, bar, baz]) def test_no_alias_needed(): query = """ { foo(test: 4) } """ assert graphql_sync(schema, query).data == {"foo": 4} def test_aliased_parameter(): query = """ { bar(myTest: 5) } """ assert graphql_sync(schema, query).data == {"bar": 5} def test_aliased_optional_parameter(): query = """ { baz(myTest: 6) } """ assert graphql_sync(schema, query).data == {"baz": 6} apischema-0.18.3/tests/integration/test_annotated_schema.py000066400000000000000000000043031467672046000241440ustar00rootroot00000000000000from dataclasses import dataclass, field from graphql.utilities import print_schema from apischema import schema, type_name from apischema.graphql import graphql_schema from 
apischema.json_schema import deserialization_schema, serialization_schema from apischema.typing import Annotated @dataclass class A: a: Annotated[ int, schema(max=10), schema(description="type description"), type_name("someInt"), schema(description="field description"), ] = field(metadata=schema(min=0)) def a() -> A: # type: ignore ... def test_annotated_schema(): assert ( deserialization_schema(A) == serialization_schema(A) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": { "a": { "type": "integer", "maximum": 10, "minimum": 0, "description": "field description", } }, "required": ["a"], "additionalProperties": False, } ) assert ( deserialization_schema(A, all_refs=True) == serialization_schema(A, all_refs=True) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "$ref": "#/$defs/A", "$defs": { "A": { "additionalProperties": False, "properties": { "a": { "$ref": "#/$defs/someInt", "description": "field description", "minimum": 0, } }, "required": ["a"], "type": "object", }, "someInt": { "description": "type description", "maximum": 10, "type": "integer", }, }, } ) assert ( print_schema(graphql_schema(query=[a])) == '''\ type Query { a: A! } type A { """field description""" a: someInt! } """type description""" scalar someInt''' ) apischema-0.18.3/tests/integration/test_collections_implies_tuple_passthrough.py000066400000000000000000000007571467672046000305600ustar00rootroot00000000000000from typing import Tuple import pytest from apischema import PassThroughOptions, serialize @pytest.mark.parametrize( "pass_through, expected_cls", [ (None, list), (PassThroughOptions(tuple=True), tuple), (PassThroughOptions(collections=True), tuple), ], ) def test_collections_implies_tuple_passthrough(pass_through, expected_cls): obj = (0, "") assert serialize(Tuple[int, str], obj, pass_through=pass_through) == expected_cls( obj ) apischema-0.18.3/tests/integration/test_default_conversion_type_name.py000066400000000000000000000023311467672046000266000ustar00rootroot00000000000000from dataclasses import dataclass from graphql.utilities import print_schema from apischema import serializer from apischema.graphql import graphql_schema from apischema.json_schema import serialization_schema @dataclass class A: a: int @dataclass class B: b: int @serializer def to_a(self) -> A: return A(self.b) def b() -> B: return B(0) def test_default_conversion_type_name(): assert ( print_schema(graphql_schema(query=[b])) == """\ type Query { b: B! } type B { a: Int! 
}""" ) assert serialization_schema(B, all_refs=True) == { "$ref": "#/$defs/B", "$defs": { "B": {"$ref": "#/$defs/A"}, "A": { "type": "object", "properties": {"a": {"type": "integer"}}, "required": ["a"], "additionalProperties": False, }, }, "$schema": "http://json-schema.org/draft/2020-12/schema#", } assert serialization_schema(B) == { "type": "object", "properties": {"a": {"type": "integer"}}, "required": ["a"], "additionalProperties": False, "$schema": "http://json-schema.org/draft/2020-12/schema#", } apischema-0.18.3/tests/integration/test_descriptor_converters.py000066400000000000000000000010531467672046000252760ustar00rootroot00000000000000from dataclasses import dataclass from apischema import deserialize, deserializer, serialize, serializer @dataclass class A: a: int @deserializer @staticmethod def from_int(a: int) -> "A": return A(a) @serializer def to_int(self) -> int: return self.a @dataclass class B: b: int @serializer # type: ignore @property def as_int(self) -> int: return self.b def test_descriptor_converters(): assert deserialize(A, 0) == A(0) assert serialize(A, A(0)) == serialize(B(0)) == 0 apischema-0.18.3/tests/integration/test_deserialization_pass_through.py000066400000000000000000000022621467672046000266250ustar00rootroot00000000000000import sys from dataclasses import dataclass import pytest from apischema import ValidationError, deserialization_method, deserialize, validator def validate_checksum(b: bytes): if b and sum(b[:-1]) % 255 != int(b[-1]): raise ValidationError("Invalid checksum") valid_bytes = b"toto" + (sum(b"toto") % 255).to_bytes(1, byteorder=sys.byteorder) invalid_bytes = b"toto" + (sum(b"toto") % 255 + 42).to_bytes(1, byteorder=sys.byteorder) def test_pass_through_run_upper_validators(): method = deserialization_method( bytes, pass_through={bytes}, validators=[validate_checksum] ) assert method(valid_bytes) is valid_bytes with pytest.raises(ValidationError): method(invalid_bytes) @dataclass class MyClass: field: int @validator def field_is_not_zero(self): if self.field == 0: raise ValidationError("ZERO!") def test_pass_through_doesnt_run_type_validators(): obj = MyClass(0) method = deserialization_method(MyClass, pass_through={MyClass}) assert method(obj) is obj with pytest.raises(ValidationError): method({"fields": 0}) deserialize(MyClass, {"field": 0}, pass_through={MyClass}) apischema-0.18.3/tests/integration/test_deserialize_with_coercion.py000066400000000000000000000012541467672046000260650ustar00rootroot00000000000000from __future__ import annotations import json from dataclasses import dataclass from typing import Any, Mapping, Sequence, Union from apischema import deserialize def _coerce_json(cls, data): if not isinstance(data, cls) and isinstance(data, str): return json.loads(data) else: return data @dataclass class MyClass: my_property: Union[Sequence[str], Mapping[str, Any]] def test_coerce_json(): key = "test" value = 2 ret = deserialize( MyClass, {"my_property": f'{{"{key}": {value}}}'}, coerce=_coerce_json ) assert isinstance(ret, MyClass) assert isinstance(ret.my_property, dict) and ret.my_property[key] == value apischema-0.18.3/tests/integration/test_deserializer_registration_reset_deserialization_cache.py000066400000000000000000000007251467672046000337220ustar00rootroot00000000000000import pytest from apischema import ValidationError, deserialize, deserializer from apischema.conversions import Conversion, catch_value_error class Foo(int): pass def test_deserializer_registration_reset_deserialization_cache(): assert deserialize(Foo, 1) 
== Foo(1) deserializer(Conversion(catch_value_error(Foo), source=str, target=Foo)) assert deserialize(Foo, "1") == Foo(1) with pytest.raises(ValidationError): deserialize(Foo, 1) apischema-0.18.3/tests/integration/test_dict.py000066400000000000000000000045361467672046000216020ustar00rootroot00000000000000import sys from datetime import date from typing import Any, Dict, Mapping, TypedDict import pytest from apischema import ValidationError, deserialize, serialize from apischema.json_schema import deserialization_schema, serialization_schema from apischema.metadata import flatten from apischema.typing import Annotated if sys.version_info < (3, 9): from typing_extensions import TypedDict # type: ignore # noqa class MyDict(dict): pass @pytest.mark.parametrize( "tp", [dict, Dict[int, Any], pytest.param(MyDict, marks=pytest.mark.xfail), Mapping] ) def test_dict(tp): with pytest.raises(ValueError, match="string-convertible keys"): deserialization_schema(tp) with pytest.raises(ValueError, match="string-convertible keys"): serialization_schema(tp) class TD1(TypedDict, total=False): key1: str class TD2(TypedDict): key2: int class TD3(TD1, TD2, total=False): key3: bool def test_typed_dict(): assert ( deserialization_schema(TD3) == serialization_schema(TD3) == { "type": "object", "properties": { "key1": {"type": "string"}, "key2": {"type": "integer"}, "key3": {"type": "boolean"}, }, "required": ["key2"], "additionalProperties": False, "$schema": "http://json-schema.org/draft/2020-12/schema#", } ) assert deserialize(TD3, {"Key2": 0, "Key3": True}, aliaser=str.capitalize) == { "key2": 0, "key3": True, } with pytest.raises(ValidationError): assert deserialize(TD3, {}) assert serialize(TD1, {"key1": ""}) == {"key1": ""} class SimpleAdditional(TypedDict): key: str class ComplexAdditional(TypedDict): key: date class AggregateAdditional(TypedDict): simple: Annotated[SimpleAdditional, flatten] @pytest.mark.parametrize( "cls", [SimpleAdditional, ComplexAdditional, AggregateAdditional] ) def test_additional_properties(cls): data = {"key": "1970-01-01", "additional": 42} with pytest.raises(ValidationError): deserialize(cls, data) typed_dict = deserialize(cls, data, additional_properties=True) assert typed_dict["additional"] == 42 assert "additional" not in serialize(cls, typed_dict) assert serialize(cls, typed_dict, additional_properties=True)["additional"] == 42 apischema-0.18.3/tests/integration/test_discriminator.py000066400000000000000000000024471467672046000235250ustar00rootroot00000000000000from dataclasses import dataclass from typing import Literal, TypedDict, Union import pytest from apischema import deserialize, discriminator, serialize from apischema.json_schema import deserialization_schema from apischema.typing import Annotated class TypedDictWithoutField(TypedDict): pass class TD1(TypedDict): type: str class TD2(TypedDict): type: str def test_typed_dict_without_discriminator_field_cannot_have_discriminator(): with pytest.raises(TypeError): deserialization_schema( Annotated[Union[TD1, TypedDictWithoutField], discriminator("type")] ) def test_typed_dict_discriminator(): assert deserialize( Annotated[Union[TD1, TD2], discriminator("type")], {"type": "TD1"} ) == {"type": "TD1"} assert serialize( Annotated[Union[TD1, TD2], discriminator("type")], {"type": "TD1"} ) == {"type": "TD1"} @dataclass class A: type: Literal["a"] @dataclass class B: pass @pytest.mark.parametrize("type_, obj", [("a", A("a")), ("B", B())]) def test_discriminator_literal_field(type_, obj): assert ( deserialize(Annotated[Union[A, 
B], discriminator("type")], {"type": type_}) == obj ) assert serialize(Annotated[Union[A, B], discriminator("type")], obj) == { "type": type_ } apischema-0.18.3/tests/integration/test_field_generic_conversion.py000066400000000000000000000021271467672046000256750ustar00rootroot00000000000000from dataclasses import dataclass, field from operator import itemgetter from typing import Dict, Generic, Mapping, Sequence, TypeVar from apischema import alias, serialize from apischema.json_schema import serialization_schema from apischema.metadata import conversion T = TypeVar("T") V = TypeVar("V") def sort_by_priority(values_with_priority: Mapping[int, T]) -> Sequence[T]: return [v for _, v in sorted(values_with_priority.items(), key=itemgetter(0))] assert sort_by_priority({1: "a", 0: "b"}) == ["b", "a"] @dataclass class Foo(Generic[V]): values_with_priority: Dict[int, V] = field( metadata=alias("values") | conversion(serialization=sort_by_priority) ) def test_field_generic_conversion(): assert serialize(Foo[str], Foo({1: "a", 0: "b"})) == {"values": ["b", "a"]} assert serialization_schema(Foo[str]) == { "type": "object", "properties": {"values": {"type": "array", "items": {"type": "string"}}}, "required": ["values"], "additionalProperties": False, "$schema": "http://json-schema.org/draft/2020-12/schema#", } apischema-0.18.3/tests/integration/test_generic_conversion.py000066400000000000000000000020761467672046000245350ustar00rootroot00000000000000from dataclasses import dataclass from typing import Generic, TypeVar import pytest from apischema import ValidationError, deserialize from apischema.typing import Annotated T = TypeVar("T") @dataclass class A(Generic[T]): a: T @dataclass class B(Generic[T]): b: T def a_to_b(a: A[T]) -> B[T]: return B(a.a) def test_generic_conversion(): assert deserialize(B[int], {"a": 0}, conversion=a_to_b) == B(0) with pytest.raises(ValidationError): deserialize(B[int], {"a": ""}, conversion=a_to_b) def a_to_b_unparametrized(a: A) -> B: return B(a.a) def test_unparameterized_generic_conversion(): # With unparametrized conversion, generic args are lost assert deserialize(B[int], {"a": ""}, conversion=a_to_b_unparametrized) == B("") def a_to_b_annotated(a: Annotated[A[T], "a"]) -> B[T]: return B(a.a) def test_annotated_generic_conversion(): assert deserialize(B[int], {"a": 0}, conversion=a_to_b_annotated) == B(0) with pytest.raises(ValidationError): deserialize(B[int], {"a": ""}, conversion=a_to_b_annotated) apischema-0.18.3/tests/integration/test_generic_object_deserialization.py000066400000000000000000000011221467672046000270530ustar00rootroot00000000000000from typing import Collection, TypeVar import pytest from apischema import ValidationError, deserialize from apischema.objects import object_deserialization T = TypeVar("T") def repeat(item: T, number: int) -> Collection[T]: return [item] * number repeat_conv = object_deserialization(repeat) def test_generic_object_deserialization(): assert deserialize( Collection[int], {"item": 0, "number": 3}, conversion=repeat_conv ) == [0, 0, 0] with pytest.raises(ValidationError): deserialize(Collection[str], {"item": 0, "number": 3}, conversion=repeat_conv) apischema-0.18.3/tests/integration/test_int_as_float.py000066400000000000000000000005401467672046000233100ustar00rootroot00000000000000import pytest from apischema import ValidationError, deserialize, schema def test_int_as_float(): assert deserialize(float, 42) == 42.0 assert type(deserialize(float, 42)) == float assert deserialize(float, 42, schema=schema(min=0)) == 42.0 with 
pytest.raises(ValidationError): deserialize(float, -1.0, schema=schema(min=0)) apischema-0.18.3/tests/integration/test_new_type_conversion.py000066400000000000000000000005511467672046000247470ustar00rootroot00000000000000from typing import NewType from apischema import deserialize, serialize from apischema.conversions import Conversion Int = NewType("Int", int) def test_new_type_conversion(): assert ( deserialize(Int, "0", conversion=Conversion(int, source=str, target=Int)) == 0 ) assert serialize(Int, 0, conversion=Conversion(str, source=Int)) == "0" apischema-0.18.3/tests/integration/test_no_copy.py000066400000000000000000000010401467672046000223100ustar00rootroot00000000000000from typing import Dict, List, Optional import pytest from apischema import deserialize, serialize @pytest.mark.parametrize( "data, tp", [([0], List[int]), ({"": 0}, Dict[str, int]), ([None, 0], List[Optional[int]])], ) def test_no_copy(data, tp): assert deserialize(tp, data, no_copy=True) is data obj = deserialize(tp, data, no_copy=False) assert obj == data and obj is not data assert serialize(tp, obj, no_copy=True) is obj data = serialize(tp, obj, no_copy=False) assert data == obj and data is not obj apischema-0.18.3/tests/integration/test_object_fields_overriding.py000066400000000000000000000015771467672046000257050ustar00rootroot00000000000000import sys from dataclasses import dataclass, replace from typing import Optional import pytest from apischema import ValidationError, deserialize, serialize from apischema.metadata import none_as_undefined from apischema.objects import object_fields, set_object_fields @dataclass class Foo: bar: Optional[str] = None @pytest.mark.skipif( (sys.version_info < (3, 8) or ((3, 9) < sys.version_info < (3, 9, 5))), reason="dataclasses.replace bug with InitVar", ) def test_object_fields_overriding(): set_object_fields(Foo, []) assert serialize(Foo, Foo()) == {} set_object_fields( Foo, [ replace(f, metadata=none_as_undefined | f.metadata) for f in object_fields(Foo, default=None).values() ], ) assert serialize(Foo, Foo()) == {} with pytest.raises(ValidationError): deserialize(Foo, {"bar": None}) apischema-0.18.3/tests/integration/test_override_dataclass_constructors.py000066400000000000000000000010041467672046000273300ustar00rootroot00000000000000from dataclasses import dataclass, field import pytest from apischema import deserialize, settings @dataclass class Foo: no_default: int default: str = "" default_factory: list = field(default_factory=list) @pytest.mark.parametrize("override", [True, False]) def test_override_dataclass_constructors(monkeypatch, override): monkeypatch.setattr( settings.deserialization, "override_dataclass_constructors", override ) assert deserialize(Foo, {"no_default": 0}) == Foo(0, "", []) apischema-0.18.3/tests/integration/test_pattern_deserialization.py000066400000000000000000000005261467672046000255750ustar00rootroot00000000000000import re import pytest from apischema import ValidationError, deserialize def test_valid_pattern(): pattern = deserialize(re.Pattern, "(a|b)") assert isinstance(pattern, re.Pattern) assert pattern.pattern == "(a|b)" def test_invalid_pattern(): with pytest.raises(ValidationError): deserialize(re.Pattern, "(a") apischema-0.18.3/tests/integration/test_resolver_default_parameter_not_serializable.py000066400000000000000000000021451467672046000316640ustar00rootroot00000000000000from functools import wraps from typing import Union import pytest from graphql import graphql_sync from graphql.utilities import print_schema from apischema 
import Undefined, UndefinedType, Unsupported from apischema.graphql import graphql_schema from apischema.typing import Annotated class Foo: pass @pytest.mark.parametrize( "tp, default", [ (Union[UndefinedType, int], Undefined), (Union[int, Annotated[Foo, Unsupported]], Foo()), ], ) def test_resolver_default_parameter_not_serializable(tp, default): def resolver(arg=default) -> bool: return arg is default resolver.__annotations__["arg"] = tp # wraps in order to trigger the bug of get_type_hints with None default value resolver2 = wraps(resolver)(lambda arg=default: resolver(arg)) schema = graphql_schema(query=[resolver2]) assert ( print_schema(schema) == """\ type Query { resolver(arg: Int): Boolean! }""" ) assert ( graphql_sync(schema, "{resolver}").data == graphql_sync(schema, "{resolver(arg: null)}").data == {"resolver": True} ) apischema-0.18.3/tests/integration/test_serialization_conflicting_union.py000066400000000000000000000007501467672046000273150ustar00rootroot00000000000000from dataclasses import dataclass from typing import Tuple, Union import apischema @dataclass(frozen=True) class SomeTupleClass: bar: Union[Tuple[int, int], Tuple[int, int, int]] def test_correct_serialization() -> None: serialized_dict = {"bar": [0, 0, 0]} as_python_object = apischema.deserialize(type=SomeTupleClass, data=serialized_dict) assert as_python_object == SomeTupleClass(bar=(0, 0, 0)) assert apischema.serialize(as_python_object) == serialized_dict apischema-0.18.3/tests/integration/test_type_converter.py000066400000000000000000000003721467672046000237210ustar00rootroot00000000000000from apischema import deserialize, deserializer @deserializer class Foo: def __init__(self, bar: int) -> None: self.bar = bar def test_type_converter(): foo = deserialize(Foo, 42) assert isinstance(foo, Foo) and foo.bar == 42 apischema-0.18.3/tests/integration/test_union_any_schema.py000066400000000000000000000013061467672046000241660ustar00rootroot00000000000000from typing import Any, Generic, TypeVar, Union import pytest from apischema import deserializer, serializer from apischema.json_schema import deserialization_schema, serialization_schema T = TypeVar("T") @deserializer class Wrapper(Generic[T]): def __init__(self, value: T): self.value = value @serializer def wrapper_value(wrapper: Wrapper[T]) -> T: return wrapper.value @pytest.mark.parametrize("schema_factory", [deserialization_schema, serialization_schema]) # type: ignore @pytest.mark.parametrize("wrapper_type", [Wrapper, Wrapper[Any]]) def test_json_schema_union_any(schema_factory, wrapper_type): assert schema_factory(Union[int, wrapper_type], with_schema=False) == {} apischema-0.18.3/tests/integration/test_unsupported_union_member.py000066400000000000000000000007221467672046000257770ustar00rootroot00000000000000from dataclasses import dataclass from typing import Annotated, Union import pytest from apischema import Unsupported, ValidationError, deserialize @dataclass class Foo: bar: Union[int, Annotated[None, Unsupported]] = None def test_unsupported_union_member(): with pytest.raises(ValidationError) as err: deserialize(Foo, {"bar": None}) assert err.value.errors == [ {"loc": ["bar"], "err": "expected type integer, found null"} ] apischema-0.18.3/tests/integration/test_validator_aliasing.py000066400000000000000000000010541467672046000245030ustar00rootroot00000000000000from dataclasses import dataclass, field import pytest from apischema import ValidationError, deserialize, validator from apischema.objects import AliasedStr, get_alias @dataclass class A: a: int = 
field() @validator(a) def validate_a(self): yield (get_alias(self).a, "b", 0, AliasedStr("c")), f"error {self.a}" def test_validator_aliasing(): with pytest.raises(ValidationError) as err: deserialize(A, {"A": 42}, aliaser=str.upper) assert err.value.errors == [{"loc": ["A", "A", "b", 0, "C"], "err": "error 42"}] apischema-0.18.3/tests/requirements.txt000066400000000000000000000003771467672046000202060ustar00rootroot00000000000000graphql-core==3.2.4 attrs==22.1.0 bson==0.5.10 docstring-parser==0.16 pydantic==1.10.18 pytest==7.4.2 pytest-cov==4.0.0 pytest-asyncio==0.20.3 SQLAlchemy==1.4.45 typing-extensions==4.8.0;python_version>="3.8" typing-extensions==4.7.1;python_version<"3.8" apischema-0.18.3/tests/unit/000077500000000000000000000000001467672046000156725ustar00rootroot00000000000000apischema-0.18.3/tests/unit/__init__.py000066400000000000000000000000001467672046000177710ustar00rootroot00000000000000apischema-0.18.3/tests/unit/test_alias.py000066400000000000000000000012271467672046000203760ustar00rootroot00000000000000from dataclasses import dataclass, field from apischema import alias from apischema.objects import object_fields @alias(lambda s: f"prefixed_{s}") @dataclass class Data: not_aliased: int = field(metadata=alias(override=False)) not_prefixed: int = field(metadata=alias("not_overridden", override=False)) prefixed: int prefixed_alias: str = field(metadata=alias("alias")) def test_alias(): assert {name: field.alias for name, field in object_fields(Data).items()} == { "not_aliased": "not_aliased", "not_prefixed": "not_overridden", "prefixed": "prefixed_prefixed", "prefixed_alias": "prefixed_alias", } apischema-0.18.3/tests/unit/test_coercion.py000066400000000000000000000010651467672046000211060ustar00rootroot00000000000000import pytest from apischema.deserialization.coercion import coerce from apischema.types import NoneType @pytest.mark.parametrize( "cls, data, result", [ (int, 0, 0), (str, 0, "0"), (bool, 0, False), (bool, "true", True), (NoneType, "", None), ], ) def test_coerce(cls, data, result): assert coerce(cls, data) == result @pytest.mark.parametrize("cls, data", [(int, None), (bool, "I SAY NO"), (NoneType, 42)]) def test_coerce_error(cls, data): with pytest.raises(Exception): coerce(cls, data) apischema-0.18.3/tests/unit/test_constraints.py000066400000000000000000000006561467672046000216610ustar00rootroot00000000000000from apischema.constraints import merge_constraints from apischema.schemas import Constraints def test_constraint_merging(): constraints = Constraints(min=1, max=10) other = Constraints(min=0, max=5) assert merge_constraints(constraints, other) == Constraints(min=1, max=5) base_schema = {"minimum": 0, "maximum": 5} constraints.merge_into(base_schema) assert base_schema == {"minimum": 1, "maximum": 5} apischema-0.18.3/tests/unit/test_conversions_resolver.py000066400000000000000000000042551467672046000236020ustar00rootroot00000000000000from typing import Collection, Dict, List, Mapping, Sequence, Tuple import pytest from apischema import serializer, settings from apischema.conversions.conversions import Conversion, LazyConversion from apischema.conversions.visitor import SerializationVisitor from apischema.json_schema.conversions_resolver import ( WithConversionsResolver, merge_results, ) from apischema.objects import set_object_fields from apischema.types import AnyType from apischema.utils import identity @pytest.mark.parametrize( "results, origin, expected", [ ([[int]], Collection, [[int]]), ([[int, str], [str]], Mapping, [[int, str], [str, str]]), 
([[int], []], Mapping, []), ], ) def test_merge_results(results, origin, expected): assert list(merge_results(results, origin)) == [ origin[tuple(exp)] for exp in expected ] class Visitor(SerializationVisitor, WithConversionsResolver): def visit(self, tp: AnyType) -> Sequence[AnyType]: return self.resolve_conversion(tp) class A: pass serializer(Conversion(id, source=A, target=int)) tmp = None rec_conversion = Conversion(identity, A, Collection[A], LazyConversion(lambda: tmp)) tmp = rec_conversion class B: pass set_object_fields(B, []) @pytest.mark.parametrize( "tp, conversions, expected", [ (int, None, [int]), (int, Conversion(str, int), []), (List[int], None, [Collection[int]]), (List[int], Conversion(str, source=int), [Collection[str]]), ( Tuple[Dict[int, str], ...], [Conversion(str, source=int), Conversion(bool, source=str)], [Collection[Mapping[str, bool]]], ), ( List[int], Conversion(str, source=int, sub_conversion=Conversion(bool, source=str)), [Collection[bool]], ), (A, None, [A]), (Collection[A], None, [Collection[A], Collection[int]]), (A, rec_conversion, []), (B, None, [B]), ], ) def test_resolve_conversion(tp, conversions, expected): result = Visitor(settings.serialization.default_conversion).visit_with_conv( tp, conversions ) assert list(result) == list(expected) apischema-0.18.3/tests/unit/test_dataclasses.py000066400000000000000000000013721467672046000215750ustar00rootroot00000000000000from dataclasses import InitVar, dataclass, field from dataclasses import replace as std_replace from apischema.dataclasses import replace from apischema.fields import fields_set, with_fields_set from apischema.metadata.implem import init_var from apischema.visitor import dataclass_types_and_fields @dataclass class WithInitVar: a: InitVar[int] = field(metadata=init_var("int")) def test_resolve_init_var(): assert dataclass_types_and_fields(WithInitVar)[0] == {"a": int} @with_fields_set @dataclass class WithFieldsSet: a: int = 0 def test_replace(): obj = WithFieldsSet() assert fields_set(obj) == set() obj2 = std_replace(obj) assert fields_set(obj2) == {"a"} obj3 = replace(obj) assert fields_set(obj3) == set() apischema-0.18.3/tests/unit/test_deserialization.py000066400000000000000000000024601467672046000224730ustar00rootroot00000000000000from dataclasses import InitVar, dataclass, field from typing import Generic, TypeVar import pytest from apischema import settings from apischema.deserialization import get_deserialization_flattened_aliases from apischema.metadata import flatten, init_var from apischema.objects import object_fields @dataclass class A: a: int b: "B" = field(metadata=flatten) c: "C[int]" = field(metadata=flatten) d: "D" = field(metadata=flatten) e: InitVar[int] = field(metadata=init_var(int)) f: int = field(init=False) @dataclass class B: g: int T = TypeVar("T") @dataclass class C(Generic[T]): h: T @dataclass class D(Generic[T]): i: T @dataclass class Data: field: A = field(metadata=flatten) def test_flattened_aliases(): aliases = get_deserialization_flattened_aliases( Data, object_fields(Data)["field"], settings.deserialization.default_conversion ) assert set(aliases) == {"a", "g", "h", "i", "e"} @dataclass class BadData: field: int = field(metadata=flatten) def test_invalid_flattened(): with pytest.raises(TypeError): list( get_deserialization_flattened_aliases( BadData, object_fields(BadData)["field"], settings.deserialization.default_conversion, ) ) 
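# Added commentary (not from the original test suite): `flatten` merges the fields of the
# flattened attribute into its parent object, which is why the aliases collected above for
# Data are {"a", "g", "h", "i", "e"} rather than a nested mapping. A scalar type such as
# `int` has no fields of its own to merge, so BadData is rejected with a TypeError, as
# test_invalid_flattened asserts.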
apischema-0.18.3/tests/unit/test_deserialization_methods.py000066400000000000000000000004261467672046000242160ustar00rootroot00000000000000from apischema.deserialization.methods import to_hashable def test_to_hashable(): hashable1 = to_hashable({"key1": 0, "key2": [1, 2]}) hashable2 = to_hashable({"key2": [1, 2], "key1": 0}) assert hashable1 == hashable2 assert hash(hashable1) == hash(hashable2) apischema-0.18.3/tests/unit/test_deserialization_serialization.py000066400000000000000000000115231467672046000254300ustar00rootroot00000000000000from collections import deque from dataclasses import dataclass, field from enum import Enum from typing import ( AbstractSet, Any, FrozenSet, List, Literal, Mapping, Optional, Sequence, Set, Tuple, Union, ) from uuid import UUID, uuid4 import pytest from apischema import schema from apischema.deserialization import deserialize from apischema.fields import with_fields_set from apischema.metadata import properties from apischema.serialization import serialize from apischema.validation.errors import ValidationError uuid = str(uuid4()) def bijection(cls, data, expected): obj = deserialize(cls, data) assert obj == expected assert type(obj) is type(expected) assert serialize(cls, obj) == data def error(data, cls): with pytest.raises(ValidationError): deserialize(cls, data) @dataclass(unsafe_hash=True) class SimpleDataclass: a: int class SimpleEnum(Enum): a = "a" @with_fields_set @dataclass class Dataclass: nested: SimpleDataclass opt: Optional[int] = field(default=None, metadata=schema(min=100)) def test_bool_as_int_error(): error(True, int) @pytest.mark.parametrize("data", ["", 0]) def test_any(data): bijection(Any, data, data) @pytest.mark.parametrize( "data, expected", [(None, None), ({"a": 0}, SimpleDataclass(0))] ) def test_optional(data, expected): bijection(Optional[SimpleDataclass], data, expected) def test_optional_error(): error(0, Optional[str]) @pytest.mark.parametrize("data, expected", [("", ""), ({"a": 0}, SimpleDataclass(0))]) def test_union(data, expected): bijection(Union[str, SimpleDataclass], data, expected) @pytest.mark.parametrize("data", [0, None]) def test_union_error(data): error(data, Union[str, SimpleDataclass]) @pytest.mark.parametrize("cls, data", [(int, 0), (str, ""), (bool, True), (float, 0.0)]) def test_primitive(cls, data): bijection(cls, data, data) @pytest.mark.parametrize("data", ["", None]) def test_primitive_error(data): error(data, int) # noinspection PyTypeChecker @pytest.mark.parametrize( "cls, expected", [ (List, [0, SimpleDataclass(0)]), (Set, {0, SimpleDataclass(0)}), (Sequence, [0, SimpleDataclass(0)]), (AbstractSet, {0, SimpleDataclass(0)}), (FrozenSet, frozenset([0, SimpleDataclass(0)])), ], ) def test_collection(cls, expected): data = [0, {"a": 0}] bijection(cls[Union[int, SimpleDataclass]], data, expected) def test_collection_tuple(): data = [0, {"a": 0}] expected = (0, SimpleDataclass(0)) bijection(Tuple[2 * (Union[int, SimpleDataclass],)], data, expected) def test_collection_tuple_variadic(): data = [0, {"a": 0}] expected = (0, SimpleDataclass(0)) bijection(Tuple[Union[int, SimpleDataclass], ...], data, expected) @pytest.mark.parametrize("data", [{}, ["", 0]]) def test_iterable_error(data): error(data, List[str]) @pytest.mark.parametrize( "key_cls, data, expected", [ (str, {"int": 0, "SC": {"a": 0}}, {"int": 0, "SC": SimpleDataclass(0)}), (UUID, {uuid: 0}, {UUID(uuid): 0}), (UUID, {uuid: 0}, {UUID(uuid): 0}), ], ) def test_mapping(key_cls, data, expected): bijection(Mapping[key_cls, Union[int, SimpleDataclass]], 
data, expected) # type: ignore @pytest.mark.parametrize("data", [[], {"key": ""}]) def test_mapping_error(data): error(data, Mapping[str, int]) @pytest.mark.parametrize("expected", [UUID(uuid), UUID(uuid)]) def test_model(expected): bijection(UUID, uuid, expected) @pytest.mark.parametrize("data", [0, "fake"]) def test_model_error(data): error(data, UUID) def test_enum(): bijection(SimpleEnum, "a", SimpleEnum.a) def test_enum_errors(): error("b", SimpleEnum) @pytest.mark.parametrize("data", [0, "ok"]) def test_literal(data): bijection(Literal[0, "ok"], data, data) def test_literal_error(): error(1, Literal[0, "ok"]) @pytest.mark.parametrize( "data, expected", [ ({"nested": {"a": 0}}, Dataclass(SimpleDataclass(0), None)), ({"nested": {"a": 0}, "opt": None}, Dataclass(SimpleDataclass(0), None)), ({"nested": {"a": 0}, "opt": 100}, Dataclass(SimpleDataclass(0), 100)), ], ) def test_dataclass(data, expected): bijection(Dataclass, data, expected) @pytest.mark.parametrize("data", [{}, {"nested": {}, "opt": 1}]) def test_dataclass_error(data): error(data, Dataclass) def test_with_class_context(): @schema(min=100) class BigInt(int): pass bijection(BigInt, 100, BigInt(100)) def test_properties(): @dataclass class Test: startswith_a: Mapping[str, Any] = field(metadata=properties("^a.*$")) others: Mapping[str, Any] = field(metadata=properties) assert deserialize(Test, {"plop": 0, "allo": 1}) == Test({"allo": 1}, {"plop": 0}) def test_deque(): bijection(deque, [0, 1], deque([0, 1])) apischema-0.18.3/tests/unit/test_field.py000066400000000000000000000027161467672046000203740ustar00rootroot00000000000000from dataclasses import dataclass, field import pytest from apischema.fields import fields_set, set_fields, unset_fields, with_fields_set @with_fields_set @dataclass class Data: without_default: int with_default: int = 0 with_default_factory: int = field(default_factory=lambda: 0) @dataclass class Inherited(Data): other: int = 42 @with_fields_set @dataclass class DecoratedInherited(Data): other: int = 42 def test_fields_set(): with pytest.raises(TypeError): fields_set(object()) assert fields_set(Data(0)) == {"without_default"} assert fields_set(Data(without_default=0)) == {"without_default"} assert fields_set(Data(0, 1)) == {"without_default", "with_default"} data = Data(0) data.with_default = 1 assert fields_set(data) == {"without_default", "with_default"} unset_fields(data, "without_default") assert fields_set(data) == {"with_default"} set_fields(data, "with_default_factory") assert fields_set(data) == {"with_default", "with_default_factory"} set_fields(data, "with_default", overwrite=True) assert fields_set(data) == {"with_default"} set_fields(data, "not_a_field") assert fields_set(data) == {"with_default", "not_a_field"} assert fields_set(Inherited(0, other=0)) == { "without_default", "with_default", "with_default_factory", "other", } assert fields_set(DecoratedInherited(0, other=0)) == {"without_default", "other"} apischema-0.18.3/tests/unit/test_flattened_conversion.py000066400000000000000000000074611467672046000235260ustar00rootroot00000000000000from dataclasses import dataclass, field import pytest from graphql import graphql_sync, print_schema from apischema import deserialize, serialize from apischema.graphql import graphql_schema from apischema.json_schema import deserialization_schema, serialization_schema from apischema.metadata import conversion, flatten from apischema.objects import ObjectField, set_object_fields class Field: def __init__(self, attr: int): self.attr = attr 
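# Added commentary (not from the original test suite): Field is deliberately a plain class
# rather than a dataclass; the set_object_fields call just below registers its single "attr"
# field explicitly so that apischema can (de)serialize it like an ordinary object type.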
set_object_fields(Field, [ObjectField("attr", int)]) @dataclass class Data: data_field: Field = field(metadata=flatten) def get_data() -> Data: return Data(Field(0)) def test_flattened_dataclass_model(): data = deserialize(Data, {"attr": 0}) assert isinstance(data.data_field, Field) and data.data_field.attr == 0 assert serialize(Data, data) == {"attr": 0} assert ( deserialization_schema(Data) == serialization_schema(Data) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "allOf": [ {"type": "object", "additionalProperties": False}, { "type": "object", "properties": {"attr": {"type": "integer"}}, "required": ["attr"], "additionalProperties": False, }, ], "unevaluatedProperties": False, } ) schema = graphql_schema(query=[get_data]) assert graphql_sync(schema, "{getData{attr}}").data == {"getData": {"attr": 0}} assert ( print_schema(schema) == """\ type Query { getData: Data! } type Data { attr: Int! }""" ) class Field2: def __init__(self, attr: int): self.attr = attr @staticmethod def from_field(field: Field) -> "Field2": return Field2(field.attr) def to_field(self) -> Field: return Field(self.attr) @staticmethod def from_int(i: int) -> "Field2": return Field2(i) def to_int(self) -> int: return self.attr @dataclass class Data2: data_field2: Field2 = field( metadata=flatten | conversion(Field2.from_field, Field2.to_field) ) def get_data2() -> Data2: return Data2(Field2(0)) def test_flattened_converted(): data2 = deserialize(Data2, {"attr": 0}) assert isinstance(data2.data_field2, Field2) and data2.data_field2.attr == 0 assert serialize(Data2, data2) == {"attr": 0} assert ( deserialization_schema(Data) == serialization_schema(Data) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "allOf": [ {"type": "object", "additionalProperties": False}, { "type": "object", "properties": {"attr": {"type": "integer"}}, "required": ["attr"], "additionalProperties": False, }, ], "unevaluatedProperties": False, } ) schema = graphql_schema(query=[get_data2]) assert graphql_sync(schema, "{getData2{attr}}").data == {"getData2": {"attr": 0}} assert ( print_schema(schema) == """\ type Query { getData2: Data2! } type Data2 { attr: Int! }""" ) @dataclass class Data3: data_field2: Field2 = field( metadata=flatten | conversion(Field2.from_int, Field2.to_int) ) def get_data3() -> Data3: # type: ignore ... 
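# Added commentary (not from the original test suite): unlike Data2, Data3 converts its field
# to/from a plain int (Field2.from_int / Field2.to_int). Flattening needs an object-like
# converted type whose own fields can be merged into the parent, so every operation on Data3
# below is expected to fail with a TypeError.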
def test_flattened_converted_error(): with pytest.raises(TypeError): deserialize(Data3, {"attr": 0}) with pytest.raises(TypeError): serialize(Data3, Data3(Field2(0))) with pytest.raises(TypeError): deserialization_schema(Data3) with pytest.raises(TypeError): serialization_schema(Data3) with pytest.raises(TypeError): graphql_schema(query=[get_data3]) apischema-0.18.3/tests/unit/test_metadata.py000066400000000000000000000006671467672046000210740ustar00rootroot00000000000000from dataclasses import dataclass, field from typing import Generic, TypeVar from apischema import deserialize from apischema.metadata import flatten T = TypeVar("T") @dataclass class A(Generic[T]): pass @dataclass class B(Generic[T]): a1: A = field(metadata=flatten) a2: A[T] = field(metadata=flatten) a3: A[int] = field(metadata=flatten) def test_flattened_generic_dataclass(): deserialize(B, {}) # it works apischema-0.18.3/tests/unit/test_recursion.py000066400000000000000000000021441467672046000213150ustar00rootroot00000000000000from dataclasses import dataclass, field from typing import List, Optional import pytest from apischema import settings from apischema.conversions import Conversion, LazyConversion from apischema.metadata import conversion from apischema.recursion import DeserializationRecursiveChecker, is_recursive class A: pass @dataclass class B: b: Optional["B"] @dataclass class C: b: B d: "D" f: "F" @dataclass class D: c: List[C] @dataclass class E: c: List[C] @dataclass class F: e: E rec_conv = None @dataclass class G: a: Optional[A] = field( metadata=conversion(deserialization=LazyConversion(lambda: rec_conv)) ) rec_conv = Conversion(lambda _: None, source=Optional[G], target=A) @pytest.mark.parametrize( "tp, expected", [(A, False), (B, True), (C, True), (D, True), (E, True), (F, True), (G, True)], ) def test_is_recursive(tp, expected): assert ( is_recursive( tp, None, settings.deserialization.default_conversion, DeserializationRecursiveChecker, ) == expected ) apischema-0.18.3/tests/unit/test_refs.py000066400000000000000000000061221467672046000202430ustar00rootroot00000000000000from dataclasses import dataclass from typing import Collection, Generic, List, Optional, Sequence, TypeVar import pytest from _pytest.python_api import raises from apischema import settings, type_name from apischema.conversions import Conversion, LazyConversion from apischema.json_schema import deserialization_schema, serialization_schema from apischema.json_schema.schema import DeserializationSchemaBuilder from apischema.type_names import get_type_name from apischema.typing import Annotated @type_name(None) @dataclass class A: a: int @dataclass class B: a: Optional[A] type_name("Bs")(List[B]) @type_name("DD") @dataclass class D: bs: Annotated[List[B], type_name("Bs2")] # noqa: F821 @dataclass class Recursive: rec: Optional["Recursive"] def test_find_refs(): refs: dict = {} DeserializationSchemaBuilder.RefsExtractor( settings.deserialization.default_conversion, refs ).visit(D) DeserializationSchemaBuilder.RefsExtractor( settings.deserialization.default_conversion, refs ).visit(Recursive) assert refs == { "B": (B, 1), "DD": (D, 1), "Bs": (Collection[B], 1), "Bs2": (Annotated[List[B], type_name("Bs2")], 1), "Recursive": (Recursive, 2), } T = TypeVar("T") U = TypeVar("U") @dataclass class DataGeneric(Generic[T]): a: T type_name("StrData")(DataGeneric[str]) @pytest.mark.parametrize("cls", [DataGeneric, DataGeneric[U]]) # type: ignore def test_generic_ref_error(cls): with raises(TypeError): type_name("Data")(cls) def 
test_generic_schema(): assert deserialization_schema(DataGeneric, all_refs=True) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": {"a": {}}, "required": ["a"], "additionalProperties": False, } assert deserialization_schema(DataGeneric[int], all_refs=True) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": {"a": {"type": "integer"}}, "required": ["a"], "additionalProperties": False, } assert deserialization_schema(DataGeneric[str], all_refs=True) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "$ref": "#/$defs/StrData", "$defs": { "StrData": { "type": "object", "properties": {"a": {"type": "string"}}, "required": ["a"], "additionalProperties": False, } }, } def test_collection_type_name(): type_name("test")(Sequence[A]) assert get_type_name(List[A]) == get_type_name(Collection[A]) == ("test", "test") @type_name(None) class RecConv: pass def rec_converter(rec: RecConv) -> List[RecConv]: # type: ignore ... def test_recursive_conversion_without_ref(): tmp = None conversion = Conversion(rec_converter, sub_conversion=LazyConversion(lambda: tmp)) tmp = conversion with raises(TypeError, match=r"Recursive type <.*> needs a ref.*"): serialization_schema(RecConv, conversion=conversion) apischema-0.18.3/tests/unit/test_schema.py000066400000000000000000000027541467672046000205530ustar00rootroot00000000000000from dataclasses import dataclass, field from typing import NewType, Optional from apischema import deserializer, schema, type_name from apischema.json_schema import deserialization_schema class Foo: pass @dataclass class Bar: foo: Optional[Foo] @deserializer def foo(bar: Bar) -> Foo: return bar.foo or Foo() def test_recursive_by_conversion_schema(): assert deserialization_schema(Foo) == { "$ref": "#/$defs/Foo", "$defs": { "Foo": { "type": "object", "properties": { "foo": {"anyOf": [{"$ref": "#/$defs/Foo"}, {"type": "null"}]} }, "required": ["foo"], "additionalProperties": False, } }, "$schema": "http://json-schema.org/draft/2020-12/schema#", } MoreThanTwo = NewType("MoreThanTwo", int) schema(min=0, extra=lambda s: s.update({"minimum": 2}))(type_name(None)(MoreThanTwo)) @dataclass class WithSchema: attr1: MoreThanTwo = field(metadata=schema(min=3)) attr2: MoreThanTwo = field(metadata=schema(min=1)) def test_flattened_schema(): assert deserialization_schema(WithSchema) == { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": { "attr1": {"type": "integer", "minimum": 3}, "attr2": {"type": "integer", "minimum": 2}, }, "required": ["attr1", "attr2"], "additionalProperties": False, } apischema-0.18.3/tests/unit/test_serialized.py000066400000000000000000000025701467672046000214420ustar00rootroot00000000000000from dataclasses import dataclass, field from apischema import serialize, serialized from apischema.json_schema import serialization_schema from apischema.metadata import flatten @dataclass class Base: @serialized def serialized(self) -> int: return 0 base_schema = { "$schema": "http://json-schema.org/draft/2020-12/schema#", "type": "object", "properties": {"serialized": {"type": "integer"}}, "required": ["serialized"], "additionalProperties": False, } @dataclass class Inherited(Base): pass @dataclass class InheritedOverriden(Base): def serialized(self) -> int: return 1 def test_inherited_serialized(): assert ( serialization_schema(Base) == serialization_schema(Inherited) == serialization_schema(InheritedOverriden) == base_schema ) assert ( serialize(Base, 
Base()) == serialize(Inherited, Inherited()) == {"serialized": 0} ) assert serialize(InheritedOverriden, InheritedOverriden()) == {"serialized": 1} class WithFlattened(Base): base: Base = field(metadata=flatten) def test_flattened_serialized(): assert ( serialization_schema(Base) == serialization_schema(WithFlattened) == base_schema ) assert ( serialize(Base, Base()) == serialize(WithFlattened, WithFlattened()) == {"serialized": 0} ) apischema-0.18.3/tests/unit/test_subscriptions.py000066400000000000000000000051021467672046000222100ustar00rootroot00000000000000from dataclasses import dataclass, replace from typing import Any, AsyncIterable, Mapping, Optional import graphql import pytest from graphql.utilities import print_schema from apischema import Undefined from apischema.graphql import Subscription, graphql_schema EVENTS = ["bonjour", "au revoir"] @dataclass class Event: name: str def event_name(event: Event) -> str: return event.name async def events(**kwargs) -> AsyncIterable[Event]: for event in EVENTS: yield Event(event) async def anext(iterable): """Return the next item from an async iterator.""" return await iterable.__anext__() def wrap_event(event: str) -> Mapping[str, str]: return {"name": event} def events2(event: Event, dummy: Optional[bool] = None) -> Event: return replace(event, name=event.name.capitalize()) def hello() -> str: return "world" @pytest.mark.parametrize("alias", [None, "alias"]) @pytest.mark.parametrize("conversion", [None, event_name]) @pytest.mark.parametrize("error_handler", [Undefined, None]) @pytest.mark.parametrize("resolver", [None, events2]) async def test_subscription(alias, conversion, error_handler, resolver): if alias is not None: sub_name = alias elif resolver is not None: sub_name = resolver.__name__ else: sub_name = events.__name__ sub_op: Any if (alias, conversion, error_handler, resolver) == (None, None, Undefined, None): sub_op = events else: sub_op = Subscription( events, alias=alias, conversion=conversion, error_handler=error_handler, order=None, schema=None, resolver=resolver, ) schema = graphql_schema(query=[hello], subscription=[sub_op], types=[Event]) sub_field = sub_name if resolver is not None: sub_field += "(dummy: Boolean)" sub_field += f": {'String' if conversion else 'Event'}" if error_handler is Undefined: sub_field += "!" schema_str = """\ type Event { name: String! } type Query { hello: String! 
} type Subscription { %s }""" assert print_schema(schema) == schema_str % sub_field sub_query = sub_name if conversion is None: sub_query += "{name}" subscription = await graphql.subscribe( schema, graphql.parse("subscription {%s}" % sub_query) ) result: Any = EVENTS if resolver: result = [s.capitalize() for s in result] if not conversion: result = [{"name": s} for s in result] assert [ev.data async for ev in subscription] == [{sub_name: r} for r in result] apischema-0.18.3/tests/unit/test_subtyping_substitution.py000066400000000000000000000025001467672046000241600ustar00rootroot00000000000000from typing import Callable, Collection, Generic, List, Mapping, Sequence, TypeVar, cast import pytest from apischema.conversions import Conversion from apischema.conversions.conversions import ResolvedConversion from apischema.types import AnyType from apischema.utils import subtyping_substitution T = TypeVar("T") U = TypeVar("U") V = TypeVar("V") class A(Generic[T, U]): pass class B(Generic[V]): pass class C(B[int]): pass class D(B[T]): pass def conv(source: AnyType, target: AnyType) -> ResolvedConversion: return ResolvedConversion(Conversion(cast(Callable, ...), source, target)) @pytest.mark.parametrize( "supertype, subtype, super_to_sub, sub_to_super", [ (A[U, T], A[int, str], {U: int, T: str}, {}), # type: ignore (A, A[str, int], {T: str, U: int}, {}), (B[T], C, {T: int}, {}), # type: ignore (Sequence[T], List[str], {T: str}, {}), # type: ignore # (B[str], B[T], {}, {T: str}), # type: ignore (B[int], B, {}, {V: int}), (B[str], D[T], {}, {T: str}), # type: ignore (Collection[str], Mapping[T, int], {}, {T: str}), # type: ignore ], ) def test_subtyping_substitution(supertype, subtype, super_to_sub, sub_to_super): assert subtyping_substitution(supertype, subtype) == (super_to_sub, sub_to_super) apischema-0.18.3/tests/unit/test_types.py000066400000000000000000000006061467672046000204510ustar00rootroot00000000000000from apischema.types import MetadataImplem, MetadataMixin def test_metadata(): metadata = MetadataImplem({"a": 0, "b": 1}) assert metadata | {"b": 2} == {"a": 0, "b": 2} assert {"b": 2} | metadata == metadata class KeyMetadata(MetadataMixin): key = "key" def test_metadata_mixin(): instance = KeyMetadata() assert list(instance.items()) == [("key", instance)] apischema-0.18.3/tests/unit/test_typing.py000066400000000000000000000022271467672046000206200ustar00rootroot00000000000000from typing import Generic, TypeVar import pytest from apischema.typing import Annotated, generic_mro, resolve_type_hints T = TypeVar("T") U = TypeVar("U") class A(Generic[T, U]): t: T u: U class B(A[int, T]): v: T class C(B[str]): pass class D(C): d: Annotated[int, ""] test_cases = [ (A, [A], {"t": T, "u": U}), (A[int, str], [A[int, str]], {"t": int, "u": str}), (A[int, T], [A[int, T]], {"t": int, "u": T}), # type: ignore (B, [B, A[int, T]], {"t": int, "u": T, "v": T}), # type: ignore (B[U], [B[U], A[int, U]], {"t": int, "u": U, "v": U}), # type: ignore (B[str], [B[str], A[int, str]], {"t": int, "u": str, "v": str}), (C, [C, B[str], A[int, str]], {"t": int, "u": str, "v": str}), ( D, [D, C, B[str], A[int, str]], {"t": int, "u": str, "v": str, "d": Annotated[int, ""]}, ), ] @pytest.mark.parametrize("tp, result, _", test_cases) def test_generic_mro(tp, result, _): assert generic_mro(tp) == (*result, Generic, object) @pytest.mark.parametrize("tp, _, result", test_cases) def test_resolve_type_hints(tp, _, result): assert resolve_type_hints(tp) == result 
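# Added commentary (not from the original test suite): the cases above show generic_mro
# substituting type arguments down the inheritance chain, e.g.
#     generic_mro(C) == (C, B[str], A[int, str], Generic, object)
# while resolve_type_hints walks that same chain to map each inherited attribute's TypeVar
# to its concrete type (so C resolves to {"t": int, "u": str, "v": str}).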
apischema-0.18.3/tests/unit/test_utils.py000066400000000000000000000063101467672046000204430ustar00rootroot00000000000000import collections.abc import sys from functools import lru_cache, wraps from itertools import repeat from typing import ( AbstractSet, Awaitable, Collection, Dict, Generic, List, Mapping, Set, Tuple, TypeVar, ) import pytest from apischema.typing import Annotated, typing_origin from apischema.utils import is_async, replace_builtins, to_camel_case def test_to_camel_case(): assert to_camel_case("min_length") == "minLength" def sync_func(): ... async def async_func(): ... def func_not_returning_awaitable() -> int: # type: ignore ... def func_returning_awaitable() -> Awaitable[int]: # type: ignore ... sync_cases = [ sync_func, wraps(sync_func)(lambda: sync_func()), lru_cache()(sync_func), func_not_returning_awaitable, ] async_cases = [ async_func, wraps(async_func)(lambda: async_func()), lru_cache()(async_func), func_returning_awaitable, ] @pytest.mark.parametrize( "func, expected", [*zip(sync_cases, repeat(False)), *zip(async_cases, repeat(True))] ) def test_is_async(func, expected): assert is_async(func) == expected @pytest.mark.parametrize( "types, expected", [ ({}, False), ({"return": int}, False), ({"return": Awaitable[int]}, True), ({"return": Annotated[int, ...]}, False), ({"return": Annotated[Awaitable[int], ...]}, True), ], ) def test_is_async_with_types(types, expected): assert is_async(lambda: ..., types) == expected T = TypeVar("T") class GenericClass(Generic[T]): pass if sys.version_info < (3, 9): typing_origin_cases = [(list, List), (collections.abc.Collection, Collection)] else: typing_origin_cases = [ (list, list), (collections.abc.Collection, collections.abc.Collection), (List, List), (Collection, Collection), ] @pytest.mark.parametrize( "tp, expected", [*typing_origin_cases, (GenericClass, GenericClass)] ) def test_typing_origin(tp, expected): assert typing_origin(tp) == expected if sys.version_info < (3, 9): replace_builtins_cases = [ (Collection[int], List[int]), (AbstractSet[int], Set[int]), (Tuple[int], Tuple[int]), (Mapping[int, int], Dict[int, int]), (Tuple[int, ...], List[int]), ] else: replace_builtins_cases = [ (Collection[int], list[int]), (AbstractSet[int], set[int]), (Tuple[int], tuple[int]), (Mapping[int, int], dict[int, int]), (Tuple[int, ...], list[int]), (collections.abc.Collection[int], list[int]), (set[int], set[int]), (tuple[int], tuple[int]), (dict[int, int], dict[int, int]), (tuple[int, ...], list[int]), ] @pytest.mark.parametrize("annotated", [False, True]) @pytest.mark.parametrize("wrapped", [False, True]) @pytest.mark.parametrize("tp, expected", replace_builtins_cases) def test_replace_builtins(tp, expected, annotated, wrapped): if wrapped: tp = Collection[tp] # type: ignore expected = (list if sys.version_info >= (3, 9) else List)[expected] # type: ignore if annotated: tp, expected = Annotated[tp, 0], Annotated[expected, 0] assert replace_builtins(tp) == expected apischema-0.18.3/tests/unit/test_visitor.py000066400000000000000000000103711467672046000210040ustar00rootroot00000000000000import collections.abc import sys from dataclasses import dataclass, fields from enum import Enum from typing import ( Any, Collection, Dict, Generic, List, Literal, Mapping, NamedTuple, NewType, Optional, Tuple, TypedDict, TypeVar, Union, ) from unittest.mock import Mock import pytest from apischema.types import NoneType from apischema.typing import Annotated from apischema.visitor import Unsupported, Visitor ARG = object() @pytest.fixture def 
visitor() -> Mock: return Mock() class NamedTupleExample(NamedTuple): a: int b: str = "" class EnumExample(Enum): A = "a" B = "b" NewTypeExample = NewType("NewTypeExample", int) def func(): pass @dataclass class DataclassExample: a: int b: str class TypedDictExample(TypedDict, total=True): key1: str key2: List[int] class MyInt(int): pass pep_585: list = [] if sys.version_info >= (3, 9): pep_585 = [ (list[int], Visitor.collection, [list, int]), (tuple[str, ...], Visitor.collection, [tuple, str]), ( collections.abc.Collection[int], Visitor.collection, [collections.abc.Collection, int], ), ( collections.abc.Mapping[str, int], Visitor.mapping, [collections.abc.Mapping, str, int], ), (dict[str, int], Visitor.mapping, [dict, str, int]), ] py310: list = [] if sys.version_info >= (3, 10): py310 = [(int | str, Visitor.union, [(int, str)])] py311: list = [] if sys.version_info >= (3, 11): from typing import LiteralString py311 = [(LiteralString, Visitor.primitive, [str])] @pytest.mark.parametrize( "cls, method, args", [ (List[int], Visitor.collection, [list, int]), (Tuple[str, ...], Visitor.collection, [tuple, str]), (Collection[int], Visitor.collection, [collections.abc.Collection, int]), (Mapping[str, int], Visitor.mapping, [collections.abc.Mapping, str, int]), (Dict[str, int], Visitor.mapping, [dict, str, int]), *pep_585, *py310, *py311, (Annotated[int, 42, "42"], Visitor.annotated, [int, (42, "42")]), (Any, Visitor.any, []), ( DataclassExample, Visitor.dataclass, [ DataclassExample, {"a": int, "b": str}, (fields(DataclassExample)[0], fields(DataclassExample)[1]), (), ], ), (EnumExample, Visitor.enum, [EnumExample]), (Literal[1, 2], Visitor.literal, [(1, 2)]), ( NamedTupleExample, Visitor.named_tuple, [NamedTupleExample, {"a": int, "b": str}, {"b": ""}], ), (NewTypeExample, Visitor.new_type, [NewTypeExample, int]), (int, Visitor.primitive, [int]), (str, Visitor.primitive, [str]), (MyInt, Visitor.subprimitive, [MyInt, int]), (Tuple[str, int], Visitor.tuple, [(str, int)]), ( TypedDictExample, Visitor.typed_dict, ( TypedDictExample, {"key1": str, "key2": List[int]}, {"key1", "key2"} if sys.version_info >= (3, 9) else (), ), ), (Optional[int], Visitor.union, [(int, NoneType)]), (Union[int, str], Visitor.union, [(int, str)]), ], ) def test_visitor(visitor, cls, method, args): Visitor.visit(visitor, cls) getattr(visitor, method.__name__).assert_called_once_with(*args) T = TypeVar("T") @dataclass class GenericExample(Generic[T]): attr: T def test_default_implementations(visitor): assert Visitor.annotated(visitor, int, (42,)) visitor.visit.assert_called_once_with(int) visitor.reset_mock() assert Visitor.new_type(visitor, ..., int) visitor.visit.assert_called_once_with(int) visitor.reset_mock() with pytest.raises(Unsupported) as err: Visitor.unsupported(..., Generic) # type: ignore assert err.value.type == Generic with pytest.raises(Unsupported) as err: Visitor.unsupported(..., Generic[T]) # type: ignore assert err.value.type == Generic[T] with pytest.raises(NotImplementedError): Visitor.named_tuple(..., ..., ..., ...) 
# type: ignore apischema-0.18.3/tests/unit/validation/000077500000000000000000000000001467672046000200245ustar00rootroot00000000000000apischema-0.18.3/tests/unit/validation/__init__.py000066400000000000000000000000001467672046000221230ustar00rootroot00000000000000apischema-0.18.3/tests/unit/validation/test_dependencies.py000066400000000000000000000017421467672046000240670ustar00rootroot00000000000000import pytest from apischema.validation.dependencies import find_all_dependencies, find_dependencies def a_equal_b(param): assert param.a == param.b @pytest.mark.parametrize("func, deps", [(a_equal_b, {"a", "b"}), (int, set())]) def test_find_dependencies(func, deps): assert find_dependencies(func) == deps def test_no_parameter(): with pytest.raises(TypeError): find_dependencies(lambda: None) def test_find_end_dependencies(): class Test: class_var = "" def __init__(self): self.a = 0 self.b = {} def pseudo_validate(self): if self.a not in self.method(0): yield self.class_var def method(self, arg): res = list(self.c) if len(res) < arg: return self.method(arg - 1) @property def c(self): return self.b.values() assert find_all_dependencies(Test, Test.pseudo_validate) == {"a", "b", "class_var"} apischema-0.18.3/tests/unit/validation/test_mock.py000066400000000000000000000021201467672046000223610ustar00rootroot00000000000000from dataclasses import dataclass from typing import ClassVar, cast import pytest from apischema.fields import FIELDS_SET_ATTR from apischema.validation.mock import NonTrivialDependency, ValidatorMock @dataclass class Data: a: int b: str = "1" c: ClassVar[int] = 42 d = 0 @property def property(self) -> int: return int(self.b) + self.a def method(self, arg: int) -> int: return self.a + arg @classmethod def classmethod(cls, arg: int): return cls.c + arg class SubData(Data): pass def test_mock(): mock = cast(Data, ValidatorMock(Data, {"a": 0})) assert mock.a == 0 assert mock.b == "1" assert mock.c == 42 assert mock.d == 0 assert mock.__dict__ == {"a": 0, FIELDS_SET_ATTR: {"a"}} assert getattr(mock, FIELDS_SET_ATTR) == {"a"} assert mock.property == 1 assert mock.method(1) == 1 assert mock.classmethod(0) == 42 assert mock.__class__ == Data assert isinstance(mock, Data) assert type(mock) is ValidatorMock with pytest.raises(NonTrivialDependency): _ = mock.e apischema-0.18.3/tests/unit/validation/test_validator.py000066400000000000000000000037441467672046000234320ustar00rootroot00000000000000from dataclasses import dataclass from typing import Callable, Type import pytest from apischema import ValidationError, validator from apischema.validation.mock import NonTrivialDependency, ValidatorMock from apischema.validation.validators import Validator, get_validators, validate @dataclass class Data: a: int b: int c: int = 0 @validator def a_gt_10(self): if self.a <= 10: yield "error" @validator def a_lt_100(self): if self.a >= 100: raise ValidationError("error2") @validator def non_trivial(self): non_trivial(self) def non_trivial(data: Data): return data.c == data.b def get_validators_by_method(cls: Type, method: Callable) -> Validator: return next(val for val in get_validators(cls) if val.func == method) def test_get_validators(): assert get_validators(Data) == [ get_validators_by_method(Data, method) for method in (Data.a_gt_10, Data.a_lt_100, Data.non_trivial) ] def test_validator_descriptor(): # Class field is descriptor validator = get_validators_by_method(Data, Data.a_gt_10) assert validator.dependencies == {"a"} # Can be called from class and instance with pytest.raises(ValidationError): 
assert Data(200, 0).a_lt_100() with pytest.raises(ValidationError): assert Data.a_lt_100(Data(200, 0)) def test_validate(): validate(Data(42, 0)) with pytest.raises(ValidationError) as err: validate(Data(0, 0)) assert err.value.errors == [{"loc": [], "err": "error"}] with pytest.raises(ValidationError) as err: validate(Data(200, 0)) assert err.value.errors == [{"loc": [], "err": "error2"}] def test_non_trivial(): with pytest.raises(NonTrivialDependency) as err: validate(ValidatorMock(Data, {"a": 42}), get_validators(Data)) # err.value.attr != "c" because `c` has a default value assert err.value.attr == "b" assert err.value.validator.func == Data.non_trivial
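# Added commentary (not from the original test suite): ValidatorMock only exposes the fields
# supplied in the partial data ({"a": 42}) plus those with defaults, so when Data.non_trivial
# reaches for `b` the access escapes the mock and is reported as a NonTrivialDependency on
# "b" rather than as a validation error.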