pax_global_header00006660000000000000000000000064144436344650014527gustar00rootroot0000000000000052 comment=c793d665fea7f6240afc92ccd93c885162cd28bd ormar-0.12.2/000077500000000000000000000000001444363446500127315ustar00rootroot00000000000000ormar-0.12.2/.codeclimate.yml000066400000000000000000000005451444363446500160070ustar00rootroot00000000000000version: "2" checks: method-complexity: config: threshold: 8 argument-count: config: threshold: 6 method-count: config: threshold: 25 method-length: config: threshold: 35 file-lines: config: threshold: 500 engines: bandit: enabled: true checks: assert_used: enabled: false ormar-0.12.2/.codecov.yml000066400000000000000000000007411444363446500151560ustar00rootroot00000000000000coverage: precision: 2 round: down range: "80...100" status: project: yes patch: yes changes: yes comment: layout: "reach, diff, flags, files" behavior: default require_changes: false # if true: only post the comment if coverage changes require_base: no # [yes :: must have a base report to post] require_head: yes # [yes :: must have a head report to post] branches: # branch names that can post comment - "master"ormar-0.12.2/.coveragerc000066400000000000000000000002021444363446500150440ustar00rootroot00000000000000[run] source = ormar, tests omit = ./tests/test.db, *py.typed* data_file = .coverage [report] omit = ./tests/test.db, *py.typed* ormar-0.12.2/.flake8000066400000000000000000000002401444363446500141000ustar00rootroot00000000000000[flake8] ignore = ANN101, ANN102, W503, S101, CFQ004, S311 max-complexity = 8 max-line-length = 88 import-order-style = pycharm exclude = p38venv,.pytest_cache ormar-0.12.2/.github/000077500000000000000000000000001444363446500142715ustar00rootroot00000000000000ormar-0.12.2/.github/FUNDING.yml000066400000000000000000000000201444363446500160760ustar00rootroot00000000000000github: 
collerekormar-0.12.2/.github/ISSUE_TEMPLATE/000077500000000000000000000000001444363446500164545ustar00rootroot00000000000000ormar-0.12.2/.github/ISSUE_TEMPLATE/bug_report.md000066400000000000000000000015051444363446500211470ustar00rootroot00000000000000--- name: Bug report about: Create a report to help us improve title: '' labels: bug assignees: '' --- **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior: 1. Go to '...' 2. Click on '....' 3. Scroll down to '....' 4. See error (Note: this should be a complete and concise piece of code that allows reproduction of an issue) **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Versions (please complete the following information):** - Database backend used (mysql/sqlite/postgress) - Python version - `ormar` version - `pydantic` version - if applicable `fastapi` version **Additional context** Add any other context about the problem here. ormar-0.12.2/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000006311444363446500204440ustar00rootroot00000000000000contact_links: - name: I have a question ❓ url: https://github.com/collerek/ormar/discussions about: If you have any question about the usage of ormar, please open a discussion first. - name: I want a new feature 🆕 url: https://github.com/collerek/ormar/discussions about: If you would like to request or make a change/enhancement that is not trivial, please open a discussion first. ormar-0.12.2/.github/ISSUE_TEMPLATE/feature_request.md000066400000000000000000000011341444363446500222000ustar00rootroot00000000000000--- name: Feature request about: Suggest an idea for this project title: '' labels: enhancement assignees: '' --- **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 
**Describe the solution you'd like** A clear and concise description of what you want to happen. **Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context or screenshots about the feature request here. ormar-0.12.2/.github/dependabot.yml000066400000000000000000000005331444363446500171220ustar00rootroot00000000000000# Basic set up # https://help.github.com/en/github/administering-a-repository/configuration-options-for-dependency-updates#package-ecosystem version: 2 updates: - package-ecosystem: "pip" directory: "/" schedule: interval: "daily" - package-ecosystem: "github-actions" directory: "/" schedule: interval: daily ormar-0.12.2/.github/workflows/000077500000000000000000000000001444363446500163265ustar00rootroot00000000000000ormar-0.12.2/.github/workflows/auto-merge-dependabot.yml000066400000000000000000000032351444363446500232240ustar00rootroot00000000000000# Based on https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request name: Dependabot auto-approve and auto-merge on: pull_request_target permissions: pull-requests: write contents: write jobs: autoapprove: name: Auto Approve a PR by dependabot runs-on: ubuntu-latest steps: - name: Auto approve uses: hmarr/auto-approve-action@v3.2.1 if: github.actor == 'dependabot[bot]' with: github-token: ${{ secrets.GITHUB_TOKEN }} dependabot: runs-on: ubuntu-latest if: ${{ github.actor == 'dependabot[bot]' }} steps: - name: Dependabot metadata id: metadata uses: dependabot/fetch-metadata@v1.5.1 with: github-token: "${{ secrets.GITHUB_TOKEN }}" - name: Enable auto-merge for Dependabot PRs # Automatically merge semver-patch and semver-minor PRs # or black dependency upgrades if: "${{ steps.metadata.outputs.update-type == 'version-update:semver-minor' || 
steps.metadata.outputs.update-type == 'version-update:semver-patch' || steps.metadata.outputs.dependency-names == 'black' }}" # https://cli.github.com/manual/gh_pr_merge run: gh pr merge --auto --squash "$PR_URL" env: PR_URL: ${{github.event.pull_request.html_url}} GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} ormar-0.12.2/.github/workflows/deploy-docs.yml000066400000000000000000000011451444363446500212740ustar00rootroot00000000000000name: Build Documentation using MkDocs on: push: branches: [ master ] jobs: build: name: Build and Deploy Documentation runs-on: ubuntu-latest steps: - name: Checkout Master uses: actions/checkout@v3 - name: Set up Python 3.8 uses: actions/setup-python@v4 with: python-version: '3.8' - name: Install dependencies run: | python -m pip install poetry==1.4.1 poetry install --extras "all" env: POETRY_VIRTUALENVS_CREATE: false - name: Deploy run: | mkdocs gh-deploy --force ormar-0.12.2/.github/workflows/python-publish.yml000066400000000000000000000016251444363446500220420ustar00rootroot00000000000000# This workflows will upload a Python Package using Twine when a release is created # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries name: Upload Python Package on: release: types: [created] # Allows you to run this workflow manually from the Actions tab workflow_dispatch: jobs: deploy: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Set up Python uses: actions/setup-python@v4 with: python-version: '3.x' - name: Install Poetry uses: snok/install-poetry@v1.3 with: version: 1.4.1 virtualenvs-create: true virtualenvs-in-project: true - name: Build and publish run: | poetry build -vvv poetry publish -u ${{ secrets.PYPI_USERNAME }} -p ${{ secrets.PYPI_PASSWORD }} ormar-0.12.2/.github/workflows/test-package.yml000066400000000000000000000045211444363446500214230ustar00rootroot00000000000000# This workflow will install Python 
dependencies, run tests and lint with a single version of Python # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions name: build on: push: branches-ignore: - 'gh-pages' pull_request: branches: [ master ] jobs: tests: name: "Python ${{ matrix.python-version }}" runs-on: ubuntu-latest if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != 'collerek/ormar' strategy: matrix: python-version: [3.7, 3.8, 3.9, "3.10", 3.11] fail-fast: false services: mysql: image: mysql:5.7 env: MYSQL_USER: username MYSQL_PASSWORD: password MYSQL_ROOT_PASSWORD: password MYSQL_DATABASE: testsuite ports: - 3306:3306 options: --health-cmd="mysqladmin ping" --health-interval=10s --health-timeout=5s --health-retries=3 postgres: image: postgres:10.8 env: POSTGRES_USER: username POSTGRES_PASSWORD: password POSTGRES_DB: testsuite ports: - 5432:5432 options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 steps: - uses: actions/checkout@v3 - name: Set up Python uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install poetry==1.4.2 poetry install --extras "all" env: POETRY_VIRTUALENVS_CREATE: false - name: Run mysql env: DATABASE_URL: "mysql://username:password@127.0.0.1:3306/testsuite" run: bash scripts/test.sh - name: Run postgres env: DATABASE_URL: "postgresql://username:password@localhost:5432/testsuite" run: bash scripts/test.sh - name: Run sqlite env: DATABASE_URL: "sqlite:///testsuite" run: bash scripts/test.sh - run: mypy ormar tests benchmarks - name: Upload coverage uses: codecov/codecov-action@v3.1.4 - name: Test & publish code coverage uses: paambaati/codeclimate-action@v4.0.0 if: github.event.pull_request.head.repo.full_name == 'collerek/ormar' env: CC_TEST_REPORTER_ID: ${{ secrets.CC_COVERAGE_TOKEN }} 
ormar-0.12.2/.gitignore000066400000000000000000000002751444363446500147250ustar00rootroot00000000000000p38venv alembic alembic.ini build .idea .pytest_cache .mypy_cache *.coverage *.pyc *.log test.db .vscode/ dist /ormar.egg-info/ site profile.py *.db *.db-journal *coverage.xml .benchmarks/ ormar-0.12.2/.pre-commit-config.yaml000066400000000000000000000016411444363446500172140ustar00rootroot00000000000000repos: - repo: https://github.com/psf/black rev: 22.3.0 hooks: - id: black exclude: ^(docs_src/|examples/) - repo: https://github.com/pycqa/flake8 rev: 3.9.2 hooks: - id: flake8 exclude: ^(docs_src/|examples/|tests/) args: [ '--max-line-length=88' ] - repo: https://github.com/pre-commit/mirrors-mypy rev: v0.982 hooks: - id: mypy exclude: ^(docs_src/|examples/) args: [--no-strict-optional, --ignore-missing-imports] additional_dependencies: [ types-ujson>=0.1.1, types-PyMySQL>=1.0.2, types-ipaddress>=1.0.0, types-enum34>=1.1.0, types-cryptography>=3.3.5, types-orjson>=3.6.0, types-aiofiles>=0.1.9, types-pkg-resources>=0.1.3, types-requests>=2.25.9, types-toml>=0.10.0, pydantic>=1.8.2 ] ormar-0.12.2/LICENSE.md000066400000000000000000000020671444363446500143420ustar00rootroot00000000000000MIT License Copyright (c) 2020 Radosław Drążkiewicz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.ormar-0.12.2/Makefile000066400000000000000000000013721444363446500143740ustar00rootroot00000000000000test_all: test_pg test_mysql test_sqlite test_pg: export DATABASE_URL=postgresql://username:password@localhost:5432/testsuite test_pg: docker-compose -f scripts/docker-compose.yml up -d postgres bash scripts/test.sh -svv docker-compose -f scripts/docker-compose.yml stop postgres test_mysql: export DATABASE_URL=mysql://username:password@127.0.0.1:3306/testsuite test_mysql: docker-compose -f "scripts/docker-compose.yml" up -d mysql bash scripts/test.sh -svv docker-compose -f scripts/docker-compose.yml stop mysql test_sqlite: bash scripts/test.sh -svv test: pytest coverage: pytest --cov=ormar --cov=tests --cov-fail-under=100 --cov-report=term-missing black: black ormar tests lint: black ormar tests flake8 ormar mypy: mypy ormar tests ormar-0.12.2/README.md000066400000000000000000000612311444363446500142130ustar00rootroot00000000000000# ormar

Pypi version Pypi version Build Status Coverage CodeFactor

### Overview The `ormar` package is an async mini ORM for Python, with support for **Postgres, MySQL**, and **SQLite**. The main benefits of using `ormar` are: * getting an **async ORM that can be used with async frameworks** (fastapi, starlette etc.) * getting just **one model to maintain** - you don't have to maintain pydantic and other orm models (sqlalchemy, peewee, gino etc.) The goal was to create a simple ORM that can be **used directly (as request and response models) with [`fastapi`][fastapi]** that bases it's data validation on pydantic. Ormar - apart from the obvious "ORM" in name - gets its name from _ormar_ in Swedish which means _snakes_, and _ormar_ in Croatian which means _cabinet_. And what's a better name for python ORM than snakes cabinet :) **If you like ormar remember to star the repository in [github](https://github.com/collerek/ormar)!** The bigger community we build, the easier it will be to catch bugs and attract contributors ;) ### Documentation Check out the [documentation][documentation] for details. **Note that for brevity most of the documentation snippets omit the creation of the database and scheduling the execution of functions for asynchronous run.** If you want more real life examples than in the documentation you can see the [tests][tests] folder, since they actually have to create and connect to a database in most of the tests. Yet remember that those are - well - tests and not all solutions are suitable to be used in real life applications. ### Part of the `fastapi` ecosystem As part of the fastapi ecosystem `ormar` is supported in libraries that somehow work with databases. As of now `ormar` is supported by: * [`fastapi-users`](https://github.com/frankie567/fastapi-users) * [`fastapi-crudrouter`](https://github.com/awtkns/fastapi-crudrouter) * [`fastapi-pagination`](https://github.com/uriyyo/fastapi-pagination) If you maintain or use a different library and would like it to support `ormar` let us know how we can help. 
### Dependencies Ormar is built with: * [`sqlalchemy core`][sqlalchemy-core] for query building. * [`databases`][databases] for cross-database async support. * [`pydantic`][pydantic] for data validation. * `typing_extensions` for python 3.6 - 3.7 ### License `ormar` is built as open-sorce software and will remain completely free (MIT license). As I write open-source code to solve everyday problems in my work or to promote and build strong python community you can say thank you and buy me a coffee or sponsor me with a monthly amount to help ensure my work remains free and maintained.
Sponsor - Github Sponsors
### Migrating from `sqlalchemy` and existing databases If you currently use `sqlalchemy` and would like to switch to `ormar` check out the auto-translation tool that can help you with translating existing sqlalchemy orm models so you do not have to do it manually. **Beta** versions available at github: [`sqlalchemy-to-ormar`](https://github.com/collerek/sqlalchemy-to-ormar) or simply `pip install sqlalchemy-to-ormar` `sqlalchemy-to-ormar` can be used in pair with `sqlacodegen` to auto-map/ generate `ormar` models from existing database, even if you don't use `sqlalchemy` for your project. ### Migrations & Database creation Because ormar is built on SQLAlchemy core, you can use [`alembic`][alembic] to provide database migrations (and you really should for production code). For tests and basic applications the `sqlalchemy` is more than enough: ```python # note this is just a partial snippet full working example below # 1. Imports import sqlalchemy import databases # 2. Initialization DATABASE_URL = "sqlite:///db.sqlite" database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() # Define models here # 3. Database creation and tables creation engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) ``` For a sample configuration of alembic and more information regarding migrations and database creation visit [migrations][migrations] documentation section. ### Package versions **ormar is still under development:** We recommend pinning any dependencies (with i.e. `ormar~=0.9.1`) `ormar` also follows the release numeration that breaking changes bump the major number, while other changes and fixes bump minor number, so with the latter you should be safe to update, yet always read the [releases][releases] docs before. `example: (0.5.2 -> 0.6.0 - breaking, 0.5.2 -> 0.5.3 - non breaking)`. 
### Asynchronous Python Note that `ormar` is an asynchronous ORM, which means that you have to `await` the calls to the methods, that are scheduled for execution in an event loop. Python has a builtin module [`asyncio`][asyncio] that allows you to do just that. Note that most "normal" python interpreters do not allow execution of `await` outside of a function (because you actually schedule this function for delayed execution and don't get the result immediately). In a modern web framework (like `fastapi`), the framework will handle this for you, but if you plan to do this on your own you need to perform this manually like described in the quick start below. ### Quick Start Note that you can find the same script in examples folder on github. ```python from typing import Optional import databases import pydantic import ormar import sqlalchemy DATABASE_URL = "sqlite:///db.sqlite" database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() # note that this step is optional -> all ormar cares is a internal # class with name Meta and proper parameters, but this way you do not # have to repeat the same parameters if you use only one database class BaseMeta(ormar.ModelMeta): metadata = metadata database = database # Note that all type hints are optional # below is a perfectly valid model declaration # class Author(ormar.Model): # class Meta(BaseMeta): # tablename = "authors" # # id = ormar.Integer(primary_key=True) # <= notice no field types # name = ormar.String(max_length=100) class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Book(ormar.Model): class Meta(BaseMeta): tablename = "books" id: int = ormar.Integer(primary_key=True) author: Optional[Author] = ormar.ForeignKey(Author) title: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) # create the database # note that in production you should use migrations # note that 
this is not required if you connect to existing database engine = sqlalchemy.create_engine(DATABASE_URL) # just to be sure we clear the db before metadata.drop_all(engine) metadata.create_all(engine) # all functions below are divided into functionality categories # note how all functions are defined with async - hence can use await AND needs to # be awaited on their own async def create(): # Create some records to work with through QuerySet.create method. # Note that queryset is exposed on each Model's class as objects tolkien = await Author.objects.create(name="J.R.R. Tolkien") await Book.objects.create(author=tolkien, title="The Hobbit", year=1937) await Book.objects.create(author=tolkien, title="The Lord of the Rings", year=1955) await Book.objects.create(author=tolkien, title="The Silmarillion", year=1977) # alternative creation of object divided into 2 steps sapkowski = Author(name="Andrzej Sapkowski") # do some stuff await sapkowski.save() # or save() after initialization await Book(author=sapkowski, title="The Witcher", year=1990).save() await Book(author=sapkowski, title="The Tower of Fools", year=2002).save() # to read more about inserting data into the database # visit: https://collerek.github.io/ormar/queries/create/ async def read(): # Fetch an instance, without loading a foreign key relationship on it. 
# Django style book = await Book.objects.get(title="The Hobbit") # or python style book = await Book.objects.get(Book.title == "The Hobbit") book2 = await Book.objects.first() # first() fetch the instance with lower primary key value assert book == book2 # you can access all fields on loaded model assert book.title == "The Hobbit" assert book.year == 1937 # when no condition is passed to get() # it behaves as last() based on primary key column book3 = await Book.objects.get() assert book3.title == "The Tower of Fools" # When you have a relation, ormar always defines a related model for you # even when all you loaded is a foreign key value like in this example assert isinstance(book.author, Author) # primary key is populated from foreign key stored in books table assert book.author.pk == 1 # since the related model was not loaded all other fields are None assert book.author.name is None # Load the relationship from the database when you already have the related model # alternatively see joins section below await book.author.load() assert book.author.name == "J.R.R. Tolkien" # get all rows for given model authors = await Author.objects.all() assert len(authors) == 2 # to read more about reading data from the database # visit: https://collerek.github.io/ormar/queries/read/ async def update(): # read existing row from db tolkien = await Author.objects.get(name="J.R.R. Tolkien") assert tolkien.name == "J.R.R. Tolkien" tolkien_id = tolkien.id # change the selected property tolkien.name = "John Ronald Reuel Tolkien" # call update on a model instance await tolkien.update() # confirm that object was updated tolkien = await Author.objects.get(name="John Ronald Reuel Tolkien") assert tolkien.name == "John Ronald Reuel Tolkien" assert tolkien.id == tolkien_id # alternatively update data without loading await Author.objects.filter(name__contains="Tolkien").update(name="J.R.R. 
Tolkien") # to read more about updating data in the database # visit: https://collerek.github.io/ormar/queries/update/ async def delete(): silmarillion = await Book.objects.get(year=1977) # call delete() on instance await silmarillion.delete() # alternatively delete without loading await Book.objects.delete(title="The Tower of Fools") # note that when there is no record ormar raises NoMatch exception try: await Book.objects.get(year=1977) except ormar.NoMatch: print("No book from 1977!") # to read more about deleting data from the database # visit: https://collerek.github.io/ormar/queries/delete/ # note that despite the fact that record no longer exists in database # the object above is still accessible and you can use it (and i.e. save()) again. tolkien = silmarillion.author await Book.objects.create(author=tolkien, title="The Silmarillion", year=1977) async def joins(): # Tho join two models use select_related # Django style book = await Book.objects.select_related("author").get(title="The Hobbit") # Python style book = await Book.objects.select_related(Book.author).get( Book.title == "The Hobbit" ) # now the author is already prefetched assert book.author.name == "J.R.R. Tolkien" # By default you also get a second side of the relation # constructed as lowercase source model name +'s' (books in this case) # you can also provide custom name with parameter related_name # Django style author = await Author.objects.select_related("books").all(name="J.R.R. Tolkien") # Python style author = await Author.objects.select_related(Author.books).all( Author.name == "J.R.R. Tolkien" ) assert len(author[0].books) == 3 # for reverse and many to many relations you can also prefetch_related # that executes a separate query for each of related models # Django style author = await Author.objects.prefetch_related("books").get(name="J.R.R. Tolkien") # Python style author = await Author.objects.prefetch_related(Author.books).get( Author.name == "J.R.R. 
Tolkien" ) assert len(author.books) == 3 # to read more about relations # visit: https://collerek.github.io/ormar/relations/ # to read more about joins and subqueries # visit: https://collerek.github.io/ormar/queries/joins-and-subqueries/ async def filter_and_sort(): # to filter the query you can use filter() or pass key-value pars to # get(), all() etc. # to use special methods or access related model fields use double # underscore like to filter by the name of the author use author__name # Django style books = await Book.objects.all(author__name="J.R.R. Tolkien") # python style books = await Book.objects.all(Book.author.name == "J.R.R. Tolkien") assert len(books) == 3 # filter can accept special methods also separated with double underscore # to issue sql query ` where authors.name like "%tolkien%"` that is not # case sensitive (hence small t in Tolkien) # Django style books = await Book.objects.filter(author__name__icontains="tolkien").all() # python style books = await Book.objects.filter(Book.author.name.icontains("tolkien")).all() assert len(books) == 3 # to sort use order_by() function of queryset # to sort decreasing use hyphen before the field name # same as with filter you can use double underscores to access related fields # Django style books = ( await Book.objects.filter(author__name__icontains="tolkien") .order_by("-year") .all() ) # python style books = ( await Book.objects.filter(Book.author.name.icontains("tolkien")) .order_by(Book.year.desc()) .all() ) assert len(books) == 3 assert books[0].title == "The Silmarillion" assert books[2].title == "The Hobbit" # to read more about filtering and ordering # visit: https://collerek.github.io/ormar/queries/filter-and-sort/ async def subset_of_columns(): # to exclude some columns from loading when querying the database # you can use fileds() method hobbit = await Book.objects.fields(["title"]).get(title="The Hobbit") # note that fields not included in fields are empty (set to None) assert hobbit.year is 
None assert hobbit.author is None # selected field is there assert hobbit.title == "The Hobbit" # alternatively you can provide columns you want to exclude hobbit = await Book.objects.exclude_fields(["year"]).get(title="The Hobbit") # year is still not set assert hobbit.year is None # but author is back assert hobbit.author is not None # also you cannot exclude primary key column - it's always there # even if you EXPLICITLY exclude it it will be there # note that each model have a shortcut for primary_key column which is pk # and you can filter/access/set the values by this alias like below assert hobbit.pk is not None # note that you cannot exclude fields that are not nullable # (required) in model definition try: await Book.objects.exclude_fields(["title"]).get(title="The Hobbit") except pydantic.ValidationError: print("Cannot exclude non nullable field title") # to read more about selecting subset of columns # visit: https://collerek.github.io/ormar/queries/select-columns/ async def pagination(): # to limit number of returned rows use limit() books = await Book.objects.limit(1).all() assert len(books) == 1 assert books[0].title == "The Hobbit" # to offset number of returned rows use offset() books = await Book.objects.limit(1).offset(1).all() assert len(books) == 1 assert books[0].title == "The Lord of the Rings" # alternatively use paginate that combines both books = await Book.objects.paginate(page=2, page_size=2).all() assert len(books) == 2 # note that we removed one book of Sapkowski in delete() # and recreated The Silmarillion - by default when no order_by is set # ordering sorts by primary_key column assert books[0].title == "The Witcher" assert books[1].title == "The Silmarillion" # to read more about pagination and number of rows # visit: https://collerek.github.io/ormar/queries/pagination-and-rows-number/ async def aggregations(): # count: assert 2 == await Author.objects.count() # exists assert await Book.objects.filter(title="The Hobbit").exists() # 
maximum assert 1990 == await Book.objects.max(columns=["year"]) # minimum assert 1937 == await Book.objects.min(columns=["year"]) # average assert 1964.75 == await Book.objects.avg(columns=["year"]) # sum assert 7859 == await Book.objects.sum(columns=["year"]) # to read more about aggregated functions # visit: https://collerek.github.io/ormar/queries/aggregations/ async def raw_data(): # extract raw data in a form of dicts or tuples # note that this skips the validation(!) as models are # not created from parsed data # get list of objects as dicts assert await Book.objects.values() == [ {"id": 1, "author": 1, "title": "The Hobbit", "year": 1937}, {"id": 2, "author": 1, "title": "The Lord of the Rings", "year": 1955}, {"id": 4, "author": 2, "title": "The Witcher", "year": 1990}, {"id": 5, "author": 1, "title": "The Silmarillion", "year": 1977}, ] # get list of objects as tuples assert await Book.objects.values_list() == [ (1, 1, "The Hobbit", 1937), (2, 1, "The Lord of the Rings", 1955), (4, 2, "The Witcher", 1990), (5, 1, "The Silmarillion", 1977), ] # filter data - note how you always get a list assert await Book.objects.filter(title="The Hobbit").values() == [ {"id": 1, "author": 1, "title": "The Hobbit", "year": 1937} ] # select only wanted fields assert await Book.objects.filter(title="The Hobbit").values(["id", "title"]) == [ {"id": 1, "title": "The Hobbit"} ] # if you select only one column you could flatten it with values_list assert await Book.objects.values_list("title", flatten=True) == [ "The Hobbit", "The Lord of the Rings", "The Witcher", "The Silmarillion", ] # to read more about extracting raw values # visit: https://collerek.github.io/ormar/queries/aggregations/ async def with_connect(function): # note that for any other backend than sqlite you actually need to # connect to the database to perform db operations async with database: await function() # note that if you use framework like `fastapi` you shouldn't connect # in your endpoints but have a 
global connection pool # check https://collerek.github.io/ormar/fastapi/ and section with db connection # gather and execute all functions # note - normally import should be at the beginning of the file import asyncio # note that normally you use gather() function to run several functions # concurrently but we actually modify the data and we rely on the order of functions for func in [ create, read, update, delete, joins, filter_and_sort, subset_of_columns, pagination, aggregations, raw_data, ]: print(f"Executing: {func.__name__}") asyncio.run(with_connect(func)) # drop the database tables metadata.drop_all(engine) ``` ## Ormar Specification ### QuerySet methods * `create(**kwargs): -> Model` * `get(*args, **kwargs): -> Model` * `get_or_none(*args, **kwargs): -> Optional[Model]` * `get_or_create(_defaults: Optional[Dict[str, Any]] = None, *args, **kwargs) -> Tuple[Model, bool]` * `first(*args, **kwargs): -> Model` * `update(each: bool = False, **kwargs) -> int` * `update_or_create(**kwargs) -> Model` * `bulk_create(objects: List[Model]) -> None` * `bulk_update(objects: List[Model], columns: List[str] = None) -> None` * `delete(*args, each: bool = False, **kwargs) -> int` * `all(*args, **kwargs) -> List[Optional[Model]]` * `iterate(*args, **kwargs) -> AsyncGenerator[Model]` * `filter(*args, **kwargs) -> QuerySet` * `exclude(*args, **kwargs) -> QuerySet` * `select_related(related: Union[List, str]) -> QuerySet` * `prefetch_related(related: Union[List, str]) -> QuerySet` * `limit(limit_count: int) -> QuerySet` * `offset(offset: int) -> QuerySet` * `count(distinct: bool = True) -> int` * `exists() -> bool` * `max(columns: List[str]) -> Any` * `min(columns: List[str]) -> Any` * `avg(columns: List[str]) -> Any` * `sum(columns: List[str]) -> Any` * `fields(columns: Union[List, str, set, dict]) -> QuerySet` * `exclude_fields(columns: Union[List, str, set, dict]) -> QuerySet` * `order_by(columns:Union[List, str]) -> QuerySet` * `values(fields: Union[List, str, Set, Dict])` 
* `values_list(fields: Union[List, str, Set, Dict])` #### Relation types * One to many - with `ForeignKey(to: Model)` * Many to many - with `ManyToMany(to: Model, Optional[through]: Model)` #### Model fields types Available Model Fields (with required args - optional ones in docs): * `String(max_length)` * `Text()` * `Boolean()` * `Integer()` * `Float()` * `Date()` * `Time()` * `DateTime()` * `JSON()` * `BigInteger()` * `SmallInteger()` * `Decimal(scale, precision)` * `UUID()` * `LargeBinary(max_length)` * `Enum(enum_class)` * `Enum` like Field - by passing `choices` to any other Field type * `EncryptedString` - by passing `encrypt_secret` and `encrypt_backend` * `ForeignKey(to)` * `ManyToMany(to)` ### Available fields options The following keyword arguments are supported on all field types. * `primary_key: bool` * `nullable: bool` * `default: Any` * `server_default: Any` * `index: bool` * `unique: bool` * `choices: typing.Sequence` * `name: str` * `pydantic_only: bool` All fields are required unless one of the following is set: * `nullable` - Creates a nullable column. Sets the default to `False`. Read the fields common parameters for details. * `sql_nullable` - Used to set different setting for pydantic and the database. Sets the default to `nullable` value. Read the fields common parameters for details. * `default` - Set a default value for the field. **Not available for relation fields** * `server_default` - Set a default value for the field on server side (like sqlalchemy's `func.now()`). **Not available for relation fields** * `primary key` with `autoincrement` - When a column is set to primary key and autoincrement is set on this column. Autoincrement is set by default on int primary keys. * `pydantic_only` - Field is available only as normal pydantic field, not stored in the database. ### Available signals Signals allow to trigger your function for a given event on a given Model. 
* `pre_save` * `post_save` * `pre_update` * `post_update` * `pre_delete` * `post_delete` * `pre_relation_add` * `post_relation_add` * `pre_relation_remove` * `post_relation_remove` * `post_bulk_update` [sqlalchemy-core]: https://docs.sqlalchemy.org/en/latest/core/ [databases]: https://github.com/encode/databases [pydantic]: https://pydantic-docs.helpmanual.io/ [encode/orm]: https://github.com/encode/orm/ [alembic]: https://alembic.sqlalchemy.org/en/latest/ [fastapi]: https://fastapi.tiangolo.com/ [documentation]: https://collerek.github.io/ormar/ [migrations]: https://collerek.github.io/ormar/models/migrations/ [asyncio]: https://docs.python.org/3/library/asyncio.html [releases]: https://collerek.github.io/ormar/releases/ [tests]: https://github.com/collerek/ormar/tree/master/tests ormar-0.12.2/benchmarks/000077500000000000000000000000001444363446500150465ustar00rootroot00000000000000ormar-0.12.2/benchmarks/__init__.py000066400000000000000000000000001444363446500171450ustar00rootroot00000000000000ormar-0.12.2/benchmarks/conftest.py000066400000000000000000000056331444363446500172540ustar00rootroot00000000000000import asyncio import random import string import time import databases import nest_asyncio import pytest import pytest_asyncio import sqlalchemy import ormar from tests.settings import DATABASE_URL nest_asyncio.apply() database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() pytestmark = pytest.mark.asyncio class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) score: float = ormar.Integer(minimum=0, maximum=100) class AuthorWithManyFields(Author): year_born: int = ormar.Integer() year_died: int = ormar.Integer(nullable=True) birthplace: str = ormar.String(max_length=255) class Publisher(ormar.Model): class Meta(BaseMeta): tablename = "publishers" id: int = 
ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) prestige: int = ormar.Integer(minimum=0, maximum=10) class Book(ormar.Model): class Meta(BaseMeta): tablename = "books" id: int = ormar.Integer(primary_key=True) author: Author = ormar.ForeignKey(Author, index=True) publisher: Publisher = ormar.ForeignKey(Publisher, index=True) title: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) @pytest.fixture(autouse=True, scope="function") # TODO: fix this to be module def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest_asyncio.fixture async def author(): author = await Author(name="Author", score=10).save() return author @pytest_asyncio.fixture async def publisher(): publisher = await Publisher(name="Publisher", prestige=random.randint(0, 10)).save() return publisher @pytest_asyncio.fixture async def authors_in_db(num_models: int): authors = [ Author( name="".join(random.sample(string.ascii_letters, 5)), score=random.random() * 100, ) for i in range(0, num_models) ] await Author.objects.bulk_create(authors) return await Author.objects.all() @pytest_asyncio.fixture @pytest.mark.benchmark( min_rounds=1, timer=time.process_time, disable_gc=True, warmup=False ) async def aio_benchmark(benchmark, event_loop: asyncio.BaseEventLoop): def _fixture_wrapper(func): def _func_wrapper(*args, **kwargs): if asyncio.iscoroutinefunction(func): @benchmark def benchmarked_func(): a = event_loop.run_until_complete(func(*args, **kwargs)) return a return benchmarked_func else: return benchmark(func, *args, **kwargs) return _func_wrapper return _fixture_wrapper ormar-0.12.2/benchmarks/test_benchmark_aggregate.py000066400000000000000000000027341444363446500224250ustar00rootroot00000000000000from typing import List import pytest from benchmarks.conftest import Author pytestmark = pytest.mark.asyncio 
@pytest.mark.parametrize("num_models", [250, 500, 1000]) async def test_count(aio_benchmark, num_models: int, authors_in_db: List[Author]): @aio_benchmark async def count(): return await Author.objects.count() c = count() assert c == len(authors_in_db) @pytest.mark.parametrize("num_models", [250, 500, 1000]) async def test_avg(aio_benchmark, num_models: int, authors_in_db: List[Author]): @aio_benchmark async def avg(): return await Author.objects.avg("score") average = avg() assert 0 <= average <= 100 @pytest.mark.parametrize("num_models", [250, 500, 1000]) async def test_sum(aio_benchmark, num_models: int, authors_in_db: List[Author]): @aio_benchmark async def sum_(): return await Author.objects.sum("score") s = sum_() assert 0 <= s <= 100 * num_models @pytest.mark.parametrize("num_models", [250, 500, 1000]) async def test_min(aio_benchmark, num_models: int, authors_in_db: List[Author]): @aio_benchmark async def min_(): return await Author.objects.min("score") m = min_() assert 0 <= m <= 100 @pytest.mark.parametrize("num_models", [250, 500, 1000]) async def test_max(aio_benchmark, num_models: int, authors_in_db: List[Author]): @aio_benchmark async def max_(): return await Author.objects.max("score") m = max_() assert 0 <= m <= 100 ormar-0.12.2/benchmarks/test_benchmark_bulk_create.py000066400000000000000000000012361444363446500227530ustar00rootroot00000000000000import random import string import pytest from benchmarks.conftest import Author pytestmark = pytest.mark.asyncio @pytest.mark.parametrize("num_models", [10, 20, 40]) async def test_making_and_inserting_models_in_bulk(aio_benchmark, num_models: int): @aio_benchmark async def make_and_insert(num_models: int): authors = [ Author( name="".join(random.sample(string.ascii_letters, 5)), score=random.random() * 100, ) for i in range(0, num_models) ] assert len(authors) == num_models await Author.objects.bulk_create(authors) make_and_insert(num_models) 
ormar-0.12.2/benchmarks/test_benchmark_bulk_update.py000066400000000000000000000013011444363446500227630ustar00rootroot00000000000000import random import string from typing import List import pytest from benchmarks.conftest import Author pytestmark = pytest.mark.asyncio @pytest.mark.parametrize("num_models", [10, 20, 40]) async def test_updating_models_in_bulk( aio_benchmark, num_models: int, authors_in_db: List[Author] ): starting_first_name = authors_in_db[0].name @aio_benchmark async def update(authors: List[Author]): await Author.objects.bulk_update(authors) for author in authors_in_db: author.name = "".join(random.sample(string.ascii_letters, 5)) update(authors_in_db) author = await Author.objects.get(id=authors_in_db[0].id) assert author.name != starting_first_name ormar-0.12.2/benchmarks/test_benchmark_create.py000066400000000000000000000054141444363446500217400ustar00rootroot00000000000000import random import string import pytest from benchmarks.conftest import Author, Book, Publisher pytestmark = pytest.mark.asyncio @pytest.mark.parametrize("num_models", [10, 20, 40]) async def test_creating_models_individually(aio_benchmark, num_models: int): @aio_benchmark async def create(num_models: int): authors = [] for idx in range(0, num_models): author = await Author.objects.create( name="".join(random.sample(string.ascii_letters, 5)), score=random.random() * 100, ) authors.append(author) return authors authors = create(num_models) for author in authors: assert author.id is not None @pytest.mark.parametrize("num_models", [10, 20, 40]) async def test_creating_individually_with_related_models( aio_benchmark, num_models: int, author: Author, publisher: Publisher ): @aio_benchmark async def create_with_related_models( author: Author, publisher: Publisher, num_models: int ): books = [] for idx in range(0, num_models): book = await Book.objects.create( author=author, publisher=publisher, title="".join(random.sample(string.ascii_letters, 5)), year=random.randint(0, 
2000), ) books.append(book) return books books = create_with_related_models( author=author, publisher=publisher, num_models=num_models ) for book in books: assert book.id is not None @pytest.mark.parametrize("num_models", [10, 20, 40]) async def test_get_or_create_when_create(aio_benchmark, num_models: int): @aio_benchmark async def get_or_create(num_models: int): authors = [] for idx in range(0, num_models): author, created = await Author.objects.get_or_create( name="".join(random.sample(string.ascii_letters, 5)), score=random.random() * 100, ) assert created authors.append(author) return authors authors = get_or_create(num_models) for author in authors: assert author.id is not None @pytest.mark.parametrize("num_models", [10, 20, 40]) async def test_update_or_create_when_create(aio_benchmark, num_models: int): @aio_benchmark async def update_or_create(num_models: int): authors = [] for idx in range(0, num_models): author = await Author.objects.update_or_create( name="".join(random.sample(string.ascii_letters, 5)), score=random.random() * 100, ) authors.append(author) return authors authors = update_or_create(num_models) for author in authors: assert author.id is not None ormar-0.12.2/benchmarks/test_benchmark_delete.py000066400000000000000000000015431444363446500217360ustar00rootroot00000000000000from typing import List import pytest from benchmarks.conftest import Author pytestmark = pytest.mark.asyncio @pytest.mark.parametrize("num_models", [250, 500, 1000]) async def test_deleting_all( aio_benchmark, num_models: int, authors_in_db: List[Author] ): @aio_benchmark async def delete_all(): await Author.objects.delete(each=True) delete_all() num = await Author.objects.count() assert num == 0 @pytest.mark.parametrize("num_models", [10, 20, 40]) async def test_deleting_individually( aio_benchmark, num_models: int, authors_in_db: List[Author] ): @aio_benchmark async def delete_one_by_one(authors: List[Author]): for author in authors: await 
Author.objects.filter(id=author.id).delete() delete_one_by_one(authors_in_db) num = await Author.objects.count() assert num == 0 ormar-0.12.2/benchmarks/test_benchmark_get.py000066400000000000000000000061241444363446500212530ustar00rootroot00000000000000import random import string from typing import List import pytest import pytest_asyncio from benchmarks.conftest import Author, Book, Publisher pytestmark = pytest.mark.asyncio @pytest_asyncio.fixture() async def books(author: Author, publisher: Publisher, num_models: int): books = [ Book( author=author, publisher=publisher, title="".join(random.sample(string.ascii_letters, 5)), year=random.randint(0, 2000), ) for _ in range(0, num_models) ] await Book.objects.bulk_create(books) return books @pytest.mark.parametrize("num_models", [250, 500, 1000]) async def test_get_all(aio_benchmark, num_models: int, authors_in_db: List[Author]): @aio_benchmark async def get_all(authors: List[Author]): return await Author.objects.all() authors = get_all(authors_in_db) for idx, author in enumerate(authors_in_db): assert authors[idx].id == author.id @pytest.mark.parametrize("num_models", [10, 20, 40]) async def test_get_all_with_related_models( aio_benchmark, num_models: int, author: Author, books: List[Book] ): @aio_benchmark async def get_with_related(author: Author): return await Author.objects.select_related("books").all(id=author.id) authors = get_with_related(author) assert len(authors[0].books) == num_models @pytest.mark.parametrize("num_models", [250, 500, 1000]) async def test_get_one(aio_benchmark, num_models: int, authors_in_db: List[Author]): @aio_benchmark async def get_one(authors: List[Author]): return await Author.objects.get(id=authors[0].id) author = get_one(authors_in_db) assert author == authors_in_db[0] @pytest.mark.parametrize("num_models", [250, 500, 1000]) async def test_get_or_none(aio_benchmark, num_models: int, authors_in_db: List[Author]): @aio_benchmark async def get_or_none(authors: List[Author]): return 
await Author.objects.get_or_none(id=authors[0].id) author = get_or_none(authors_in_db) assert author == authors_in_db[0] @pytest.mark.parametrize("num_models", [250, 500, 1000]) async def test_get_or_create_when_get( aio_benchmark, num_models: int, authors_in_db: List[Author] ): @aio_benchmark async def get_or_create(authors: List[Author]): author, created = await Author.objects.get_or_create(id=authors[0].id) assert not created return author author = get_or_create(authors_in_db) assert author == authors_in_db[0] @pytest.mark.parametrize("num_models", [250, 500, 1000]) async def test_first(aio_benchmark, num_models: int, authors_in_db: List[Author]): @aio_benchmark async def first(): return await Author.objects.first() author = first() assert author == authors_in_db[0] @pytest.mark.parametrize("num_models", [250, 500, 1000]) async def test_exists(aio_benchmark, num_models: int, authors_in_db: List[Author]): @aio_benchmark async def check_exists(authors: List[Author]): return await Author.objects.filter(id=authors[0].id).exists() exists = check_exists(authors_in_db) assert exists ormar-0.12.2/benchmarks/test_benchmark_init.py000066400000000000000000000026751444363446500214460ustar00rootroot00000000000000import random import string import pytest from benchmarks.conftest import Author, Book, Publisher pytestmark = pytest.mark.asyncio @pytest.mark.parametrize("num_models", [250, 500, 1000]) async def test_initializing_models(aio_benchmark, num_models: int): @aio_benchmark async def initialize_models(num_models: int): authors = [ Author( name="".join(random.sample(string.ascii_letters, 5)), score=random.random() * 100, ) for i in range(0, num_models) ] assert len(authors) == num_models await initialize_models(num_models) @pytest.mark.parametrize("num_models", [10, 20, 40]) async def test_initializing_models_with_related_models(aio_benchmark, num_models: int): @aio_benchmark async def initialize_models_with_related_models( author: Author, publisher: Publisher, 
num_models: int ): _ = [ Book( author=author, publisher=publisher, title="".join(random.sample(string.ascii_letters, 5)), year=random.randint(0, 2000), ) for i in range(0, num_models) ] author = await Author(name="Author", score=10).save() publisher = await Publisher(name="Publisher", prestige=random.randint(0, 10)).save() _ = initialize_models_with_related_models( author=author, publisher=publisher, num_models=num_models ) ormar-0.12.2/benchmarks/test_benchmark_iterate.py000066400000000000000000000011341444363446500221250ustar00rootroot00000000000000from typing import List import pytest from benchmarks.conftest import Author pytestmark = pytest.mark.asyncio @pytest.mark.parametrize("num_models", [250, 500, 1000]) async def test_iterate(aio_benchmark, num_models: int, authors_in_db: List[Author]): @aio_benchmark async def iterate_over_all(authors: List[Author]): authors = [] async for author in Author.objects.iterate(): authors.append(author) return authors authors = iterate_over_all(authors_in_db) for idx, author in enumerate(authors_in_db): assert authors[idx].id == author.id ormar-0.12.2/benchmarks/test_benchmark_save.py000066400000000000000000000033271444363446500214340ustar00rootroot00000000000000import random import string import pytest from benchmarks.conftest import Author, Book, Publisher pytestmark = pytest.mark.asyncio @pytest.mark.parametrize("num_models", [10, 20, 40]) async def test_saving_models_individually(aio_benchmark, num_models: int): @aio_benchmark async def make_and_insert(num_models: int): authors = [ Author( name="".join(random.sample(string.ascii_letters, 5)), score=random.random() * 100, ) for i in range(0, num_models) ] assert len(authors) == num_models ids = [] for author in authors: a = await author.save() ids.append(a) return ids ids = make_and_insert(num_models) for id in ids: assert id is not None @pytest.mark.parametrize("num_models", [10, 20, 40]) async def test_saving_models_individually_with_related_models( aio_benchmark, 
num_models: int, author: Author, publisher: Publisher ): @aio_benchmark async def making_and_inserting_related_models_one_by_one( author: Author, publisher: Publisher, num_models: int ): books = [ Book( author=author, publisher=publisher, title="".join(random.sample(string.ascii_letters, 5)), year=random.randint(0, 2000), ) for i in range(0, num_models) ] ids = [] for book in books: await book.save() ids.append(book.id) return ids ids = making_and_inserting_related_models_one_by_one( author=author, publisher=publisher, num_models=num_models ) for id in ids: assert id is not None ormar-0.12.2/benchmarks/test_benchmark_update.py000066400000000000000000000013031444363446500217500ustar00rootroot00000000000000import random import string from typing import List import pytest from benchmarks.conftest import Author pytestmark = pytest.mark.asyncio @pytest.mark.parametrize("num_models", [10, 20, 40]) async def test_updating_models_individually( aio_benchmark, num_models: int, authors_in_db: List[Author] ): starting_first_name = authors_in_db[0].name @aio_benchmark async def update(authors: List[Author]): for author in authors: _ = await author.update( name="".join(random.sample(string.ascii_letters, 5)) ) update(authors_in_db) author = await Author.objects.get(id=authors_in_db[0].id) assert author.name != starting_first_name ormar-0.12.2/benchmarks/test_benchmark_values.py000066400000000000000000000016701444363446500217740ustar00rootroot00000000000000from typing import List import pytest from benchmarks.conftest import Author pytestmark = pytest.mark.asyncio @pytest.mark.parametrize("num_models", [250, 500, 1000]) async def test_values(aio_benchmark, num_models: int, authors_in_db: List[Author]): @aio_benchmark async def get_all_values(authors: List[Author]): return await Author.objects.values() authors_list = get_all_values(authors_in_db) for idx, author in enumerate(authors_in_db): assert authors_list[idx]["id"] == author.id @pytest.mark.parametrize("num_models", [250, 
500, 1000]) async def test_values_list(aio_benchmark, num_models: int, authors_in_db: List[Author]): @aio_benchmark async def get_all_values_list(authors: List[Author]): return await Author.objects.values_list() authors_list = get_all_values_list(authors_in_db) for idx, author in enumerate(authors_in_db): assert authors_list[idx][0] == author.id ormar-0.12.2/docs/000077500000000000000000000000001444363446500136615ustar00rootroot00000000000000ormar-0.12.2/docs/contributing.md000066400000000000000000000037151444363446500167200ustar00rootroot00000000000000All contributions to *ormar* are welcomed! ## Issues To make it as simple as possible for us to help you, please include the following: * OS * python version * ormar version * database backend (mysql, sqlite or postgresql) Please try to always include the above unless you're unable to install *ormar* or **know** it's not relevant to your question or feature request. ## Pull Requests It should be quite straight forward to get started and create a Pull Request. !!! note Unless your change is trivial (typo, docs tweak etc.), please create an issue to discuss the change before creating a pull request. To make contributing as easy and fast as possible, you'll want to run tests and linting locally. You'll need to have **python 3.6.2**, **3.7**, or **3.8**, **poetry**, and **git** installed. ```bash # 1. clone your fork and cd into the repo directory git clone git@github.com:/ormar.git cd ormar # 2. Install ormar, dependencies and test dependencies poetry install -E dev # 3. Checkout a new branch and make your changes git checkout -b my-new-feature-branch # make your changes... # 4. Formatting and linting # ormar uses black for formatting, flake8 for linting and mypy for type hints check # run all of the following as all those calls will be run on travis after every push black ormar tests flake8 ormar mypy ormar tests # 5. 
Run tests # on localhost all tests are run against sglite backend # rest of the backends will be checked after push pytest -svv --cov=ormar --cov=tests --cov-fail-under=100 --cov-report=term-missing # 6. Build documentation mkdocs build # if you have changed the documentation make sure it builds successfully # you can also use `mkdocs serve` to serve the documentation at localhost:8000 # ... commit, push, and create your pull request ``` !!!tip For more information on how and why ormar works the way it works please see the [API documentation][API documentation] [API documentation]: ./api/index.mdormar-0.12.2/docs/fastapi/000077500000000000000000000000001444363446500153105ustar00rootroot00000000000000ormar-0.12.2/docs/fastapi/index.md000066400000000000000000000144131444363446500167440ustar00rootroot00000000000000# Fastapi The use of ormar with fastapi is quite simple. Apart from connecting to databases at startup everything else you need to do is substitute pydantic models with ormar models. Here you can find a very simple sample application code. !!!warning This example assumes that you already have a database created. If that is not the case please visit [database initialization][database initialization] section. !!!tip The following example (all sections) should be put in one file. It's divided into subsections for clarity. !!!note If you want to read more on how you can use ormar models in fastapi requests and responses check the [responses](response.md) and [requests](requests.md) documentation. ## Quick Start !!!note Note that you can find the full quick start script in the [github](https://github.com/collerek/ormar) repo under examples. 
### Imports and initialization First take care of the imports and initialization ```python from typing import List, Optional import databases import sqlalchemy from fastapi import FastAPI import ormar app = FastAPI() metadata = sqlalchemy.MetaData() database = databases.Database("sqlite:///test.db") app.state.database = database ``` ### Database connection Next define startup and shutdown events (or use middleware) - note that this is `databases` specific setting not the ormar one ```python @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() ``` !!!info You can read more on connecting to databases in [fastapi][fastapi] documentation ### Models definition Define ormar models with appropriate fields. Those models will be used instead of pydantic ones. ```python class Category(ormar.Model): class Meta: tablename = "categories" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Item(ormar.Model): class Meta: tablename = "items" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) category: Optional[Category] = ormar.ForeignKey(Category, nullable=True) ``` !!!tip You can read more on defining `Models` in [models][models] section. ### Fastapi endpoints definition Define your desired endpoints, note how `ormar` models are used both as `response_model` and as a requests parameters. 
```python @app.get("/items/", response_model=List[Item]) async def get_items(): items = await Item.objects.select_related("category").all() return items @app.post("/items/", response_model=Item) async def create_item(item: Item): await item.save() return item @app.post("/categories/", response_model=Category) async def create_category(category: Category): await category.save() return category @app.put("/items/{item_id}") async def get_item(item_id: int, item: Item): item_db = await Item.objects.get(pk=item_id) return await item_db.update(**item.dict()) @app.delete("/items/{item_id}") async def delete_item(item_id: int, item: Item = None): if item: return {"deleted_rows": await item.delete()} item_db = await Item.objects.get(pk=item_id) return {"deleted_rows": await item_db.delete()} ``` !!!note Note how ormar `Model` methods like save() are available straight out of the box after fastapi initializes it for you. !!!note Note that you can return a `Model` (or list of `Models`) directly - fastapi will jsonize it for you ### Test the application #### Run fastapi If you want to run this script and play with fastapi swagger install uvicorn first `pip install uvicorn` And launch the fastapi. `uvicorn :app --reload` Now you can navigate to your browser (by default fastapi address is `127.0.0.1:8000/docs`) and play with the api. !!!info You can read more about running fastapi in [fastapi][fastapi] docs. #### Test with pytest Here you have a sample test that will prove that everything works as intended. Be sure to create the tables first. If you are using pytest you can use a fixture. 
```python @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) ``` ```python # here is a sample test to check the working of the ormar with fastapi from starlette.testclient import TestClient def test_all_endpoints(): # note that TestClient is only sync, don't use asyns here client = TestClient(app) # note that you need to connect to database manually # or use client as contextmanager during tests with client as client: response = client.post("/categories/", json={"name": "test cat"}) category = response.json() response = client.post( "/items/", json={"name": "test", "id": 1, "category": category} ) item = Item(**response.json()) assert item.pk is not None response = client.get("/items/") items = [Item(**item) for item in response.json()] assert items[0] == item item.name = "New name" response = client.put(f"/items/{item.pk}", json=item.dict()) assert response.json() == item.dict() response = client.get("/items/") items = [Item(**item) for item in response.json()] assert items[0].name == "New name" response = client.delete(f"/items/{item.pk}", json=item.dict()) assert response.json().get("deleted_rows", "__UNDEFINED__") != "__UNDEFINED__" response = client.get("/items/") items = response.json() assert len(items) == 0 ``` !!!tip If you want to see more test cases and how to test ormar/fastapi see [tests][tests] directory in the github repo !!!info You can read more on testing fastapi in [fastapi][fastapi] docs. [fastapi]: https://fastapi.tiangolo.com/ [models]: ../models/index.md [database initialization]: ../models/migrations.md [tests]: https://github.com/collerek/ormar/tree/master/tests ormar-0.12.2/docs/fastapi/requests.md000066400000000000000000000145011444363446500175060ustar00rootroot00000000000000# Request You can use ormar Models in `fastapi` request `Body` parameters instead of pydantic models. 
You can of course also mix `ormar.Model`s with `pydantic` ones if you need to. One of the most common tasks in requests is excluding certain fields that you do not want to include in the payload you send to API. This can be achieved in several ways in `ormar` so below you can review your options and select the one most suitable for your situation. ## Excluding fields in request ### Optional fields Note that each field that is optional is not required, that means that Optional fields can be skipped both in response and in requests. Field is not required if (any/many/all) of following: * Field is marked with `nullable=True` * Field has `default` value or function provided, i.e. `default="Test"` * Field has a `server_default` value set * Field is an `autoincrement=True` `primary_key` field (note that `ormar.Integer` `primary_key` is `autoincrement` by default) Example: ```python class User(ormar.Model): class Meta: tablename: str = "users" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) email: str = ormar.String(max_length=255) password: str = ormar.String(max_length=255) first_name: str = ormar.String(max_length=255, nullable=True) last_name: str = ormar.String(max_length=255) category: str = ormar.String(max_length=255, default="User") ``` In above example fields `id` (is an `autoincrement` `Integer`), `first_name` ( has `nullable=True`) and `category` (has `default`) are optional and can be skipped in response and model wil still validate. If the field is nullable you don't have to include it in payload during creation as well as in response, so given example above you can: !!!Warning Note that although you do not have to pass the optional field, you still **can** do it. And if someone will pass a value it will be used later unless you take measures to prevent it. 
```python # note that app is an FastApi app @app.post("/users/", response_model=User) # here we use ormar.Model in response async def create_user(user: User): # here we use ormar.Model in request parameter return await user.save() ``` That means that if you do not pass i.e. `first_name` in request it will validate correctly (as field is optional), `None` will be saved in the database. ### Generate `pydantic` model from `ormar.Model` Since task of excluding fields is so common `ormar` has a special way to generate `pydantic` models from existing `ormar.Models` without you needing to retype all the fields. That method is `get_pydantic()` method available on all models classes. ```python # generate a tree of models without password on User and without priority on nested Category RequestUser = User.get_pydantic(exclude={"password": ..., "category": {"priority"}}) @app.post("/users3/", response_model=User) # here you can also use both ormar/pydantic async def create_user3(user: RequestUser): # use the generated model here # note how now user is pydantic and not ormar Model so you need to convert return await User(**user.dict()).save() ``` !!!Note To see more examples and read more visit [get_pydantic](../models/methods.md#get_pydantic) part of the documentation. !!!Warning The `get_pydantic` method generates all models in a tree of nested models according to an algorithm that allows to avoid loops in models (same algorithm that is used in `dict()`, `select_all()` etc.) That means that nested models won't have reference to parent model (by default ormar relation is biderectional). Note also that if given model exists in a tree more than once it will be doubled in pydantic models (each occurance will have separate own model). That way you can exclude/include different fields on different leafs of the tree. #### Mypy and type checking Note that assigning a function as a python type passes at runtime (as it's not checked) the static type checkers like mypy will complain. 
Although result of the function call will always be the same for given model using a dynamically created type is not allowed. Therefore you have two options: First one is to simply add `# type: ignore` to skip the type checking ```python RequestUser = User.get_pydantic(exclude={"password": ..., "category": {"priority"}}) @app.post("/users3/", response_model=User) async def create_user3(user: RequestUser): # type: ignore # note how now user is not ormar Model so you need to convert return await User(**user.dict()).save() ``` The second one is a little bit more hacky and utilizes a way in which fastapi extract function parameters. You can overwrite the `__annotations__` entry for given param. ```python RequestUser = User.get_pydantic(exclude={"password": ..., "category": {"priority"}}) # do not use the app decorator async def create_user3(user: User): # use ormar model here return await User(**user.dict()).save() # overwrite the function annotations entry for user param with generated model create_user3.__annotations__["user"] = RequestUser # manually call app functions (app.get, app.post etc.) and pass your function reference app.post("/categories/", response_model=User)(create_user3) ``` Note that this will cause mypy to "think" that user is an ormar model but since in request it doesn't matter that much (you pass jsonized dict anyway and you need to convert before saving). That still should work fine as generated model will be a subset of fields, so all needed fields will validate, and all not used fields will fail at runtime. ### Separate `pydantic` model The final solution is to just create separate pydantic model manually. That works exactly the same as with normal fastapi application, so you can have different models for response and requests etc. 
Sample: ```python import pydantic class UserCreate(pydantic.BaseModel): class Config: orm_mode = True email: str first_name: str last_name: str password: str @app.post("/users3/", response_model=User) # use ormar model here (but of course you CAN use pydantic also here) async def create_user3(user: UserCreate): # use pydantic model here # note how now request param is a pydantic model and not the ormar one # so you need to parse/convert it to ormar before you can use database return await User(**user.dict()).save() ```ormar-0.12.2/docs/fastapi/response.md000066400000000000000000000240221444363446500174700ustar00rootroot00000000000000# Response You can use ormar Models in `fastapi` response_model instead of pydantic models. You can of course also mix `ormar.Model`s with `pydantic` ones if you need to. One of the most common tasks in responses is excluding certain fields that you do not want to include in response data. This can be achieved in several ways in `ormar` so below you can review your options and select the one most suitable for your situation. ## Excluding fields in response ### Optional fields Note that each field that is optional is not required, that means that Optional fields can be skipped both in response and in requests. Field is not required if (any/many/all) of following: * Field is marked with `nullable=True` * Field has `default` value or function provided, i.e. 
`default="Test"` * Field has a `server_default` value set * Field is an `autoincrement=True` `primary_key` field (note that `ormar.Integer` `primary_key` is `autoincrement` by default) Example: ```python class User(ormar.Model): class Meta: tablename: str = "users" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) email: str = ormar.String(max_length=255) password: str = ormar.String(max_length=255) first_name: str = ormar.String(max_length=255, nullable=True) last_name: str = ormar.String(max_length=255) category: str = ormar.String(max_length=255, default="User") ``` In above example fields `id` (is an `autoincrement` `Integer`), `first_name` ( has `nullable=True`) and `category` (has `default`) are optional and can be skipped in response and model wil still validate. If the field is nullable you don't have to include it in payload during creation as well as in response, so given example above you can: ```python # note that app is an FastApi app @app.post("/users/", response_model=User) # here we use ormar.Model in response async def create_user(user: User): # here we use ormar.Model in request parameter return await user.save() ``` That means that if you do not pass i.e. `first_name` in request it will validate correctly (as field is optional), save in the database and return the saved record without this field (which will also pass validation). !!!Note Note that although you do not pass the **field value**, the **field itself** is still present in the `response_model` that means it **will be present in response data** and set to `None`. If you want to fully exclude the field from the result read on. ### FastApi `response_model_exclude` Fastapi has `response_model_exclude` that accepts a set (or a list) of field names. 
That has its limitations, as `ormar` and `pydantic` also accept dictionaries in which you can set exclude/include columns on nested models as well (more on this below)
Assume for a second that our user's category is a separate model: ```python class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Category(ormar.Model): class Meta(BaseMeta): tablename: str = "categories" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=255) priority: int = ormar.Integer(nullable=True) class User(ormar.Model): class Meta(BaseMeta): tablename: str = "users" id: int = ormar.Integer(primary_key=True) email: str = ormar.String(max_length=255) password: str = ormar.String(max_length=255) first_name: str = ormar.String(max_length=255, nullable=True) last_name: str = ormar.String(max_length=255) category: Optional[Category] = ormar.ForeignKey(Category, related_name="categories") ``` If you want to exclude `priority` from category in your response, you can still use fastapi parameter. ```python @app.post("/users/", response_model=User, response_model_exclude={"category__priority"}) async def create_user(user: User): return await user.save() ``` Note that you can go in deeper models with double underscore, and if you wan't to exclude multiple fields from nested model you need to prefix them with full path. In example `response_model_exclude={"category__priority", "category__other_field", category__nested_model__nested_model_field}` etc. !!!Note To read more about possible excludes and how to structure your exclude dictionary or set visit [fields](../queries/select-columns.md#fields) section of documentation !!!Note Note that apart from `response_model_exclude` parameter `fastapi` supports also other parameters inherited from `pydantic`. All of them works also with ormar, but can have some nuances so best to read [dict](../models/methods.md#dict) part of the documentation. ### Exclude in `Model.dict()` Alternatively you can just return a dict from `ormar.Model` and use . Like this you can also set exclude/include as dict and exclude fields on nested models too. 
!!!Warning Not using a `response_model` will cause api documentation having no response example and schema since in theory response can have any format. ```python @app.post("/users2/", response_model=User) async def create_user2(user: User): user = await user.save() return user.dict(exclude={'password'}) # could be also something like return user.dict(exclude={'category': {'priority'}}) to exclude category priority ``` !!!Note Note that above example will nullify the password field even if you pass it in request, but the **field will be still there** as it's part of the response schema, the value will be set to `None`. If you want to fully exclude the field with this approach simply don't use `response_model` and exclude in Model's dict() Alternatively you can just return a dict from ormar model. Like this you can also set exclude/include as dict and exclude fields on nested models. !!!Note In theory you loose validation of response here but since you operate on `ormar.Models` the response data have already been validated after db query (as ormar model is pydantic model). So if you skip `response_model` altogether you can do something like this: ```python @app.post("/users4/") # note no response_model async def create_user4(user: User): user = await user.save() return user.dict(exclude={'last_name'}) ``` !!!Note Note that when you skip the response_model you can now **exclude also required fields** as the response is no longer validated after being returned. The cost of this solution is that you loose also api documentation as response schema in unknown from fastapi perspective. ### Generate `pydantic` model from `ormar.Model` Since task of excluding fields is so common `ormar` has a special way to generate `pydantic` models from existing `ormar.Models` without you needing to retype all the fields. That method is `get_pydantic()` method available on all models classes. 
```python # generate a tree of models without password on User and without priority on nested Category ResponseUser = User.get_pydantic(exclude={"password": ..., "category": {"priority"}}) @app.post("/users3/", response_model=ResponseUser) # use the generated model here async def create_user3(user: User): return await user.save() ``` !!!Note To see more examples and read more visit [get_pydantic](../models/methods.md#get_pydantic) part of the documentation. !!!Warning The `get_pydantic` method generates all models in a tree of nested models according to an algorithm that allows to avoid loops in models (same algorithm that is used in `dict()`, `select_all()` etc.) That means that nested models won't have reference to parent model (by default ormar relation is biderectional). Note also that if given model exists in a tree more than once it will be doubled in pydantic models (each occurance will have separate own model). That way you can exclude/include different fields on different leafs of the tree. ### Separate `pydantic` model The final solution is to just create separate pydantic model manually. That works exactly the same as with normal fastapi application so you can have different models for response and requests etc. Sample: ```python import pydantic class UserBase(pydantic.BaseModel): class Config: orm_mode = True email: str first_name: str last_name: str @app.post("/users3/", response_model=UserBase) # use pydantic model here async def create_user3(user: User): #use ormar model here (but of course you CAN use pydantic also here) return await user.save() ```ormar-0.12.2/docs/fields/000077500000000000000000000000001444363446500151275ustar00rootroot00000000000000ormar-0.12.2/docs/fields/common-parameters.md000066400000000000000000000171301444363446500211040ustar00rootroot00000000000000# Common Parameters All `Field` types have a set of common parameters. ## primary_key `primary_key`: `bool` = `False` -> by default False. 
Sets the primary key column on a table, foreign keys always refer to the pk of the `Model`. Used in sql only. ## autoincrement `autoincrement`: `bool` = `primary_key and type == int` -> defaults to True if column is a primary key and of type Integer, otherwise False. Can be only used with int/bigint fields. If a field has autoincrement it becomes optional. Used both in sql and pydantic (changes pk field to optional for autoincrement). ## nullable `nullable`: `bool` = `False` -> defaults to False for all fields except relation fields. Automatically changed to True if user provide one of the following: * `default` value or function is provided * `server_default` value or function is provided * `autoincrement` is set on `Integer` `primary_key` field * **[DEPRECATED]**`pydantic_only=True` is set Specifies if field is optional or required, used both with sql and pydantic. By default, used for both `pydantic` and `sqlalchemy` as those are the most common settings: * `nullable=False` - means database column is not null and field is required in pydantic * `nullable=True` - means database column is null and field is optional in pydantic If you want to set different setting for pydantic and the database see `sql_nullable` below. !!!note By default all `ForeignKeys` are also nullable, meaning the related `Model` is not required. If you change the `ForeignKey` column to `nullable=False`, it becomes required. ## sql_nullable `sql_nullable`: `bool` = `nullable` -> defaults to the value of nullable (described above). Specifies if field is not null or allows nulls in the database only. Use this setting in combination with `nullable` only if you want to set different options on pydantic model and in the database. A sample usage might be i.e. making field not null in the database, but allow this field to be nullable in pydantic (i.e. with `server_default` value). 
That will prevent the updates of the field to null (as with `server_default` set you cannot insert null values already as the default value would be used) ## default `default`: `Any` = `None` -> defaults to None. A default value used if no other value is passed. In sql invoked on an insert, used during pydantic model definition. If the field has a default value it becomes optional. You can pass a static value or a Callable (function etc.) Used both in sql and pydantic. Sample usage: ```python # note the distinction between passing a value and Callable pointer # value name: str = ormar.String(max_length=200, default="Name") # note that when you call a function it's not a pointer to Callable # a definition like this will call the function at startup and assign # the result of the function to the default, so it will be constant value for all instances created_date: datetime.datetime = ormar.DateTime(default=datetime.datetime.now()) # if you want to pass Callable reference (note that it cannot have arguments) # note lack of the parenthesis -> ormar will call this function for you on each model created_date: datetime.datetime = ormar.DateTime(default=datetime.datetime.now) # Callable can be a function, builtin, class etc. ``` ## server default `server_default`: `Any` = `None` -> defaults to None. A default value used if no other value is passed. In sql invoked on the server side so you can pass i.e. sql function (like now() or query/value wrapped in sqlalchemy text() clause). If the field has a server_default value it becomes optional. You can pass a static value or a Callable (function etc.) Used in sql only. Sample usage: ```Python hl_lines="21-23" --8<-- "../docs_src/fields/docs004.py" ``` !!!warning `server_default` accepts `str`, `sqlalchemy.sql.elements.ClauseElement` or `sqlalchemy.sql.elements.TextClause` so if you want to set i.e. 
Integer value you need to wrap it in `sqlalchemy.text()` function like above !!!tip You can pass also valid sql (dialect specific) wrapped in `sqlalchemy.text()` For example `func.now()` above could be exchanged for `text('(CURRENT_TIMESTAMP)')` for sqlite backend !!!info `server_default` is passed straight to sqlalchemy table definition so you can read more in [server default][server default] sqlalchemy documentation ## name `name`: `str` = `None` -> defaults to None Allows you to specify a column name alias to be used. Useful for existing database structures that use a reserved keyword, or if you would like to use database name that is different from `ormar` field name. Take for example the snippet below. `from`, being a reserved word in python, will prevent you from creating a model with that column name. Changing the model name to `from_` and adding the parameter `name='from'` will cause ormar to use `from` for the database column name. ```python #... rest of Model cut for brevity from_: str = ormar.String(max_length=15, name='from') ``` Similarly, you can change the foreign key column names in database, while keeping the desired relation name in ormar: ```python # ... rest of Model cut for brevity album: Optional[Album] = ormar.ForeignKey(Album, name="album_id") ``` ## index `index`: `bool` = `False` -> by default False, Sets the index on a table's column. Used in sql only. ## unique `unique`: `bool` = `False` Sets the unique constraint on a table's column. Used in sql only. ## pydantic_only (**DEPRECATED**) **This parameter is deprecated and will be removed in one of next releases!** **To check how to declare pydantic only fields that are not saved into database see [pydantic fields section](pydantic-fields.md)** `pydantic_only`: `bool` = `False` Prevents creation of a sql column for given field. Used for data related to given model but not to be stored in the database. Used in pydantic only. 
## overwrite_pydantic_type By default, ormar uses predefined pydantic field types that it applies on model creation (hence the type hints are optional). If you want to, you can apply your own type, that will be **completely** replacing the build in one. So it's on you as a user to provide a type that is valid in the context of given ormar field type. !!!warning Note that by default you should use build in arguments that are passed to underlying pydantic field. You can check what arguments are supported in field types section or in [pydantic](https://pydantic-docs.helpmanual.io/usage/schema/#field-customisation) docs. !!!danger Setting a wrong type of pydantic field can break your model, so overwrite it only when you know what you are doing. As it's easy to break functionality of ormar the `overwrite_pydantic_type` argument is not available on relation fields! ```python # sample overwrites class OverwriteTest(ormar.Model): class Meta: tablename = "overwrites" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) my_int: str = ormar.Integer(overwrite_pydantic_type=PositiveInt) constraint_dict: Json = ormar.JSON( overwrite_pydantic_type=Optional[Json[Dict[str, int]]]) ``` ## choices `choices`: `Sequence` = `[]` A set of choices allowed to be used for given field. Used for data validation on pydantic side. Prevents insertion of value not present in the choices list. Used in pydantic only. [relations]: ../relations/index.md [queries]: ../queries/index.md [pydantic]: https://pydantic-docs.helpmanual.io/usage/types/#constrained-types [server default]: https://docs.sqlalchemy.org/en/13/core/defaults.html#server-invoked-ddl-explicit-default-expressions ormar-0.12.2/docs/fields/encryption.md000066400000000000000000000130351444363446500176450ustar00rootroot00000000000000# Encryption `ormar` provides you with a way to encrypt a field in the database only. 
Provided encryption backends allow for both one-way encryption (`HASH` backend) as well as both-way encryption/decryption (`FERNET` backend). !!!warning Note that in order for encryption to work you need to install optional `cryptography` package. You can do it manually `pip install cryptography` or with ormar by `pip install ormar[crypto]` !!!warning Note that adding `encrypt_backend` changes the database column type to `TEXT`, which needs to be reflected in db either by migration (`alembic`) or manual change ## Defining a field encryption To encrypt a field you need to pass at minimum `encrypt_secret` and `encrypt_backend` parameters. ```python hl_lines="7-8" class Filter(ormar.Model): class Meta(BaseMeta): tablename = "filters" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, encrypt_secret="secret123", encrypt_backend=ormar.EncryptBackends.FERNET) ``` !!!warning You can encrypt all `Field` types apart from `primary_key` column and relation columns (`ForeignKey` and `ManyToMany`). Check backends details for more information. ## Available backends ### HASH HASH is a one-way hash (like for password), never decrypted on retrieval To set it up pass appropriate backend value. ```python ... # rest of model definition password: str = ormar.String(max_length=128, encrypt_secret="secret123", encrypt_backend=ormar.EncryptBackends.HASH) ``` Note that since this backend never decrypt the stored value it's only applicable for `String` fields. Used hash is a `sha512` hash, so the field length has to be >=128. !!!warning Note that in `HASH` backend you can filter by full value but filters like `contain` will not work as comparison is make on encrypted values !!!note Note that provided `encrypt_secret` is first hashed itself and used as salt, so in order to compare to stored string you need to recreate this steps. The `order_by` will not work as encrypted strings are compared so you cannot reliably order by. 
```python class Hash(ormar.Model): class Meta(BaseMeta): tablename = "hashes" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=128, encrypt_secret="udxc32", encrypt_backend=ormar.EncryptBackends.HASH) await Hash(name='test1').save() # note the steps to recreate the stored value # you can use also cryptography package instead of hashlib secret = hashlib.sha256("udxc32".encode()).digest() secret = base64.urlsafe_b64encode(secret) hashed_test1 = hashlib.sha512(secret + 'test1'.encode()).hexdigest() # full value comparison works hash1 = await Hash.objects.get(name='test1') assert hash1.name == hashed_test1 # but partial comparison does not (hashed strings are compared) with pytest.raises(NoMatch): await Filter.objects.get(name__icontains='test') ``` ### FERNET FERNET is a two-way encrypt/decrypt backend To set it up pass appropriate backend value. ```python ... # rest of model definition year: int = ormar.Integer(encrypt_secret="secret123", encrypt_backend=ormar.EncryptBackends.FERNET) ``` Value is encrypted on way to database end decrypted on way out. Can be used on all types, as the returned value is parsed to corresponding python type. !!!warning Note that in `FERNET` backend you loose `filter`ing possibility altogether as part of the encrypted value is a timestamp. The same goes for `order_by` as encrypted strings are compared so you cannot reliably order by. 
```python class Filter(ormar.Model): class Meta(BaseMeta): tablename = "filters" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, encrypt_secret="asd123", encrypt_backend=ormar.EncryptBackends.FERNET) await Filter(name='test1').save() await Filter(name='test1').save() # values are properly encrypted and later decrypted filters = await Filter.objects.all() assert filters[0].name == filters[1].name == 'test1' # but you cannot filter at all since part of the fernet hash is a timestamp # which means that even if you encrypt the same string 2 times it will be different with pytest.raises(NoMatch): await Filter.objects.get(name='test1') ``` ## Custom Backends If you wish to support other type of encryption (i.e. AES) you can provide your own `EncryptionBackend`. To setup a backend all you need to do is subclass `ormar.fields.EncryptBackend` class and provide required backend. Sample dummy backend (that does nothing) can look like following: ```python class DummyBackend(ormar.fields.EncryptBackend): def _initialize_backend(self, secret_key: bytes) -> None: pass def encrypt(self, value: Any) -> str: return value def decrypt(self, value: Any) -> str: return value ``` To use this backend set `encrypt_backend` to `CUSTOM` and provide your backend as argument by `encrypt_custom_backend`. ```python class Filter(ormar.Model): class Meta(BaseMeta): tablename = "filters" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, encrypt_secret="secret123", encrypt_backend=ormar.EncryptBackends.CUSTOM, encrypt_custom_backend=DummyBackend ) ```ormar-0.12.2/docs/fields/field-types.md000066400000000000000000000174451444363446500177110ustar00rootroot00000000000000# Fields There are 12 basic model field types and a special `ForeignKey` and `Many2Many` fields to establish relationships between models. !!!tip For explanation of `ForeignKey` and `Many2Many` fields check [relations][relations]. 
Each of the `Fields` has assigned both `sqlalchemy` column class and python type that is used to create `pydantic` model. ## Fields Types ### String `String(max_length: int, min_length: int = None, regex: str = None,)` has a required `max_length` parameter. * Sqlalchemy column: `sqlalchemy.String` * Type (used for pydantic): `str` !!!tip For explanation of other parameters check [pydantic](https://pydantic-docs.helpmanual.io/usage/schema/#field-customisation) documentation. ### Text `Text()` has no required parameters. * Sqlalchemy column: `sqlalchemy.Text` * Type (used for pydantic): `str` !!!tip For explanation of other parameters check [pydantic][pydantic] documentation. ### Boolean `Boolean()` has no required parameters. * Sqlalchemy column: `sqlalchemy.Boolean` * Type (used for pydantic): `bool` ### Integer `Integer(minimum: int = None, maximum: int = None, multiple_of: int = None)` has no required parameters. * Sqlalchemy column: `sqlalchemy.Integer` * Type (used for pydantic): `int` !!!tip For explanation of other parameters check [pydantic][pydantic] documentation. ### BigInteger `BigInteger(minimum: int = None, maximum: int = None, multiple_of: int = None)` has no required parameters. * Sqlalchemy column: `sqlalchemy.BigInteger` * Type (used for pydantic): `int` !!!tip For explanation of other parameters check [pydantic][pydantic] documentation. ### SmallInteger `SmallInteger(minimum: int = None, maximum: int = None, multiple_of: int = None)` has no required parameters. * Sqlalchemy column: `sqlalchemy.SmallInteger` * Type (used for pydantic): `int` !!!tip For explanation of other parameters check [pydantic][pydantic] documentation. ### Float `Float(minimum: float = None, maximum: float = None, multiple_of: int = None)` has no required parameters. * Sqlalchemy column: `sqlalchemy.Float` * Type (used for pydantic): `float` !!!tip For explanation of other parameters check [pydantic][pydantic] documentation. 
### Decimal `Decimal(minimum: float = None, maximum: float = None, multiple_of: int = None, precision: int = None, scale: int = None, max_digits: int = None, decimal_places: int = None)` has no required parameters You can use either `length` and `precision` parameters or `max_digits` and `decimal_places`. * Sqlalchemy column: `sqlalchemy.DECIMAL` * Type (used for pydantic): `decimal.Decimal` !!!tip For explanation of other parameters check [pydantic][pydantic] documentation. ### Date `Date()` has no required parameters. * Sqlalchemy column: `sqlalchemy.Date` * Type (used for pydantic): `datetime.date` ### Time `Time(timezone: bool = False)` has no required parameters. You can pass `timezone=True` for timezone aware database column. * Sqlalchemy column: `sqlalchemy.Time` * Type (used for pydantic): `datetime.time` ### DateTime `DateTime(timezone: bool = False)` has no required parameters. You can pass `timezone=True` for timezone aware database column. * Sqlalchemy column: `sqlalchemy.DateTime` * Type (used for pydantic): `datetime.datetime` ### JSON `JSON()` has no required parameters. * Sqlalchemy column: `sqlalchemy.JSON` * Type (used for pydantic): `pydantic.Json` ### LargeBinary `LargeBinary(max_length)` has a required `max_length` parameter. * Sqlalchemy column: `sqlalchemy.LargeBinary` * Type (used for pydantic): `bytes` LargeBinary length is used in some backend (i.e. mysql) to determine the size of the field, in other backends it's simply ignored yet in ormar it's always required. It should be max size of the file/bytes in bytes. `LargeBinary` has also optional `represent_as_base64_str: bool = False` flag. When set to `True` `ormar` will auto-convert bytes value to base64 decoded string, you can also set value by passing a base64 encoded string. That way you can i.e. set the value by API, even if value is not `utf-8` compatible and would otherwise fail during json conversion. ```python import base64 ... 
# other imports skipped for brevity class LargeBinaryStr(ormar.Model): class Meta: tablename = "my_str_blobs" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) test_binary: str = ormar.LargeBinary( max_length=100000, represent_as_base64_str=True ) # set non utf-8 compliant value - note this can be passed by api (i.e. fastapi) in json item = LargeBinaryStr(test_binary=base64.b64encode(b"\xc3\x28").decode()) assert item.test_binary == base64.b64encode(b"\xc3\x28").decode() # technical note that underlying value is still bytes and will be saved as so assert item.__dict__["test_binary"] == b"\xc3\x28" ``` ### UUID `UUID(uuid_format: str = 'hex')` has no required parameters. * Sqlalchemy column: `ormar.UUID` based on `sqlalchemy.CHAR(36)` or `sqlalchemy.CHAR(32)` field (for string or hex format respectively) * Type (used for pydantic): `uuid.UUID` `uuid_format` parameters allow 'hex'(default) or 'string' values. Depending on the format either 32 or 36 char is used in the database. Sample: * 'hex' format value = `c616ab438cce49dbbf4380d109251dce` (CHAR(32)) * 'string' value = `c616ab43-8cce-49db-bf43-80d109251dce` (CHAR(36)) When loaded it's always python UUID so you can compare it and compare two formats values between each other. ### Enum There are two ways to use enums in ormar -> one is a dedicated `Enum` field that uses `sqlalchemy.Enum` column type, while the other is setting `choices` on any field in ormar. The Enum field uses the database dialect specific Enum column type if it's available, but fallback to varchar if this field type is not available. The `choices` option always respect the database field type selected. So which one to use depends on the backend you use and on the column/ data type you want in your Enum field. #### Enum - Field `Enum(enum_class=Type[Enum])` has a required `enum_class` parameter. 
* Sqlalchemy column: `sqlalchemy.Enum` * Type (used for pydantic): `Type[Enum]` #### Choices You can change any field into `Enum` like field by passing a `choices` list that is accepted by all Field types. It will add both: validation in `pydantic` model and will display available options in schema, therefore it will be available in docs of `fastapi`. If you still want to use `Enum` in your application you can do this by passing a `Enum` into choices and later pass value of given option to a given field (note tha Enum is not JsonSerializable). ```python # note that imports and endpoints declaration # is skipped here for brevity from enum import Enum class TestEnum(Enum): val1 = 'Val1' val2 = 'Val2' class TestModel(ormar.Model): class Meta: tablename = "org" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) # pass list(Enum) to choices enum_string: str = ormar.String(max_length=100, choices=list(TestEnum)) # sample payload coming to fastapi response = client.post( "/test_models/", json={ "id": 1, # you need to refer to the value of the `Enum` option # if called like this, alternatively just use value # string "Val1" in this case "enum_string": TestEnum.val1.value }, ) ``` [relations]: ../relations/index.md [queries]: ../queries.md [pydantic]: https://pydantic-docs.helpmanual.io/usage/schema/#field-customisation [server default]: https://docs.sqlalchemy.org/en/13/core/defaults.html#server-invoked-ddl-explicit-default-expressions ormar-0.12.2/docs/fields/pydantic-fields.md000066400000000000000000000121341444363446500205310ustar00rootroot00000000000000# Pydantic only fields Ormar allows you to declare normal `pydantic` fields in its model, so you have access to all basic and custom pydantic fields like `str`, `int`, `HttpUrl`, `PaymentCardNumber` etc. You can even declare fields leading to nested pydantic only Models, not only single fields. 
Since those fields are not stored in database (that's the whole point of those fields), you have to provide a meaningful value for them, either by setting a default one or providing one during model initialization. If `ormar` cannot resolve the value for pydantic field it will fail during loading data from the database, with missing required value for declared pydantic field. Options to provide a value are described below. Of course you can combine few or all of them in one model. ## Optional field If you set a field as `Optional`, it defaults to `None` if not provided and that's exactly what's going to happen during loading from database. ```python database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class ModelTest(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=200) number: Optional[PaymentCardNumber] test = ModelTest(name="Test") assert test.name == "Test" assert test.number is None test.number = "123456789015" await test.save() test_check = await ModelTest.objects.get() assert test_check.name == "Test" # after load it's back to None assert test_check.number is None ``` ## Field with default value By setting a default value, this value will be set on initialization and database load. Note that setting a default to `None` is the same as setting the field to `Optional`. 
```python database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class ModelTest(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=200) url: HttpUrl = "https://www.example.com" test = ModelTest(name="Test") assert test.name == "Test" assert test.url == "https://www.example.com" test.url = "https://www.sdta.ada.pt" assert test.url == "https://www.sdta.ada.pt" await test.save() test_check = await ModelTest.objects.get() assert test_check.name == "Test" # after load it's back to default assert test_check.url == "https://www.example.com" ``` ## Default factory function By setting a `default_factory` function, this result of the function call will be set on initialization and each database load. ```python from pydantic import Field, PaymentCardNumber # ... database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database CARD_NUMBERS = [ "123456789007", "123456789015", "123456789023", "123456789031", "123456789049", ] def get_number(): return random.choice(CARD_NUMBERS) class ModelTest2(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=200) # note that you do not call the function, just pass reference number: PaymentCardNumber = Field(default_factory=get_number) # note that you still CAN provide a value test = ModelTest2(name="Test2", number="4000000000000002") assert test.name == "Test2" assert test.number == "4000000000000002" await test.save() test_check = await ModelTest2.objects.get() assert test_check.name == "Test2" # after load value is set to be one of the CARD_NUMBERS assert test_check.number in CARD_NUMBERS assert test_check.number != test.number ``` ## Custom setup in `__init__` You can provide a value for the field in your `__init__()` method 
before calling a `super()` init method. ```python from pydantic import BaseModel # ... database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class PydanticTest(BaseModel): aa: str bb: int class ModelTest3(ormar.Model): class Meta(BaseMeta): pass # provide your custom init function def __init__(self, **kwargs): # add value for required field without default value kwargs["pydantic_test"] = PydanticTest(aa="random", bb=42) # remember to call ormar.Model init! super().__init__(**kwargs) id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=200) pydantic_test: PydanticTest test = ModelTest3(name="Test3") assert test.name == "Test3" assert test.pydantic_test.bb == 42 test.pydantic.aa = "new value" assert test.pydantic.aa == "new value" await test.save() test_check = await ModelTest3.objects.get() assert test_check.name == "Test3" # after load it's back to value provided in init assert test_check.pydantic_test.aa == "random" ``` !!!warning If you do not provide a value in one of the above ways `ValidationError` will be raised on load from database.ormar-0.12.2/docs/gen_ref_pages.py000066400000000000000000000016431444363446500170230ustar00rootroot00000000000000"""Generate the code reference pages and navigation.""" from pathlib import Path import mkdocs_gen_files nav = mkdocs_gen_files.Nav() for path in sorted(Path("ormar").rglob("*.py")): module_path = path.relative_to(".").with_suffix("") doc_path = path.relative_to("ormar").with_suffix(".md") full_doc_path = Path("api", doc_path) parts = tuple(module_path.parts) if parts[-1] == "__init__": parts = parts[:-1] doc_path = doc_path.with_name("index.md") full_doc_path = full_doc_path.with_name("index.md") elif parts[-1] == "__main__": continue nav[parts] = str(doc_path) with mkdocs_gen_files.open(full_doc_path, "w") as fd: ident = ".".join(parts) fd.write(f"::: {ident}") 
mkdocs_gen_files.set_edit_path(full_doc_path, path) with mkdocs_gen_files.open("api/SUMMARY.md", "w") as nav_file: nav_file.writelines(nav.build_literate_nav()) ormar-0.12.2/docs/index.md000066400000000000000000000633601444363446500153220ustar00rootroot00000000000000# ormar

Pypi version Pypi version Build Status Coverage CodeFactor

### Overview The `ormar` package is an async mini ORM for Python, with support for **Postgres, MySQL**, and **SQLite**. The main benefits of using `ormar` are: * getting an **async ORM that can be used with async frameworks** (fastapi, starlette etc.) * getting just **one model to maintain** - you don't have to maintain pydantic and other orm models (sqlalchemy, peewee, gino etc.) The goal was to create a simple ORM that can be **used directly (as request and response models) with [`fastapi`][fastapi]** that bases it's data validation on pydantic. Ormar - apart from the obvious "ORM" in name - gets its name from _ormar_ in Swedish which means _snakes_, and _ormar(e)_ in Croatian which means _cabinet_. And what's a better name for python ORM than snakes cabinet :) **If you like ormar remember to star the repository in [github](https://github.com/collerek/ormar)!** The bigger community we build, the easier it will be to catch bugs and attract contributors ;) ### Documentation Check out the [documentation][documentation] for details. **Note that for brevity most of the documentation snippets omit the creation of the database and scheduling the execution of functions for asynchronous run.** If you want more real life examples than in the documentation you can see the [tests][tests] folder, since they actually have to create and connect to a database in most of the tests. Yet remember that those are - well - tests and not all solutions are suitable to be used in real life applications. ### Part of the `fastapi` ecosystem As part of the fastapi ecosystem `ormar` is supported in libraries that somehow work with databases. As of now `ormar` is supported by: * [`fastapi-users`](https://github.com/frankie567/fastapi-users) * [`fastapi-crudrouter`](https://github.com/awtkns/fastapi-crudrouter) * [`fastapi-pagination`](https://github.com/uriyyo/fastapi-pagination) Ormar remains sql dialect agnostic - so only columns working in all supported backends are implemented. 
It's relatively easy to implement columns for specific dialects as an extensions of ormar. Postgres specific columns implementation: [`ormar-postgres-extensions`](https://github.com/tophat/ormar-postgres-extensions) If you maintain or use a different library and would like it to support `ormar` let us know how we can help. ### Dependencies Ormar is built with: * [`sqlalchemy core`][sqlalchemy-core] for query building. * [`databases`][databases] for cross-database async support. * [`pydantic`][pydantic] for data validation. * `typing_extensions` for python 3.6 - 3.7 ### License `ormar` is built as open-source software and will remain completely free (MIT license). As I write open-source code to solve everyday problems in my work or to promote and build strong python community you can say thank you and buy me a coffee or sponsor me with a monthly amount to help ensure my work remains free and maintained.
Sponsor
### Migrating from `sqlalchemy` and existing databases If you currently use `sqlalchemy` and would like to switch to `ormar` check out the auto-translation tool that can help you with translating existing sqlalchemy orm models so you do not have to do it manually. **Beta** versions available at github: [`sqlalchemy-to-ormar`](https://github.com/collerek/sqlalchemy-to-ormar) or simply `pip install sqlalchemy-to-ormar` `sqlalchemy-to-ormar` can be used in pair with `sqlacodegen` to auto-map/ generate `ormar` models from existing database, even if you don't use `sqlalchemy` for your project. ### Migrations & Database creation Because ormar is built on SQLAlchemy core, you can use [`alembic`][alembic] to provide database migrations (and you really should for production code). For tests and basic applications the `sqlalchemy` is more than enough: ```python # note this is just a partial snippet full working example below # 1. Imports import sqlalchemy import databases # 2. Initialization DATABASE_URL = "sqlite:///db.sqlite" database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() # Define models here # 3. Database creation and tables creation engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) ``` For a sample configuration of alembic and more information regarding migrations and database creation visit [migrations][migrations] documentation section. ### Package versions **ormar is still under development:** We recommend pinning any dependencies (with i.e. `ormar~=0.9.1`) `ormar` also follows the release numeration that breaking changes bump the major number, while other changes and fixes bump minor number, so with the latter you should be safe to update, yet always read the [releases][releases] docs before. `example: (0.5.2 -> 0.6.0 - breaking, 0.5.2 -> 0.5.3 - non breaking)`. 
### Asynchronous Python Note that `ormar` is an asynchronous ORM, which means that you have to `await` the calls to the methods, that are scheduled for execution in an event loop. Python has a builtin module [`asyncio`][asyncio] that allows you to do just that. Note that most "normal" python interpreters do not allow execution of `await` outside of a function (because you actually schedule this function for delayed execution and don't get the result immediately). In a modern web framework (like `fastapi`), the framework will handle this for you, but if you plan to do this on your own you need to perform this manually like described in the quick start below. ### Quick Start Note that you can find the same script in examples folder on github. ```python from typing import Optional import databases import pydantic import ormar import sqlalchemy DATABASE_URL = "sqlite:///db.sqlite" database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() # note that this step is optional -> all ormar cares is a internal # class with name Meta and proper parameters, but this way you do not # have to repeat the same parameters if you use only one database class BaseMeta(ormar.ModelMeta): metadata = metadata database = database # Note that all type hints are optional # below is a perfectly valid model declaration # class Author(ormar.Model): # class Meta(BaseMeta): # tablename = "authors" # # id = ormar.Integer(primary_key=True) # <= notice no field types # name = ormar.String(max_length=100) class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Book(ormar.Model): class Meta(BaseMeta): tablename = "books" id: int = ormar.Integer(primary_key=True) author: Optional[Author] = ormar.ForeignKey(Author) title: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) # create the database # note that in production you should use migrations # note that 
this is not required if you connect to existing database engine = sqlalchemy.create_engine(DATABASE_URL) # just to be sure we clear the db before metadata.drop_all(engine) metadata.create_all(engine) # all functions below are divided into functionality categories # note how all functions are defined with async - hence can use await AND needs to # be awaited on their own async def create(): # Create some records to work with through QuerySet.create method. # Note that queryset is exposed on each Model's class as objects tolkien = await Author.objects.create(name="J.R.R. Tolkien") await Book.objects.create(author=tolkien, title="The Hobbit", year=1937) await Book.objects.create(author=tolkien, title="The Lord of the Rings", year=1955) await Book.objects.create(author=tolkien, title="The Silmarillion", year=1977) # alternative creation of object divided into 2 steps sapkowski = Author(name="Andrzej Sapkowski") # do some stuff await sapkowski.save() # or save() after initialization await Book(author=sapkowski, title="The Witcher", year=1990).save() await Book(author=sapkowski, title="The Tower of Fools", year=2002).save() # to read more about inserting data into the database # visit: https://collerek.github.io/ormar/queries/create/ async def read(): # Fetch an instance, without loading a foreign key relationship on it. 
# Django style book = await Book.objects.get(title="The Hobbit") # or python style book = await Book.objects.get(Book.title == "The Hobbit") book2 = await Book.objects.first() # first() fetch the instance with lower primary key value assert book == book2 # you can access all fields on loaded model assert book.title == "The Hobbit" assert book.year == 1937 # when no condition is passed to get() # it behaves as last() based on primary key column book3 = await Book.objects.get() assert book3.title == "The Tower of Fools" # When you have a relation, ormar always defines a related model for you # even when all you loaded is a foreign key value like in this example assert isinstance(book.author, Author) # primary key is populated from foreign key stored in books table assert book.author.pk == 1 # since the related model was not loaded all other fields are None assert book.author.name is None # Load the relationship from the database when you already have the related model # alternatively see joins section below await book.author.load() assert book.author.name == "J.R.R. Tolkien" # get all rows for given model authors = await Author.objects.all() assert len(authors) == 2 # to read more about reading data from the database # visit: https://collerek.github.io/ormar/queries/read/ async def update(): # read existing row from db tolkien = await Author.objects.get(name="J.R.R. Tolkien") assert tolkien.name == "J.R.R. Tolkien" tolkien_id = tolkien.id # change the selected property tolkien.name = "John Ronald Reuel Tolkien" # call update on a model instance await tolkien.update() # confirm that object was updated tolkien = await Author.objects.get(name="John Ronald Reuel Tolkien") assert tolkien.name == "John Ronald Reuel Tolkien" assert tolkien.id == tolkien_id # alternatively update data without loading await Author.objects.filter(name__contains="Tolkien").update(name="J.R.R. 
Tolkien") # to read more about updating data in the database # visit: https://collerek.github.io/ormar/queries/update/ async def delete(): silmarillion = await Book.objects.get(year=1977) # call delete() on instance await silmarillion.delete() # alternatively delete without loading await Book.objects.delete(title="The Tower of Fools") # note that when there is no record ormar raises NoMatch exception try: await Book.objects.get(year=1977) except ormar.NoMatch: print("No book from 1977!") # to read more about deleting data from the database # visit: https://collerek.github.io/ormar/queries/delete/ # note that despite the fact that record no longer exists in database # the object above is still accessible and you can use it (and i.e. save()) again. tolkien = silmarillion.author await Book.objects.create(author=tolkien, title="The Silmarillion", year=1977) async def joins(): # Tho join two models use select_related # Django style book = await Book.objects.select_related("author").get(title="The Hobbit") # Python style book = await Book.objects.select_related(Book.author).get( Book.title == "The Hobbit" ) # now the author is already prefetched assert book.author.name == "J.R.R. Tolkien" # By default you also get a second side of the relation # constructed as lowercase source model name +'s' (books in this case) # you can also provide custom name with parameter related_name # Django style author = await Author.objects.select_related("books").all(name="J.R.R. Tolkien") # Python style author = await Author.objects.select_related(Author.books).all( Author.name == "J.R.R. Tolkien" ) assert len(author[0].books) == 3 # for reverse and many to many relations you can also prefetch_related # that executes a separate query for each of related models # Django style author = await Author.objects.prefetch_related("books").get(name="J.R.R. Tolkien") # Python style author = await Author.objects.prefetch_related(Author.books).get( Author.name == "J.R.R. 
Tolkien" ) assert len(author.books) == 3 # to read more about relations # visit: https://collerek.github.io/ormar/relations/ # to read more about joins and subqueries # visit: https://collerek.github.io/ormar/queries/joins-and-subqueries/ async def filter_and_sort(): # to filter the query you can use filter() or pass key-value pars to # get(), all() etc. # to use special methods or access related model fields use double # underscore like to filter by the name of the author use author__name # Django style books = await Book.objects.all(author__name="J.R.R. Tolkien") # python style books = await Book.objects.all(Book.author.name == "J.R.R. Tolkien") assert len(books) == 3 # filter can accept special methods also separated with double underscore # to issue sql query ` where authors.name like "%tolkien%"` that is not # case sensitive (hence small t in Tolkien) # Django style books = await Book.objects.filter(author__name__icontains="tolkien").all() # python style books = await Book.objects.filter(Book.author.name.icontains("tolkien")).all() assert len(books) == 3 # to sort use order_by() function of queryset # to sort decreasing use hyphen before the field name # same as with filter you can use double underscores to access related fields # Django style books = ( await Book.objects.filter(author__name__icontains="tolkien") .order_by("-year") .all() ) # python style books = ( await Book.objects.filter(Book.author.name.icontains("tolkien")) .order_by(Book.year.desc()) .all() ) assert len(books) == 3 assert books[0].title == "The Silmarillion" assert books[2].title == "The Hobbit" # to read more about filtering and ordering # visit: https://collerek.github.io/ormar/queries/filter-and-sort/ async def subset_of_columns(): # to exclude some columns from loading when querying the database # you can use fileds() method hobbit = await Book.objects.fields(["title"]).get(title="The Hobbit") # note that fields not included in fields are empty (set to None) assert hobbit.year is 
None assert hobbit.author is None # selected field is there assert hobbit.title == "The Hobbit" # alternatively you can provide columns you want to exclude hobbit = await Book.objects.exclude_fields(["year"]).get(title="The Hobbit") # year is still not set assert hobbit.year is None # but author is back assert hobbit.author is not None # also you cannot exclude primary key column - it's always there # even if you EXPLICITLY exclude it it will be there # note that each model have a shortcut for primary_key column which is pk # and you can filter/access/set the values by this alias like below assert hobbit.pk is not None # note that you cannot exclude fields that are not nullable # (required) in model definition try: await Book.objects.exclude_fields(["title"]).get(title="The Hobbit") except pydantic.ValidationError: print("Cannot exclude non nullable field title") # to read more about selecting subset of columns # visit: https://collerek.github.io/ormar/queries/select-columns/ async def pagination(): # to limit number of returned rows use limit() books = await Book.objects.limit(1).all() assert len(books) == 1 assert books[0].title == "The Hobbit" # to offset number of returned rows use offset() books = await Book.objects.limit(1).offset(1).all() assert len(books) == 1 assert books[0].title == "The Lord of the Rings" # alternatively use paginate that combines both books = await Book.objects.paginate(page=2, page_size=2).all() assert len(books) == 2 # note that we removed one book of Sapkowski in delete() # and recreated The Silmarillion - by default when no order_by is set # ordering sorts by primary_key column assert books[0].title == "The Witcher" assert books[1].title == "The Silmarillion" # to read more about pagination and number of rows # visit: https://collerek.github.io/ormar/queries/pagination-and-rows-number/ async def aggregations(): # count: assert 2 == await Author.objects.count() # exists assert await Book.objects.filter(title="The Hobbit").exists() # 
maximum assert 1990 == await Book.objects.max(columns=["year"]) # minimum assert 1937 == await Book.objects.min(columns=["year"]) # average assert 1964.75 == await Book.objects.avg(columns=["year"]) # sum assert 7859 == await Book.objects.sum(columns=["year"]) # to read more about aggregated functions # visit: https://collerek.github.io/ormar/queries/aggregations/ async def raw_data(): # extract raw data in a form of dicts or tuples # note that this skips the validation(!) as models are # not created from parsed data # get list of objects as dicts assert await Book.objects.values() == [ {"id": 1, "author": 1, "title": "The Hobbit", "year": 1937}, {"id": 2, "author": 1, "title": "The Lord of the Rings", "year": 1955}, {"id": 4, "author": 2, "title": "The Witcher", "year": 1990}, {"id": 5, "author": 1, "title": "The Silmarillion", "year": 1977}, ] # get list of objects as tuples assert await Book.objects.values_list() == [ (1, 1, "The Hobbit", 1937), (2, 1, "The Lord of the Rings", 1955), (4, 2, "The Witcher", 1990), (5, 1, "The Silmarillion", 1977), ] # filter data - note how you always get a list assert await Book.objects.filter(title="The Hobbit").values() == [ {"id": 1, "author": 1, "title": "The Hobbit", "year": 1937} ] # select only wanted fields assert await Book.objects.filter(title="The Hobbit").values(["id", "title"]) == [ {"id": 1, "title": "The Hobbit"} ] # if you select only one column you could flatten it with values_list assert await Book.objects.values_list("title", flatten=True) == [ "The Hobbit", "The Lord of the Rings", "The Witcher", "The Silmarillion", ] # to read more about extracting raw values # visit: https://collerek.github.io/ormar/queries/aggregations/ async def with_connect(function): # note that for any other backend than sqlite you actually need to # connect to the database to perform db operations async with database: await function() # note that if you use framework like `fastapi` you shouldn't connect # in your endpoints but have a 
global connection pool # check https://collerek.github.io/ormar/fastapi/ and section with db connection # gather and execute all functions # note - normally import should be at the beginning of the file import asyncio # note that normally you use gather() function to run several functions # concurrently but we actually modify the data and we rely on the order of functions for func in [ create, read, update, delete, joins, filter_and_sort, subset_of_columns, pagination, aggregations, raw_data, ]: print(f"Executing: {func.__name__}") asyncio.run(with_connect(func)) # drop the database tables metadata.drop_all(engine) ``` ## Ormar Specification ### QuerySet methods * `create(**kwargs): -> Model` * `get(*args, **kwargs): -> Model` * `get_or_none(*args, **kwargs): -> Optional[Model]` * `get_or_create(_defaults: Optional[Dict[str, Any]] = None, *args, **kwargs) -> Tuple[Model, bool]` * `first(*args, **kwargs): -> Model` * `update(each: bool = False, **kwargs) -> int` * `update_or_create(**kwargs) -> Model` * `bulk_create(objects: List[Model]) -> None` * `bulk_update(objects: List[Model], columns: List[str] = None) -> None` * `delete(*args, each: bool = False, **kwargs) -> int` * `all(*args, **kwargs) -> List[Optional[Model]]` * `iterate(*args, **kwargs) -> AsyncGenerator[Model]` * `filter(*args, **kwargs) -> QuerySet` * `exclude(*args, **kwargs) -> QuerySet` * `select_related(related: Union[List, str]) -> QuerySet` * `prefetch_related(related: Union[List, str]) -> QuerySet` * `limit(limit_count: int) -> QuerySet` * `offset(offset: int) -> QuerySet` * `count(distinct: bool = True) -> int` * `exists() -> bool` * `max(columns: List[str]) -> Any` * `min(columns: List[str]) -> Any` * `avg(columns: List[str]) -> Any` * `sum(columns: List[str]) -> Any` * `fields(columns: Union[List, str, set, dict]) -> QuerySet` * `exclude_fields(columns: Union[List, str, set, dict]) -> QuerySet` * `order_by(columns:Union[List, str]) -> QuerySet` * `values(fields: Union[List, str, Set, Dict])` 
* `values_list(fields: Union[List, str, Set, Dict])` #### Relation types * One to many - with `ForeignKey(to: Model)` * Many to many - with `ManyToMany(to: Model, Optional[through]: Model)` #### Model fields types Available Model Fields (with required args - optional ones in docs): * `String(max_length)` * `Text()` * `Boolean()` * `Integer()` * `Float()` * `Date()` * `Time()` * `DateTime()` * `JSON()` * `BigInteger()` * `SmallInteger()` * `Decimal(scale, precision)` * `UUID()` * `LargeBinary(max_length)` * `Enum(enum_class)` * `Enum` like Field - by passing `choices` to any other Field type * `EncryptedString` - by passing `encrypt_secret` and `encrypt_backend` * `ForeignKey(to)` * `ManyToMany(to)` ### Available fields options The following keyword arguments are supported on all field types. * `primary_key: bool` * `nullable: bool` * `default: Any` * `server_default: Any` * `index: bool` * `unique: bool` * `choices: typing.Sequence` * `name: str` * `pydantic_only: bool` All fields are required unless one of the following is set: * `nullable` - Creates a nullable column. Sets the default to `False`. Read the fields common parameters for details. * `sql_nullable` - Used to set different setting for pydantic and the database. Sets the default to `nullable` value. Read the fields common parameters for details. * `default` - Set a default value for the field. **Not available for relation fields** * `server_default` - Set a default value for the field on server side (like sqlalchemy's `func.now()`). **Not available for relation fields** * `primary key` with `autoincrement` - When a column is set to primary key and autoincrement is set on this column. Autoincrement is set by default on int primary keys. * `pydantic_only` - Field is available only as normal pydantic field, not stored in the database. ### Available signals Signals allow to trigger your function for a given event on a given Model. 
* `pre_save` * `post_save` * `pre_update` * `post_update` * `pre_delete` * `post_delete` * `pre_relation_add` * `post_relation_add` * `pre_relation_remove` * `post_relation_remove` [sqlalchemy-core]: https://docs.sqlalchemy.org/en/latest/core/ [databases]: https://github.com/encode/databases [pydantic]: https://pydantic-docs.helpmanual.io/ [encode/orm]: https://github.com/encode/orm/ [alembic]: https://alembic.sqlalchemy.org/en/latest/ [fastapi]: https://fastapi.tiangolo.com/ [documentation]: https://collerek.github.io/ormar/ [migrations]: https://collerek.github.io/ormar/models/migrations/ [asyncio]: https://docs.python.org/3/library/asyncio.html [releases]: https://collerek.github.io/ormar/releases/ [tests]: https://github.com/collerek/ormar/tree/master/tests ormar-0.12.2/docs/install.md000066400000000000000000000021311444363446500156460ustar00rootroot00000000000000## Installation Installation is as simple as: ```py pip install ormar ``` ### Dependencies Ormar uses `databases` for connectivity issues, `pydantic` for validation and `sqlalchemy-core` for queries. All three should install along the installation of ormar if not present at your system before. * databases * pydantic>=1.5 * sqlalchemy ## Optional dependencies *ormar* has three optional dependencies based on database backend you use: ### Postgresql ```py pip install ormar[postgresql] ``` Will install also `asyncpg` and `psycopg2`. ### Mysql ```py pip install ormar[mysql] ``` Will install also `aiomysql` and `pymysql`. ### Sqlite ```py pip install ormar[sqlite] ``` Will install also `aiosqlite`. ### Orjson ```py pip install ormar[orjson] ``` Will install also `orjson` that is much faster than builtin json parser. ### Crypto ```py pip install ormar[crypto] ``` Will install also `cryptography` that is required to work with encrypted columns. ### Manual installation of dependencies Of course, you can also install these requirements manually with `pip install asyncpg` etc. 
ormar-0.12.2/docs/models/000077500000000000000000000000001444363446500151445ustar00rootroot00000000000000ormar-0.12.2/docs/models/index.md000066400000000000000000000521401444363446500165770ustar00rootroot00000000000000# Models ## Defining models By defining an ormar Model you get corresponding **Pydantic model** as well as **Sqlalchemy table** for free. They are being managed in the background and you do not have to create them on your own. ### Model Class To build an ormar model you simply need to inherit a `ormar.Model` class. ```Python hl_lines="10" --8<-- "../docs_src/models/docs001.py" ``` ### Defining Fields Next assign one or more of the [Fields][fields] as a class level variables. #### Basic Field Types Each table **has to** have a primary key column, which you specify by setting `primary_key=True` on selected field. Only one primary key column is allowed. ```Python hl_lines="15 16 17" --8<-- "../docs_src/models/docs001.py" ``` !!! warning Not assigning `primary_key` column or assigning more than one column per `Model` will raise `ModelDefinitionError` exception. By default if you assign primary key to `Integer` field, the `autoincrement` option is set to true. You can disable by passing `autoincrement=False`. ```Python id: int = ormar.Integer(primary_key=True, autoincrement=False) ``` #### Non Database Fields Note that if you need a normal pydantic field in your model (used to store value on model or pass around some value) you can define a field with parameter `pydantic_only=True`. Fields created like this are added to the `pydantic` model fields -> so are subject to validation according to `Field` type, also appear in `dict()` and `json()` result. The difference is that **those fields are not saved in the database**. So they won't be included in underlying sqlalchemy `columns`, or `table` variables (check [Internals][Internals] section below to see how you can access those if you need). 
Subsequently `pydantic_only` fields won't be included in migrations or any database operation (like `save`, `update` etc.) Fields like those can be passed around into payload in `fastapi` request and will be returned in `fastapi` response (of course only if you set their value somewhere in your code as the value is **not** fetched from the db. If you pass a value in `fastapi` `request` and return the same instance that `fastapi` constructs for you in `request_model` you should get back exactly same value in `response`.). !!!warning `pydantic_only=True` fields are always **Optional** and it cannot be changed (otherwise db load validation would fail) !!!tip `pydantic_only=True` fields are a good solution if you need to pass additional information from outside of your API (i.e. frontend). They are not stored in db but you can access them in your `APIRoute` code and they also have `pydantic` validation. ```Python hl_lines="18" --8<-- "../docs_src/models/docs014.py" ``` If you combine `pydantic_only=True` field with `default` parameter and do not pass actual value in request you will always get default value. Since it can be a function you can set `default=datetime.datetime.now` and get current timestamp each time you call an endpoint etc. !!!note Note that both `pydantic_only` and `property_field` decorated field can be included/excluded in both `dict()` and `fastapi` response with `include`/`exclude` and `response_model_include`/`response_model_exclude` accordingly. 
```python # <==related of code removed for clarity==> class User(ormar.Model): class Meta: tablename: str = "users2" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) email: str = ormar.String(max_length=255, nullable=False) password: str = ormar.String(max_length=255) first_name: str = ormar.String(max_length=255) last_name: str = ormar.String(max_length=255) category: str = ormar.String(max_length=255, nullable=True) timestamp: datetime.datetime = ormar.DateTime( pydantic_only=True, default=datetime.datetime.now ) # <==related of code removed for clarity==> app =FastAPI() @app.post("/users/") async def create_user(user: User): return await user.save() # <==related of code removed for clarity==> def test_excluding_fields_in_endpoints(): client = TestClient(app) with client as client: timestamp = datetime.datetime.now() user = { "email": "test@domain.com", "password": "^*^%A*DA*IAAA", "first_name": "John", "last_name": "Doe", "timestamp": str(timestamp), } response = client.post("/users/", json=user) assert list(response.json().keys()) == [ "id", "email", "first_name", "last_name", "category", "timestamp", ] # returned is the same timestamp assert response.json().get("timestamp") == str(timestamp).replace(" ", "T") # <==related of code removed for clarity==> ``` #### Property fields Sometimes it's desirable to do some kind of calculation on the model instance. One of the most common examples can be concatenating two or more fields. Imagine you have `first_name` and `last_name` fields on your model, but would like to have `full_name` in the result of the `fastapi` query. You can create a new `pydantic` model with a `method` that accepts only `self` (so like default python `@property`) and populate it in your code. But it's so common that `ormar` has you covered. You can "materialize" a `property_field` on you `Model`. 
In theory you can, because `ormar` has a failsafe mechanism, but note that e.g. `mypy` will complain about re-decorating a property.
        # despite being decorated with property_field, if you explicitly exclude it, it will be gone
If for whatever reason you prefer to change the name in the database but keep the name in the model, you can do this by specifying the `name` parameter during Field declaration.
The only thing that `ormar` expects is a class with name `Meta` and two class variables: `metadata` and `database`. So instead of providing the same parameters over and over again for all models you should create a class and subclass it in all models.
Note that constraints are meant for combination of columns that should be in the index. To set an index on a single column use the [`index`](../fields/common-parameters.md#index) common parameter.
On the contrary in `pydantic` the default option is to ignore such extra fields, therefore `ormar` provides a `Meta.extra` setting to behave in the same way.
To set the same setting on all models check the [best practices](../models/index/#best-practice) and `BaseMeta` concept.
If you want to initiate your `Model` and at the same time save it in the database use a QuerySet's method `create()`.
[sqlalchemy-core]: https://docs.sqlalchemy.org/en/latest/core/ [sqlalchemy-metadata]: https://docs.sqlalchemy.org/en/13/core/metadata.html [databases]: https://github.com/encode/databases [sqlalchemy connection string]: https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls [sqlalchemy table creation]: https://docs.sqlalchemy.org/en/13/core/metadata.html#creating-and-dropping-database-tables [alembic]: https://alembic.sqlalchemy.org/en/latest/tutorial.html [save status]: ../models/index/#model-save-status [Internals]: ../models/internals.md ormar-0.12.2/docs/models/inheritance.md000066400000000000000000000476131444363446500177720ustar00rootroot00000000000000# Inheritance Out of various types of ORM models inheritance `ormar` currently supports two of them: * **Mixins** * **Concrete table inheritance** (with parents set to `abstract=True`) ## Types of inheritance The short summary of different types of inheritance is: * **Mixins [SUPPORTED]** - don't subclass `ormar.Model`, just define fields that are later used on different models (like `created_date` and `updated_date` on each model), only actual models create tables, but those fields from mixins are added * **Concrete table inheritance [SUPPORTED]** - means that parent is marked as abstract and each child has its own table with columns from a parent and own child columns, kind of similar to Mixins but parent also is a Model * **Single table inheritance [NOT SUPPORTED]** - means that only one table is created with fields that are combination/sum of the parent and all children models but child models use only subset of column in db (all parent and own ones, skipping the other children ones) * **Multi/ Joined table inheritance [NOT SUPPORTED]** - means that part of the columns is saved on parent model and part is saved on child model that are connected to each other by kind of one to one relation and under the hood you operate on two models at once * **Proxy models [NOT SUPPORTED]** - means that only 
Since this abstract Model will never be initialized you can skip `metadata` and `database` in its `Meta` definition.
But if you provide it - it will be inherited, that way you do not have to provide `metadata` and `database` in the final/concrete class. Note that you can always overwrite it in a child/concrete class if you need to. Moreover, at least one of the classes in the inheritance chain has to provide both `database` and `metadata` - otherwise an error will be raised.
```python # base class class DateFieldsModel(ormar.Model): class Meta: abstract = True metadata = metadata database = db # note that UniqueColumns need sqlalchemy db columns names not the ormar ones constraints = [ormar.UniqueColumns("creation_date", "modification_date")] created_date: datetime.datetime = ormar.DateTime( default=datetime.datetime.now, name="creation_date" ) updated_date: datetime.datetime = ormar.DateTime( default=datetime.datetime.now, name="modification_date" ) class RedefinedField(DateFieldsModel): class Meta(ormar.ModelMeta): tablename = "redefines" metadata = metadata database = db id: int = ormar.Integer(primary_key=True) # here the created_date is replaced by the String field created_date: str = ormar.String(max_length=200, name="creation_date") # you can verify that the final field is correctly declared and created changed_field = RedefinedField.Meta.model_fields["created_date"] assert changed_field.default is None assert changed_field.alias == "creation_date" assert any(x.name == "creation_date" for x in RedefinedField.Meta.table.columns) assert isinstance( RedefinedField.Meta.table.columns["creation_date"].type, sqlalchemy.sql.sqltypes.String, ) ``` !!!warning If you declare `UniqueColumns` constraint with column names, the final model **has to have** a column with the same name declared. Otherwise, the `ModelDefinitionError` will be raised. So in example above if you do not provide `name` for `created_date` in `RedefinedField` model ormar will complain. `created_date: str = ormar.String(max_length=200) # exception` `created_date: str = ormar.String(max_length=200, name="creation_date2") # exception` ## Relations in inheritance You can declare relations in every step of inheritance, so both in parent and child classes. When you define a relation on a child model level it's either overwriting the relation defined in parent model (if the same field name is used), or is accessible only to this child if you define a new relation. 
* leave this for `ormar` to handle -> auto-adjusted related_names are: original related_name + "_" + child model **table** name
Now the columns look much better.
The default name is original related_name + '_' + child table name.
As you can see above `ormar` cloned the Through model for each of the Child classes and added the Child **class** name at the end, while for the table names of the cloned models the name of the **table** from the child is used.
That means that each time you define a Child model you need to either manually create the table in the database, or run a migration (with alembic).

## exclude_parent_fields

Ormar allows you to skip certain fields in an inherited model that are coming from a parent model.

!!!Note
    Note that the same behaviour can be achieved by splitting the model into more abstract models and mixins - which is a preferred way in normal circumstances.

To skip certain fields from a child model, list all fields that you want to skip in `model.Meta.exclude_parent_fields` parameter like follows:

```python
metadata = sa.MetaData()
db = databases.Database(DATABASE_URL)


class AuditModel(ormar.Model):
    class Meta:
        abstract = True

    created_by: str = ormar.String(max_length=100)
    updated_by: str = ormar.String(max_length=100, default="Sam")


class DateFieldsModel(ormar.Model):
    class Meta(ormar.ModelMeta):
        abstract = True
        metadata = metadata
        database = db

    created_date: datetime.datetime = ormar.DateTime(
        default=datetime.datetime.now, name="creation_date"
    )
    updated_date: datetime.datetime = ormar.DateTime(
        default=datetime.datetime.now, name="modification_date"
    )


class Category(DateFieldsModel, AuditModel):
    class Meta(ormar.ModelMeta):
        tablename = "categories"
        # set fields that should be skipped
        exclude_parent_fields = ["updated_by", "updated_date"]

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=50, unique=True, index=True)
    code: int = ormar.Integer()


# Note that now the update fields in Category are gone in all places -> ormar fields, pydantic fields and sqlalchemy table columns
# so full list of available fields in Category is: ["created_by", "created_date", "id", "name", "code"]
```

Note how you simply need to provide field names and it will exclude the parent field regardless of which parent model the field comes from.
!!!Note Note that if you want to overwrite a field in child model you do not have to exclude it, simpy overwrite the field declaration in child model with same field name. !!!Warning Note that this kind of behavior can confuse mypy and static type checkers, yet accessing the non existing fields will fail at runtime. That's why splitting the base classes is preferred. The same effect can be achieved by splitting base classes like: ```python metadata = sa.MetaData() db = databases.Database(DATABASE_URL) class AuditCreateModel(ormar.Model): class Meta: abstract = True created_by: str = ormar.String(max_length=100) class AuditUpdateModel(ormar.Model): class Meta: abstract = True updated_by: str = ormar.String(max_length=100, default="Sam") class CreateDateFieldsModel(ormar.Model): class Meta(ormar.ModelMeta): abstract = True metadata = metadata database = db created_date: datetime.datetime = ormar.DateTime( default=datetime.datetime.now, name="creation_date" ) class UpdateDateFieldsModel(ormar.Model): class Meta(ormar.ModelMeta): abstract = True metadata = metadata database = db updated_date: datetime.datetime = ormar.DateTime( default=datetime.datetime.now, name="modification_date" ) class Category(CreateDateFieldsModel, AuditCreateModel): class Meta(ormar.ModelMeta): tablename = "categories" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=50, unique=True, index=True) code: int = ormar.Integer() ``` That way you can inherit from both create and update classes if needed, and only one of them otherwise. ormar-0.12.2/docs/models/internals.md000066400000000000000000000043021444363446500174640ustar00rootroot00000000000000# Internals Apart from special parameters defined in the `Model` during definition (tablename, metadata etc.) the `Model` provides you with useful internals. ## Pydantic Model All `Model` classes inherit from `pydantic.BaseModel` so you can access all normal attributes of pydantic models. 
For example to list pydantic model fields you can: ```Python hl_lines="20" --8<-- "../docs_src/models/docs003.py" ``` !!!tip Note how the primary key `id` field is optional as `Integer` primary key by default has `autoincrement` set to `True`. !!!info For more options visit official [pydantic][pydantic] documentation. ## Sqlalchemy Table To access auto created sqlalchemy table you can use `Model.Meta.table` parameter For example to list table columns you can: ```Python hl_lines="20" --8<-- "../docs_src/models/docs004.py" ``` !!!tip You can access table primary key name by `Course.Meta.pkname` !!!info For more options visit official [sqlalchemy-metadata][sqlalchemy-metadata] documentation. ## Fields Definition To access ormar `Fields` you can use `Model.Meta.model_fields` parameter For example to list table model fields you can: ```Python hl_lines="20" --8<-- "../docs_src/models/docs005.py" ``` !!!info Note that fields stored on a model are `classes` not `instances`. So if you print just model fields you will get: `{'id': , ` `'name': , ` `'completed': }` [fields]: ./fields.md [relations]: ./relations/index.md [queries]: ./queries.md [pydantic]: https://pydantic-docs.helpmanual.io/ [sqlalchemy-core]: https://docs.sqlalchemy.org/en/latest/core/ [sqlalchemy-metadata]: https://docs.sqlalchemy.org/en/13/core/metadata.html [databases]: https://github.com/encode/databases [sqlalchemy connection string]: https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls [sqlalchemy table creation]: https://docs.sqlalchemy.org/en/13/core/metadata.html#creating-and-dropping-database-tables [alembic]: https://alembic.sqlalchemy.org/en/latest/tutorial.html [save status]: ../models/#model-save-status [Internals]: #internals ormar-0.12.2/docs/models/methods.md000066400000000000000000000600201444363446500171270ustar00rootroot00000000000000# Model methods !!!tip Main interaction with the databases is exposed through a `QuerySet` object exposed on each model as `Model.objects` 
similar to the django orm. To read more about **querying, joining tables, excluding fields etc. visit [queries][queries] section.**

Each model instance has a set of methods to `save`, `update` or `load` itself.

Available methods are described below.

## `pydantic` methods

Note that each `ormar.Model` is also a `pydantic.BaseModel`, so all `pydantic` methods are also available on a model, especially `dict()` and `json()` methods that can also accept `exclude`, `include` and other parameters.

To read more check [pydantic][pydantic] documentation

## construct

`construct` is a raw equivalent of `__init__` method used for construction of new instances.

The difference is that `construct` skips validations, so it should be used when you know that data is correct and can be trusted. The benefit of using construct is the speed of execution due to skipped validation.

!!!note
    Note that in contrast to `pydantic.construct` method - the `ormar` equivalent will also process the nested related models.

!!!warning
    Bear in mind that due to skipped validation the `construct` method does not perform any conversions, checks etc. So it's your responsibility to provide the data that is valid and can be consumed by the database.

    The only two things that construct still performs are:

    * Providing a `default` value for not set fields
    * Initializing nested ormar models if you pass a dictionary or a primary key value

## dict

`dict` is a method inherited from `pydantic`, yet `ormar` adds its own parameters and has some nuances when working with default values, therefore it's listed here for clarity.

`dict` as the name suggests exports data from the model tree to a dictionary.

Explanation of dict parameters:

### include (`ormar` modified)

`include: Union[Set, Dict] = None`

Set or dictionary of field names to include in returned dictionary.

Note that `pydantic` has an uncommon pattern of including/ excluding fields in lists (so also nested models) by an index.
And if you want to exclude the field in all children you need to pass a `__all__` key to dictionary. You cannot exclude nested models in `Set`s in `pydantic` but you can in `ormar` (by adding double underscore on relation name i.e. to exclude name of category for a book you can use `exclude={"book__category__name"}`)

`ormar` does not support by-index exclusions/inclusions and accepts a simplified and more user-friendly notation.

To check how you can include/exclude fields, including nested fields check out [fields](../queries/select-columns.md#fields) section that has an explanation and a lot of samples.

!!!note
    Thanks to the fact that in `ormar` you can exclude nested models in sets, you can exclude from a whole model tree in `response_model_exclude` and `response_model_include` in fastapi!

### exclude (`ormar` modified)

`exclude: Union[Set, Dict] = None`

Set or dictionary of field names to exclude in returned dictionary.

Note that `pydantic` has an uncommon pattern of including/ excluding fields in lists (so also nested models) by an index. And if you want to exclude the field in all children you need to pass a `__all__` key to dictionary. You cannot exclude nested models in `Set`s in `pydantic` but you can in `ormar` (by adding double underscore on relation name i.e. to exclude name of category for a book you can use `exclude={"book__category__name"}`)

`ormar` does not support by-index exclusions/inclusions and accepts a simplified and more user-friendly notation.

To check how you can include/exclude fields, including nested fields check out [fields](../queries/select-columns.md#fields) section that has an explanation and a lot of samples.

!!!note
    Thanks to the fact that in `ormar` you can exclude nested models in sets, you can exclude from a whole model tree in `response_model_exclude` and `response_model_include` in fastapi!
### exclude_unset

`exclude_unset: bool = False`

Flag indicates whether fields which were not explicitly set when creating the model should be excluded from the returned dictionary.

!!!warning
    Note that after you save data into the database each field has its own value -> either provided by you, default, or `None`. That means that when you load the data from the database, **all** fields are set, and this flag basically stops working!

```python
class Category(ormar.Model):
    class Meta:
        tablename = "categories"
        metadata = metadata
        database = database

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100, default="Test")
    visibility: bool = ormar.Boolean(default=True)


class Item(ormar.Model):
    class Meta:
        tablename = "items"
        metadata = metadata
        database = database

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)
    price: float = ormar.Float(default=9.99)
    categories: List[Category] = ormar.ManyToMany(Category)


category = Category(name="Test 2")
assert category.dict() == {'id': None, 'items': [], 'name': 'Test 2', 'visibility': True}
assert category.dict(exclude_unset=True) == {'items': [], 'name': 'Test 2'}

await category.save()
category2 = await Category.objects.get()

assert category2.dict() == {'id': 1, 'items': [], 'name': 'Test 2', 'visibility': True}
# NOTE how after loading from db all fields are set explicitly
# as this is what happens when you populate a model from db
assert category2.dict(exclude_unset=True) == {'id': 1, 'items': [], 'name': 'Test 2', 'visibility': True}
```

### exclude_defaults

`exclude_defaults: bool = False`

Flag indicates whether fields which are equal to their default values (whether set or otherwise) should be excluded from the returned dictionary

```python
class Category(ormar.Model):
    class Meta:
        tablename = "categories"
        metadata = metadata
        database = database

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100, default="Test")
    visibility: bool = ormar.Boolean(default=True)


class
Item(ormar.Model): class Meta: tablename = "items" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) price: float = ormar.Float(default=9.99) categories: List[Category] = ormar.ManyToMany(Category) category = Category() # note that Integer pk is by default autoincrement so optional assert category.dict() == {'id': None, 'items': [], 'name': 'Test', 'visibility': True} assert category.dict(exclude_defaults=True) == {'items': []} # save and reload the data await category.save() category2 = await Category.objects.get() assert category2.dict() == {'id': 1, 'items': [], 'name': 'Test', 'visibility': True} assert category2.dict(exclude_defaults=True) == {'id': 1, 'items': []} ``` ### exclude_none `exclude_none: bool = False` Flag indicates whether fields which are equal to `None` should be excluded from the returned dictionary. ```python class Category(ormar.Model): class Meta: tablename = "categories" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, default="Test", nullable=True) visibility: bool = ormar.Boolean(default=True) class Item(ormar.Model): class Meta: tablename = "items" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) price: float = ormar.Float(default=9.99) categories: List[Category] = ormar.ManyToMany(Category) category = Category(name=None) assert category.dict() == {'id': None, 'items': [], 'name': None, 'visibility': True} # note the id is not set yet so None and excluded assert category.dict(exclude_none=True) == {'items': [], 'visibility': True} await category.save() category2 = await Category.objects.get() assert category2.dict() == {'id': 1, 'items': [], 'name': None, 'visibility': True} assert category2.dict(exclude_none=True) == {'id': 1, 'items': [], 'visibility': True} ``` ### exclude_primary_keys (`ormar` only) 
`exclude_primary_keys: bool = False` Setting flag to `True` will exclude all primary key columns in a tree, including nested models. ```python class Item(ormar.Model): class Meta: tablename = "items" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) item1 = Item(id=1, name="Test Item") assert item1.dict() == {"id": 1, "name": "Test Item"} assert item1.dict(exclude_primary_keys=True) == {"name": "Test Item"} ``` ### exclude_through_models (`ormar` only) `exclude_through_models: bool = False` `Through` models are auto added for every `ManyToMany` relation, and they hold additional parameters on linking model/table. Setting the `exclude_through_models=True` will exclude all through models, including Through models of submodels. ```python class Category(ormar.Model): class Meta: tablename = "categories" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Item(ormar.Model): class Meta: tablename = "items" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) categories: List[Category] = ormar.ManyToMany(Category) # tree defining the models item_dict = { "name": "test", "categories": [{"name": "test cat"}, {"name": "test cat2"}], } # save whole tree await Item(**item_dict).save_related(follow=True, save_all=True) # get the saved values item = await Item.objects.select_related("categories").get() # by default you can see the through models (itemcategory) assert item.dict() == {'id': 1, 'name': 'test', 'categories': [ {'id': 1, 'name': 'test cat', 'itemcategory': {'id': 1, 'category': None, 'item': None}}, {'id': 2, 'name': 'test cat2', 'itemcategory': {'id': 2, 'category': None, 'item': None}} ]} # you can exclude those fields/ models assert item.dict(exclude_through_models=True) == { 'id': 1, 'name': 'test', 'categories': [ {'id': 1, 'name': 'test cat'}, 
{'id': 2, 'name': 'test cat2'} ]} ``` ## json `json()` has exactly the same parameters as `dict()` so check above. Of course the end result is a string with json representation and not a dictionary. ## get_pydantic `get_pydantic(include: Union[Set, Dict] = None, exclude: Union[Set, Dict] = None)` This method allows you to generate `pydantic` models from your ormar models without you needing to retype all the fields. Note that if you have nested models, it **will generate whole tree of pydantic models for you!** Moreover, you can pass `exclude` and/or `include` parameters to keep only the fields that you want to, including in nested models. That means that this way you can effortlessly create pydantic models for requests and responses in `fastapi`. !!!Note To read more about possible excludes/includes and how to structure your exclude dictionary or set visit [fields](../queries/select-columns.md#fields) section of documentation Given sample ormar models like follows: ```python metadata = sqlalchemy.MetaData() database = databases.Database(DATABASE_URL, force_rollback=True) class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Category(ormar.Model): class Meta(BaseMeta): tablename = "categories" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Item(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, default="test") category: Optional[Category] = ormar.ForeignKey(Category, nullable=True) ``` You can generate pydantic models out of it with a one simple call. ```python PydanticCategory = Category.get_pydantic(include={"id", "name"} ``` Which will generate model equivalent of: ```python class Category(BaseModel): id: Optional[int] name: Optional[str] = "test" ``` !!!warning Note that it's not a good practice to have several classes with same name in one module, as well as it would break `fastapi` docs. 
That's why ormar adds random 3 uppercase letters to the class name. In example above it means that in reality class would be named i.e. `Category_XIP(BaseModel)`.

To exclude or include nested fields you can use dict or double underscores.

```python
# both calls are equivalent
PydanticCategory = Category.get_pydantic(include={"id", "items__id"})
PydanticCategory = Category.get_pydantic(include={"id": ..., "items": {"id"}})
```

and results in a generated structure as follows:

```python
class Item(BaseModel):
    id: Optional[int]


class Category(BaseModel):
    id: Optional[int]
    items: Optional[List[Item]]
```

Of course, you can also use deeply nested structures and ormar will generate its pydantic equivalent for you (in a way that excludes loops).

Note how `Item` model above does not have a reference to `Category` although in ormar the relation is bidirectional (and `ormar.Item` has `categories` field).

!!!warning
    Note that the generated pydantic model will inherit all **field** validators from the original `ormar` model, that includes the ormar choices validator as well as validators defined with `pydantic.validator` decorator.

    But, at the same time all root validators present on `ormar` models will **NOT** be copied to the generated pydantic model. Since root validator can operate on all fields and a user can exclude some fields during generation of pydantic model it's not safe to copy those validators. If required, you need to redefine/ manually copy them to generated pydantic model.

## load

By default when you query a table without prefetching related models, the ormar will still construct your related models, but populate them only with the pk value. You can load the related model by calling `load()` method.

`load()` can also be used to refresh the model from the database (if it was changed by some other process).
```python track = await Track.objects.get(name='The Bird') track.album.pk # will return malibu album pk (1) track.album.name # will return None # you need to actually load the data first await track.album.load() track.album.name # will return 'Malibu' ``` ## load_all `load_all(follow: bool = False, exclude: Union[List, str, Set, Dict] = None) -> Model` Method works like `load()` but also goes through all relations of the `Model` on which the method is called, and reloads them from database. By default the `load_all` method loads only models that are directly related (one step away) to the model on which the method is called. But you can specify the `follow=True` parameter to traverse through nested models and load all of them in the relation tree. !!!warning To avoid circular updates with `follow=True` set, `load_all` keeps a set of already visited Models, and won't perform nested `loads` on Models that were already visited. So if you have a diamond or circular relations types you need to perform the loads in a manual way. ```python # in example like this the second Street (coming from City) won't be load_all, so ZipCode won't be reloaded Street -> District -> City -> Street -> ZipCode ``` Method accepts also optional exclude parameter that works exactly the same as exclude_fields method in `QuerySet`. That way you can remove fields from related models being refreshed or skip whole related models. Method performs one database query so it's more efficient than nested calls to `load()` and `all()` on related models. !!!tip To read more about `exclude` read [exclude_fields][exclude_fields] !!!warning All relations are cleared on `load_all()`, so if you exclude some nested models they will be empty after call. ## save `save() -> self` You can create new models by using `QuerySet.create()` method or by initializing your model as a normal pydantic model and later calling `save()` method. 
`save()` can also be used to persist changes that you made to the model, but only if the primary key is not set or the model does not exist in database. The `save()` method does not check if the model exists in db, so if it does you will get a integrity error from your selected db backend if trying to save model with already existing primary key. ```python track = Track(name='The Bird') await track.save() # will persist the model in database track = await Track.objects.get(name='The Bird') await track.save() # will raise integrity error as pk is populated ``` ## update `update(_columns: List[str] = None, **kwargs) -> self` You can update models by using `QuerySet.update()` method or by updating your model attributes (fields) and calling `update()` method. If you try to update a model without a primary key set a `ModelPersistenceError` exception will be thrown. To persist a newly created model use `save()` or `upsert(**kwargs)` methods. ```python track = await Track.objects.get(name='The Bird') await track.update(name='The Bird Strikes Again') ``` To update only selected columns from model into the database provide a list of columns that should be updated to `_columns` argument. 
In example: ```python class Movie(ormar.Model): class Meta: tablename = "movies" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="title") year: int = ormar.Integer() profit: float = ormar.Float() terminator = await Movie(name='Terminator', year=1984, profit=0.078).save() terminator.name = "Terminator 2" terminator.year = 1991 terminator.profit = 0.520 # update only name await terminator.update(_columns=["name"]) # note that terminator instance was not reloaded so assert terminator.year == 1991 # but once you load the data from db you see it was not updated await terminator.load() assert terminator.year == 1984 ``` !!!warning Note that `update()` does not refresh the instance of the Model, so if you change more columns than you pass in `_columns` list your Model instance will have different values than the database! ## upsert `upsert(**kwargs) -> self` It's a proxy to either `save()` or `update(**kwargs)` methods described above. If the primary key is set -> the `update` method will be called. If the pk is not set the `save()` method will be called. ```python track = Track(name='The Bird') await track.upsert() # will call save as the pk is empty track = await Track.objects.get(name='The Bird') await track.upsert(name='The Bird Strikes Again') # will call update as pk is already populated ``` ## delete You can delete models by using `QuerySet.delete()` method or by using your model and calling `delete()` method. ```python track = await Track.objects.get(name='The Bird') await track.delete() # will delete the model from database ``` !!!tip Note that that `track` object stays the same, only record in the database is removed. 
## save_related

`save_related(follow: bool = False, save_all: bool = False, exclude=Optional[Union[Set, Dict]]) -> None`

Method goes through all relations of the `Model` on which the method is called, and calls `upsert()` method on each model that is **not** saved.

To understand when a model is saved check [save status][save status] section above.

By default the `save_related` method saves only models that are directly related (one step away) to the model on which the method is called.

But you can specify the `follow=True` parameter to traverse through nested models and save all of them in the relation tree.

By default save_related saves only models that do not have `saved` status, meaning that they were modified in current scope.

If you want to force saving all of the related models use `save_all=True` flag, which will upsert all related models, regardless of their save status.

If you want to skip saving some of the relations you can pass `exclude` parameter.

`Exclude` can be a set of own model relations, or it can be a dictionary that can also contain nested items.

!!!note
    Note that `exclude` parameter in `save_related` accepts only relation field names, so if you pass any other fields they will be saved anyway

!!!note
    To read more about the structure of possible values passed to `exclude` check `Queryset.fields` method documentation.

!!!warning
    To avoid circular updates with `follow=True` set, `save_related` keeps a set of already visited Models on each branch of relation tree, and won't perform nested `save_related` on Models that were already visited.

    So if you have circular relation types you need to perform the updates in a manual way.

Note that with `save_all=True` and `follow=True` you can use `save_related()` to save whole relation tree at once.
Example: ```python class Department(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) department_name: str = ormar.String(max_length=100) class Course(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) course_name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean() department: Optional[Department] = ormar.ForeignKey(Department) class Student(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) courses = ormar.ManyToMany(Course) to_save = { "department_name": "Ormar", "courses": [ {"course_name": "basic1", "completed": True, "students": [ {"name": "Jack"}, {"name": "Abi"} ]}, {"course_name": "basic2", "completed": True, "students": [ {"name": "Kate"}, {"name": "Miranda"} ] }, ], } # initializa whole tree department = Department(**to_save) # save all at once (one after another) await department.save_related(follow=True, save_all=True) department_check = await Department.objects.select_all(follow=True).get() to_exclude = { "id": ..., "courses": { "id": ..., "students": {"id", "studentcourse"} } } # after excluding ids and through models you get exact same payload used to # construct whole tree assert department_check.dict(exclude=to_exclude) == to_save ``` !!!warning `save_related()` iterates all relations and all models and upserts() them one by one, so it will save all models but might not be optimal in regard of number of database queries. 
[fields]: ../fields.md [relations]: ../relations/index.md [queries]: ../queries/index.md [pydantic]: https://pydantic-docs.helpmanual.io/ [sqlalchemy-core]: https://docs.sqlalchemy.org/en/latest/core/ [sqlalchemy-metadata]: https://docs.sqlalchemy.org/en/13/core/metadata.html [databases]: https://github.com/encode/databases [sqlalchemy connection string]: https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls [sqlalchemy table creation]: https://docs.sqlalchemy.org/en/13/core/metadata.html#creating-and-dropping-database-tables [alembic]: https://alembic.sqlalchemy.org/en/latest/tutorial.html [save status]: ../models/index/#model-save-status [Internals]: #internals [exclude_fields]: ../queries/select-columns.md#exclude_fields ormar-0.12.2/docs/models/migrations.md000066400000000000000000000140441444363446500176450ustar00rootroot00000000000000# Migrations ## Database Initialization Note that all examples assume that you already have a database. If that is not the case and you need to create your tables, that's super easy as `ormar` is using sqlalchemy for underlying table construction. All you have to do is call `create_all()` like in the example below. ```python import sqlalchemy # get your database url in sqlalchemy format - same as used with databases instance used in Model definition engine = sqlalchemy.create_engine("sqlite:///test.db") # note that this has to be the same metadata that is used in ormar Models definition metadata.create_all(engine) ``` You can also create single tables, sqlalchemy tables are exposed in `ormar.Meta` class. 
```python import sqlalchemy # get your database url in sqlalchemy format - same as used with databases instance used in Model definition engine = sqlalchemy.create_engine("sqlite:///test.db") # Artist is an ormar model from previous examples Artist.Meta.table.create(engine) ``` !!!warning You need to create the tables only once, so use a python console for that or remove the script from your production code after first use. ## Alembic usage Likewise as with tables, since we base tables on sqlalchemy for migrations please use [alembic][alembic]. ### Initialization Use command line to reproduce this minimalistic example. ```python alembic init alembic alembic revision --autogenerate -m "made some changes" alembic upgrade head ``` ### Sample env.py file A quick example of alembic migrations should be something similar to: When you have application structure like: ``` -> app -> alembic (initialized folder - so run alembic init alembic inside app folder) -> models (here are the models) -> __init__.py -> my_models.py ``` Your `env.py` file (in alembic folder) can look something like: ```python from logging.config import fileConfig from sqlalchemy import create_engine from alembic import context import sys, os # add app folder to system path (alternative is running it from parent folder with python -m ...) myPath = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, myPath + '/../../') # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. 
fileConfig(config.config_file_name) # add your model's MetaData object here (the one used in ormar) # for 'autogenerate' support from app.models.my_models import metadata target_metadata = metadata # set your url here or import from settings # note that by default url is in saved sqlachemy.url variable in alembic.ini file URL = "sqlite:///test.db" def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ context.configure( url=URL, target_metadata=target_metadata, literal_binds=True, dialect_opts={"paramstyle": "named"}, # if you use UUID field set also this param # the prefix has to match sqlalchemy import name in alembic # that can be set by sqlalchemy_module_prefix option (default 'sa.') user_module_prefix='sa.' ) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ connectable = create_engine(URL) with connectable.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata, # if you use UUID field set also this param # the prefix has to match sqlalchemy import name in alembic # that can be set by sqlalchemy_module_prefix option (default 'sa.') user_module_prefix='sa.' ) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() ``` ### Excluding tables You can also include/exclude specific tables with `include_object` parameter passed to `context.configure`. That should be a function returning `True/False` for given objects. 
A sample function excluding tables starting with `data_` in name unless it's 'data_jobs': ```python def include_object(object, name, type_, reflected, compare_to): if name and name.startswith('data_') and name not in ['data_jobs']: return False return True ``` !!!note Function parameters for `include_objects` (you can change the name) are required and defined in alembic to check what they do check the [alembic][alembic] documentation And you pass it into context like (both in online and offline): ```python context.configure( url=URL, target_metadata=target_metadata, literal_binds=True, dialect_opts={"paramstyle": "named"}, user_module_prefix='sa.', include_object=include_object ) ``` !!!info You can read more about table creation, altering and migrations in [sqlalchemy table creation][sqlalchemy table creation] documentation. [fields]: ./fields.md [relations]: ./relations/index.md [queries]: ./queries.md [pydantic]: https://pydantic-docs.helpmanual.io/ [sqlalchemy-core]: https://docs.sqlalchemy.org/en/latest/core/ [sqlalchemy-metadata]: https://docs.sqlalchemy.org/en/13/core/metadata.html [databases]: https://github.com/encode/databases [sqlalchemy connection string]: https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls [sqlalchemy table creation]: https://docs.sqlalchemy.org/en/13/core/metadata.html#creating-and-dropping-database-tables [alembic]: https://alembic.sqlalchemy.org/en/latest/tutorial.html [save status]: ../models/index/#model-save-status [Internals]: #internals ormar-0.12.2/docs/mypy.md000066400000000000000000000013161444363446500152020ustar00rootroot00000000000000To provide better errors check you should use mypy with pydantic [plugin][plugin] Note that legacy model declaration type will raise static type analyzers errors. So you **cannot use the old notation** like this: ```Python hl_lines="15-17" --8<-- "../docs_src/models/docs011.py" ``` Instead switch to notation introduced in version 0.4.0. 
```Python hl_lines="15-17" --8<-- "../docs_src/models/docs012.py" ``` Note that above example is not using the type hints, so further operations with mypy might fail, depending on the context. Preferred notation should look liked this: ```Python hl_lines="15-17" --8<-- "../docs_src/models/docs001.py" ``` [plugin]: https://pydantic-docs.helpmanual.io/mypy_plugin/ormar-0.12.2/docs/plugin.md000066400000000000000000000014711444363446500155040ustar00rootroot00000000000000While `ormar` will work with any IDE there is a PyCharm `pydantic` plugin that enhances the user experience for this IDE. Plugin is available on the JetBrains Plugins Repository for PyCharm: [plugin page][plugin page]. You can install the plugin for free from the plugin marketplace (PyCharm's Preferences -> Plugin -> Marketplace -> search "pydantic"). !!!note For plugin to work properly you need to provide valid type hints for model fields. !!!info Plugin supports type hints, argument inspection and more but mainly only for __init__ methods More information can be found on the [official plugin page](https://plugins.jetbrains.com/plugin/12861-pydantic) and [github repository](https://github.com/koxudaxi/pydantic-pycharm-plugin). [plugin page]: https://plugins.jetbrains.com/plugin/12861-pydanticormar-0.12.2/docs/queries/000077500000000000000000000000001444363446500153365ustar00rootroot00000000000000ormar-0.12.2/docs/queries/aggregations.md000066400000000000000000000236711444363446500203430ustar00rootroot00000000000000# Aggregation functions Currently 6 aggregation functions are supported. 
* `count(distinct: bool = True) -> int` * `exists() -> bool` * `sum(columns) -> Any` * `avg(columns) -> Any` * `min(columns) -> Any` * `max(columns) -> Any` * `QuerysetProxy` * `QuerysetProxy.count(distinct=True)` method * `QuerysetProxy.exists()` method * `QuerysetProxy.sum(columns)` method * `QuerysetProxy.avg(columns)` method * `QuerysetProxy.min(column)` method * `QuerysetProxy.max(columns)` method ## count `count(distinct: bool = True) -> int` Returns number of rows matching the given criteria (i.e. applied with `filter` and `exclude`). If `distinct` is `True` (the default), this will return the number of primary rows selected. If `False`, the count will be the total number of rows returned (including extra rows for `one-to-many` or `many-to-many` left `select_related` table joins). `False` is the legacy (buggy) behavior for workflows that depend on it. ```python class Book(ormar.Model): class Meta: tablename = "books" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) author: str = ormar.String(max_length=100) genre: str = ormar.String( max_length=100, default="Fiction", choices=["Fiction", "Adventure", "Historic", "Fantasy"], ) ``` ```python # returns count of rows in db for Books model no_of_books = await Book.objects.count() ``` ## exists `exists() -> bool` Returns a bool value to confirm if there are rows matching the given criteria (applied with `filter` and `exclude`) ```python class Book(ormar.Model): class Meta: tablename = "books" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) author: str = ormar.String(max_length=100) genre: str = ormar.String( max_length=100, default="Fiction", choices=["Fiction", "Adventure", "Historic", "Fantasy"], ) ``` ```python # returns a boolean value if given row exists has_sample = await Book.objects.filter(title='Sample').exists() ``` ## sum `sum(columns) -> Any` Returns sum 
value of columns for rows matching the given criteria (applied with `filter` and `exclude` if set before). You can pass one or many column names including related columns. As of now each column passed is aggregated separately (so `sum(col1+col2)` is not possible, you can have `sum(col1, col2)` and later add 2 returned sums in python) You cannot `sum` non numeric columns. If you aggregate on one column, the single value is directly returned as a result If you aggregate on multiple columns a dictionary with column: result pairs is returned Given models like follows ```Python --8<-- "../docs_src/aggregations/docs001.py" ``` A sample usage might look like following ```python author = await Author(name="Author 1").save() await Book(title="Book 1", year=1920, ranking=3, author=author).save() await Book(title="Book 2", year=1930, ranking=1, author=author).save() await Book(title="Book 3", year=1923, ranking=5, author=author).save() assert await Book.objects.sum("year") == 5773 result = await Book.objects.sum(["year", "ranking"]) assert result == dict(year=5773, ranking=9) try: # cannot sum string column await Book.objects.sum("title") except ormar.QueryDefinitionError: pass assert await Author.objects.select_related("books").sum("books__year") == 5773 result = await Author.objects.select_related("books").sum( ["books__year", "books__ranking"] ) assert result == dict(books__year=5773, books__ranking=9) assert ( await Author.objects.select_related("books") .filter(books__year__lt=1925) .sum("books__year") == 3843 ) ``` ## avg `avg(columns) -> Any` Returns avg value of columns for rows matching the given criteria (applied with `filter` and `exclude` if set before). You can pass one or many column names including related columns. As of now each column passed is aggregated separately (so `sum(col1+col2)` is not possible, you can have `sum(col1, col2)` and later add 2 returned sums in python) You cannot `avg` non numeric columns. 
If you aggregate on one column, the single value is directly returned as a result If you aggregate on multiple columns a dictionary with column: result pairs is returned ```Python --8<-- "../docs_src/aggregations/docs001.py" ``` A sample usage might look like following ```python author = await Author(name="Author 1").save() await Book(title="Book 1", year=1920, ranking=3, author=author).save() await Book(title="Book 2", year=1930, ranking=1, author=author).save() await Book(title="Book 3", year=1923, ranking=5, author=author).save() assert round(float(await Book.objects.avg("year")), 2) == 1924.33 result = await Book.objects.avg(["year", "ranking"]) assert round(float(result.get("year")), 2) == 1924.33 assert result.get("ranking") == 3.0 try: # cannot avg string column await Book.objects.avg("title") except ormar.QueryDefinitionError: pass result = await Author.objects.select_related("books").avg("books__year") assert round(float(result), 2) == 1924.33 result = await Author.objects.select_related("books").avg( ["books__year", "books__ranking"] ) assert round(float(result.get("books__year")), 2) == 1924.33 assert result.get("books__ranking") == 3.0 assert ( await Author.objects.select_related("books") .filter(books__year__lt=1925) .avg("books__year") == 1921.5 ) ``` ## min `min(columns) -> Any` Returns min value of columns for rows matching the given criteria (applied with `filter` and `exclude` if set before). You can pass one or many column names including related columns. 
As of now each column passed is aggregated separately (so `sum(col1+col2)` is not possible, you can have `sum(col1, col2)` and later add 2 returned sums in python) If you aggregate on one column, the single value is directly returned as a result If you aggregate on multiple columns a dictionary with column: result pairs is returned ```Python --8<-- "../docs_src/aggregations/docs001.py" ``` A sample usage might look like following ```python author = await Author(name="Author 1").save() await Book(title="Book 1", year=1920, ranking=3, author=author).save() await Book(title="Book 2", year=1930, ranking=1, author=author).save() await Book(title="Book 3", year=1923, ranking=5, author=author).save() assert await Book.objects.min("year") == 1920 result = await Book.objects.min(["year", "ranking"]) assert result == dict(year=1920, ranking=1) assert await Book.objects.min("title") == "Book 1" assert await Author.objects.select_related("books").min("books__year") == 1920 result = await Author.objects.select_related("books").min( ["books__year", "books__ranking"] ) assert result == dict(books__year=1920, books__ranking=1) assert ( await Author.objects.select_related("books") .filter(books__year__gt=1925) .min("books__year") == 1930 ) ``` ## max `max(columns) -> Any` Returns max value of columns for rows matching the given criteria (applied with `filter` and `exclude` if set before). You can pass one or many column names including related columns. 
As of now each column passed is aggregated separately (so `sum(col1+col2)` is not possible, you can have `sum(col1, col2)` and later add 2 returned sums in python) If you aggregate on one column, the single value is directly returned as a result If you aggregate on multiple columns a dictionary with column: result pairs is returned ```Python --8<-- "../docs_src/aggregations/docs001.py" ``` A sample usage might look like following ```python author = await Author(name="Author 1").save() await Book(title="Book 1", year=1920, ranking=3, author=author).save() await Book(title="Book 2", year=1930, ranking=1, author=author).save() await Book(title="Book 3", year=1923, ranking=5, author=author).save() assert await Book.objects.max("year") == 1930 result = await Book.objects.max(["year", "ranking"]) assert result == dict(year=1930, ranking=5) assert await Book.objects.max("title") == "Book 3" assert await Author.objects.select_related("books").max("books__year") == 1930 result = await Author.objects.select_related("books").max( ["books__year", "books__ranking"] ) assert result == dict(books__year=1930, books__ranking=5) assert ( await Author.objects.select_related("books") .filter(books__year__lt=1925) .max("books__year") == 1923 ) ``` ## QuerysetProxy methods When access directly the related `ManyToMany` field as well as `ReverseForeignKey` returns the list of related models. But at the same time it exposes a subset of QuerySet API, so you can filter, create, select related etc related models directly from parent model. ### count Works exactly the same as [count](./#count) function above but allows you to select columns from related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section ### exists Works exactly the same as [exists](./#exists) function above but allows you to select columns from related objects from other side of the relation. 
### sum Works exactly the same as [sum](./#sum) function above but allows you to sum columns from related objects from other side of the relation. ### avg Works exactly the same as [avg](./#avg) function above but allows you to average columns from related objects from other side of the relation. ### min Works exactly the same as [min](./#min) function above but allows you to select minimum of columns from related objects from other side of the relation. ### max Works exactly the same as [max](./#max) function above but allows you to select maximum of columns from related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section [querysetproxy]: ../relations/queryset-proxy.md ormar-0.12.2/docs/queries/create.md000066400000000000000000000133051444363446500171250ustar00rootroot00000000000000# Insert data into database Following methods allow you to insert data into the database. * `create(**kwargs) -> Model` * `get_or_create(_defaults: Optional[Dict[str, Any]] = None, **kwargs) -> Tuple[Model, bool]` * `update_or_create(**kwargs) -> Model` * `bulk_create(objects: List[Model]) -> None` * `Model` * `Model.save()` method * `Model.upsert()` method * `Model.save_related()` method * `QuerysetProxy` * `QuerysetProxy.create(**kwargs)` method * `QuerysetProxy.get_or_create(_defaults: Optional[Dict[str, Any]] = None, **kwargs)` method * `QuerysetProxy.update_or_create(**kwargs)` method ## create `create(**kwargs): -> Model` Creates the model instance, saves it in a database and returns the updates model (with pk populated if not passed and autoincrement is set). The allowed kwargs are `Model` fields names and proper value types. 
```python class Album(ormar.Model): class Meta: tablename = "album" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) ``` ```python malibu = await Album.objects.create(name="Malibu") await Track.objects.create(album=malibu, title="The Bird", position=1) ``` The alternative is a split creation and persistence of the `Model`. ```python malibu = Album(name="Malibu") await malibu.save() ``` !!!tip Check other `Model` methods in [models][models] ## get_or_create `get_or_create(_defaults: Optional[Dict[str, Any]] = None, **kwargs) -> Tuple[Model, bool]` Combination of create and get methods. Tries to get a row meeting the criteria and if `NoMatch` exception is raised it creates a new one with given kwargs and _defaults. When `_defaults` dictionary is provided the values set in `_defaults` will **always** be set, including overwriting explicitly provided values. i.e. `get_or_create(_defaults={"title": "I win"}, title="never used")` will always use "I win" as title whether you provide your own value in kwargs or not. ```python class Album(ormar.Model): class Meta: tablename = "album" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) year: int = ormar.Integer() ``` ```python album, created = await Album.objects.get_or_create(name='The Cat', _defaults={"year": 1999}) assert created is True assert album.name == "The Cat" assert album.year == 1999 # object is created as it does not exist album2, created = await Album.objects.get_or_create(name='The Cat') assert created is False assert album == album2 # return True as the same db row is returned ``` !!!warning Despite being an equivalent row from the database the `album` and `album2` in example above are 2 different python objects! Updating one of them will not refresh the second one until you explicitly load() the fresh data from db. 
!!!note Note that if you want to create a new object you either have to pass pk column value or pk column has to be set as autoincrement ## update_or_create `update_or_create(**kwargs) -> Model` Updates the model, or in case there is no match in database creates a new one. ```Python hl_lines="26-32" --8<-- "../docs_src/queries/docs003.py" ``` !!!note Note that if you want to create a new object you either have to pass pk column value or pk column has to be set as autoincrement ## bulk_create `bulk_create(objects: List["Model"]) -> None` Allows you to create multiple objects at once. A valid list of `Model` objects needs to be passed. ```python hl_lines="21-27" --8<-- "../docs_src/queries/docs004.py" ``` ## Model methods Each model instance have a set of methods to `save`, `update` or `load` itself. ###save You can create new models by using `QuerySet.create()` method or by initializing your model as a normal pydantic model and later calling `save()` method. !!!tip Read more about `save()` method in [models-save][models-save] ###upsert It's a proxy to either `save()` or `update(**kwargs)` methods of a Model. If the pk is not set the `save()` method will be called. !!!tip Read more about `upsert()` method in [models-upsert][models-upsert] ###save_related Method goes through all relations of the `Model` on which the method is called, and calls `upsert()` method on each model that is **not** saved. !!!tip Read more about `save_related()` method in [models-save-related][models-save-related] ## QuerysetProxy methods When access directly the related `ManyToMany` field as well as `ReverseForeignKey` returns the list of related models. But at the same time it exposes subset of QuerySet API, so you can filter, create, select related etc related models directly from parent model. ### create Works exactly the same as [create](./#create) function above but allows you to create related objects from other side of the relation. 
!!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section ### get_or_create Works exactly the same as [get_or_create](./#get_or_create) function above but allows you to query or create related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section ### update_or_create Works exactly the same as [update_or_create](./#update_or_create) function above but allows you to update or create related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section [models]: ../models/methods.md [models-save]: ../models/methods.md#save [models-upsert]: ../models/methods.md#upsert [models-save-related]: ../models/methods.md#save_related [querysetproxy]: ../relations/queryset-proxy.md ormar-0.12.2/docs/queries/delete.md000066400000000000000000000073551444363446500171340ustar00rootroot00000000000000# Delete data from database Following methods allow you to delete data from the database. * `delete(each: bool = False, **kwargs) -> int` * `Model` * `Model.delete()` method * `QuerysetProxy` * `QuerysetProxy.remove()` method * `QuerysetProxy.clear()` method ## delete `delete(each: bool = False, **kwargs) -> int` QuerySet level delete is used to delete multiple records at once. You either have to filter the QuerySet first or provide a `each=True` flag to delete whole table. If you do not provide this flag or a filter a `QueryDefinitionError` will be raised. Return number of rows deleted. ```python hl_lines="26-30" --8<-- "../docs_src/queries/docs005.py" ``` ## Model methods Each model instance have a set of methods to `save`, `update` or `load` itself. ### delete You can delete model instance by calling `delete()` method on it. 
!!!tip Read more about `delete()` method in [models methods](../models/methods.md#delete) ## QuerysetProxy methods When access directly the related `ManyToMany` field as well as `ReverseForeignKey` returns the list of related models. But at the same time it exposes subset of QuerySet API, so you can filter, create, select related etc related models directly from parent model. ### remove Removal of the related model one by one. Removes the relation in the database. If you specify the keep_reversed flag to `False` `ormar` will also delete the related model from the database. ```python class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) is_best_seller: bool = ormar.Boolean(default=False) class Track(ormar.Model): class Meta: tablename = "tracks" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) album: Optional[Album] = ormar.ForeignKey(Album) title: str = ormar.String(max_length=100) position: int = ormar.Integer() play_count: int = ormar.Integer(nullable=True) ``` ```python album = await Album(name="Malibu").save() track1 = await Track( album=album, title="The Bird", position=1, play_count=30, ).save() # remove through proxy from reverse side of relation await album.tracks.remove(track1, keep_reversed=False) # the track was also deleted tracks = await Track.objects.all() assert len(tracks) == 0 ``` ### clear Removal of all related models in one call. Removes also the relation in the database. If you specify the keep_reversed flag to `False` `ormar` will also delete the related model from the database. 
```python class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) is_best_seller: bool = ormar.Boolean(default=False) class Track(ormar.Model): class Meta: tablename = "tracks" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) album: Optional[Album] = ormar.ForeignKey(Album) title: str = ormar.String(max_length=100) position: int = ormar.Integer() play_count: int = ormar.Integer(nullable=True) ``` ```python album = await Album(name="Malibu").save() track1 = await Track( album=album, title="The Bird", position=1, play_count=30, ).save() track2 = await Track( album=album, title="Heart don't stand a chance", position=2, play_count=20, ).save() # removes the relation only -> clears foreign keys on tracks await album.tracks.clear() # removes also the tracks await album.tracks.clear(keep_reversed=False) ``` [querysetproxy]: ../relations/queryset-proxy.mdormar-0.12.2/docs/queries/filter-and-sort.md000066400000000000000000000655641444363446500207120ustar00rootroot00000000000000# Filtering and sorting data You can use following methods to filter the data (sql where clause). * `filter(*args, **kwargs) -> QuerySet` * `exclude(*args, **kwargs) -> QuerySet` * `get(*args, **kwargs) -> Model` * `get_or_none(*args, **kwargs) -> Optional[Model]` * `get_or_create(_defaults: Optional[Dict[str, Any]] = None, *args, **kwargs) -> Tuple[Model, bool]` * `all(*args, **kwargs) -> List[Optional[Model]]` * `QuerysetProxy` * `QuerysetProxy.filter(*args, **kwargs)` method * `QuerysetProxy.exclude(*args, **kwargs)` method * `QuerysetProxy.get(*args, **kwargs)` method * `QuerysetProxy.get_or_none(*args, **kwargs)` method * `QuerysetProxy.get_or_create(_defaults: Optional[Dict[str, Any]] = None, *args, **kwargs)` method * `QuerysetProxy.all(*args, **kwargs)` method And following methods to sort the data (sql order by clause). 
* `order_by(columns:Union[List, str, OrderAction]) -> QuerySet` * `QuerysetProxy` * `QuerysetProxy.order_by(columns:Union[List, str, OrderAction])` method ## Filtering ### filter `filter(*args, **kwargs) -> QuerySet` Allows you to filter by any `Model` attribute/field as well as to fetch instances, with a filter across an FK relationship. ```python class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) is_best_seller: bool = ormar.Boolean(default=False) class Track(ormar.Model): class Meta: tablename = "tracks" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) album: Optional[Album] = ormar.ForeignKey(Album) name: str = ormar.String(max_length=100) position: int = ormar.Integer() play_count: int = ormar.Integer(nullable=True) ``` ```python track = Track.objects.filter(name="The Bird").get() # will return a track with name equal to 'The Bird' tracks = Track.objects.filter(album__name="Fantasies").all() # will return all tracks where the columns album name = 'Fantasies' ``` ### Django style filters You can use special filter suffix to change the filter operands: * **exact** - exact match to value, sql `column = ` * can be written as`album__name__exact='Malibu'` * **iexact** - exact match sql `column = ` (case insensitive) * can be written as`album__name__iexact='malibu'` * **contains** - sql `column LIKE '%%'` * can be written as`album__name__contains='Mal'` * **icontains** - sql `column LIKE '%%'` (case insensitive) * can be written as`album__name__icontains='mal'` * **in** - sql ` column IN (, , ...)` * can be written as`album__name__in=['Malibu', 'Barclay']` * **isnull** - sql `column IS NULL` (and sql `column IS NOT NULL`) * can be written as`album__name__isnull=True` (isnotnull `album__name__isnull=False`) * **gt** - sql `column > ` (greater than) * can be written as`position__gt=3` * **gte** - sql `column 
>= ` (greater or equal than) * can be written as`position__gte=3` * **lt** - sql `column < ` (lower than) * can be written as`position__lt=3` * **lte** - sql `column <= ` (lower equal than) * can be written as`position__lte=3` * **startswith** - sql `column LIKE '%'` (exact start match) * can be written as`album__name__startswith='Mal'` * **istartswith** - sql `column LIKE '%'` (case insensitive) * can be written as`album__name__istartswith='mal'` * **endswith** - sql `column LIKE '%'` (exact end match) * can be written as`album__name__endswith='ibu'` * **iendswith** - sql `column LIKE '%'` (case insensitive) * can be written as`album__name__iendswith='IBU'` Some samples: ```python # sql: ( product.name = 'Test' AND product.rating >= 3.0 ) Product.objects.filter(name='Test', rating__gte=3.0).get() # sql: ( product.name = 'Test' AND product.rating >= 3.0 ) # OR (categories.name IN ('Toys', 'Books')) Product.objects.filter( ormar.or_( ormar.and_(name='Test', rating__gte=3.0), categories__name__in=['Toys', 'Books']) ).get() # note: to read more about and_ and or_ read complex filters section below ``` ### Python style filters * **exact** - exact match to value, sql `column = ` * can be written as `Track.album.name == 'Malibu` * **iexact** - exact match sql `column = ` (case insensitive) * can be written as `Track.album.name.iexact('malibu')` * **contains** - sql `column LIKE '%%'` * can be written as `Track.album.name % 'Mal')` * can be written as `Track.album.name.contains('Mal')` * **icontains** - sql `column LIKE '%%'` (case insensitive) * can be written as `Track.album.name.icontains('mal')` * **in** - sql ` column IN (, , ...)` * can be written as `Track.album.name << ['Malibu', 'Barclay']` * can be written as `Track.album.name.in_(['Malibu', 'Barclay'])` * **isnull** - sql `column IS NULL` (and sql `column IS NOT NULL`) * can be written as `Track.album.name >> None` * can be written as `Track.album.name.isnull(True)` * not null can be written as 
`Track.album.name.isnull(False)` * not null can be written as `~(Track.album.name >> None)` * not null can be written as `~(Track.album.name.isnull(True))` * **gt** - sql `column > ` (greater than) * can be written as `Track.album.name > 3` * **gte** - sql `column >= ` (greater or equal than) * can be written as `Track.album.name >= 3` * **lt** - sql `column < ` (lower than) * can be written as `Track.album.name < 3` * **lte** - sql `column <= ` (lower equal than) * can be written as `Track.album.name <= 3` * **startswith** - sql `column LIKE '%'` (exact start match) * can be written as `Track.album.name.startswith('Mal')` * **istartswith** - sql `column LIKE '%'` (case insensitive) * can be written as `Track.album.name.istartswith('mal')` * **endswith** - sql `column LIKE '%'` (exact end match) * can be written as `Track.album.name.endswith('ibu')` * **iendswith** - sql `column LIKE '%'` (case insensitive) * can be written as `Track.album.name.iendswith('IBU')` Some samples: ```python # sql: ( product.name = 'Test' AND product.rating >= 3.0 ) Product.objects.filter( (Product.name == 'Test') & (Product.rating >= 3.0) ).get() # sql: ( product.name = 'Test' AND product.rating >= 3.0 ) # OR (categories.name IN ('Toys', 'Books')) Product.objects.filter( ((Product.name == 'Test') & (Product.rating >= 3.0)) | (Product.categories.name << ['Toys', 'Books']) ).get() ``` !!!note All methods that do not return the rows explicitly returns a QuerySet instance so you can chain them together So operations like `filter()`, `select_related()`, `limit()` and `offset()` etc. can be chained. Something like `Track.object.select_related("album").filter(album__name="Malibu").offset(1).limit(1).all()` !!!warning Note that you do not have to specify the `%` wildcard in contains and other filters, it's added for you. If you include `%` in your search value it will be escaped and treated as literal percentage sign inside the text. 
### exclude `exclude(*args, **kwargs) -> QuerySet` Works exactly the same as filter and all modifiers (suffixes) are the same, but returns a not condition. So if you use `filter(name='John')` which equals to `where name = 'John'` in SQL, the `exclude(name='John')` equals to `where name <> 'John'` Note that all conditions are joined so if you pass multiple values it becomes a union of conditions. `exclude(name='John', age__gte=35)` will become `where not (name='John' and age>=35)` ```python class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) is_best_seller: bool = ormar.Boolean(default=False) class Track(ormar.Model): class Meta: tablename = "tracks" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) album: Optional[Album] = ormar.ForeignKey(Album) name: str = ormar.String(max_length=100) position: int = ormar.Integer() play_count: int = ormar.Integer(nullable=True) ``` ```python notes = await Track.objects.exclude(position__gt=3).all() # returns all tracks with position <= 3 ``` ## Complex filters (including OR) By default both `filter()` and `exclude()` methods combine provided filter options with `AND` condition so `filter(name="John", age__gt=30)` translates into `WHERE name = 'John' AND age > 30`. Sometimes it's useful to query the database with conditions that should not be applied jointly like `WHERE name = 'John' OR age > 30`, or build a complex where query that you would like to have bigger control over. After all `WHERE (name = 'John' OR age > 30) and city='New York'` is completely different than `WHERE name = 'John' OR (age > 30 and city='New York')`. In order to build `OR` and nested conditions ormar provides two functions that can be used in `filter()` and `exclude()` in `QuerySet` and `QuerysetProxy`. 
!!!note Note that you can provide those methods in any other method like `get()` or `all()` that accepts `*args`. Call to `or_` and `and_` can be nested in each other, as well as combined with keyword arguments. Since it sounds more complicated than it is, let's look at some examples. Given a sample models like this: ```python database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Book(ormar.Model): class Meta(BaseMeta): tablename = "books" id: int = ormar.Integer(primary_key=True) author: Optional[Author] = ormar.ForeignKey(Author) title: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) ``` Let's create some sample data: ```python tolkien = await Author(name="J.R.R. Tolkien").save() await Book(author=tolkien, title="The Hobbit", year=1933).save() await Book(author=tolkien, title="The Lord of the Rings", year=1955).save() await Book(author=tolkien, title="The Silmarillion", year=1977).save() sapkowski = await Author(name="Andrzej Sapkowski").save() await Book(author=sapkowski, title="The Witcher", year=1990).save() await Book(author=sapkowski, title="The Tower of Fools", year=2002).save() ``` We can construct some sample complex queries: Let's select books of Tolkien **OR** books written after 1970 sql: `WHERE ( authors.name = 'J.R.R. Tolkien' OR books.year > 1970 )` #### Django style ```python books = ( await Book.objects.select_related("author") .filter(ormar.or_(author__name="J.R.R. Tolkien", year__gt=1970)) .all() ) assert len(books) == 5 ``` #### Python style ```python books = ( await Book.objects.select_related("author") .filter((Book.author.name=="J.R.R. 
Tolkien") | (Book.year > 1970)) .all() ) assert len(books) == 5 ``` Now let's select books written after 1960 or before 1940 which were written by Tolkien. sql: `WHERE ( books.year > 1960 OR books.year < 1940 ) AND authors.name = 'J.R.R. Tolkien'` #### Django style ```python # OPTION 1 - split and into separate call books = ( await Book.objects.select_related("author") .filter(ormar.or_(year__gt=1960, year__lt=1940)) .filter(author__name="J.R.R. Tolkien") .all() ) assert len(books) == 2 # OPTION 2 - all in one books = ( await Book.objects.select_related("author") .filter( ormar.and_( ormar.or_(year__gt=1960, year__lt=1940), author__name="J.R.R. Tolkien", ) ) .all() ) assert len(books) == 2 assert books[0].title == "The Hobbit" assert books[1].title == "The Silmarillion" ``` #### Python style ```python books = ( await Book.objects.select_related("author") .filter((Book.year > 1960) | (Book.year < 1940)) .filter(Book.author.name == "J.R.R. Tolkien") .all() ) assert len(books) == 2 # OPTION 2 - all in one books = ( await Book.objects.select_related("author") .filter( ( (Book.year > 1960) | (Book.year < 1940) ) & (Book.author.name == "J.R.R. Tolkien") ) .all() ) assert len(books) == 2 assert books[0].title == "The Hobbit" assert books[1].title == "The Silmarillion" ``` Books of Sapkowski from before 2000 or books of Tolkien written after 1960 sql: `WHERE ( ( books.year > 1960 AND authors.name = 'J.R.R. Tolkien' ) OR ( books.year < 2000 AND authors.name = 'Andrzej Sapkowski' ) ) ` #### Django style ```python books = ( await Book.objects.select_related("author") .filter( ormar.or_( ormar.and_(year__gt=1960, author__name="J.R.R. Tolkien"), ormar.and_(year__lt=2000, author__name="Andrzej Sapkowski"), ) ) .all() ) assert len(books) == 2 ``` #### Python style ```python books = ( await Book.objects.select_related("author") .filter( ((Book.year > 1960) & (Book.author.name == "J.R.R. 
Tolkien")) | ((Book.year < 2000) & (Book.author.name == "Andrzej Sapkowski")) ) .all() ) assert len(books) == 2 ``` Of course those functions can have more than 2 conditions, so if we for example want books that contains 'hobbit': sql: `WHERE ( ( books.year > 1960 AND authors.name = 'J.R.R. Tolkien' ) OR ( books.year < 2000 AND os0cec_authors.name = 'Andrzej Sapkowski' ) OR books.title LIKE '%hobbit%' )` #### Django style ```python books = ( await Book.objects.select_related("author") .filter( ormar.or_( ormar.and_(year__gt=1960, author__name="J.R.R. Tolkien"), ormar.and_(year__lt=2000, author__name="Andrzej Sapkowski"), title__icontains="hobbit", ) ) .all() ) ``` #### Python style ```python books = ( await Book.objects.select_related("author") .filter( ((Book.year > 1960) & (Book.author.name == "J.R.R. Tolkien")) | ((Book.year < 2000) & (Book.author.name == "Andrzej Sapkowski")) | (Book.title.icontains("hobbit")) ) .all() ) ``` If you want or need to you can nest deeper conditions as deep as you want, in example to achieve a query like this: sql: ``` WHERE ( ( ( books.year > 1960 OR books.year < 1940 ) AND authors.name = 'J.R.R. Tolkien' ) OR ( books.year < 2000 AND authors.name = 'Andrzej Sapkowski' ) ) ``` You can construct a query as follows: #### Django style ```python books = ( await Book.objects.select_related("author") .filter( ormar.or_( ormar.and_( ormar.or_(year__gt=1960, year__lt=1940), author__name="J.R.R. Tolkien", ), ormar.and_(year__lt=2000, author__name="Andrzej Sapkowski"), ) ) .all() ) assert len(books) == 3 assert books[0].title == "The Hobbit" assert books[1].title == "The Silmarillion" assert books[2].title == "The Witcher" ``` #### Python style ```python books = ( await Book.objects.select_related("author") .filter( ( ( (Book.year > 1960) | (Book.year < 1940) ) & (Book.author.name == "J.R.R. 
Tolkien") ) | ( (Book.year < 2000) & (Book.author.name == "Andrzej Sapkowski") ) ) .all() ) assert len(books) == 3 assert books[0].title == "The Hobbit" assert books[1].title == "The Silmarillion" assert books[2].title == "The Witcher" ``` By now you should already have an idea how `ormar.or_` and `ormar.and_` works. Of course, you could chain them in any other methods of queryset, so in example a perfectly valid query can look like follows: ```python books = ( await Book.objects.select_related("author") .filter(ormar.or_(year__gt=1980, author__name="Andrzej Sapkowski")) .filter(title__startswith="The") .limit(1) .offset(1) .order_by("-id") .all() ) assert len(books) == 1 assert books[0].title == "The Witcher" ``` Same applies to python style chaining and nesting. #### Django style Note that with django style you cannot provide the same keyword argument several times so queries like `filter(ormar.or_(name='Jack', name='John'))` are not allowed. If you want to check the same column for several values simply use `in` operator: `filter(name__in=['Jack','John'])`. If you pass only one parameter to `or_` or `and_` functions it's simply wrapped in parenthesis and has no effect on actual query, so in the end all 3 queries are identical: ```python await Book.objects.filter(title='The Hobbit').get() await Book.objects.filter(ormar.or_(title='The Hobbit')).get() await Book.objects.filter(ormar.and_(title='The Hobbit')).get() ``` !!!note Note that `or_` and `and_` queries will have `WHERE (title='The Hobbit')` but the parenthesis is redundant and has no real effect. This feature can be used if you **really** need to use the same field name twice. Remember that you cannot pass the same keyword arguments twice to the function, so how you can query in example `WHERE (authors.name LIKE '%tolkien%') OR (authors.name LIKE '%sapkowski%'))`? 
You cannot do: ```python books = ( await Book.objects.select_related("author") .filter(ormar.or_( author__name__icontains="tolkien", author__name__icontains="sapkowski" # you cannot use same keyword twice in or_! )) # python syntax error .all() ) ``` But you can do this: ```python books = ( await Book.objects.select_related("author") .filter(ormar.or_( ormar.and_(author__name__icontains="tolkien"), # one argument == just wrapped in () ormar.and_(author__name__icontains="sapkowski") )) .all() ) assert len(books) == 5 ``` #### Python style Note that with python style you can perfectly use the same fields as many times as you want. ```python books = ( await Book.objects.select_related("author") .filter( (Book.author.name.icontains("tolkien")) | (Book.author.name.icontains("sapkowski")) )) .all() ) ``` ## get `get(*args, **kwargs) -> Model` Get's the first row from the db meeting the criteria set by kwargs. When any args and/or kwargs are passed it's a shortcut equivalent to calling `filter(*args, **kwargs).get()` !!!tip To read more about `filter` go to [filter](./#filter). To read more about `get` go to [read/get](../read/#get) ## get_or_none Exact equivalent of get described above but instead of raising the exception returns `None` if no db record matching the criteria is found. ## get_or_create `get_or_create(_defaults: Optional[Dict[str, Any]] = None, *args, **kwargs) -> Tuple[Model, bool]` Combination of create and get methods. When any args and/or kwargs are passed it's a shortcut equivalent to calling `filter(*args, **kwargs).get_or_create()` !!!tip To read more about `filter` go to [filter](./#filter). To read more about `get_or_create` go to [read/get_or_create](../read/#get_or_create) !!!warning When given item does not exist you need to pass kwargs for all required fields of the model, including but not limited to primary_key column (unless it's autoincrement). 
## all `all(*args, **kwargs) -> List[Optional["Model"]]` Returns all rows from a database for given model for set filter options. When any kwargs are passed it's a shortcut equivalent to calling `filter(*args, **kwargs).all()` !!!tip To read more about `filter` go to [filter](./#filter). To read more about `all` go to [read/all](../read/#all) ### QuerysetProxy methods When access directly the related `ManyToMany` field as well as `ReverseForeignKey` returns the list of related models. But at the same time it exposes subset of QuerySet API, so you can filter, create, select related etc related models directly from parent model. #### filter Works exactly the same as [filter](./#filter) function above but allows you to filter related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section #### exclude Works exactly the same as [exclude](./#exclude) function above but allows you to filter related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section #### get Works exactly the same as [get](./#get) function above but allows you to filter related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section #### get_or_none Exact equivalent of get described above but instead of raising the exception returns `None` if no db record matching the criteria is found. #### get_or_create Works exactly the same as [get_or_create](./#get_or_create) function above but allows you to filter related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section #### all Works exactly the same as [all](./#all) function above but allows you to filter related objects from other side of the relation. 
!!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section ## Sorting ### order_by `order_by(columns: Union[List, str, OrderAction]) -> QuerySet` With `order_by()` you can order the results from database based on your choice of fields. You can provide a string with field name or list of strings with different fields. Ordering in sql will be applied in order of names you provide in order_by. !!!tip By default if you do not provide ordering `ormar` explicitly orders by all primary keys !!!warning If you are sorting by nested models that causes that the result rows are unsorted by the main model `ormar` will combine those children rows into one main model. Sample raw database rows result (sort by child model desc): ``` MODEL: 1 - Child Model - 3 MODEL: 2 - Child Model - 2 MODEL: 1 - Child Model - 1 ``` will result in 2 rows of result: ``` MODEL: 1 - Child Models: [3, 1] # encountered first in result, all children rows combined MODEL: 2 - Child Modles: [2] ``` The main model will never duplicate in the result Given sample Models like following: ```python --8 < -- "../../docs_src/queries/docs007.py" ``` To order by main model field just provide a field name #### Django style ```python toys = await Toy.objects.select_related("owner").order_by("name").all() assert [x.name.replace("Toy ", "") for x in toys] == [ str(x + 1) for x in range(6) ] assert toys[0].owner == zeus assert toys[1].owner == aphrodite ``` #### Python style ```python toys = await Toy.objects.select_related("owner").order_by(Toy.name.asc()).all() assert [x.name.replace("Toy ", "") for x in toys] == [ str(x + 1) for x in range(6) ] assert toys[0].owner == zeus assert toys[1].owner == aphrodite ``` To sort on nested models separate field names with dunder '__'. You can sort this way across all relation types -> `ForeignKey`, reverse virtual FK and `ManyToMany` fields. 
#### Django style ```python toys = await Toy.objects.select_related("owner").order_by("owner__name").all() assert toys[0].owner.name == toys[1].owner.name == "Aphrodite" assert toys[2].owner.name == toys[3].owner.name == "Hermes" assert toys[4].owner.name == toys[5].owner.name == "Zeus" ``` #### Python style ```python toys = await Toy.objects.select_related("owner").order_by(Toy.owner.name.asc()).all() assert toys[0].owner.name == toys[1].owner.name == "Aphrodite" assert toys[2].owner.name == toys[3].owner.name == "Hermes" assert toys[4].owner.name == toys[5].owner.name == "Zeus" ``` To sort in descending order provide a hyphen in front of the field name #### Django style ```python owner = ( await Owner.objects.select_related("toys") .order_by("-toys__name") .filter(name="Zeus") .get() ) assert owner.toys[0].name == "Toy 4" assert owner.toys[1].name == "Toy 1" ``` #### Python style ```python owner = ( await Owner.objects.select_related("toys") .order_by(Owner.toys.name.desc()) .filter(Owner.name == "Zeus") .get() ) assert owner.toys[0].name == "Toy 4" assert owner.toys[1].name == "Toy 1" ``` !!!note All methods that do not return the rows explicitly returns a QuerySet instance so you can chain them together So operations like `filter()`, `select_related()`, `limit()` and `offset()` etc. can be chained. Something like `Track.object.select_related("album").filter(album__name="Malibu").offset(1).limit(1).all()` ### Default sorting in ormar Since order of rows in a database is not guaranteed, `ormar` **always** issues an `order by` sql clause to each (part of) query even if you do not provide order yourself. When querying the database with given model by default the `Model` is ordered by the `primary_key` column ascending. If you wish to change the default behaviour you can do it by providing `orders_by` parameter to model `Meta` class. 
!!!tip To read more about models sort order visit [models](../models/index.md#model-sort-order) section of documentation By default the relations follow the same ordering, but you can modify the order in which related models are loaded during query by providing `orders_by` and `related_orders_by` parameters to relations. !!!tip To read more about models sort order visit [relations](../relations/index.md#relationship-default-sort-order) section of documentation Order in which order_by clauses are applied is as follows: * Explicitly passed `order_by()` calls in query * Relation passed `orders_by` and `related_orders_by` if exists * Model `Meta` class `orders_by` * Model `primary_key` column ascending (fallback, used if none of above provided) **Order from only one source is applied to each `Model` (so that you can always overwrite it in a single query).** That means that if you provide explicit `order_by` for a model in a query, the `Relation` and `Model` sort orders are skipped. If you provide a `Relation` one, the `Model` sort is skipped. Finally, if you provide one for `Model` the default one by `primary_key` is skipped. ### QuerysetProxy methods When access directly the related `ManyToMany` field as well as `ReverseForeignKey` returns the list of related models. But at the same time it exposes subset of QuerySet API, so you can filter, create, select related etc related models directly from parent model. #### order_by Works exactly the same as [order_by](./#order_by) function above but allows you to sort related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section [querysetproxy]: ../relations/queryset-proxy.md ormar-0.12.2/docs/queries/index.md000066400000000000000000000142271444363446500167750ustar00rootroot00000000000000# Querying database with ormar ## QuerySet Each Model is auto registered with a `QuerySet` that represents the underlying query, and it's options. 
Most of the methods are also available through many to many relations and on reverse foreign key relations through `QuerysetProxy` interface. !!!info To see which relations are supported and how to construct relations visit [relations][relations]. For simplicity available methods to fetch and save the data into the database are divided into categories according to the function they fulfill. Note that some functions/methods are in multiple categories. For completeness, Model and relation methods are listed. To read more about any specific section or function please refer to the details subpage. ###[Insert data into database](./create.md) * `create(**kwargs) -> Model` * `get_or_create(_defaults: Optional[Dict[str, Any]] = None, **kwargs) -> Tuple[Model, bool]` * `update_or_create(**kwargs) -> Model` * `bulk_create(objects: List[Model]) -> None` * `Model` * `Model.save()` method * `Model.upsert()` method * `Model.save_related()` method * `QuerysetProxy` * `QuerysetProxy.create(**kwargs)` method * `QuerysetProxy.get_or_create(_defaults: Optional[Dict[str, Any]] = None, **kwargs)` method * `QuerysetProxy.update_or_create(**kwargs)` method !!!tip To read more about any or all of those functions visit [create](./create.md) section. ### [Read data from database](./read.md) * `get(**kwargs) -> Model` * `get_or_none(**kwargs) -> Optional[Model]` * `get_or_create(_defaults: Optional[Dict[str, Any]] = None, **kwargs) -> Tuple[Model, bool]` * `first() -> Model` * `all(**kwargs) -> List[Optional[Model]]` * `Model` * `Model.load()` method * `QuerysetProxy` * `QuerysetProxy.get(**kwargs)` method * `QuerysetProxy.get_or_none(**kwargs)` method * `QuerysetProxy.get_or_create(_defaults: Optional[Dict[str, Any]] = None, **kwargs)` method * `QuerysetProxy.first()` method * `QuerysetProxy.all(**kwargs)` method !!!tip To read more about any or all of those functions visit [read](./read.md) section. 
### [Read raw data from database](./raw-data.md) Instead of ormar models return raw data in form list of dictionaries or tuples. * `values(fields = None, exclude_through = False) -> List[Dict]` * `values_list(fields = None, exclude_through = False, flatten = False) -> List` * `QuerysetProxy` * `QuerysetProxy.values(fields = None, exclude_through = False)` method * `QuerysetProxy.values_list(fields = None, exclude_through= False, flatten = False)` method !!!tip To read more about any or all of those functions visit [raw data](./raw-data.md) section. ### [Update data in database](./update.md) * `update(each: bool = False, **kwargs) -> int` * `update_or_create(**kwargs) -> Model` * `bulk_update(objects: List[Model], columns: List[str] = None) -> None` * `Model` * `Model.update()` method * `Model.upsert()` method * `Model.save_related()` method * `QuerysetProxy` * `QuerysetProxy.update_or_create(**kwargs)` method !!!tip To read more about any or all of those functions visit [update](./update.md) section. ### [Delete data from database](./delete.md) * `delete(each: bool = False, **kwargs) -> int` * `Model` * `Model.delete()` method * `QuerysetProxy` * `QuerysetProxy.remove()` method * `QuerysetProxy.clear()` method !!!tip To read more about any or all of those functions visit [delete](./delete.md) section. ### [Joins and subqueries](./joins-and-subqueries.md) * `select_related(related: Union[List, str]) -> QuerySet` * `prefetch_related(related: Union[List, str]) -> QuerySet` * `Model` * `Model.load()` method * `QuerysetProxy` * `QuerysetProxy.select_related(related: Union[List, str])` method * `QuerysetProxy.prefetch_related(related: Union[List, str])` method !!!tip To read more about any or all of those functions visit [joins and subqueries](./joins-and-subqueries.md) section. 
### [Filtering and sorting](./filter-and-sort.md) * `filter(**kwargs) -> QuerySet` * `exclude(**kwargs) -> QuerySet` * `order_by(columns:Union[List, str]) -> QuerySet` * `get(**kwargs) -> Model` * `get_or_none(**kwargs) -> Optional[Model]` * `get_or_create(_defaults: Optional[Dict[str, Any]] = None, **kwargs) -> Tuple[Model, bool]` * `all(**kwargs) -> List[Optional[Model]]` * `QuerysetProxy` * `QuerysetProxy.filter(**kwargs)` method * `QuerysetProxy.exclude(**kwargs)` method * `QuerysetProxy.order_by(columns:Union[List, str])` method * `QuerysetProxy.get(**kwargs)` method * `QuerysetProxy.get_or_none(**kwargs)` method * `QuerysetProxy.get_or_create(_defaults: Optional[Dict[str, Any]] = None, **kwargs)` method * `QuerysetProxy.all(**kwargs)` method !!!tip To read more about any or all of those functions visit [filtering and sorting](./filter-and-sort.md) section. ### [Selecting columns](./select-columns.md) * `fields(columns: Union[List, str, set, dict]) -> QuerySet` * `exclude_fields(columns: Union[List, str, set, dict]) -> QuerySet` * `QuerysetProxy` * `QuerysetProxy.fields(columns: Union[List, str, set, dict])` method * `QuerysetProxy.exclude_fields(columns: Union[List, str, set, dict])` method !!!tip To read more about any or all of those functions visit [selecting columns](./select-columns.md) section. ### [Pagination and rows number](./pagination-and-rows-number.md) * `paginate(page: int) -> QuerySet` * `limit(limit_count: int) -> QuerySet` * `offset(offset: int) -> QuerySet` * `get() -> Model` * `first() -> Model` * `QuerysetProxy` * `QuerysetProxy.paginate(page: int)` method * `QuerysetProxy.limit(limit_count: int)` method * `QuerysetProxy.offset(offset: int)` method !!!tip To read more about any or all of those functions visit [pagination](./pagination-and-rows-number.md) section. 
### [Aggregated functions](./aggregations.md) * `count(distinct: bool = True) -> int` * `exists() -> bool` * `QuerysetProxy` * `QuerysetProxy.count(distinct=True)` method * `QuerysetProxy.exists()` method !!!tip To read more about any or all of those functions visit [aggregations](./aggregations.md) section. [relations]: ../relations/index.md ormar-0.12.2/docs/queries/joins-and-subqueries.md000066400000000000000000000423261444363446500217360ustar00rootroot00000000000000# Joins and subqueries To join one table to another, so load also related models you can use following methods. * `select_related(related: Union[List, str]) -> QuerySet` * `select_all(follow: bool = True) -> QuerySet` * `prefetch_related(related: Union[List, str]) -> QuerySet` * `Model` * `Model.load()` method * `QuerysetProxy` * `QuerysetProxy.select_related(related: Union[List, str])` method * `QuerysetProxy.select_all(follow: bool=True)` method * `QuerysetProxy.prefetch_related(related: Union[List, str])` method ## select_related `select_related(related: Union[List, str]) -> QuerySet` Allows to prefetch related models during the same query. **With `select_related` always only one query is run against the database**, meaning that one (sometimes complicated) join is generated and later nested models are processed in python. To fetch related model use `ForeignKey` names. To chain related `Models` relation use double underscores between names. !!!note If you are coming from `django` note that `ormar` `select_related` differs -> in `django` you can `select_related` only singe relation types, while in `ormar` you can select related across `ForeignKey` relation, reverse side of `ForeignKey` (so virtual auto generated keys) and `ManyToMany` fields (so all relations as of current version). !!!tip To control which model fields to select use `fields()` and `exclude_fields()` `QuerySet` methods. !!!tip To control order of models (both main or nested) use `order_by()` method. 
```python class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) is_best_seller: bool = ormar.Boolean(default=False) class Track(ormar.Model): class Meta: tablename = "tracks" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) album: Optional[Album] = ormar.ForeignKey(Album) title: str = ormar.String(max_length=100) position: int = ormar.Integer() play_count: int = ormar.Integer(nullable=True) ``` ```python # Django style album = await Album.objects.select_related("tracks").all() # Python style album = await Album.objects.select_related(Album.tracks).all() # will return album with all columns tracks ``` You can provide a string or a list of strings (or a field/ list of fields) ```python class SchoolClass(ormar.Model): class Meta: tablename = "schoolclasses" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) department: Optional[Department] = ormar.ForeignKey(Department, nullable=False) class Category(ormar.Model): class Meta: tablename = "categories" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Student(ormar.Model): class Meta: tablename = "students" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) schoolclass: Optional[SchoolClass] = ormar.ForeignKey(SchoolClass) category: Optional[Category] = ormar.ForeignKey(Category, nullable=True) class Teacher(ormar.Model): class Meta: tablename = "teachers" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) schoolclass: Optional[SchoolClass] = ormar.ForeignKey(SchoolClass) category: Optional[Category] = ormar.ForeignKey(Category, nullable=True) ``` ```python # Django style 
classes = await SchoolClass.objects.select_related( ["teachers__category", "students"]).all() # Python style classes = await SchoolClass.objects.select_related( [SchoolClass.teachers.category, SchoolClass.students]).all() # will return classes with teachers and teachers categories # as well as classes students ``` Exactly the same behavior is for Many2Many fields, where you put the names of Many2Many fields and the final `Models` are fetched for you. !!!warning If you set `ForeignKey` field as not nullable (so required) during all queries the not nullable `Models` will be auto prefetched, even if you do not include them in select_related. !!!note All methods that do not return the rows explicitly returns a QuerySet instance so you can chain them together So operations like `filter()`, `select_related()`, `limit()` and `offset()` etc. can be chained. Something like `Track.object.select_related("album").filter(album__name="Malibu").offset(1).limit(1).all()` ## select_all `select_all(follow: bool = False) -> QuerySet` By default when you select `all()` none of the relations are loaded, likewise, when `select_related()` is used you need to explicitly specify all relations that should be loaded. If you want to include also nested relations this can be cumberstone. That's why `select_all()` was introduced, so by default load all relations of a model (so kind of opposite as with `all()` approach). By default adds only directly related models of a parent model (from which the query is run). If `follow=True` is set it adds also related models of related models. !!!info To not get stuck in an infinite loop as related models also keep a relation to parent model visited models set is kept. That way already visited models that are nested are loaded, but the load do not follow them inside. So Model A -> Model B -> Model C -> Model A -> Model X will load second Model A but will never follow into Model X. Nested relations of those kind need to be loaded manually. 
With sample data like the following:
## prefetch_related `prefetch_related(related: Union[List, str]) -> QuerySet` Allows to prefetch related models during query - but opposite to `select_related` each subsequent model is fetched in a separate database query. **With `prefetch_related` always one query per Model is run against the database**, meaning that you will have multiple queries executed one after another. To fetch related model use `ForeignKey` names. To chain related `Models` relation use double underscores between names. !!!tip To control which model fields to select use `fields()` and `exclude_fields()` `QuerySet` methods. !!!tip To control order of models (both main or nested) use `order_by()` method. ```python class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) is_best_seller: bool = ormar.Boolean(default=False) class Track(ormar.Model): class Meta: tablename = "tracks" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) album: Optional[Album] = ormar.ForeignKey(Album) title: str = ormar.String(max_length=100) position: int = ormar.Integer() play_count: int = ormar.Integer(nullable=True) ``` ```python # Django style album = await Album.objects.prefetch_related("tracks").all() # Python style album = await Album.objects.prefetch_related(Album.tracks).all() # will return album will all columns tracks ``` You can provide a string, or a list of strings ```python class SchoolClass(ormar.Model): class Meta: tablename = "schoolclasses" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) department: Optional[Department] = ormar.ForeignKey(Department, nullable=False) class Category(ormar.Model): class Meta: tablename = "categories" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class 
Student(ormar.Model): class Meta: tablename = "students" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) schoolclass: Optional[SchoolClass] = ormar.ForeignKey(SchoolClass) category: Optional[Category] = ormar.ForeignKey(Category, nullable=True) class Teacher(ormar.Model): class Meta: tablename = "teachers" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) schoolclass: Optional[SchoolClass] = ormar.ForeignKey(SchoolClass) category: Optional[Category] = ormar.ForeignKey(Category, nullable=True) ``` ```python # Django style classes = await SchoolClass.objects.prefetch_related( ["teachers__category", "students"]).all() # Python style classes = await SchoolClass.objects.prefetch_related( [SchoolClass.teachers.category, SchoolClass.students]).all() # will return classes with teachers and teachers categories # as well as classes students ``` Exactly the same behavior is for Many2Many fields, where you put the names of Many2Many fields and the final `Models` are fetched for you. !!!warning If you set `ForeignKey` field as not nullable (so required) during all queries the not nullable `Models` will be auto prefetched, even if you do not include them in select_related. !!!note All methods that do not return the rows explicitly returns a QuerySet instance so you can chain them together So operations like `filter()`, `select_related()`, `limit()` and `offset()` etc. can be chained. Something like `Track.object.select_related("album").filter(album__name="Malibu").offset(1).limit(1).all()` ## select_related vs prefetch_related Which should you use -> `select_related` or `prefetch_related`? Well, it really depends on your data. The best answer is try yourself and see which one performs faster/better in your system constraints. 
What to keep in mind: ### Performance **Number of queries**: `select_related` always executes one query against the database, while `prefetch_related` executes multiple queries. Usually the query (I/O) operation is the slowest one but it does not have to be. **Number of rows**: Imagine that you have 10 000 object in one table A and each of those objects have 3 children in table B, and subsequently each object in table B has 2 children in table C. Something like this: ``` Model C / Model B - Model C / Model A - Model B - Model C \ \ \ Model C \ Model B - Model C \ Model C ``` That means that `select_related` will always return 60 000 rows (10 000 * 3 * 2) later compacted to 10 000 models. How many rows will return `prefetch_related`? Well, that depends, if each of models B and C is unique it will return 10 000 rows in first query, 30 000 rows (each of 3 children of A in table B are unique) in second query and 60 000 rows (each of 2 children of model B in table C are unique) in 3rd query. In this case `select_related` seems like a better choice, not only it will run one query comparing to 3 of `prefetch_related` but will also return 60 000 rows comparing to 100 000 of `prefetch_related` (10+30+60k). But what if each Model A has exactly the same 3 models B and each models C has exactly same models C? `select_related` will still return 60 000 rows, while `prefetch_related` will return 10 000 for model A, 3 rows for model B and 2 rows for Model C. So in total 10 006 rows. Now depending on the structure of models (i.e. if it has long Text() fields etc.) `prefetch_related` might be faster despite it needs to perform three separate queries instead of one. #### Memory `ormar` is a mini ORM meaning that it does not keep a registry of already loaded models. 
That means that in `select_related` example above you will always have 10 000 Models A, 30 000 Models B (even if the unique number of rows in db is 3 - processing of `select_related` spawns ** new** child models for each parent model). And 60 000 Models C. If the same Model B is shared by rows 1, 10, 100 etc. and you update one of those, the rest of rows that share the same child will **not** be updated on the spot. If you persist your changes into the database the change **will be available only after reload (either each child separately or the whole query again)**. That means that `select_related` will use more memory as each child is instantiated as a new object - obviously using it's own space. !!!note This might change in future versions if we decide to introduce caching. !!!warning By default all children (or event the same models loaded 2+ times) are completely independent, distinct python objects, despite that they represent the same row in db. They will evaluate to True when compared, so in example above: ```python # will return True if child1 of both rows is the same child db row row1.child1 == row100.child1 # same here: model1 = await Model.get(pk=1) model2 = await Model.get(pk=1) # same pk = same row in db # will return `True` model1 == model2 ``` but ```python # will return False (note that id is a python `builtin` function not ormar one). id(row1.child1) == (ro100.child1) # from above - will also return False id(model1) == id(model2) ``` On the contrary - with `prefetch_related` each unique distinct child model is instantiated only once and the same child models is shared across all parent models. That means that in `prefetch_related` example above if there are 3 distinct models in table B and 2 in table C, there will be only 5 children nested models shared between all model A instances. That also means that if you update any attribute it will be updated on all parents as they share the same child object. 
## Model methods Each model instance have a set of methods to `save`, `update` or `load` itself. ### load You can load the `ForeignKey` related model by calling `load()` method. `load()` can be used to refresh the model from the database (if it was changed by some other process). !!!tip Read more about `load()` method in [models methods](../models/methods.md#load) ## QuerysetProxy methods When access directly the related `ManyToMany` field as well as `ReverseForeignKey` returns the list of related models. But at the same time it exposes subset of QuerySet API, so you can filter, create, select related etc related models directly from parent model. ### select_related Works exactly the same as [select_related](./#select_related) function above but allows you to fetch related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section ### select_all Works exactly the same as [select_all](./#select_all) function above but allows you to fetch related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section ### prefetch_related Works exactly the same as [prefetch_related](./#prefetch_related) function above but allows you to fetch related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section [querysetproxy]: ../relations/queryset-proxy.md ormar-0.12.2/docs/queries/pagination-and-rows-number.md000066400000000000000000000112471444363446500230340ustar00rootroot00000000000000#Pagination and rows number Following methods allow you to paginate and limit number of rows in queries. 
* `paginate(page: int) -> QuerySet` * `limit(limit_count: int) -> QuerySet` * `offset(offset: int) -> QuerySet` * `get() -> Model` * `first() -> Model` * `QuerysetProxy` * `QuerysetProxy.paginate(page: int)` method * `QuerysetProxy.limit(limit_count: int)` method * `QuerysetProxy.offset(offset: int)` method ## paginate `paginate(page: int, page_size: int = 20) -> QuerySet` Combines the `offset` and `limit` methods based on page number and size ```python class Track(ormar.Model): class Meta: tablename = "track" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) album: Optional[Album] = ormar.ForeignKey(Album) name: str = ormar.String(max_length=100) position: int = ormar.Integer() ``` ```python tracks = await Track.objects.paginate(3).all() # will return 20 tracks starting at row 41 # (with default page size of 20) ``` Note that `paginate(2)` is equivalent to `offset(20).limit(20)` ## limit `limit(limit_count: int, limit_raw_sql: bool = None) -> QuerySet` You can limit the results to desired number of parent models. To limit the actual number of database query rows instead of number of main models use the `limit_raw_sql` parameter flag, and set it to `True`. ```python class Track(ormar.Model): class Meta: tablename = "track" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) album: Optional[Album] = ormar.ForeignKey(Album) name: str = ormar.String(max_length=100) position: int = ormar.Integer() ``` ```python tracks = await Track.objects.limit(1).all() # will return just one Track ``` !!!note All methods that do not return the rows explicitly returns a QuerySet instance so you can chain them together So operations like `filter()`, `select_related()`, `limit()` and `offset()` etc. can be chained. 
Something like `Track.objects.select_related("album").filter(album__name="Malibu").offset(1).limit(1).all()`
### paginate Works exactly the same as [paginate](./#paginate) function above but allows you to paginate related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section ### limit Works exactly the same as [limit](./#limit) function above but allows you to paginate related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section ### offset Works exactly the same as [offset](./#offset) function above but allows you to paginate related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section [querysetproxy]: ../relations/queryset-proxy.mdormar-0.12.2/docs/queries/raw-data.md000066400000000000000000000277471444363446500174010ustar00rootroot00000000000000# Return raw data Following methods allow you to execute a query but instead of returning ormar models those will return list of dicts or tuples. * `values(fields = None, exclude_through = False) -> List[Dict]` * `values_list(fields = None, exclude_through = False, flatten = False) -> List` * `QuerysetProxy` * `QuerysetProxy.values(fields = None, exclude_through = False)` method * `QuerysetProxy.values_list(fields = None, exclude_through= False, flatten = False)` method !!!danger Note that `values` and `values_list` skips parsing the result to ormar models so skips also the validation of the result! !!!warning Note that each entry in a result list is one to one reflection of a query result row. Since rows are not parsed if you have one-to-many or many-to-many relation expect duplicated columns values in result entries if one parent row have multiple related rows. ## values `values(fields: Union[List, str, Set, Dict] = None, exclude_through: bool = False) -> List[Dict]` Return a list of dictionaries representing the values of the columns coming from the database. 
You can select a subset of fields with fields parameter, that accepts the same set of parameters as `fields()` method. Note that passing fields to `values(fields)` is actually a shortcut for calling `fields(fields).values()`. !!!tip To read more about what you can pass to fields and how to select nested models fields read [selecting columns](./select-columns.md#fields) docs You can limit the number of rows by providing conditions in `filter()` and `exclude()`, but note that even if only one row (or no rows!) match your criteria you will return a list in response. Example: ```python # declared models class Category(ormar.Model): class Meta(BaseMeta): tablename = "categories" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=40) sort_order: int = ormar.Integer(nullable=True) class Post(ormar.Model): class Meta(BaseMeta): tablename = "posts" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=200) category: Optional[Category] = ormar.ForeignKey(Category) # sample data news = await Category(name="News", sort_order=0).save() await Post(name="Ormar strikes again!", category=news).save() await Post(name="Why don't you use ormar yet?", category=news).save() await Post(name="Check this out, ormar now for free", category=news).save() ``` Access Post models: ```python posts = await Post.objects.values() assert posts == [ {"id": 1, "name": "Ormar strikes again!", "category": 1}, {"id": 2, "name": "Why don't you use ormar yet?", "category": 1}, {"id": 3, "name": "Check this out, ormar now for free", "category": 1}, ] ``` To select also related models use `select_related` or `prefetch_related`. Note how nested models columns will be prefixed with full relation path coming from the main model (the one used in a query). 
```python # declare models class User(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Role(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) users: List[User] = ormar.ManyToMany(User) # sample data creator = await User(name="Anonymous").save() admin = await Role(name="admin").save() editor = await Role(name="editor").save() await creator.roles.add(admin) await creator.roles.add(editor) ``` Select user with roles ```python user = await User.objects.select_related("roles").values() # note nested prefixes: roleuser and roles assert user == [ { "id": 1, "name": "Anonymous", "roleuser__id": 1, "roleuser__role": 1, "roleuser__user": 1, "roles__id": 1, "roles__name": "admin", }, { "id": 1, "name": "Anonymous", "roleuser__id": 2, "roleuser__role": 2, "roleuser__user": 1, "roles__id": 2, "roles__name": "editor", }, ] ``` !!!note Note how role to users relation is a `ManyToMany` relation so by default you also get through model columns. Combine select related and fields to select only 3 fields. Note that we also exclude through model as by definition every model included in a join but without any reference in fields is assumed to be selected in full (all fields included). !!!note Note that in contrary to other queryset methods here you can exclude the in-between models but keep the end columns, which does not make sense when parsing the raw data into models. So in relation category -> category_x_post -> post -> user you can exclude category_x_post and post models but can keep the user one. (in ormar model context that is not possible as if you would exclude through and post model there would be no way to reach user model from category model). 
```python user = ( await Role.objects.select_related("users__categories") .filter(name="admin") .fields({"name": ..., "users": {"name": ..., "categories": {"name"}}}) .exclude_fields("roleuser") .values() ) assert user == [ { "name": "admin", "users__name": "Anonymous", "users__categories__name": "News", } ] ``` If you have multiple ManyToMany models in your query you would have to exclude each through model manually. To avoid this burden `ormar` provides you with `exclude_through=False` parameter. If you set this flag to True **all through models will be fully excluded**. ```python # equivalent to query above, note lack of exclude_fields call user = ( await Role.objects.select_related("users__categories") .filter(name="admin") .fields({"name": ..., "users": {"name": ..., "categories": {"name"}}}) .values(exclude_through=True) ) assert user == [ { "name": "admin", "users__name": "Anonymous", "users__categories__name": "News", } ] ``` ## values_list `values_list(fields: Union[List, str, Set, Dict] = None, flatten: bool = False, exclude_through: bool = False) -> List` Return a list of tuples representing the values of the columns coming from the database. You can select a subset of fields with fields parameter, that accepts the same set of parameters as `fields()` method. Note that passing fields to `values_list(fields)` is actually a shortcut for calling `fields(fields).values_list()`. !!!tip To read more about what you can pass to fields and how to select nested models fields read [selecting columns](./select-columns.md#fields) docs If you select only one column/field you can pass `flatten=True` which will return you a list of values instead of list of one element tuples. !!!warning Setting `flatten=True` if more than one (or none which means all) fields are selected will raise `QueryDefinitionError` exception. You can limit the number of rows by providing conditions in `filter()` and `exclude()`, but note that even if only one row (or no rows!) 
match your criteria you will return a list in response. Example: ```python # declared models class Category(ormar.Model): class Meta(BaseMeta): tablename = "categories" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=40) sort_order: int = ormar.Integer(nullable=True) class Post(ormar.Model): class Meta(BaseMeta): tablename = "posts" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=200) category: Optional[Category] = ormar.ForeignKey(Category) # sample data news = await Category(name="News", sort_order=0).save() await Post(name="Ormar strikes again!", category=news).save() await Post(name="Why don't you use ormar yet?", category=news).save() await Post(name="Check this out, ormar now for free", category=news).save() ``` Access Post models: ```python posts = await Post.objects.values_list() # note how columns refer to id, name and category (fk) assert posts == [ (1, "Ormar strikes again!", 1), (2, "Why don't you use ormar yet?", 1), (3, "Check this out, ormar now for free", 1), ] ``` To select also related models use `select_related` or `prefetch_related`. Let's complicate the relation and modify the previously mentioned Category model to refer to User model. ```python class Category(ormar.Model): class Meta(BaseMeta): tablename = "categories" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=40) sort_order: int = ormar.Integer(nullable=True) # new column below created_by: Optional[User] = ormar.ForeignKey(User, related_name="categories") ``` Now create the sample data with link to user. ```python creator = await User(name="Anonymous").save() admin = await Role(name="admin").save() editor = await Role(name="editor").save() await creator.roles.add(admin) await creator.roles.add(editor) news = await Category(name="News", sort_order=0, created_by=creator).save() ``` Combine select related and fields to select only 3 fields. 
Note that we also exclude through model as by definition every model included in a join but without any reference in fields is assumed to be selected in full (all fields included). !!!note Note that in contrary to other queryset methods here you can exclude the in-between models but keep the end columns, which does not make sense when parsing the raw data into models. So in relation category -> category_x_post -> post -> user you can exclude category_x_post and post models but can keep the user one. (in ormar model context that is not possible as if you would exclude through and post model there would be no way to reach user model from category model). ```python user = ( await Role.objects.select_related("users__categories") .filter(name="admin") .fields({"name": ..., "users": {"name": ..., "categories": {"name"}}}) .exclude_fields("roleuser") .values_list() ) assert user == [("admin", "Anonymous", "News")] ``` If you have multiple ManyToMany models in your query you would have to exclude each through model manually. To avoid this burden `ormar` provides you with `exclude_through=False` parameter. If you set this flag to True **all through models will be fully excluded**. ```python # equivalent to query above, note lack of exclude_fields call user = ( await Role.objects.select_related("users__categories") .filter(name="admin") .fields({"name": ..., "users": {"name": ..., "categories": {"name"}}}) .values_list(exclude_through=True) ) assert user == [("admin", "Anonymous", "News")] ``` Use flatten to get list of values. ```python # using flatten with more than one field will raise exception! await Role.objects.fields({"name", "id"}).values_list(flatten=True) # proper usage roles = await Role.objects.fields("name").values_list(flatten=True) assert roles == ["admin", "editor"] ``` ## QuerysetProxy methods When access directly the related `ManyToMany` field as well as `ReverseForeignKey` returns the list of related models. 
But at the same time it exposes subset of QuerySet API, so you can filter, create, select related etc related models directly from parent model. !!!warning Because using `values` and `values_list` skips parsing of the models and validation, in contrast to all other read methods in querysetproxy those 2 **does not clear currently loaded related models** and **does not overwrite the currently loaded models** with result of own call! ### values Works exactly the same as [values](./#values) function above but allows you to fetch related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section ### values_list Works exactly the same as [values_list](./#values_list) function above but allows you to query or create related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section [querysetproxy]: ../relations/queryset-proxy.md ormar-0.12.2/docs/queries/read.md000066400000000000000000000175571444363446500166120ustar00rootroot00000000000000# Read data from database Following methods allow you to load data from the database. * `get(*args, **kwargs) -> Model` * `get_or_create(_defaults: Optional[Dict[str, Any]] = None, *args, **kwargs) -> Tuple[Model, bool]` * `first(*args, **kwargs) -> Model` * `all(*args, **kwargs) -> List[Optional[Model]]` * `iterate(*args, **kwargs) -> AsyncGenerator[Model]` * `Model` * `Model.load()` method * `QuerysetProxy` * `QuerysetProxy.get(*args, **kwargs)` method * `QuerysetProxy.get_or_create(_defaults: Optional[Dict[str, Any]] = None, *args, **kwargs)` method * `QuerysetProxy.first(*args, **kwargs)` method * `QuerysetProxy.all(*args, **kwargs)` method ## get `get(*args, **kwargs) -> Model` Get's the first row from the db meeting the criteria set by kwargs. If no criteria set it will return the last row in db sorted by pk column. 
Passing a criteria is actually calling filter(*args, **kwargs) method described below. ```python class Track(ormar.Model): class Meta: tablename = "track" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) album: Optional[Album] = ormar.ForeignKey(Album) name: str = ormar.String(max_length=100) position: int = ormar.Integer() ``` ```python track = await Track.objects.get(name='The Bird') # note that above is equivalent to await Track.objects.filter(name='The Bird').get() track2 = track = await Track.objects.get() track == track2 # True since it's the only row in db in our example # and get without arguments return first row by pk column desc ``` !!!warning If no row meets the criteria `NoMatch` exception is raised. If there are multiple rows meeting the criteria the `MultipleMatches` exception is raised. ## get_or_none `get_or_none(*args, **kwargs) -> Model` Exact equivalent of get described above but instead of raising the exception returns `None` if no db record matching the criteria is found. ## get_or_create `get_or_create(_defaults: Optional[Dict[str, Any]] = None, *args, **kwargs) -> Tuple[Model, bool]` Combination of create and get methods. Tries to get a row meeting the criteria and if `NoMatch` exception is raised it creates a new one with given kwargs and _defaults. 
```python class Album(ormar.Model): class Meta: tablename = "album" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) year: int = ormar.Integer() ``` ```python album, created = await Album.objects.get_or_create(name='The Cat', _defaults={"year": 1999}) assert created is True assert album.name == "The Cat" assert album.year == 1999 # object is created as it does not exist album2, created = await Album.objects.get_or_create(name='The Cat') assert created is False assert album == album2 # return True as the same db row is returned ``` !!!warning Despite being an equivalent row from database the `album` and `album2` in example above are 2 different python objects! Updating one of them will not refresh the second one until you excplicitly load() the fresh data from db. !!!note Note that if you want to create a new object you either have to pass pk column value or pk column has to be set as autoincrement ## first `first(*args, **kwargs) -> Model` Gets the first row from the db ordered by primary key column ascending. ```python class Album(ormar.Model): class Meta: tablename = "album" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) ``` ```python await Album.objects.create(name='The Cat') await Album.objects.create(name='The Dog') album = await Album.objects.first() # first row by primary_key column asc assert album.name == 'The Cat' ``` ## all `all(*args, **kwargs) -> List[Optional["Model"]]` Returns all rows from a database for given model for set filter options. Passing kwargs is a shortcut and equals to calling `filter(*args, **kwargs).all()`. If there are no rows meeting the criteria an empty list is returned. 
```python class Album(ormar.Model): class Meta: tablename = "album" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Track(ormar.Model): class Meta: tablename = "track" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) album: Optional[Album] = ormar.ForeignKey(Album) title: str = ormar.String(max_length=100) position: int = ormar.Integer() ``` ```python tracks = await Track.objects.select_related("album").all(album__title='Sample') # will return a list of all Tracks for album Sample # for more on joins visit joining and subqueries section tracks = await Track.objects.all() # will return a list of all Tracks in database ``` ## iterate `iterate(*args, **kwargs) -> AsyncGenerator["Model"]` Return async iterable generator for all rows from a database for given model. Passing args and/or kwargs is a shortcut and equals to calling `filter(*args, **kwargs).iterate()`. If there are no rows meeting the criteria an empty async generator is returned. ```python class Album(ormar.Model): class Meta: tablename = "album" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) ``` ```python await Album.objects.create(name='The Cat') await Album.objects.create(name='The Dog') # will asynchronously iterate all Album models yielding one main model at a time from the generator async for album in Album.objects.iterate(): print(album.name) # The Cat # The Dog ``` !!!warning Use of `iterate()` causes previous `prefetch_related()` calls to be ignored; since these two optimizations do not make sense together. If `iterate()` & `prefetch_related()` are used together the `QueryDefinitionError` exception is raised. ## Model methods Each model instance have a set of methods to `save`, `update` or `load` itself. ### load You can load the `ForeignKey` related model by calling `load()` method. 
`load()` can be used to refresh the model from the database (if it was changed by some other process). !!!tip Read more about `load()` method in [models methods](../models/methods.md#load) ## QuerysetProxy methods When access directly the related `ManyToMany` field as well as `ReverseForeignKey` returns the list of related models. But at the same time it exposes subset of QuerySet API, so you can filter, create, select related etc related models directly from parent model. ### get Works exactly the same as [get](./#get) function above but allows you to fetch related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section ### get_or_none Exact equivalent of get described above but instead of raising the exception returns `None` if no db record matching the criteria is found. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section ### get_or_create Works exactly the same as [get_or_create](./#get_or_create) function above but allows you to query or create related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section ### first Works exactly the same as [first](./#first) function above but allows you to query related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section ### all Works exactly the same as [all](./#all) function above but allows you to query related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section [querysetproxy]: ../relations/queryset-proxy.md ormar-0.12.2/docs/queries/select-columns.md000066400000000000000000000277531444363446500206330ustar00rootroot00000000000000# Selecting subset of columns To select only chosen columns of your model you can use following functions. 
* `fields(columns: Union[List, str, set, dict]) -> QuerySet` * `exclude_fields(columns: Union[List, str, set, dict]) -> QuerySet` * `QuerysetProxy` * `QuerysetProxy.fields(columns: Union[List, str, set, dict])` method * `QuerysetProxy.exclude_fields(columns: Union[List, str, set, dict])` method ## fields `fields(columns: Union[List, str, set, dict]) -> QuerySet` With `fields()` you can select subset of model columns to limit the data load. !!!note Note that `fields()` and `exclude_fields()` works both for main models (on normal queries like `get`, `all` etc.) as well as `select_related` and `prefetch_related` models (with nested notation). Given a sample data like following: ```python import databases import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Company(ormar.Model): class Meta: tablename = "companies" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) founded: int = ormar.Integer(nullable=True) class Car(ormar.Model): class Meta: tablename = "cars" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) manufacturer = ormar.ForeignKey(Company) name: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) gearbox_type: str = ormar.String(max_length=20, nullable=True) gears: int = ormar.Integer(nullable=True) aircon_type: str = ormar.String(max_length=20, nullable=True) # build some sample data toyota = await Company.objects.create(name="Toyota", founded=1937) await Car.objects.create(manufacturer=toyota, name="Corolla", year=2020, gearbox_type='Manual', gears=5, aircon_type='Manual') await Car.objects.create(manufacturer=toyota, name="Yaris", year=2019, gearbox_type='Manual', gears=5, aircon_type='Manual') await Car.objects.create(manufacturer=toyota, name="Supreme", year=2020, gearbox_type='Auto', gears=6, 
aircon_type='Auto') ``` You can select specified fields by passing a `str, List[str], Set[str] or dict` with nested definition. To include related models use notation `{related_name}__{column}[__{optional_next} etc.]`. ```python hl_lines="1" all_cars = await Car.objects.select_related('manufacturer').fields(['id', 'name', 'manufacturer__name']).all() for car in all_cars: # excluded columns will yield None assert all(getattr(car, x) is None for x in ['year', 'gearbox_type', 'gears', 'aircon_type']) # included column on related models will be available, pk column is always included # even if you do not include it in fields list assert car.manufacturer.name == 'Toyota' # also in the nested related models - you cannot exclude pk - it's always auto added assert car.manufacturer.founded is None ``` `fields()` can be called several times, building up the columns to select. If you include related models into `select_related()` call but you won't specify columns for those models in fields - implies a list of all fields for those nested models. ```python hl_lines="1" all_cars = await Car.objects.select_related('manufacturer').fields('id').fields( ['name']).all() # all fiels from company model are selected assert all_cars[0].manufacturer.name == 'Toyota' assert all_cars[0].manufacturer.founded == 1937 ``` !!!warning Mandatory fields cannot be excluded as it will raise `ValidationError`, to exclude a field it has to be nullable. The `values()` method can be used to exclude mandatory fields, though data will be returned as a `dict`. You cannot exclude mandatory model columns - `manufacturer__name` in this example. ```python await Car.objects.select_related('manufacturer').fields( ['id', 'name', 'manufacturer__founded']).all() # will raise pydantic ValidationError as company.name is required ``` !!!tip Pk column cannot be excluded - it's always auto added even if not explicitly included. You can also pass fields to include as dictionary or set. 
To mark a field as included in a dictionary use its name as key
It's the opposite of `fields()` method so check documentation above to see what options are available. Especially check above how you can pass also nested dictionaries and sets as a mask to exclude fields from whole hierarchy. !!!note Note that `fields()` and `exclude_fields()` works both for main models (on normal queries like `get`, `all` etc.) as well as `select_related` and `prefetch_related` models (with nested notation). Below you can find few simple examples: ```python hl_lines="47 48 60 61 67" import databases import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Company(ormar.Model): class Meta: tablename = "companies" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) founded: int = ormar.Integer(nullable=True) class Car(ormar.Model): class Meta: tablename = "cars" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) manufacturer = ormar.ForeignKey(Company) name: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) gearbox_type: str = ormar.String(max_length=20, nullable=True) gears: int = ormar.Integer(nullable=True) aircon_type: str = ormar.String(max_length=20, nullable=True) # build some sample data toyota = await Company.objects.create(name="Toyota", founded=1937) await Car.objects.create(manufacturer=toyota, name="Corolla", year=2020, gearbox_type='Manual', gears=5, aircon_type='Manual') await Car.objects.create(manufacturer=toyota, name="Yaris", year=2019, gearbox_type='Manual', gears=5, aircon_type='Manual') await Car.objects.create(manufacturer=toyota, name="Supreme", year=2020, gearbox_type='Auto', gears=6, aircon_type='Auto') # select manufacturer but only name - to include related models use notation {model_name}__{column} all_cars = await Car.objects.select_related('manufacturer').exclude_fields( 
['year', 'gearbox_type', 'gears', 'aircon_type', 'manufacturer__founded']).all()
Something like `Track.objects.select_related(
```Python hl_lines="26-28" --8<-- "../docs_src/queries/docs002.py" ``` !!!warning Queryset needs to be filtered before updating to prevent accidental overwrite. To update whole database table `each=True` needs to be provided as a safety switch ## update_or_create `update_or_create(**kwargs) -> Model` Updates the model, or in case there is no match in database creates a new one. ```Python hl_lines="26-32" --8<-- "../docs_src/queries/docs003.py" ``` !!!note Note that if you want to create a new object you either have to pass pk column value or pk column has to be set as autoincrement ## bulk_update `bulk_update(objects: List["Model"], columns: List[str] = None) -> None` Allows to update multiple instance at once. All `Models` passed need to have primary key column populated. You can also select which fields to update by passing `columns` list as a list of string names. ```python hl_lines="8" # continuing the example from bulk_create # update objects for todo in todoes: todo.completed = False # perform update of all objects at once # objects need to have pk column set, otherwise exception is raised await ToDo.objects.bulk_update(todoes) completed = await ToDo.objects.filter(completed=False).all() assert len(completed) == 3 ``` ## Model methods Each model instance have a set of methods to `save`, `update` or `load` itself. ###update You can update models by updating your model attributes (fields) and calling `update()` method. If you try to update a model without a primary key set a `ModelPersistenceError` exception will be thrown. !!!tip Read more about `update()` method in [models-update](../models/methods.md#update) ###upsert It's a proxy to either `save()` or `update(**kwargs)` methods of a Model. If the pk is set the `update()` method will be called. 
!!!tip Read more about `upsert()` method in [models-upsert][models-upsert] ###save_related Method goes through all relations of the `Model` on which the method is called, and calls `upsert()` method on each model that is **not** saved. !!!tip Read more about `save_related()` method in [models-save-related][models-save-related] ## QuerysetProxy methods When access directly the related `ManyToMany` field as well as `ReverseForeignKey` returns the list of related models. But at the same time it exposes subset of QuerySet API, so you can filter, create, select related etc related models directly from parent model. ### update_or_create Works exactly the same as [update_or_create](./#update_or_create) function above but allows you to update or create related objects from other side of the relation. !!!tip To read more about `QuerysetProxy` visit [querysetproxy][querysetproxy] section [querysetproxy]: ../relations/queryset-proxy.md [models-upsert]: ../models/methods.md#upsert [models-save-related]: ../models/methods.md#save_relatedormar-0.12.2/docs/relations/000077500000000000000000000000001444363446500156615ustar00rootroot00000000000000ormar-0.12.2/docs/relations/foreign-key.md000066400000000000000000000231621444363446500204260ustar00rootroot00000000000000# ForeignKey `ForeignKey(to: Model, *, name: str = None, unique: bool = False, nullable: bool = True, related_name: str = None, virtual: bool = False, onupdate: Union[ReferentialAction, str] = None, ondelete: Union[ReferentialAction, str] = None, **kwargs: Any)` has required parameters `to` that takes target `Model` class. Sqlalchemy column and Type are automatically taken from target `Model`. * Sqlalchemy column: class of a target `Model` primary key column * Type (used for pydantic): type of a target `Model` ## Defining Models To define a relation add `ForeignKey` field that points to related `Model`. 
```Python hl_lines="29" --8<-- "../docs_src/fields/docs003.py" ``` ## Reverse Relation `ForeignKey` fields are automatically registering reverse side of the relation. By default it's child (source) `Model` name + s, like courses in snippet below: ```Python hl_lines="29 35" --8<-- "../docs_src/fields/docs001.py" ``` Reverse relation exposes API to manage related objects also from parent side. ### Skipping reverse relation If you are sure you don't want the reverse relation you can use `skip_reverse=True` flag of the `ForeignKey`. If you set `skip_reverse` flag internally the field is still registered on the other side of the relationship so you can: * `filter` by related models fields from reverse model * `order_by` by related models fields from reverse model But you cannot: * Access the related field from reverse model with `related_name` * Even if you `select_related` from reverse side of the model the returned models won't be populated in reversed instance (the join is not prevented so you still can `filter` and `order_by` over the relation) * The relation won't be populated in `dict()` and `json()` * You cannot pass the nested related objects when populating from dictionary or json (also through `fastapi`). It will be either ignored or error will be raised depending on `extra` setting in pydantic `Config`. Example: ```python class Author(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) first_name: str = ormar.String(max_length=80) last_name: str = ormar.String(max_length=80) class Post(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) author: Optional[Author] = ormar.ForeignKey(Author, skip_reverse=True) # create sample data author = Author(first_name="Test", last_name="Author") post = Post(title="Test Post", author=author) assert post.author == author # ok assert author.posts # Attribute error! 
# but still can use in order_by authors = ( await Author.objects.select_related("posts").order_by("posts__title").all() ) assert authors[0].first_name == "Test" # note that posts are not populated for author even if explicitly # included in select_related - note no posts in dict() assert author.dict(exclude={"id"}) == {"first_name": "Test", "last_name": "Author"} # still can filter through fields of related model authors = await Author.objects.filter(posts__title="Test Post").all() assert authors[0].first_name == "Test" assert len(authors) == 1 ``` ### add Adding child model from parent side causes adding related model to currently loaded parent relation, as well as sets child's model foreign key value and updates the model. ```python department = await Department(name="Science").save() course = Course(name="Math", completed=False) # note - not saved await department.courses.add(course) assert course.pk is not None # child model was saved # relation on child model is set and FK column saved in db assert course.department == department # relation on parent model is also set assert department.courses[0] == course ``` !!!warning If you want to add child model on related model the primary key value for parent model **has to exist in database**. Otherwise ormar will raise RelationshipInstanceError as it cannot set child's ForeignKey column value if parent model has no primary key value. That means that in example above the department has to be saved before you can call `department.courses.add()`. !!!warning This method will not work on `ManyToMany` relations - there, both sides of the relation have to be saved before adding to relation. ### remove Removal of the related model one by one. In reverse relation calling `remove()` does not remove the child model, but instead nulls it ForeignKey value. 
```python # continuing from above await department.courses.remove(course) assert len(department.courses) == 0 # course still exists and was saved in remove assert course.pk is not None assert course.department is None # to remove child from db await course.delete() ``` But if you want to clear the relation and delete the child at the same time you can issue: ```python # this will not only clear the relation # but also delete related course from db await department.courses.remove(course, keep_reversed=False) ``` ### clear Removal of all related models in one call. Like remove by default `clear()` nulls the ForeigKey column on child model (all, not matter if they are loaded or not). ```python # nulls department column on all courses related to this department await department.courses.clear() ``` If you want to remove the children altogether from the database, set `keep_reversed=False` ```python # deletes from db all courses related to this department await department.courses.clear(keep_reversed=False) ``` ## QuerysetProxy Reverse relation exposes QuerysetProxy API that allows you to query related model like you would issue a normal Query. To read which methods of QuerySet are available read below [querysetproxy][querysetproxy] ## related_name But you can overwrite this name by providing `related_name` parameter like below: ```Python hl_lines="29 35" --8<-- "../docs_src/fields/docs002.py" ``` !!!tip The reverse relation on access returns list of `wekref.proxy` to avoid circular references. !!!warning When you provide multiple relations to the same model `ormar` can no longer auto generate the `related_name` for you. Therefore, in that situation you **have to** provide `related_name` for all but one (one can be default and generated) or all related fields. ## Referential Actions When an object referenced by a ForeignKey is changed (deleted or updated), ormar will set the SQL constraint specified by the `ondelete` and `onupdate` argument. 
The possible values for `ondelete` and `onupdate` are found in `ormar.ReferentialAction`: !!!note Instead of `ormar.ReferentialAction`, you can directly pass string values to these two arguments, but this is not recommended because it will break the integrity. ### CASCADE Whenever rows in the parent (referenced) table are deleted (or updated), the respective rows of the child (referencing) table with a matching foreign key column will be deleted (or updated) as well. This is called a cascade delete (or update). ### RESTRICT A value cannot be updated or deleted when a row exists in a referencing or child table that references the value in the referenced table. Similarly, a row cannot be deleted as long as there is a reference to it from a referencing or child table. ### SET_NULL Set the ForeignKey to `None`; this is only possible if `nullable` is True. ### SET_DEFAULT Set the ForeignKey to its default value; a `server_default` for the ForeignKey must be set. !!!note Note that the `default` value is not allowed and you must do this through `server_default`, which you can read about in [this section][server_default]. ### DO_NOTHING Take `NO ACTION`; NO ACTION and RESTRICT are very much alike. The main difference between NO ACTION and RESTRICT is that with NO ACTION the referential integrity check is done after trying to alter the table. RESTRICT does the check before trying to execute the UPDATE or DELETE statement. Both referential actions act the same if the referential integrity check fails: the UPDATE or DELETE statement will result in an error. ## Relation Setup You have several ways to set-up a relationship connection. ### `Model` instance The most obvious one is to pass a related `Model` instance to the constructor. ```Python hl_lines="34-35" --8<-- "../docs_src/relations/docs001.py" ``` ### Primary key value You can setup the relation also with just the pk column value of the related model. 
```Python hl_lines="37-38" --8<-- "../docs_src/relations/docs001.py" ``` ### Dictionary Next option is with a dictionary of key-values of the related model. You can build the dictionary yourself or get it from existing model with `dict()` method. ```Python hl_lines="40-41" --8<-- "../docs_src/relations/docs001.py" ``` ### None Finally you can explicitly set it to None (default behavior if no value passed). ```Python hl_lines="43-44" --8<-- "../docs_src/relations/docs001.py" ``` !!!warning In all not None cases the primary key value for related model **has to exist in database**. Otherwise an IntegrityError will be raised by your database driver library. [queries]: ./queries.md [querysetproxy]: ./queryset-proxy.md [get]: ./queries.md#get [all]: ./queries.md#all [create]: ./queries.md#create [get_or_create]: ./queries.md#get_or_create [update_or_create]: ./queries.md#update_or_create [filter]: ./queries.md#filter [exclude]: ./queries.md#exclude [select_related]: ./queries.md#select_related [prefetch_related]: ./queries.md#prefetch_related [limit]: ./queries.md#limit [offset]: ./queries.md#offset [count]: ./queries.md#count [exists]: ./queries.md#exists [fields]: ./queries.md#fields [exclude_fields]: ./queries.md#exclude_fields [order_by]: ./queries.md#order_by [server_default]: ../fields/common-parameters.md#server-defaultormar-0.12.2/docs/relations/index.md000066400000000000000000000147621444363446500173240ustar00rootroot00000000000000# Relations Currently `ormar` supports two types of relations: * One-to-many (and many-to-one) with `ForeignKey` field * Many-to-many with `ManyToMany` field Below you can find a very basic examples of definitions for each of those relations. To read more about methods, possibilities, definition etc. please read the subsequent section of the documentation. ## ForeignKey To define many-to-one relation use `ForeignKey` field. 
```Python hl_lines="17" --8<-- "../docs_src/relations/docs003.py" ``` !!!tip To read more about one-to-many relations visit [foreign-keys][foreign-keys] section ## Reverse ForeignKey The definition of one-to-many relation also uses `ForeignKey`, and it's registered for you automatically. So in relation ato example above. ```Python hl_lines="17" class Department(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) # there is a virtual field here like follows courses: Optional[List[Course]] = ormar.ForeignKey(Course, virtual=True) # note that you DO NOT define it yourself, ormar does it for you. ``` !!!tip To read more about many-to-one relations (i.e changing the name of generated field) visit [foreign-keys][foreign-keys] section !!!tip Reverse ForeignKey allows you to query the related models with [queryset-proxy][queryset-proxy]. It allows you to use `await department.courses.all()` to fetch data related only to specific department etc. ##ManyToMany To define many-to-many relation use `ManyToMany` field. ```python hl_lines="18" class Category(ormar.Model): class Meta: tablename = "categories" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=40) class Post(ormar.Model): class Meta: tablename = "posts" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) categories: Optional[List[Category]] = ormar.ManyToMany(Category) ``` !!!tip To read more about many-to-many relations visit [many-to-many][many-to-many] section !!!tip ManyToMany allows you to query the related models with [queryset-proxy][queryset-proxy]. It allows you to use `await post.categories.all()` but also `await category.posts.all()` to fetch data related only to specific post, category etc. 
## Through fields As part of the `ManyToMany` relation you can define a through model, that can contain additional fields that you can use to filter, order etc. Fields defined like this are exposed on the reverse side of the current query for m2m models. So if you query from model `A` to model `B`, only model `B` has through field exposed. Which kind of make sense, since it's a one through model/field for each of related models. ```python hl_lines="10-15" class Category(ormar.Model): class Meta(BaseMeta): tablename = "categories" id = ormar.Integer(primary_key=True) name = ormar.String(max_length=40) # you can specify additional fields on through model class PostCategory(ormar.Model): class Meta(BaseMeta): tablename = "posts_x_categories" id: int = ormar.Integer(primary_key=True) sort_order: int = ormar.Integer(nullable=True) param_name: str = ormar.String(default="Name", max_length=200) class Post(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) categories = ormar.ManyToMany(Category, through=PostCategory) ``` !!!tip To read more about many-to-many relations and through fields visit [many-to-many][many-to-many] section !!!tip ManyToMany allows you to query the related models with [queryset-proxy][queryset-proxy]. It allows you to use `await post.categories.all()` but also `await category.posts.all()` to fetch data related only to specific post, category etc. ## Relationship default sort order By default relations follow model default sort order so `primary_key` column ascending, or any sort order se in `Meta` class. !!!tip To read more about models sort order visit [models](../models/index.md#model-sort-order) section of documentation But you can modify the order in which related models are loaded during query by providing `orders_by` and `related_orders_by` parameters to relations. 
In relations you can sort only by directly related model columns or for `ManyToMany` columns also `Through` model columns `{through_field_name}__{column_name}` Sample configuration might look like this: ```python hl_lines="24" database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Book(ormar.Model): class Meta(BaseMeta): tablename = "books" id: int = ormar.Integer(primary_key=True) author: Optional[Author] = ormar.ForeignKey( Author, orders_by=["name"], related_orders_by=["-year"] ) title: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) ranking: int = ormar.Integer(nullable=True) ``` Now calls: `await Author.objects.select_related("books").get()` - the books will be sorted by the book year descending `await Book.objects.select_related("author").all()` - the authors will be sorted by author name ascending ## Self-reference and postponed references In order to create auto-relation or create two models that reference each other in at least two different relations (remember the reverse side is auto-registered for you), you need to use `ForwardRef` from `typing` module. 
```python hl_lines="1 11 14" PersonRef = ForwardRef("Person") class Person(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) supervisor: PersonRef = ormar.ForeignKey(PersonRef, related_name="employees") Person.update_forward_refs() ``` !!!tip To read more about self-reference and postponed relations visit [postponed-annotations][postponed-annotations] section [foreign-keys]: ./foreign-key.md [many-to-many]: ./many-to-many.md [queryset-proxy]: ./queryset-proxy.md [postponed-annotations]: ./postponed-annotations.mdormar-0.12.2/docs/relations/many-to-many.md000066400000000000000000000327511444363446500205410ustar00rootroot00000000000000# ManyToMany `ManyToMany(to, through)` has required parameters `to` and optional `through` that takes target and relation `Model` classes. Sqlalchemy column and Type are automatically taken from target `Model`. * Sqlalchemy column: class of a target `Model` primary key column * Type (used for pydantic): type of a target `Model` ## Defining Models ```Python hl_lines="40" --8<-- "../docs_src/relations/docs002.py" ``` Create sample data: ```Python guido = await Author.objects.create(first_name="Guido", last_name="Van Rossum") post = await Post.objects.create(title="Hello, M2M", author=guido) news = await Category.objects.create(name="News") ``` ## Reverse relation `ForeignKey` fields are automatically registering reverse side of the relation. 
By default it's child (source) `Model` name + s, like courses in snippet below: ```python class Category(ormar.Model): class Meta(BaseMeta): tablename = "categories" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=40) class Post(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) categories: Optional[List[Category]] = ormar.ManyToMany(Category) # create some sample data post = await Post.objects.create(title="Hello, M2M") news = await Category.objects.create(name="News") await post.categories.add(news) # now you can query and access from both sides: post_check = Post.objects.select_related("categories").get() assert post_check.categories[0] == news # query through auto registered reverse side category_check = Category.objects.select_related("posts").get() assert category_check.posts[0] == post ``` Reverse relation exposes API to manage related objects also from parent side. ### related_name By default, the related_name is generated in the same way as for the `ForeignKey` relation (class.name.lower()+'s'), but in the same way you can overwrite this name by providing `related_name` parameter like below: ```Python categories: Optional[Union[Category, List[Category]]] = ormar.ManyToMany( Category, through=PostCategory, related_name="new_categories" ) ``` !!!warning When you provide multiple relations to the same model `ormar` can no longer auto generate the `related_name` for you. Therefore, in that situation you **have to** provide `related_name` for all but one (one can be default and generated) or all related fields. ### Skipping reverse relation If you are sure you don't want the reverse relation you can use `skip_reverse=True` flag of the `ManyToMany`. 
If you set `skip_reverse` flag internally the field is still registered on the other side of the relationship so you can: * `filter` by related models fields from reverse model * `order_by` by related models fields from reverse model But you cannot: * access the related field from reverse model with `related_name` * even if you `select_related` from reverse side of the model the returned models won't be populated in reversed instance (the join is not prevented so you still can `filter` and `order_by` over the relation) * the relation won't be populated in `dict()` and `json()` * you cannot pass the nested related objects when populating from dictionary or json (also through `fastapi`). It will be either ignored or error will be raised depending on `extra` setting in pydantic `Config`. Example: ```python class Category(ormar.Model): class Meta(BaseMeta): tablename = "categories" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=40) class Post(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) categories: Optional[List[Category]] = ormar.ManyToMany(Category, skip_reverse=True) # create some sample data post = await Post.objects.create(title="Hello, M2M") news = await Category.objects.create(name="News") await post.categories.add(news) assert post.categories[0] == news # ok assert news.posts # Attribute error! 
# but still can use in order_by categories = ( await Category.objects.select_related("posts").order_by("posts__title").all() ) assert categories[0].first_name == "Test" # note that posts are not populated for author even if explicitly # included in select_related - note no posts in dict() assert news.dict(exclude={"id"}) == {"name": "News"} # still can filter through fields of related model categories = await Category.objects.filter(posts__title="Hello, M2M").all() assert categories[0].name == "News" assert len(categories) == 1 ``` ## Through Model Optionally if you want to add additional fields you can explicitly create and pass the through model class. ```Python hl_lines="14-20 29" --8<-- "../docs_src/relations/docs004.py" ``` !!!warning Note that even of you do not provide through model it's going to be created for you automatically and still has to be included in example in `alembic` migrations. !!!tip Note that you need to provide `through` model if you want to customize the `Through` model name or the database table name of this model. If you do not provide the Through field it will be generated for you. The default naming convention is: * for class name it's union of both classes name (parent+other) so in example above it would be `PostCategory` * for table name it similar but with underscore in between and s in the end of class lowercase name, in example above would be `posts_categorys` ### Customizing Through relation names By default `Through` model relation names default to related model name in lowercase. So in example like this: ```python ... 
# course declaration omitted class Student(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) courses = ormar.ManyToMany(Course) # will produce default Through model like follows (example simplified) class StudentCourse(ormar.Model): class Meta: database = database metadata = metadata tablename = "students_courses" id: int = ormar.Integer(primary_key=True) student = ormar.ForeignKey(Student) # default name course = ormar.ForeignKey(Course) # default name ``` To customize the names of fields/relation in Through model now you can use new parameters to `ManyToMany`: * `through_relation_name` - name of the field leading to the model in which `ManyToMany` is declared * `through_reverse_relation_name` - name of the field leading to the model to which `ManyToMany` leads to Example: ```python ... # course declaration ommited class Student(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) courses = ormar.ManyToMany(Course, through_relation_name="student_id", through_reverse_relation_name="course_id") # will produce Through model like follows (example simplified) class StudentCourse(ormar.Model): class Meta: database = database metadata = metadata tablename = "students_courses" id: int = ormar.Integer(primary_key=True) student_id = ormar.ForeignKey(Student) # set by through_relation_name course_id = ormar.ForeignKey(Course) # set by through_reverse_relation_name ``` !!!note Note that explicitly declaring relations in Through model is forbidden, so even if you provide your own custom Through model you cannot change the names there and you need to use same `through_relation_name` and `through_reverse_relation_name` parameters. ## Through Fields The through field is auto added to the reverse side of the relation. The exposed field is named as lowercase `Through` class name. 
The exposed field **explicitly has no relations loaded** as the relation is already populated in `ManyToMany` field, so it's useful only when additional fields are provided on `Through` model. In a sample model setup as following: ```Python hl_lines="14-20 29" --8<-- "../docs_src/relations/docs004.py" ``` the through field can be used as a normal model field in most of the QuerySet operations. Note that through field is attached only to related side of the query so: ```python post = await Post.objects.select_related("categories").get() # source model has no through field assert post.postcategory is None # related models have through field assert post.categories[0].postcategory is not None # same is applicable for reversed query category = await Category.objects.select_related("posts").get() assert category.postcategory is None assert category.posts[0].postcategory is not None ``` Through field can be used for filtering the data. ```python post = ( await Post.objects.select_related("categories") .filter(postcategory__sort_order__gt=1) .get() ) ``` !!!tip Note that despite that the actual instance is not populated on source model, in queries, order by statements etc you can access through model from both sides. So below query has exactly the same effect (note access through `categories`) ```python post = ( await Post.objects.select_related("categories") .filter(categories__postcategory__sort_order__gt=1) .get() ) ``` Through model can be used in order by queries. ```python post = ( await Post.objects.select_related("categories") .order_by("-postcategory__sort_order") .get() ) ``` You can also select subset of the columns in a normal `QuerySet` way with `fields` and `exclude_fields`. 
```python post2 = ( await Post.objects.select_related("categories") .exclude_fields("postcategory__param_name") .get() ) ``` !!!warning Note that because through fields explicitly nullifies all relation fields, as relation is populated in ManyToMany field, you should not use the standard model methods like `save()` and `update()` before re-loading the field from database. If you want to modify the through field in place remember to reload it from database. Otherwise you will set relations to None so effectively make the field useless! ```python # always reload the field before modification await post2.categories[0].postcategory.load() # only then update the field await post2.categories[0].postcategory.update(sort_order=3) ``` Note that reloading the model effectively reloads the relations as `pk_only` models (only primary key is set) so they are not fully populated, but it's enough to preserve the relation on update. !!!warning If you use i.e. `fastapi` the partially loaded related models on through field might cause `pydantic` validation errors (that's the primary reason why they are not populated by default). So either you need to exclude the related fields in your response, or fully load the related models. In example above it would mean: ```python await post2.categories[0].postcategory.post.load() await post2.categories[0].postcategory.category.load() ``` Alternatively you can use `load_all()`: ```python await post2.categories[0].postcategory.load_all() ``` **Preferred way of update is through queryset proxy `update()` method** ```python # filter the desired related model with through field and update only through field params await post2.categories.filter(name='Test category').update(postcategory={"sort_order": 3}) ``` ## Relation methods ### add `add(item: Model, **kwargs)` Allows you to add model to ManyToMany relation. ```python # Add a category to a post. 
await post.categories.add(news) # or from the other end: await news.posts.add(post) ``` !!!warning In all not `None` cases the primary key value for related model **has to exist in database**. Otherwise an IntegrityError will be raised by your database driver library. If you declare your models with a Through model with additional fields, you can populate them during adding child model to relation. In order to do so, pass keyword arguments with field names and values to `add()` call. Note that this works only for `ManyToMany` relations. ```python post = await Post(title="Test post").save() category = await Category(name="Test category").save() # apart from model pass arguments referencing through model fields await post.categories.add(category, sort_order=1, param_name='test') ``` ### remove Removal of the related model one by one. Removes also the relation in the database. ```python await news.posts.remove(post) ``` ### clear Removal of all related models in one call. Removes also the relation in the database. ```python await news.posts.clear() ``` ### QuerysetProxy Reverse relation exposes QuerysetProxy API that allows you to query related model like you would issue a normal Query. 
To read which methods of QuerySet are available read below [querysetproxy][querysetproxy] [queries]: ./queries.md [querysetproxy]: ./queryset-proxy.md [get]: ./queries.md#get [all]: ./queries.md#all [create]: ./queries.md#create [get_or_create]: ./queries.md#get_or_create [update_or_create]: ./queries.md#update_or_create [filter]: ./queries.md#filter [exclude]: ./queries.md#exclude [select_related]: ./queries.md#select_related [prefetch_related]: ./queries.md#prefetch_related [limit]: ./queries.md#limit [offset]: ./queries.md#offset [count]: ./queries.md#count [exists]: ./queries.md#exists [fields]: ./queries.md#fields [exclude_fields]: ./queries.md#exclude_fields [order_by]: ./queries.md#order_byormar-0.12.2/docs/relations/postponed-annotations.md000066400000000000000000000120141444363446500225470ustar00rootroot00000000000000# Postponed annotations ## Self-referencing Models When you want to reference the same model during declaration to create a relation you need to declare the referenced model as a `ForwardRef`, as during the declaration the class is not yet ready and python by default won't let you reference it. Although you might be tempted to use __future__ annotations or simply quote the name with `""` it won't work as `ormar` is designed to work with explicitly declared `ForwardRef`. First, you need to import the required ref from typing. ```python from typing import ForwardRef ``` But note that before python 3.7 it used to be internal, so for python <= 3.6 you need ```python from typing import _ForwardRef as ForwardRef ``` or since `pydantic` is required by `ormar` it can handle this switch for you. In that case you can simply import ForwardRef from pydantic regardless of your python version. ```python from pydantic.typing import ForwardRef ``` Now we need a sample model and a reference to the same model, which will be used to creat a self referencing relation. 
```python # create the forwardref to model Person PersonRef = ForwardRef("Person") class Person(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) # use the forwardref as to parameter supervisor: PersonRef = ormar.ForeignKey(PersonRef, related_name="employees") ``` That's so simple. But before you can use the model you need to manually update the references so that they lead to the actual models. !!!warning If you try to use the model without updated references, `ModelError` exception will be raised. So in our example above any call like following will cause exception ```python # creation of model - exception await Person.objects.create(name="Test") # initialization of model - exception Person2(name="Test") # usage of model's QuerySet - exception await Person2.objects.get() ``` To update the references call the `update_forward_refs` method on **each model** with forward references, only **after all related models were declared.** So in order to make our previous example work we need just one extra line. ```python hl_lines="14" PersonRef = ForwardRef("Person") class Person(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) supervisor: PersonRef = ormar.ForeignKey(PersonRef, related_name="employees") Person.update_forward_refs() ``` Of course the same can be done with ManyToMany relations in exactly same way, both for to and through parameters. 
```python # declare the reference ChildRef = ForwardRef("Child") class ChildFriend(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db class Child(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) # use it in relation friends = ormar.ManyToMany(ChildRef, through=ChildFriend, related_name="also_friends") Child.update_forward_refs() ``` ## Cross model relations The same mechanism and logic as for self-reference model can be used to link multiple different models between each other. Of course `ormar` links both sides of relation for you, creating a reverse relation with specified (or default) `related_name`. But if you need two (or more) relations between any two models, that for whatever reason should be stored on both sides (so one relation is declared on one model, and other on the second model), you need to use `ForwardRef` to achieve that. Look at the following simple example. 
```python # teacher is not yet defined TeacherRef = ForwardRef("Teacher") class Student(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) # so we use reference instead of actual model primary_teacher: TeacherRef = ormar.ForeignKey(TeacherRef, related_name="own_students") class StudentTeacher(ormar.Model): class Meta(ModelMeta): tablename = 'students_x_teachers' metadata = metadata database = db class Teacher(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) # we need students for other relation hence the order students = ormar.ManyToMany(Student, through=StudentTeacher, related_name="teachers") # now the Teacher model is already defined we can update references Student.update_forward_refs() ``` !!!warning Remember that `related_name` needs to be unique across related models regardless of how many relations are defined. ormar-0.12.2/docs/relations/queryset-proxy.md000066400000000000000000000234111444363446500212440ustar00rootroot00000000000000# QuerySetProxy When access directly the related `ManyToMany` field as well as `ReverseForeignKey` returns the list of related models. But at the same time it exposes subset of QuerySet API, so you can filter, create, select related etc related models directly from parent model. !!!note By default exposed QuerySet is already filtered to return only `Models` related to parent `Model`. So if you issue `post.categories.all()` you will get all categories related to that post, not all in table. !!!note Note that when accessing QuerySet API methods through QuerysetProxy you don't need to use `objects` attribute like in normal queries. So note that it's `post.categories.all()` and **not** `post.categories.objects.all()`. 
To learn more about available QuerySet methods visit [queries][queries] !!!warning Querying related models from ManyToMany cleans list of related models loaded on parent model: Example: `post.categories.first()` will set post.categories to list of 1 related model -> the one returned by first() Example 2: if post has 4 categories so `len(post.categories) == 4` calling `post.categories.limit(2).all()` -> will load only 2 children and now `assert len(post.categories) == 2` This happens for all QuerysetProxy methods returning data: `get`, `all` and `first` and in `get_or_create` if model already exists. Note that value returned by `create` or created in `get_or_create` and `update_or_create` if model does not exist will be added to relation list (not clearing it). ## Read data from database ### get `get(**kwargs): -> Model` To grab just one of related models filtered by name you can use `get(**kwargs)` method. ```python # grab one category assert news == await post.categories.get(name="News") # note that method returns the category so you can grab this value # but it also modifies list of related models in place # so regardless of what was previously loaded on parent model # now it has only one value -> just loaded with get() call assert len(post.categories) == 1 assert post.categories[0] == news ``` !!!tip Read more in queries documentation [get][get] ### get_or_create `get_or_create(_defaults: Optional[Dict[str, Any]] = None, **kwargs) -> Tuple[Model, bool]` Tries to get a row meeting the criteria and if NoMatch exception is raised it creates a new one with given kwargs and _defaults. !!!tip Read more in queries documentation [get_or_create][get_or_create] ### all `all(**kwargs) -> List[Optional["Model"]]` To get a list of related models use `all()` method. Note that you can filter the queryset, select related, exclude fields etc. like in normal query. ```python # with all Queryset methods - filtering, selecting columns, counting etc. 
await news.posts.filter(title__contains="M2M").all() await Category.objects.filter(posts__author=guido).get() # columns models of many to many relation can be prefetched news_posts = await news.posts.select_related("author").all() assert news_posts[0].author == guido ``` !!!tip Read more in queries documentation [all][all] ### iterate `iterate(**kwargs) -> AsyncGenerator["Model"]` To iterate on related models use `iterate()` method. Note that you can filter the queryset, select related, exclude fields etc. like in normal query. ```python # iterate on categories of this post with an async generator async for category in post.categories.iterate(): print(category.name) ``` !!!tip Read more in queries documentation [iterate][iterate] ## Insert/ update data into database ### create `create(**kwargs): -> Model` Create related `Model` directly from parent `Model`. The link table is automatically populated, as well as relation ids in the database. ```python # Creating columns object from instance: await post.categories.create(name="Tips") assert len(await post.categories.all()) == 2 # newly created instance already have relation persisted in the database ``` !!!tip Read more in queries documentation [create][create] For `ManyToMany` relations there is an additional functionality of passing parameters that will be used to create a through model if you declared additional fields on explicitly provided Through model. 
Given sample like this: ```Python hl_lines="14-20 29" --8<-- "../docs_src/relations/docs004.py" ``` You can populate fields on through model in the `create()` call in a following way: ```python post = await Post(title="Test post").save() await post.categories.create( name="Test category1", # in arguments pass a dictionary with name of the through field and keys # corresponding to through model fields postcategory={"sort_order": 1, "param_name": "volume"}, ) ``` ### get_or_create `get_or_create(_defaults: Optional[Dict[str, Any]] = None, **kwargs) -> Tuple[Model, bool]` Tries to get a row meeting the criteria and if NoMatch exception is raised it creates a new one with given kwargs. !!!tip Read more in queries documentation [get_or_create][get_or_create] ### update_or_create `update_or_create(**kwargs) -> Model` Updates the model, or in case there is no match in database creates a new one. !!!tip Read more in queries documentation [update_or_create][update_or_create] ### update `update(**kwargs, each:bool = False) -> int` Updates the related model with provided keyword arguments, return number of updated rows. !!!tip Read more in queries documentation [update][update] Note that for `ManyToMany` relations update can also accept an argument with through field name and a dictionary of fields. ```Python hl_lines="14-20 29" --8<-- "../docs_src/relations/docs004.py" ``` In example above you can update attributes of `postcategory` in a following call: ```python await post.categories.filter(name="Test category3").update( postcategory={"sort_order": 4} ) ``` ## Filtering and sorting ### filter `filter(*args, **kwargs) -> QuerySet` Allows you to filter by any Model attribute/field as well as to fetch instances, with a filter across an FK relationship. !!!tip Read more in queries documentation [filter][filter] ### exclude `exclude(*args, **kwargs) -> QuerySet` Works exactly the same as filter and all modifiers (suffixes) are the same, but returns a not condition. 
!!!tip Read more in queries documentation [exclude][exclude] ### order_by `order_by(columns:Union[List, str]) -> QuerySet` With order_by() you can order the results from database based on your choice of fields. !!!tip Read more in queries documentation [order_by][order_by] ## Joins and subqueries ### select_related `select_related(related: Union[List, str]) -> QuerySet` Allows to prefetch related models during the same query. With select_related always only one query is run against the database, meaning that one (sometimes complicated) join is generated and later nested models are processed in python. !!!tip Read more in queries documentation [select_related][select_related] ### prefetch_related `prefetch_related(related: Union[List, str]) -> QuerySet` Allows to prefetch related models during query - but opposite to select_related each subsequent model is fetched in a separate database query. With prefetch_related always one query per Model is run against the database, meaning that you will have multiple queries executed one after another. !!!tip Read more in queries documentation [prefetch_related][prefetch_related] ## Pagination and rows number ### paginate `paginate(page: int, page_size: int = 20) -> QuerySet` Combines the offset and limit methods based on page number and size. !!!tip Read more in queries documentation [paginate][paginate] ### limit `limit(limit_count: int) -> QuerySet` You can limit the results to desired number of parent models. !!!tip Read more in queries documentation [limit][limit] ### offset `offset(offset: int) -> QuerySet` You can offset the results by desired number of main models. !!!tip Read more in queries documentation [offset][offset] ## Selecting subset of columns ### fields `fields(columns: Union[List, str, set, dict]) -> QuerySet` With fields() you can select subset of model columns to limit the data load. 
!!!tip Read more in queries documentation [fields][fields] ### exclude_fields `exclude_fields(columns: Union[List, str, set, dict]) -> QuerySet` With exclude_fields() you can select subset of model columns that will be excluded to limit the data load. !!!tip Read more in queries documentation [exclude_fields][exclude_fields] ## Aggregated functions ### count `count(distinct: bool = True) -> int` Returns number of rows matching the given criteria (i.e. applied with filter and exclude) !!!tip Read more in queries documentation [count][count] ### exists `exists() -> bool` Returns a bool value to confirm if there are rows matching the given criteria (applied with filter and exclude) !!!tip Read more in queries documentation [exists][exists] [queries]: ../queries/index.md [get]: ../queries/read.md#get [all]: ../queries/read.md#all [iterate]: ../queries/read.md#iterate [create]: ../queries/create.md#create [get_or_create]: ../queries/read.md#get_or_create [update_or_create]: ../queries/update.md#update_or_create [update]: ../queries/update.md#update [filter]: ../queries/filter-and-sort.md#filter [exclude]: ../queries/filter-and-sort.md#exclude [select_related]: ../queries/joins-and-subqueries.md#select_related [prefetch_related]: ../queries/joins-and-subqueries.md#prefetch_related [limit]: ../queries/pagination-and-rows-number.md#limit [offset]: ../queries/pagination-and-rows-number.md#offset [paginate]: ../queries/pagination-and-rows-number.md#paginate [count]: ../queries/aggregations.md#count [exists]: ../queries/aggregations.md#exists [fields]: ../queries/select-columns.md#fields [exclude_fields]: ../queries/select-columns.md#exclude_fields [order_by]: ../queries/filter-and-sort.md#order_by ormar-0.12.2/docs/releases.md000066400000000000000000001540451444363446500160170ustar00rootroot00000000000000# 0.12.2 ## ✨ Features * Bump support for `FastAPI` up to the newest version (0.97.0) [#1110](https://github.com/collerek/ormar/pull/1110) * Add support and tests for 
`Python 3.11` [#1110](https://github.com/collerek/ormar/pull/1110) # 0.12.1 ## ✨ Features * Massive performance improvements in area of loading the models due to recursive loads and caching of the models and related models. (by @erichaydel - thanks!) [#853](https://github.com/collerek/ormar/pull/948) ## 💬 Internals * Benchmarks for comparing performance effect of implemented changes in regard of trends (again, by @erichaydel - thanks!) [#853](https://github.com/collerek/ormar/pull/948) # 0.12.0 ## ✨ Breaking Changes * `Queryset.bulk_create` will now raise `ModelListEmptyError` on empty list of models (by @ponytailer - thanks!) [#853](https://github.com/collerek/ormar/pull/853) ## ✨ Features * `Model.upsert()` now handles a flag `__force_save__`: `bool` that allow upserting the models regardless of the fact if they have primary key set or not. Note that setting this flag will cause two queries for each upserted model -> `get` to check if model exists and later `update/insert` accordingly. [#889](https://github.com/collerek/ormar/pull/853) ## 🐛 Fixes * Fix for empty relations breaking `construct` method (by @Abdeldjalil-H - thanks!) [#870](https://github.com/collerek/ormar/issues/870) * Fix save related not saving models with already set pks (including uuid) [#885](https://github.com/collerek/ormar/issues/885) * Fix for wrong relations exclusions depending on the order of exclusions [#779](https://github.com/collerek/ormar/issues/779) * Fix `property_fields` not being inherited properly [#774](https://github.com/collerek/ormar/issues/774) # 0.11.3 ## ✨ Features * Document `onupdate` and `ondelete` referential actions in `ForeignKey` and provide `ReferentialAction` enum to specify the behavior of the relationship (by @SepehrBazyar - thanks!) [#724](https://github.com/collerek/ormar/issues/724) * Add `CheckColumn` to supported constraints in models Meta (by @SepehrBazyar - thanks!) 
[#729](https://github.com/collerek/ormar/issues/729) ## 🐛 Fixes * Fix limiting query result to 0 should return empty list (by @SepehrBazyar - thanks!) [#766](https://github.com/collerek/ormar/issues/713) ## 💬 Other * Add dark mode to docs (by @SepehrBazyar - thanks!) [#717](https://github.com/collerek/ormar/pull/717) * Update aiomysql dependency [#778](https://github.com/collerek/ormar/issues/778) # 0.11.2 ## 🐛 Fixes * Fix database drivers being required, while they should be optional [#713](https://github.com/collerek/ormar/issues/713) * Fix boolean field problem in `limit` queries in postgres without `limit_raw_sql` flag [#704](https://github.com/collerek/ormar/issues/704) * Fix enum_class spilling to schema causing errors in OpenAPI [#699](https://github.com/collerek/ormar/issues/699) # 0.11.1 ## 🐛 Fixes * Fix deepcopy issues introduced in pydantic 1.9 [#685](https://github.com/collerek/ormar/issues/685) # 0.11.0 ## ✨ Breaking Changes * Dropped support for python 3.6 * `Queryset.get_or_create` returns now a tuple with model and bool value indicating if the model was created (by @MojixCoder - thanks!) [#554](https://github.com/collerek/ormar/pull/554) * `Queryset.count()` now counts the number of distinct parent model rows by default, counting all rows is possible by setting `distinct=False` (by @erichaydel - thanks) [#588](https://github.com/collerek/ormar/pull/588) ## ✨ Features * Added support for python 3.10 ## 🐛 Fixes * Fix inconsistent `JSON` fields behaviour in `save` and `bulk_create` [#584](https://github.com/collerek/ormar/issues/584) * Fix maximum recursion error [#580](https://github.com/collerek/ormar/pull/580) # 0.10.25 ## ✨ Features * Add `queryset_class` option to `Model.Meta` that allows you to easily swap `QuerySet` for your Model (by @ponytailer - thanks!) 
[#538](https://github.com/collerek/ormar/pull/538) * Allow passing extra `kwargs` to `IndexColumns` that will be passed to sqlalchemy `Index` (by @zevisert - thanks) [#575](https://github.com/collerek/ormar/pull/538) ## 🐛 Fixes * Fix nullable setting on `JSON` fields [#529](https://github.com/collerek/ormar/issues/529) * Fix bytes/str mismatch in bulk operations when using orjson instead of json (by @ponytailer - thanks!) [#538](https://github.com/collerek/ormar/pull/538) # 0.10.24 ## ✨ Features * Add `post_bulk_update` signal (by @ponytailer - thanks!) [#524](https://github.com/collerek/ormar/pull/524) ## 🐛 Fixes * Fix support for `pydantic==1.9.0` [#502](https://github.com/collerek/ormar/issues/502) * Fix timezone issues with datetime [#504](https://github.com/collerek/ormar/issues/504) * Remove literal binds in query generation to unblock postgres arrays [#/tophat/ormar-postgres-extensions/9](https://github.com/tophat/ormar-postgres-extensions/pull/9) * Fix bulk update for `JSON` fields [#519](https://github.com/collerek/ormar/issues/519) ## 💬 Other * Improve performance of `bulk_create` by bypassing `databases` `execute_many` suboptimal implementation. (by @Mng-dev-ai thanks!) [#520](https://github.com/collerek/ormar/pull/520) * Bump min. required `databases` version to `>=5.4`. 
# 0.10.23 ## ✨ Features * Add ability to pass `comment` to sqlalchemy when creating a column [#485](https://github.com/collerek/ormar/issues/485) ## 🐛 Fixes * Fix `LargeBinary` fields that can be nullable [#409](https://github.com/collerek/ormar/issues/409) * Make `ormar.Model` pickable [#413](https://github.com/collerek/ormar/issues/413) * Make `first()` and `get()` without arguments respect ordering of main model set by user, fallback to primary key (asc, and desc respectively) [#453](https://github.com/collerek/ormar/issues/453) * Fix improper quoting of non-aliased join `on` clauses in postgress [#455](https://github.com/collerek/ormar/issues/455) # 0.10.22 ## 🐛 Fixes * Hot fix for validators not being inherited when parent `ormar` model was set [#365](https://github.com/collerek/ormar/issues/365) # 0.10.21 ## 🐛 Fixes * Add `ormar` implementation of `construct` classmethod that allows to build `Model` instances without validating the input to speed up the whole flow, if your data is already validated [#318](https://github.com/collerek/ormar/issues/318) * Fix for "inheriting" field validators from `ormar` model when newly created pydanic model is generated with `get_pydantic` [#365](https://github.com/collerek/ormar/issues/365) # 0.10.20 ## ✨ Features * Add `extra` parameter in `Model.Meta` that accepts `Extra.ignore` and `Extra.forbid` (default) and either ignores the extra fields passed to `ormar` model or raises an exception if one is encountered [#358](https://github.com/collerek/ormar/issues/358) ## 🐛 Fixes * Allow `None` if field is nullable and have choices set [#354](https://github.com/collerek/ormar/issues/354) * Always set `primary_key` to `not null` regardless of `autoincrement` and explicit `nullable` setting to avoid problems with migrations [#348](https://github.com/collerek/ormar/issues/348) # 0.10.19 ## ✨ Features * Add support for multi-column non-unique `IndexColumns` in `Meta.constraints` [#307](https://github.com/collerek/ormar/issues/307) * 
Add `sql_nullable` field attribute that allows to set different nullable setting for pydantic model and for underlying sql column [#308](https://github.com/collerek/ormar/issues/308) ## 🐛 Fixes * Enable caching of relation map to increase performance [#337](https://github.com/collerek/ormar/issues/337) * Clarify and fix documentation in regard of nullable fields [#339](https://github.com/collerek/ormar/issues/339) ## 💬 Other * Bump supported `databases` version to `<=5.2`. # 0.10.18 ## 🐛 Fixes * Fix order of fields in pydantic models [#328](https://github.com/collerek/ormar/issues/328) * Fix databases 0.5.0 support [#142](https://github.com/collerek/ormar/issues/142) # 0.10.17 ## ✨ Features * Allow overwriting the default pydantic type for model fields [#312](https://github.com/collerek/ormar/issues/312) * Add support for `sqlalchemy` >=1.4 (requires `databases` >= 0.5.0) [#142](https://github.com/collerek/ormar/issues/142) # 0.10.16 ## ✨ Features * Allow passing your own pydantic `Config` to `ormar.Model` that will be merged with the default one by @naturalethic (thanks!) [#285](https://github.com/collerek/ormar/issues/285) * Add `SmallInteger` field type by @ProgrammerPlus1998 (thanks!) [#297](https://github.com/collerek/ormar/pull/297) ## 🐛 Fixes * Fix generating openapi schema by removing obsolete pydantic field parameters that were directly exposed in schema [#291](https://github.com/collerek/ormar/issues/291) * Fix unnecessary warning for auto generated through models [#295](https://github.com/collerek/ormar/issues/295) # 0.10.15 ## 🐛 Fixes * Fix generating pydantic models tree with nested models (by @pawamoy - thanks!) 
[#278](https://github.com/collerek/ormar/issues/278) * Fix missing f-string in warning about missing primary key field [#274](https://github.com/collerek/ormar/issues/274) * Fix passing foreign key value as relation (additional guard, fixed already in the latest release) [#270](https://github.com/collerek/ormar/issues/270) # 0.10.14 ## ✨ Features * Allow passing `timezone:bool = False` parameter to `DateTime` and `Time` fields for timezone aware database columns [#264](https://github.com/collerek/ormar/issues/264) * Allow passing datetime, date and time for filter on `DateTime`, `Time` and `Date` fields to allow filtering by datetimes instead of converting the value to string [#79](https://github.com/collerek/ormar/issues/79) ## 🐛 Fixes * Fix dependencies from `psycopg2` to `psycopg2-binary` [#255](https://github.com/collerek/ormar/issues/255) # 0.10.13 ## ✨ Features * Allow passing field accessors in `select_related` and `prefetch_related` aka. python style `select_related` [#225](https://github.com/collerek/ormar/issues/225). * Previously: ```python await Post.objects.select_related(["author", "categories"]).get() await Author.objects.prefetch_related("posts__categories").get() ``` * Now also: ```python await Post.objects.select_related([Post.author, Post.categories]).get() await Author.objects.prefetch_related(Author.posts.categories).get() ``` ## 🐛 Fixes * Fix overwriting default value for inherited primary key [#253](https://github.com/collerek/ormar/issues/253) # 0.10.12 ## 🐛 Fixes * Fix `QuerySet.create` method not using init (if custom provided) [#245](https://github.com/collerek/ormar/issues/245) * Fix `ForwardRef` `ManyToMany` relation setting wrong pydantic type [#250](https://github.com/collerek/ormar/issues/250) # 0.10.11 ## ✨ Features * Add `values` and `values_list` to `QuerySet` and `QuerysetProxy` that allows to return raw data from query [#223](https://github.com/collerek/ormar/issues/223). 
* Allow returning list of tuples or list of dictionaries from a query * Skips parsing the data to ormar model so skips also the validation * Allow excluding models in between in chain of relations, so you can extract only needed columns * `values_list` allows you to flatten the result if you extract only one column. ## 🐛 Fixes * Fix creation of auto through model for m2m relation with ForwardRef [#226](https://github.com/collerek/ormar/issues/226) # 0.10.10 ## ✨ Features * Add [`get_pydantic`](https://collerek.github.io/ormar/models/methods/#get_pydantic) flag that allows you to auto generate equivalent pydantic models tree from ormar.Model. This newly generated model tree can be used in requests and responses to exclude fields you do not want to include in the data. * Add [`exclude_parent_fields`](https://collerek.github.io/ormar/models/inheritance/#exclude_parent_fields) parameter to model Meta that allows you to exclude fields from parent models during inheritance. Note that best practice is to combine models and mixins but if you have many similar models and just one that differs it might be useful tool to achieve that. ## 🐛 Fixes * Fix is null filter with pagination and relations (by @erichaydel) [#214](https://github.com/collerek/ormar/issues/214) * Fix not saving child object on reverse side of the relation if not saved before [#216](https://github.com/collerek/ormar/issues/216) ## 💬 Other * Expand [fastapi](https://collerek.github.io/ormar/fastapi) part of the documentation to show samples of using ormar in requests and responses in fastapi. * Improve the docs in regard of `default`, `ForeignKey.add` etc. # 0.10.9 ## Important security fix * Update pin for pydantic to fix security vulnerability [CVE-2021-29510](https://github.com/samuelcolvin/pydantic/security/advisories/GHSA-5jqp-qgf6-3pvh) You are advised to update to version of pydantic that was patched. In 0.10.9 ormar excludes versions with vulnerability in pinned dependencies. 
## 🐛 Fixes * Fix OpenAPI schema for LargeBinary [#204](https://github.com/collerek/ormar/issues/204) # 0.10.8 ## 🐛 Fixes * Fix populating default values in pk_only child models [#202](https://github.com/collerek/ormar/issues/202) * Fix mypy for LargeBinary fields with base64 str representation [#199](https://github.com/collerek/ormar/issues/199) * Fix OpenAPI schema format for LargeBinary fields with base64 str representation [#199](https://github.com/collerek/ormar/issues/199) * Fix OpenAPI choices encoding for LargeBinary fields with base64 str representation # 0.10.7 ## ✨ Features * Add `exclude_primary_keys: bool = False` flag to `dict()` method that allows to exclude all primary key columns in the resulting dictionary. [#164](https://github.com/collerek/ormar/issues/164) * Add `exclude_through_models: bool = False` flag to `dict()` that allows excluding all through models from `ManyToMany` relations [#164](https://github.com/collerek/ormar/issues/164) * Add `represent_as_base64_str: bool = False` parameter that allows conversion of bytes `LargeBinary` field to base64 encoded string. String is returned in `dict()`, on access to attribute and string is converted to bytes on setting. Data in database is stored as bytes. 
[#187](https://github.com/collerek/ormar/issues/187) * Add `pk` alias to allow field access by `Model.pk` in filters and order by clauses (python style) ## 🐛 Fixes * Remove default `None` option for `max_length` for `LargeBinary` field [#186](https://github.com/collerek/ormar/issues/186) * Remove default `None` option for `max_length` for `String` field ## 💬 Other * Provide a guide and samples of `dict()` parameters in the [docs](https://collerek.github.io/ormar/models/methods/) * Major refactor of getting/setting attributes from magic methods into descriptors -> noticeable performance improvement # 0.10.6 ## ✨ Features * Add `LargeBinary(max_length)` field type [#166](https://github.com/collerek/ormar/issues/166) * Add support for normal pydantic fields (including Models) instead of `pydantic_only` attribute which is now deprecated [#160](https://github.com/collerek/ormar/issues/160). Pydantic fields should be declared normally as in pydantic model next to ormar fields, note that (obviously) `ormar` does not save and load the value for this field in database that mean that **ONE** of the following has to be true: * pydantic field declared on ormar model has to be `Optional` (defaults to None) * pydantic field has to have a default value set * pydantic field has `default_factory` function set * ormar.Model with pydantic field has to overwrite `__init__()` and provide the value there If none of the above `ormar` (or rather pydantic) will fail during loading data from the database, with missing required value for declared pydantic field. * Ormar provides now a meaningful examples in openapi schema, including nested models. The same algorithm is used to iterate related models without looks as with `dict()` and `select/load_all`. Examples appear also in `fastapi`. 
[#157](https://github.com/collerek/ormar/issues/157) ## 🐛 Fixes * By default `pydantic` is not validating fields during assignment, which is not a desirable setting for an ORM, now all `ormar.Models` have validation turned-on during assignment (like `model.column = 'value'`) ## 💬 Other * Add connecting to the database in QuickStart in readme [#180](https://github.com/collerek/ormar/issues/180) * OpenAPI schema does no longer include `ormar.Model` docstring as description, instead just model name is provided if you do not provide your own docstring. * Some performance improvements. # 0.10.5 ## 🐛 Fixes * Fix bug in `fastapi-pagination` [#73](https://github.com/uriyyo/fastapi-pagination/issues/73) * Remove unnecessary `Optional` in `List[Optional[T]]` in return value for `QuerySet.all()` and `Querysetproxy.all()` return values [#174](https://github.com/collerek/ormar/issues/174) * Run tests coverage publish only on internal prs instead of all in github action. # 0.10.4 ## ✨ Features * Add **Python style** to `filter` and `order_by` with field access instead of dunder separated strings. 
[#51](https://github.com/collerek/ormar/issues/51) * Accessing a field with attribute access (chain of dot notation) can be used to construct `FilterGroups` (`ormar.and_` and `ormar.or_`) * Field access overloads set of python operators and provide a set of functions to allow same functionality as with dunder separated param names in `**kwargs`, that means that querying from sample model `Track` related to model `Album` now you have more options: * exact - exact match to value, sql `column = ` * OLD: `album__name__exact='Malibu'` * NEW: can be also written as `Track.album.name == 'Malibu` * iexact - exact match sql `column = ` (case insensitive) * OLD: `album__name__iexact='malibu'` * NEW: can be also written as `Track.album.name.iexact('malibu')` * contains - sql `column LIKE '%%'` * OLD: `album__name__contains='Mal'` * NEW: can be also written as `Track.album.name % 'Mal')` * NEW: can be also written as `Track.album.name.contains('Mal')` * icontains - sql `column LIKE '%%'` (case insensitive) * OLD: `album__name__icontains='mal'` * NEW: can be also written as `Track.album.name.icontains('mal')` * in - sql ` column IN (, , ...)` * OLD: `album__name__in=['Malibu', 'Barclay']` * NEW: can be also written as `Track.album.name << ['Malibu', 'Barclay']` * NEW: can be also written as `Track.album.name.in_(['Malibu', 'Barclay'])` * isnull - sql `column IS NULL` (and sql `column IS NOT NULL`) * OLD: `album__name__isnull=True` (isnotnull `album__name__isnull=False`) * NEW: can be also written as `Track.album.name >> None` * NEW: can be also written as `Track.album.name.isnull(True)` * NEW: not null can be also written as `Track.album.name.isnull(False)` * NEW: not null can be also written as `~(Track.album.name >> None)` * NEW: not null can be also written as `~(Track.album.name.isnull(True))` * gt - sql `column > ` (greater than) * OLD: `position__gt=3` * NEW: can be also written as `Track.album.name > 3` * gte - sql `column >= ` (greater or equal than) * OLD: 
`position__gte=3` * NEW: can be also written as `Track.album.name >= 3` * lt - sql `column < ` (lower than) * OLD: `position__lt=3` * NEW: can be also written as `Track.album.name < 3` * lte - sql `column <= ` (lower equal than) * OLD: `position__lte=3` * NEW: can be also written as `Track.album.name <= 3` * startswith - sql `column LIKE '%'` (exact start match) * OLD: `album__name__startswith='Mal'` * NEW: can be also written as `Track.album.name.startswith('Mal')` * istartswith - sql `column LIKE '%'` (case insensitive) * OLD: `album__name__istartswith='mal'` * NEW: can be also written as `Track.album.name.istartswith('mal')` * endswith - sql `column LIKE '%'` (exact end match) * OLD: `album__name__endswith='ibu'` * NEW: can be also written as `Track.album.name.endswith('ibu')` * iendswith - sql `column LIKE '%'` (case insensitive) * OLD: `album__name__iendswith='IBU'` * NEW: can be also written as `Track.album.name.iendswith('IBU')` * You can provide `FilterGroups` not only in `filter()` and `exclude()` but also in: * `get()` * `get_or_none()` * `get_or_create()` * `first()` * `all()` * `delete()` * With `FilterGroups` (`ormar.and_` and `ormar.or_`) you can now use: * `&` - as `and_` instead of next level of nesting * `|` - as `or_' instead of next level of nesting * `~` - as negation of the filter group * To combine groups of filters into one set of conditions use `&` (sql `AND`) and `|` (sql `OR`) ```python # Following queries are equivalent: # sql: ( product.name = 'Test' AND product.rating >= 3.0 ) # ormar OPTION 1 - OLD one Product.objects.filter(name='Test', rating__gte=3.0).get() # ormar OPTION 2 - OLD one Product.objects.filter(ormar.and_(name='Test', rating__gte=3.0)).get() # ormar OPTION 3 - NEW one (field access) Product.objects.filter((Product.name == 'Test') & (Product.rating >=3.0)).get() ``` * Same applies to nested complicated filters ```python # Following queries are equivalent: # sql: ( product.name = 'Test' AND product.rating >= 3.0 ) # OR 
(categories.name IN ('Toys', 'Books')) # ormar OPTION 1 - OLD one Product.objects.filter(ormar.or_( ormar.and_(name='Test', rating__gte=3.0), categories__name__in=['Toys', 'Books']) ).get() # ormar OPTION 2 - NEW one (instead of nested or use `|`) Product.objects.filter( ormar.and_(name='Test', rating__gte=3.0) | ormar.and_(categories__name__in=['Toys', 'Books']) ).get() # ormar OPTION 3 - NEW one (field access) Product.objects.filter( ((Product.name == 'Test') & (Product.rating >= 3.0)) | (Product.categories.name << ['Toys', 'Books']) ).get() ``` * Now you can also use field access to provide OrderActions to `order_by()` * Order ascending: * OLD: `Product.objects.order_by("name").all()` * NEW: `Product.objects.order_by(Product.name.asc()).all()` * Order descending: * OLD: `Product.objects.order_by("-name").all()` * NEW: `Product.objects.order_by(Product.name.desc()).all()` * You can of course also combine different models and many order_bys: `Product.objects.order_by([Product.category.name.asc(), Product.name.desc()]).all()` ## 🐛 Fixes * Not really a bug but rather inconsistency. Providing a filter with nested model i.e. `album__category__name = 'AA'` is checking if album and category models are included in `select_related()` and if not it's auto-adding them there. The same functionality was not working for `FilterGroups` (`and_` and `or_`), now it works (also for python style filters which return `FilterGroups`). # 0.10.3 ## ✨ Features * `ForeignKey` and `ManyToMany` now support `skip_reverse: bool = False` flag [#118](https://github.com/collerek/ormar/issues/118). 
If you set `skip_reverse` flag internally the field is still registered on the other side of the relationship so you can: * `filter` by related models fields from reverse model * `order_by` by related models fields from reverse model But you cannot: * access the related field from reverse model with `related_name` * even if you `select_related` from reverse side of the model the returned models won't be populated in reversed instance (the join is not prevented so you still can `filter` and `order_by`) * the relation won't be populated in `dict()` and `json()` * you cannot pass the nested related objects when populating from `dict()` or `json()` (also through `fastapi`). It will be either ignored or raise error depending on `extra` setting in pydantic `Config`. * `Model.save_related()` now can save whole data tree in once [#148](https://github.com/collerek/ormar/discussions/148) meaning: * it knows if it should save main `Model` or related `Model` first to preserve the relation * it saves main `Model` if * it's not `saved`, * has no `pk` value * or `save_all=True` flag is set in those cases you don't have to split save into two calls (`save()` and `save_related()`) * it supports also `ManyToMany` relations * it supports also optional `Through` model values for m2m relations * Add possibility to customize `Through` model relation field names. * By default `Through` model relation names default to related model name in lowercase. So in example like this: ```python ... 
# course declaration omitted class Student(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) courses = ormar.ManyToMany(Course) # will produce default Through model like follows (example simplified) class StudentCourse(ormar.Model): class Meta: database = database metadata = metadata tablename = "students_courses" id: int = ormar.Integer(primary_key=True) student = ormar.ForeignKey(Student) # default name course = ormar.ForeignKey(Course) # default name ``` * To customize the names of fields/relation in Through model now you can use new parameters to `ManyToMany`: * `through_relation_name` - name of the field leading to the model in which `ManyToMany` is declared * `through_reverse_relation_name` - name of the field leading to the model to which `ManyToMany` leads to Example: ```python ... # course declaration omitted class Student(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) courses = ormar.ManyToMany(Course, through_relation_name="student_id", through_reverse_relation_name="course_id") # will produce default Through model like follows (example simplified) class StudentCourse(ormar.Model): class Meta: database = database metadata = metadata tablename = "students_courses" id: int = ormar.Integer(primary_key=True) student_id = ormar.ForeignKey(Student) # set by through_relation_name course_id = ormar.ForeignKey(Course) # set by through_reverse_relation_name ``` ## 🐛 Fixes * Fix weakref `ReferenceError` error [#118](https://github.com/collerek/ormar/issues/118) * Fix error raised by Through fields when pydantic `Config.extra="forbid"` is set * Fix bug with `pydantic.PrivateAttr` not being initialized at `__init__` [#149](https://github.com/collerek/ormar/issues/149) * Fix bug with pydantic-type `exclude` in `dict()` with `__all__` key not working ## 💬 Other * 
Introduce link to `sqlalchemy-to-ormar` auto-translator for models * Provide links to fastapi ecosystem libraries that support `ormar` * Add transactions to docs (supported with `databases`) # 0.10.2 ## ✨ Features * `Model.save_related(follow=False)` now accepts also two additional arguments: `Model.save_related(follow=False, save_all=False, exclude=None)`. * `save_all:bool` -> By default (so with `save_all=False`) `ormar` only upserts models that are not saved (so new or updated ones), with `save_all=True` all related models are saved, regardless of `saved` status, which might be useful if updated models come from api call, so are not changed in the backend. * `exclude: Union[Set, Dict, None]` -> set/dict of relations to exclude from save, those relations won't be saved even with `follow=True` and `save_all=True`. To exclude nested relations pass a nested dictionary like: `exclude={"child":{"sub_child": {"exclude_sub_child_relation"}}}`. The allowed values follow the `fields/exclude_fields` (from `QuerySet`) methods schema so when in doubt you can refer to docs in queries -> selecting subset of fields -> fields. * `Model.update()` method now accepts `_columns: List[str] = None` parameter, that accepts list of column names to update. If passed only those columns will be updated in database. Note that `update()` does not refresh the instance of the Model, so if you change more columns than you pass in `_columns` list your Model instance will have different values than the database! * `Model.dict()` method previously included only directly related models or nested models if they were not nullable and not virtual, now all related models not previously visited without loops are included in `dict()`. This should be not breaking as just more data will be dumped to dict, but it should not be missing. 
* `QuerySet.delete(each=False, **kwargs)` previously required that you either pass a `filter` (by `**kwargs` or as a separate `filter()` call) or set `each=True` now also accepts `exclude()` calls that generates NOT filter. So either `each=True` needs to be set to delete whole table or at least one of `filter/exclude` clauses. * Same thing applies to `QuerySet.update(each=False, **kwargs)` which also previously required that you either pass a `filter` (by `**kwargs` or as a separate `filter()` call) or set `each=True` now also accepts `exclude()` calls that generates NOT filter. So either `each=True` needs to be set to update whole table or at least one of `filter/exclude` clauses. * Same thing applies to `QuerysetProxy.update(each=False, **kwargs)` which also previously required that you either pass a `filter` (by `**kwargs` or as a separate `filter()` call) or set `each=True` now also accepts `exclude()` calls that generates NOT filter. So either `each=True` needs to be set to update whole table or at least one of `filter/exclude` clauses. ## 🐛 Fixes * Fix improper relation field resolution in `QuerysetProxy` if fk column has different database alias. * Fix hitting recursion error with very complicated models structure with loops when calling `dict()`. * Fix bug when two non-relation fields were merged (appended) in query result when they were not relation fields (i.e. JSON) * Fix bug when during translation to dict from list the same relation name is used in chain but leads to different models * Fix bug when bulk_create would try to save also `property_field` decorated methods and `pydantic` fields * Fix wrong merging of deeply nested chain of reversed relations ## 💬 Other * Performance optimizations * Split tests into packages based on tested area # 0.10.1 ## Features * add `get_or_none(**kwargs)` method to `QuerySet` and `QuerysetProxy`. 
It is exact equivalent of `get(**kwargs)` but instead of raising `ormar.NoMatch` exception if there is no db record matching the criteria, `get_or_none` simply returns `None`. ## Fixes * Fix dialect dependent quoting of column and table names in order_by clauses not working properly in postgres. # 0.10.0 ## Breaking * Dropped support for long deprecated notation of field definition in which you use ormar fields as type hints i.e. `test_field: ormar.Integer() = None` * Improved type hints -> `mypy` can properly resolve related models fields (`ForeignKey` and `ManyToMany`) as well as return types of `QuerySet` methods. Those mentioned are now returning proper model (i.e. `Book`) instead of `ormar.Model` type. There is still a problem with reverse sides of relation and `QuerysetProxy` methods, to ease type hints now those return `Any`. Partially fixes #112. ## Features * add `select_all(follow: bool = False)` method to `QuerySet` and `QuerysetProxy`. It is kind of equivalent of the Model's `load_all()` method but can be used directly in a query. By default `select_all()` adds only directly related models, with `follow=True` also related models of related models are added without loops in relations. Note that it's not an end `async` method so you still have to issue `get()`, `all()` etc. as `select_all()` returns a QuerySet (or proxy) like `fields()` or `order_by()`. ## Internals * `ormar` fields are no longer stored as classes in `Meta.model_fields` dictionary but instead they are stored as instances. # 0.9.9 ## Features * Add possibility to change default ordering of relations and models. 
* To change model sorting pass `orders_by = [columns]` where `columns: List[str]` to model `Meta` class * To change relation order_by pass `orders_by = [columns]` where `columns: List[str]` * To change reverse relation order_by pass `related_orders_by = [columns]` where `columns: List[str]` * Arguments can be column names or `-{col_name}` to sort descending * In relations you can sort only by directly related model columns or for `ManyToMany` columns also `Through` model columns `"{through_field_name}__{column_name}"` * Order in which order_by clauses are applied is as follows: * Explicitly passed `order_by()` calls in query * Relation passed `orders_by` if exists * Model `Meta` class `orders_by` * Model primary key column asc (fallback, used if none of above provided) * Add 4 new aggregated functions -> `min`, `max`, `sum` and `avg` that are their corresponding sql equivalents. * You can pass one or many column names including related columns. * As of now each column passed is aggregated separately (so `sum(col1+col2)` is not possible, you can have `sum(col1, col2)` and later add 2 returned sums in python) * You cannot `sum` and `avg` non numeric columns * If you aggregate on one column, the single value is directly returned as a result * If you aggregate on multiple columns a dictionary with column: result pairs is returned * Add 4 new signals -> `pre_relation_add`, `post_relation_add`, `pre_relation_remove` and `post_relation_remove` * The newly added signals are emitted for `ManyToMany` relations (both sides) and reverse side of `ForeignKey` relation (same as `QuerysetProxy` is exposed). 
* Signals receive following args: `sender: Type[Model]` - sender class, `instance: Model` - instance to which related model is added, `child: Model` - model being added, `relation_name: str` - name of the relation to which child is added, for add signals also `passed_kwargs: Dict` - dict of kwargs passed to `add()` ## Changes * `Through` models for ManyToMany relations are now instantiated on creation, deletion and update, so you can provide not only autoincrement int as a primary key but any column type with default function provided. * Since `Through` models are now instantiated you can also subscribe to `Through` model pre/post save/update/delete signals * `pre_update` signal receivers now get also passed_args argument which is a dict of values passed to update function if any (else empty dict) ## Fixes * `pre_update` signal now is sent before the extraction of values so you can modify the passed instance in place and modified fields values will be reflected in database * `bulk_update` now works correctly also with `UUID` primary key column type # 0.9.8 ## Features * Add possibility to encrypt the selected field(s) in the database * As minimum you need to provide `encrypt_secret` and `encrypt_backend` * `encrypt_backend` can be one of the `ormar.EncryptBackends` enum (`NONE, FERNET, HASH, CUSTOM`) - default: `NONE` * When custom backend is selected you need to provide your backend class that subclasses `ormar.fields.EncryptBackend` * You cannot encrypt `primary_key` column and relation columns (FK and M2M). * Provided are 2 backends: HASH and FERNET * HASH is a one-way hash (like for password), never decrypted on retrieval * FERNET is a two-way encrypt/decrypt backend * Note that in FERNET backend you lose `filtering` possibility altogether as part of the encrypted value is a timestamp. 
* Note that in HASH backend you can filter by full value but filters like `contain` will not work as comparison is made on encrypted values * Note that adding `encrypt_backend` changes the database column type to `TEXT`, which needs to be reflected in db either by migration or manual change ## Fixes * (Advanced/ Internal) Restore custom sqlalchemy types (by `types.TypeDecorator` subclass) functionality that ceased working so `process_result_value` was never called # 0.9.7 ## Features * Add `isnull` operator to filter and exclude methods. ```python album__name__isnull=True #(sql: album.name is null) album__name__isnull=False #(sql: album.name is not null) ``` * Add `ormar.or_` and `ormar.and_` functions that can be used to compose complex queries with nested conditions. Sample query: ```python books = ( await Book.objects.select_related("author") .filter( ormar.and_( ormar.or_(year__gt=1960, year__lt=1940), author__name="J.R.R. Tolkien", ) ) .all() ) ``` Check the updated docs in Queries -> Filtering and sorting -> Complex filters ## Other * Setting default on `ForeignKey` or `ManyToMany` raises a `ModelDefinition` exception as it is (and was) not supported # 0.9.6 ## Important * `Through` model for `ManyToMany` relations now **becomes optional**. It's not a breaking change since if you provide it everything works just fine as it used to. So if you don't want or need any additional fields on `Through` model you can skip it. Note that it's going to be created for you automatically and still has to be included, for example, in `alembic` migrations. If you want to delete existing one check the default naming convention to adjust your existing database structure. Note that you still need to provide it if you want to customize the `Through` model name or the database table name. ## Features * Add `update` method to `QuerysetProxy` so now it's possible to update related models directly from parent model in `ManyToMany` relations and in reverse `ForeignKey` relations. 
Note that update like in `QuerySet` `update` returns number of updated models and **does not update related models in place** on parent model. To get the refreshed data on parent model you need to refresh the related models (i.e. `await model_instance.related.all()`) * Add `load_all(follow=False, exclude=None)` model method that allows to load current instance of the model with all related models in one call. By default it loads only directly related models but setting `follow=True` causes traversing the tree (avoiding loops). You can also pass `exclude` parameter that works the same as `QuerySet.exclude_fields()` method. * Added possibility to add more fields on `Through` model for `ManyToMany` relationships: * name of the through model field is the lowercase name of the Through class * you can pass additional fields when calling `add(child, **kwargs)` on relation (on `QuerysetProxy`) * you can pass additional fields when calling `create(**kwargs)` on relation (on `QuerysetProxy`) when one of the keyword arguments should be the through model name with a dict of values * you can order by on through model fields * you can filter on through model fields * you can include and exclude fields on through models * through models are attached only to related models (i.e. if you query from A to B -> only on B) * note that through models are explicitly loaded without relations -> relation is already populated in ManyToMany field. 
* note that just like before you cannot declare the relation fields on through model, they will be populated for you by `ormar`, but now if you try to do so `ModelDefinitionError` will be thrown * check the updated ManyToMany relation docs for more information # Other * Updated docs and api docs * Refactors and optimisations mainly related to filters, exclusions and order bys # 0.9.5 ## Fixes * Fix creation of `pydantic` FieldInfo after update of `pydantic` to version >=1.8 * Pin required dependency versions to avoid such situations in the future # 0.9.4 ## Fixes * Fix `fastapi` OpenAPI schema generation for automatic docs when multiple models refer to the same related one # 0.9.3 ## Fixes * Fix `JSON` field being double escaped when setting value after initialization * Fix `JSON` field not respecting `nullable` field setting due to `pydantic` internals * Fix `choices` verification for `JSON` field * Fix `choices` not being verified when setting the attribute after initialization * Fix `choices` not being verified during `update` call from `QuerySet` # 0.9.2 ## Other * Updated the Quick Start in docs/readme * Updated docs with links to queries subpage * Added badges for code climate and pepy downloads # 0.9.1 ## Features * Add choices values to `OpenAPI` specs, so it looks like native `Enum` field in the result schema. ## Fixes * Fix `choices` behavior with `fastapi` usage when special fields can be not initialized yet but passed as strings etc. # 0.9.0 ## Important * **Breaking Fix:** Version 0.8.0 introduced a bug that prevents generation of foreign_keys constraint in the database, both in alembic and during creation through sqlalchemy.engine, this is fixed now. * **THEREFORE IF YOU USE VERSION >=0.8.0 YOU ARE STRONGLY ADVISED TO UPDATE** cause despite that most of the `ormar` functions are working your database **CREATED with ormar (or ormar + alembic)** does not have relations and suffer from perspective of performance and data integrity. 
* If you were using `ormar` to connect to existing database your performance and integrity should be fine nevertheless you should update to reflect all future schema updates in your models. ## Breaking * **Breaking:** All foreign_keys and unique constraints now have a name so `alembic` can identify them in db and not depend on db * **Breaking:** During model construction if `Meta` class of the `Model` does not include `metadata` or `database` now `ModelDefinitionError` will be raised instead of generic `AttributeError`. * **Breaking:** `encode/databases` used for running the queries does not have a connection pool for sqlite backend, meaning that each querry is run with a new connection and there is no way to enable enforcing ForeignKeys constraints as those are by default turned off on every connection. This is changed in `ormar` since >=0.9.0 and by default each sqlite3 query has `"PRAGMA foreign_keys=1;"` run so now each sqlite3 connection by default enforces ForeignKey constraints including cascades. ## Other * Update api docs. * Add tests for fk creation in db and for cascades in db # 0.8.1 ## Features * Introduce processing of `ForwardRef` in relations. Now you can create self-referencing models - both `ForeignKey` and `ManyToMany` relations. `ForwardRef` can be used both for `to` and `through` `Models`. * Introduce the possibility to perform two **same relation** joins in one query, so to process complex relations like: ``` B = X = Y // A \ C = X = Y <= before you could link from X to Y only once in one query unless two different relation were used (two relation fields with different names) ``` * Introduce the `paginate` method that allows to limit/offset by `page` and `page_size`. Available for `QuerySet` and `QuerysetProxy`. ## Other * Refactoring and performance optimization in queries and joins. * Add python 3.9 to tests and pypi setup. * Update API docs and docs -> i.e. split of queries documentation. 
# 0.8.0 ## Breaking * **Breaking:** `remove()` parent from child side in reverse ForeignKey relation now requires passing a relation `name`, as the same model can be registered multiple times and `ormar` needs to know from which relation on the parent you want to remove the child. * **Breaking:** applying `limit` and `offset` with `select_related` is by default applied only on the main table before the join -> meaning that not the total number of rows is limited but just number of main models (first one in the query, the one used to construct it). You can still limit all rows from db response with `limit_raw_sql=True` flag on either `limit` or `offset` (or both) * **Breaking:** issuing `first()` now fetches the first row ordered by the primary key asc (so first one inserted (can be different for non number primary keys - i.e. alphabetical order of string)) * **Breaking:** issuing `get()` **without any filters** now fetches the first row ordered by the primary key desc (so should be last one inserted (can be different for non number primary keys - i.e. alphabetical order of string)) * **Breaking (internal):** sqlalchemy columns kept at `Meta.columns` are no longer bind to table, so you cannot get the column straight from there ## Features * Introduce **inheritance**. For now two types of inheritance are possible: * **Mixins** - don't subclass `ormar.Model`, just define fields that are later used on different models (like `created_date` and `updated_date` on each child model), only actual models create tables, but those fields from mixins are added * **Concrete table inheritance** - means that parent is marked as `abstract=True` in Meta class and each child has its own table with columns from the parent and own child columns, kind of similar to Mixins but parent also is a (an abstract) Model * To read more check the docs on models -> inheritance section. 
* QuerySet `first()` can be used with `prefetch_related` ## Fixes * Fix minor bug in `order_by` for primary model order bys * Fix in `prefetch_query` for multiple related_names for the same model. * Fix using same `related_name` on different models leading to the same related `Model` overwriting each other, now `ModelDefinitionError` is raised and you need to change the name. * Fix `order_by` overwriting conditions when multiple joins to the same table applied. ## Docs * Split and cleanup in docs: * Divide models section into subsections * Divide relations section into subsections * Divide fields section into subsections * Add model inheritance section * Add API (BETA) documentation # 0.7.5 * Fix for wrong relation column name in many_to_many relation joins (fix [#73][#73]) # 0.7.4 * Allow multiple relations to the same related model/table. * Fix for wrong relation column used in many_to_many relation joins (fix [#73][#73]) * Fix for wrong relation population for m2m relations when also fk relation present for same model. * Add check if user provide related_name if there are multiple relations to same table on one model. * More eager cleaning of the dead weak proxy models. 
# 0.7.3 * Fix for setting fetching related model with UUDI pk, which is a string in raw (fix [#71][#71]) # 0.7.2 * Fix for overwriting related models with pk only in `Model.update() with fields passed as parameters` (fix [#70][#70]) # 0.7.1 * Fix for overwriting related models with pk only in `Model.save()` (fix [#68][#68]) # 0.7.0 * **Breaking:** QuerySet `bulk_update` method now raises `ModelPersistenceError` for unsaved models passed instead of `QueryDefinitionError` * **Breaking:** Model initialization with unknown field name now raises `ModelError` instead of `KeyError` * Added **Signals**, with pre-defined list signals and decorators: `post_delete`, `post_save`, `post_update`, `pre_delete`, `pre_save`, `pre_update` * Add `py.typed` and modify `setup.py` for mypy support * Performance optimization * Updated docs # 0.6.2 * Performance optimization * Fix for bug with `pydantic_only` fields being required * Add `property_field` decorator that registers a function as a property that will be included in `Model.dict()` and in `fastapi` response * Update docs # 0.6.1 * Explicitly set None to excluded nullable fields to avoid pydantic setting a default value (fix [#60][#60]). 
# 0.6.0 * **Breaking:** calling instance.load() when the instance row was deleted from db now raises `NoMatch` instead of `ValueError` * **Breaking:** calling add and remove on ReverseForeignKey relation now updates the child model in db setting/removing fk column * **Breaking:** ReverseForeignKey relation now exposes QuerySetProxy API like ManyToMany relation * **Breaking:** querying related models from ManyToMany cleans list of related models loaded on parent model: * Example: `post.categories.first()` will set post.categories to list of 1 related model -> the one returned by first() * Example 2: if post has 4 categories so `len(post.categories) == 4` calling `post.categories.limit(2).all()` -> will load only 2 children and now `assert len(post.categories) == 2` * Added `get_or_create`, `update_or_create`, `fields`, `exclude_fields`, `exclude`, `prefetch_related` and `order_by` to QuerySetProxy so now you can use those methods directly from relation * Update docs # 0.5.5 * Fix for alembic autogenaration of migration `UUID` columns. It should just produce sqlalchemy `CHAR(32)` or `CHAR(36)` * In order for this to work you have to set user_module_prefix='sa.' (must be equal to sqlalchemy_module_prefix option (default 'sa.')) # 0.5.4 * Allow to pass `uuid_format` (allowed 'hex'(default) or 'string') to `UUID` field to change the format in which it's saved. By default field is saved in hex format (trimmed to 32 chars (without dashes)), but you can pass format='string' to use 36 (with dashes) instead to adjust to existing db or other libraries. Sample: * hex value = c616ab438cce49dbbf4380d109251dce * string value = c616ab43-8cce-49db-bf43-80d109251dce # 0.5.3 * Fixed bug in `Model.dict()` method that was ignoring exclude parameter and not include dictionary argument. # 0.5.2 * Added `prefetch_related` method to load subsequent models in separate queries. 
* Update docs # 0.5.1 * Switched to github actions instead of travis * Update badges in the docs # 0.5.0 * Added save status -> you can check if model is saved with `ModelInstance.saved` property * Model is saved after `save/update/load/upsert` method on model * Model is saved after `create/get/first/all/get_or_create/update_or_create` method * Model is saved when passed to `bulk_update` and `bulk_create` * Model is saved after adding/removing `ManyToMany` related objects (through model instance auto saved/deleted) * Model is **not** saved after change of any own field (including pk as `Model.pk` alias) * Model is **not** saved after adding/removing `ForeignKey` related object (fk column not saved) * Model is **not** saved after instantation with `__init__` (w/o `QuerySet.create` or before calling `save`) * Added `Model.upsert(**kwargs)` that performs `save()` if pk not set otherwise `update(**kwargs)` * Added `Model.save_related(follow=False)` that iterates all related objects in all relations and checks if they are saved. If not it calls `upsert()` on each of them. 
* **Breaking:** added raising exceptions if `add`-ing/`remove`-ing not saved (pk is None) models to `ManyToMany` relation * Allow passing dictionaries and sets to fields and exclude_fields * Auto translate str and lists to dicts for fields and exclude_fields * **Breaking:** passing nested models to fields and exclude_fields is now by related ForeignKey name and not by target model name * Performance optimizations - in modelproxy, newbasemodel - > less queries, some properties are cached on models * Cleanup of unused relations code * Optional performance dependency orjson added (**strongly recommended**) * Updated docs # 0.4.4 * add exclude_fields() method to exclude fields from sql * refactor column names setting (aliases) * fix ordering by for column with aliases * additional tests for fields and exclude_fields * update docs # 0.4.3 * include properties in models.dict() and model.json() # 0.4.2 * modify creation of pydantic models to allow returning related models with only pk populated # 0.4.1 * add order_by method to queryset to allow sorting * update docs # 0.4.0 * Changed notation in Model definition -> now use name = ormar.Field() not name: ormar.Field() * Note that old notation is still supported but deprecated and will not play nice with static checkers like mypy and pydantic pycharm plugin * Type hint docs and test * Use mypy for tests also not, only ormar package * Fix scale and precision translation with max_digits and decimal_places pydantic Decimal field * Update docs - add best practices for dependencies * Refactor metaclass and model_fields to play nice with type hints * Add mypy and pydantic plugin to docs * Expand the docs on ManyToMany relation # 0.3.11 * Fix setting server_default as default field value in python # 0.3.10 * Fix postgresql check to avoid exceptions with drivers not installed if using different backend # 0.3.9 * Fix json schema generation as of [#19][#19] * Fix for not initialized ManyToMany relations in fastapi copies of 
ormar.Models * Update docs in regard of fastapi use * Add tests to verify fastapi/docs proper generation # 0.3.8 * Added possibility to provide alternative database column names with name parameter to all fields. * Fix bug with selecting related ManyToMany fields with `fields()` if they are empty. * Updated documentation # 0.3.7 * Publish documentation and update readme # 0.3.6 * Add fields() method to limit the selected columns from database - only nullable columns can be excluded. * Added UniqueColumns and constraints list in model Meta to build unique constraints on list of columns. * Added UUID field type based on Char(32) column type. # 0.3.5 * Added bulk_create and bulk_update for operations on multiple objects. # 0.3.4 Add queryset level methods * delete * update * get_or_create * update_or_create # 0.3.3 * Add additional filters - startswith and endswith # 0.3.2 * Add choices parameter to all fields - limiting the accepted values to ones provided # 0.3.1 * Added exclude to filter where not conditions. * Added tests for mysql and postgres with fixes for postgres. * Rafactors and cleanup. # 0.3.0 * Added ManyToMany field and support for many to many relations [#19]: https://github.com/collerek/ormar/issues/19 [#60]: https://github.com/collerek/ormar/issues/60 [#68]: https://github.com/collerek/ormar/issues/68 [#70]: https://github.com/collerek/ormar/issues/70 [#71]: https://github.com/collerek/ormar/issues/71 [#73]: https://github.com/collerek/ormar/issues/73 ormar-0.12.2/docs/signals.md000066400000000000000000000231701444363446500156460ustar00rootroot00000000000000# Signals Signals are a mechanism to fire your piece of code (function / method) whenever given type of event happens in `ormar`. To achieve this you need to register your receiver for a given type of signal for selected model(s). 
## Defining receivers Given a sample model like following: ```Python import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) is_best_seller: bool = ormar.Boolean(default=False) play_count: int = ormar.Integer(default=0) ``` You can for example define a trigger that will set `album.is_best_seller` status if it will be played more than 50 times. Import `pre_update` decorator, for list of currently available decorators/ signals check below. ```Python hl_lines="1" --8<-- "../docs_src/signals/docs002.py" ``` Define your function. Note that each receiver function: * has to be **callable** * has to accept first **`sender`** argument that receives the class of sending object * has to accept **`**kwargs`** argument as the parameters send in each `ormar.Signal` can change at any time so your function has to serve them. * has to be **`async`** cause callbacks are gathered and awaited. `pre_update` currently sends only one argument apart from `sender` and it's `instance` one. Note how `pre_update` decorator accepts a `senders` argument that can be a single model or a list of models, for which you want to run the signal receiver. Currently there is no way to set signal for all models at once without explicitly passing them all into registration of receiver. ```Python hl_lines="4-7" --8<-- "../docs_src/signals/docs002.py" ``` !!!note Note that receivers are defined on a class level -> so even if you connect/disconnect function through instance it will run/ stop running for all operations on that `ormar.Model` class. Note that our newly created function has instance and class of the instance so you can easily run database queries inside your receivers if you want to. 
```Python hl_lines="15-22" --8<-- "../docs_src/signals/docs002.py" ``` You can define same receiver for multiple models at once by passing a list of models to signal decorator. ```python # define a dummy debug function @pre_update([Album, Track]) async def before_update(sender, instance, **kwargs): print(f"{sender.get_name()}: {instance.json()}: {kwargs}") ``` Of course you can also create multiple functions for the same signal and model. Each of them will run at each signal. ```python @pre_update(Album) async def before_update(sender, instance, **kwargs): print(f"{sender.get_name()}: {instance.json()}: {kwargs}") @pre_update(Album) async def before_update2(sender, instance, **kwargs): print(f'About to update {sender.get_name()} with pk: {instance.pk}') ``` Note that `ormar` decorators are the syntactic sugar, you can directly connect your function or method for given signal for given model. Connect accept only one parameter - your `receiver` function / method. ```python hl_lines="11 13 16" class AlbumAuditor: def __init__(self): self.event_type = "ALBUM_INSTANCE" async def before_save(self, sender, instance, **kwargs): await AuditLog( event_type=f"{self.event_type}_SAVE", event_log=instance.json() ).save() auditor = AlbumAuditor() pre_save(Album)(auditor.before_save) # call above has same result like the one below Album.Meta.signals.pre_save.connect(auditor.before_save) # signals are also exposed on instance album = Album(name='Miami') album.signals.pre_save.connect(auditor.before_save) ``` !!!warning Note that signals keep the reference to your receiver (not a `weakref`) so keep that in mind to avoid circular references. ## Disconnecting the receivers To disconnect the receiver and stop it for running for given model you need to disconnect it. 
```python hl_lines="7 10" @pre_update(Album) async def before_update(sender, instance, **kwargs): if instance.play_count > 50 and not instance.is_best_seller: instance.is_best_seller = True # disconnect given function from signal for given Model Album.Meta.signals.pre_save.disconnect(before_save) # signals are also exposed on instance album = Album(name='Miami') album.signals.pre_save.disconnect(before_save) ``` ## Available signals !!!warning Note that signals are **not** send for: * bulk operations (`QuerySet.bulk_create` and `QuerySet.bulk_update`) as they are designed for speed. * queryset table level operations (`QuerySet.update` and `QuerySet.delete`) as they run on the underlying tables (more lak raw sql update/delete operations) and do not have specific instance. ### pre_save `pre_save(sender: Type["Model"], instance: "Model")` Send for `Model.save()` and `Model.objects.create()` methods. `sender` is a `ormar.Model` class and `instance` is the model to be saved. ### post_save `post_save(sender: Type["Model"], instance: "Model")` Send for `Model.save()` and `Model.objects.create()` methods. `sender` is a `ormar.Model` class and `instance` is the model that was saved. ### pre_update `pre_update(sender: Type["Model"], instance: "Model")` Send for `Model.update()` method. `sender` is a `ormar.Model` class and `instance` is the model to be updated. ### post_update `post_update(sender: Type["Model"], instance: "Model")` Send for `Model.update()` method. `sender` is a `ormar.Model` class and `instance` is the model that was updated. ### pre_delete `pre_delete(sender: Type["Model"], instance: "Model")` Send for `Model.save()` and `Model.objects.create()` methods. `sender` is a `ormar.Model` class and `instance` is the model to be deleted. ### post_delete `post_delete(sender: Type["Model"], instance: "Model")` Send for `Model.update()` method. `sender` is a `ormar.Model` class and `instance` is the model that was deleted. 
### pre_relation_add `pre_relation_add(sender: Type["Model"], instance: "Model", child: "Model", relation_name: str, passed_args: Dict)` Send for `Model.relation_name.add()` method for `ManyToMany` relations and reverse side of `ForeignKey` relation. `sender` - sender class, `instance` - instance to which related model is added, `child` - model being added, `relation_name` - name of the relation to which child is added, for add signals also `passed_kwargs` - dict of kwargs passed to `add()` ### post_relation_add `post_relation_add(sender: Type["Model"], instance: "Model", child: "Model", relation_name: str, passed_args: Dict)` Send for `Model.relation_name.add()` method for `ManyToMany` relations and reverse side of `ForeignKey` relation. `sender` - sender class, `instance` - instance to which related model is added, `child` - model being added, `relation_name` - name of the relation to which child is added, for add signals also `passed_kwargs` - dict of kwargs passed to `add()` ### pre_relation_remove `pre_relation_remove(sender: Type["Model"], instance: "Model", child: "Model", relation_name: str)` Send for `Model.relation_name.remove()` method for `ManyToMany` relations and reverse side of `ForeignKey` relation. `sender` - sender class, `instance` - instance to which related model is added, `child` - model being added, `relation_name` - name of the relation to which child is added. ### post_relation_remove `post_relation_remove(sender: Type["Model"], instance: "Model", child: "Model", relation_name: str, passed_args: Dict)` Send for `Model.relation_name.remove()` method for `ManyToMany` relations and reverse side of `ForeignKey` relation. `sender` - sender class, `instance` - instance to which related model is added, `child` - model being added, `relation_name` - name of the relation to which child is added. 
### post_bulk_update `post_bulk_update(sender: Type["Model"], instances: List["Model"], **kwargs)`, Send for `Model.objects.bulk_update(List[objects])` method. ## Defining your own signals Note that you can create your own signals although you will have to send them manually in your code or subclass `ormar.Model` and trigger your signals there. Creating new signal is super easy. Following example will set a new signal with name your_custom_signal. ```python hl_lines="21" import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) is_best_seller: bool = ormar.Boolean(default=False) play_count: int = ormar.Integer(default=0) Album.Meta.signals.your_custom_signal = ormar.Signal() Album.Meta.signals.your_custom_signal.connect(your_receiver_name) ``` Actually under the hood signal is a `SignalEmitter` instance that keeps a dictionary of know signals, and allows you to access them as attributes. When you try to access a signal that does not exist `SignalEmitter` will create one for you. So example above can be simplified to. The `Signal` will be created for you. ``` Album.Meta.signals.your_custom_signal.connect(your_receiver_name) ``` Now to trigger this signal you need to call send method of the Signal. ```python await Album.Meta.signals.your_custom_signal.send(sender=Album) ``` Note that sender is the only required parameter and it should be ormar Model class. Additional parameters have to be passed as keyword arguments. 
```python await Album.Meta.signals.your_custom_signal.send(sender=Album, my_param=True) ``` ormar-0.12.2/docs/transactions.md000066400000000000000000000041331444363446500167140ustar00rootroot00000000000000# Transactions Database transactions are supported thanks to `encode/databases` which is used to issue async queries. ## Basic usage To use transactions use `database.transaction` as async context manager: ```python async with database.transaction(): # everyting called here will be one transaction await Model1().save() await Model2().save() ... ``` !!!note Note that it has to be the same `database` that the one used in Model's `Meta` class. To avoid passing `database` instance around in your code you can extract the instance from each `Model`. Database provided during declaration of `ormar.Model` is available through `Meta.database` and can be reached from both class and instance. ```python import databases import sqlalchemy import ormar metadata = sqlalchemy.MetaData() database = databases.Database("sqlite:///") class Author(ormar.Model): class Meta: database=database metadata=metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=255) # database is accessible from class database = Author.Meta.database # as well as from instance author = Author(name="Stephen King") database = author.Meta.database ``` You can also use `.transaction()` as a function decorator on any async function: ```python @database.transaction() async def create_users(request): ... ``` Transaction blocks are managed as task-local state. Nested transactions are fully supported, and are implemented using database savepoints. ## Manual commits/ rollbacks For a lower-level transaction API you can trigger it manually ```python transaction = await database.transaction() try: await transaction.start() ... 
except: await transaction.rollback() else: await transaction.commit() ``` ## Testing Transactions can also be useful during testing when you can apply force rollback and you do not have to clean the data after each test. ```python @pytest.mark.asyncio async def sample_test(): async with database: async with database.transaction(force_rollback=True): # your test code here ... ```ormar-0.12.2/docs_src/000077500000000000000000000000001444363446500145305ustar00rootroot00000000000000ormar-0.12.2/docs_src/__init__.py000066400000000000000000000000001444363446500166270ustar00rootroot00000000000000ormar-0.12.2/docs_src/aggregations/000077500000000000000000000000001444363446500172025ustar00rootroot00000000000000ormar-0.12.2/docs_src/aggregations/__init__.py000066400000000000000000000000001444363446500213010ustar00rootroot00000000000000ormar-0.12.2/docs_src/aggregations/docs001.py000066400000000000000000000015201444363446500207230ustar00rootroot00000000000000from typing import Optional import databases import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" order_by = ["-name"] id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Book(ormar.Model): class Meta(BaseMeta): tablename = "books" order_by = ["year", "-ranking"] id: int = ormar.Integer(primary_key=True) author: Optional[Author] = ormar.ForeignKey(Author) title: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) ranking: int = ormar.Integer(nullable=True) 
ormar-0.12.2/docs_src/fastapi/000077500000000000000000000000001444363446500161575ustar00rootroot00000000000000ormar-0.12.2/docs_src/fastapi/__init__.py000066400000000000000000000000001444363446500202560ustar00rootroot00000000000000ormar-0.12.2/docs_src/fastapi/docs001.py000066400000000000000000000036161444363446500177100ustar00rootroot00000000000000from typing import List, Optional import databases import sqlalchemy from fastapi import FastAPI import ormar app = FastAPI() metadata = sqlalchemy.MetaData() database = databases.Database("sqlite:///test.db") app.state.database = database @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() class Category(ormar.Model): class Meta: tablename = "categories" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Item(ormar.Model): class Meta: tablename = "items" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) category: Optional[Category] = ormar.ForeignKey(Category, nullable=True) @app.get("/items/", response_model=List[Item]) async def get_items(): items = await Item.objects.select_related("category").all() return items @app.post("/items/", response_model=Item) async def create_item(item: Item): await item.save() return item @app.post("/categories/", response_model=Category) async def create_category(category: Category): await category.save() return category @app.put("/items/{item_id}") async def get_item(item_id: int, item: Item): item_db = await Item.objects.get(pk=item_id) return await item_db.update(**item.dict()) @app.delete("/items/{item_id}") async def delete_item(item_id: int, item: Item = None): if item: return {"deleted_rows": await 
item.delete()} item_db = await Item.objects.get(pk=item_id) return {"deleted_rows": await item_db.delete()} ormar-0.12.2/docs_src/fastapi/mypy/000077500000000000000000000000001444363446500171555ustar00rootroot00000000000000ormar-0.12.2/docs_src/fastapi/mypy/__init__.py000066400000000000000000000000001444363446500212540ustar00rootroot00000000000000ormar-0.12.2/docs_src/fastapi/mypy/docs001.py000066400000000000000000000005541444363446500207040ustar00rootroot00000000000000import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Course(ormar.Model): class Meta: database = database metadata = metadata id = ormar.Integer(primary_key=True) name = ormar.String(max_length=100) completed = ormar.Boolean(default=False) ormar-0.12.2/docs_src/fields/000077500000000000000000000000001444363446500157765ustar00rootroot00000000000000ormar-0.12.2/docs_src/fields/__init__.py000066400000000000000000000000001444363446500200750ustar00rootroot00000000000000ormar-0.12.2/docs_src/fields/docs001.py000066400000000000000000000017071444363446500175260ustar00rootroot00000000000000from typing import Optional import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Department(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Course(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) department: Optional[Department] = ormar.ForeignKey(Department) department = await Department(name="Science").save() course = Course(name="Math", completed=False, department=department) print(department.courses[0]) # Will produce: # Course(id=None, # name='Math', # completed=False, # 
department=Department(id=None, name='Science')) ormar-0.12.2/docs_src/fields/docs002.py000066400000000000000000000017301444363446500175230ustar00rootroot00000000000000from typing import Optional import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Department(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Course(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) department: Optional[Department] = ormar.ForeignKey(Department, related_name="my_courses") department = Department(name="Science") course = Course(name="Math", completed=False, department=department) print(department.my_courses[0]) # Will produce: # Course(id=None, # name='Math', # completed=False, # department=Department(id=None, name='Science')) ormar-0.12.2/docs_src/fields/docs003.py000066400000000000000000000012421444363446500175220ustar00rootroot00000000000000from typing import Optional import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Department(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Course(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) department: Optional[Department] = ormar.ForeignKey(Department) ormar-0.12.2/docs_src/fields/docs004.py000066400000000000000000000011571444363446500175300ustar00rootroot00000000000000from datetime import datetime import databases import sqlalchemy from sqlalchemy import func, text import 
ormar database = databases.Database("sqlite:///test.db") metadata = sqlalchemy.MetaData() class Product(ormar.Model): class Meta: tablename = "product" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) company: str = ormar.String(max_length=200, server_default="Acme") sort_order: int = ormar.Integer(server_default=text("10")) created: datetime = ormar.DateTime(server_default=func.now()) ormar-0.12.2/docs_src/models/000077500000000000000000000000001444363446500160135ustar00rootroot00000000000000ormar-0.12.2/docs_src/models/__init__.py000066400000000000000000000000001444363446500201120ustar00rootroot00000000000000ormar-0.12.2/docs_src/models/docs001.py000066400000000000000000000005741444363446500175440ustar00rootroot00000000000000import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Course(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) ormar-0.12.2/docs_src/models/docs002.py000066400000000000000000000010501444363446500175330ustar00rootroot00000000000000import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Course(ormar.Model): class Meta: # if you omit this parameter it will be created automatically # as class.__name__.lower()+'s' -> "courses" in this example tablename = "my_courses" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) ormar-0.12.2/docs_src/models/docs003.py000066400000000000000000000016351444363446500175450ustar00rootroot00000000000000import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = 
sqlalchemy.MetaData() class Course(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) print(Course.__fields__) """ Will produce: {'id': ModelField(name='id', type=Optional[int], required=False, default=None), 'name': ModelField(name='name', type=Optional[str], required=False, default=None), 'completed': ModelField(name='completed', type=bool, required=False, default=False)} """ ormar-0.12.2/docs_src/models/docs004.py000066400000000000000000000011201444363446500175330ustar00rootroot00000000000000import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Course(ormar.Model): class Meta(ormar.ModelMeta): # note you don't have to subclass - but it's recommended for ide completion and mypy database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) print(Course.Meta.table.columns) """ Will produce: ['courses.id', 'courses.name', 'courses.completed'] """ ormar-0.12.2/docs_src/models/docs005.py000066400000000000000000000045631444363446500175520ustar00rootroot00000000000000import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Course(ormar.Model): class Meta(ormar.ModelMeta): database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) print({x: v.__dict__ for x, v in Course.Meta.model_fields.items()}) """ Will produce: {'completed': mappingproxy({'autoincrement': False, 'choices': set(), 'column_type': Boolean(), 'default': False, 'index': False, 'name': 'completed', 'nullable': True, 'primary_key': False, 'pydantic_only': False, 'server_default': 
None, 'unique': False}), 'id': mappingproxy({'autoincrement': True, 'choices': set(), 'column_type': Integer(), 'default': None, 'ge': None, 'index': False, 'le': None, 'maximum': None, 'minimum': None, 'multiple_of': None, 'name': 'id', 'nullable': False, 'primary_key': True, 'pydantic_only': False, 'server_default': None, 'unique': False}), 'name': mappingproxy({'allow_blank': False, 'autoincrement': False, 'choices': set(), 'column_type': String(max_length=100), 'curtail_length': None, 'default': None, 'index': False, 'max_length': 100, 'min_length': None, 'name': 'name', 'nullable': False, 'primary_key': False, 'pydantic_only': False, 'regex': None, 'server_default': None, 'strip_whitespace': False, 'unique': False})} """ ormar-0.12.2/docs_src/models/docs006.py000066400000000000000000000012031444363446500175370ustar00rootroot00000000000000import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Course(ormar.Model): class Meta: database = database metadata = metadata # define your constraints in Meta class of the model # it's a list that can contain multiple constraints # hera a combination of name and column will have to be unique in db constraints = [ormar.UniqueColumns("name", "completed")] id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) ormar-0.12.2/docs_src/models/docs007.py000066400000000000000000000010331444363446500175410ustar00rootroot00000000000000import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Course(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) course = Course(name="Painting for dummies", completed=False) await course.save() await 
Course.objects.create(name="Painting for dummies", completed=False) ormar-0.12.2/docs_src/models/docs008.py000066400000000000000000000010431444363446500175430ustar00rootroot00000000000000import databases import sqlalchemy import ormar database = databases.Database("sqlite:///test.db", force_rollback=True) metadata = sqlalchemy.MetaData() class Child(ormar.Model): class Meta: tablename = "children" metadata = metadata database = database id: int = ormar.Integer(name="child_id", primary_key=True) first_name: str = ormar.String(name="fname", max_length=100) last_name: str = ormar.String(name="lname", max_length=100) born_year: int = ormar.Integer(name="year_born", nullable=True) ormar-0.12.2/docs_src/models/docs009.py000066400000000000000000000010711444363446500175450ustar00rootroot00000000000000from typing import Optional import databases import sqlalchemy import ormar from .docs010 import Artist # previous example database = databases.Database("sqlite:///test.db", force_rollback=True) metadata = sqlalchemy.MetaData() class Album(ormar.Model): class Meta: tablename = "music_albums" metadata = metadata database = database id: int = ormar.Integer(name="album_id", primary_key=True) name: str = ormar.String(name="album_name", max_length=100) artist: Optional[Artist] = ormar.ForeignKey(Artist, name="artist_id") ormar-0.12.2/docs_src/models/docs010.py000066400000000000000000000014001444363446500175310ustar00rootroot00000000000000import databases import sqlalchemy import ormar from .docs008 import Child database = databases.Database("sqlite:///test.db", force_rollback=True) metadata = sqlalchemy.MetaData() class ArtistChildren(ormar.Model): class Meta: tablename = "children_x_artists" metadata = metadata database = database class Artist(ormar.Model): class Meta: tablename = "artists" metadata = metadata database = database id: int = ormar.Integer(name="artist_id", primary_key=True) first_name: str = ormar.String(name="fname", max_length=100) last_name: str = 
ormar.String(name="lname", max_length=100) born_year: int = ormar.Integer(name="year") children = ormar.ManyToMany(Child, through=ArtistChildren) ormar-0.12.2/docs_src/models/docs011.py000066400000000000000000000005671444363446500175470ustar00rootroot00000000000000import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Course(ormar.Model): class Meta: database = database metadata = metadata id: ormar.Integer(primary_key=True) name: ormar.String(max_length=100) completed: ormar.Boolean(default=False) c1 = Course()ormar-0.12.2/docs_src/models/docs012.py000066400000000000000000000005541444363446500175440ustar00rootroot00000000000000import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Course(ormar.Model): class Meta: database = database metadata = metadata id = ormar.Integer(primary_key=True) name = ormar.String(max_length=100) completed = ormar.Boolean(default=False) ormar-0.12.2/docs_src/models/docs013.py000066400000000000000000000017421444363446500175450ustar00rootroot00000000000000from typing import Optional import databases import sqlalchemy import ormar database = databases.Database("sqlite:///test.db", force_rollback=True) metadata = sqlalchemy.MetaData() # note that you do not have to subclass ModelMeta, # it's useful for type hints and code completion class MainMeta(ormar.ModelMeta): metadata = metadata database = database class Artist(ormar.Model): class Meta(MainMeta): # note that tablename is optional # if not provided ormar will user class.__name__.lower()+'s' # -> artists in this example pass id: int = ormar.Integer(primary_key=True) first_name: str = ormar.String(max_length=100) last_name: str = ormar.String(max_length=100) born_year: int = ormar.Integer(name="year") class Album(ormar.Model): class Meta(MainMeta): pass id: int = ormar.Integer(primary_key=True) name: str = 
ormar.String(max_length=100) artist: Optional[Artist] = ormar.ForeignKey(Artist) ormar-0.12.2/docs_src/models/docs014.py000066400000000000000000000007051444363446500175440ustar00rootroot00000000000000import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Course(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) non_db_field: str = ormar.String(max_length=100, pydantic_only=True) ormar-0.12.2/docs_src/models/docs015.py000066400000000000000000000007741444363446500175530ustar00rootroot00000000000000import databases import sqlalchemy import ormar from ormar import property_field database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Course(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) @property_field def prefixed_name(self): return "custom_prefix__" + self.name ormar-0.12.2/docs_src/models/docs016.py000066400000000000000000000006561444363446500175530ustar00rootroot00000000000000import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Course(ormar.Model): class Meta: database = database metadata = metadata class Config: allow_mutation = False id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) ormar-0.12.2/docs_src/models/docs017.py000066400000000000000000000012121444363446500175410ustar00rootroot00000000000000import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Course(ormar.Model): class Meta: database = database 
metadata = metadata # define your constraints in Meta class of the model # it's a list that can contain multiple constraints # hera a combination of name and column will have a compound index in the db constraints = [ormar.IndexColumns("name", "completed")] id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) ormar-0.12.2/docs_src/models/docs018.py000066400000000000000000000013521444363446500175470ustar00rootroot00000000000000import datetime import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Course(ormar.Model): class Meta: database = database metadata = metadata # define your constraints in Meta class of the model # it's a list that can contain multiple constraints # hera a combination of name and column will have a level check in the db constraints = [ ormar.CheckColumns("start_time < end_time", name="date_check"), ] id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) start_date: datetime.date = ormar.Date() end_date: datetime.date = ormar.Date() ormar-0.12.2/docs_src/queries/000077500000000000000000000000001444363446500162055ustar00rootroot00000000000000ormar-0.12.2/docs_src/queries/__init__.py000066400000000000000000000000001444363446500203040ustar00rootroot00000000000000ormar-0.12.2/docs_src/queries/docs001.py000066400000000000000000000012661444363446500177350ustar00rootroot00000000000000from typing import Optional import databases import ormar import sqlalchemy database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Album(ormar.Model): class Meta: tablename = "album" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Track(ormar.Model): class Meta: tablename = "track" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) 
album: Optional[Album] = ormar.ForeignKey(Album) title: str = ormar.String(max_length=100) position: int = ormar.Integer() ormar-0.12.2/docs_src/queries/docs002.py000066400000000000000000000017041444363446500177330ustar00rootroot00000000000000import databases import ormar import sqlalchemy database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Book(ormar.Model): class Meta: tablename = "books" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) author: str = ormar.String(max_length=100) genre: str = ormar.String(max_length=100, default='Fiction', choices=['Fiction', 'Adventure', 'Historic', 'Fantasy']) await Book.objects.create(title='Tom Sawyer', author="Twain, Mark", genre='Adventure') await Book.objects.create(title='War and Peace', author="Tolstoy, Leo", genre='Fiction') await Book.objects.create(title='Anna Karenina', author="Tolstoy, Leo", genre='Fiction') await Book.objects.update(each=True, genre='Fiction') all_books = await Book.objects.filter(genre='Fiction').all() assert len(all_books) == 3 ormar-0.12.2/docs_src/queries/docs003.py000066400000000000000000000022621444363446500177340ustar00rootroot00000000000000import databases import ormar import sqlalchemy database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Book(ormar.Model): class Meta: tablename = "books" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) author: str = ormar.String(max_length=100) genre: str = ormar.String(max_length=100, default='Fiction', choices=['Fiction', 'Adventure', 'Historic', 'Fantasy']) await Book.objects.create(title='Tom Sawyer', author="Twain, Mark", genre='Adventure') await Book.objects.create(title='War and Peace', author="Tolstoy, Leo", genre='Fiction') await Book.objects.create(title='Anna Karenina', author="Tolstoy, Leo", genre='Fiction') # if not 
exist the instance will be persisted in db vol2 = await Book.objects.update_or_create(title="Volume II", author='Anonymous', genre='Fiction') assert await Book.objects.count() == 1 # if pk or pkname passed in kwargs (like id here) the object will be updated assert await Book.objects.update_or_create(id=vol2.id, genre='Historic') assert await Book.objects.count() == 1 ormar-0.12.2/docs_src/queries/docs004.py000066400000000000000000000012751444363446500177400ustar00rootroot00000000000000import databases import ormar import sqlalchemy database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class ToDo(ormar.Model): class Meta: tablename = "todos" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) text: str = ormar.String(max_length=500) completed = ormar.Boolean(default=False) # create multiple instances at once with bulk_create await ToDo.objects.bulk_create( [ ToDo(text="Buy the groceries."), ToDo(text="Call Mum.", completed=True), ToDo(text="Send invoices.", completed=True), ] ) todoes = await ToDo.objects.all() assert len(todoes) == 3 ormar-0.12.2/docs_src/queries/docs005.py000066400000000000000000000020651444363446500177370ustar00rootroot00000000000000import databases import ormar import sqlalchemy database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Book(ormar.Model): class Meta: tablename = "books" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) author: str = ormar.String(max_length=100) genre: str = ormar.String(max_length=100, default='Fiction', choices=['Fiction', 'Adventure', 'Historic', 'Fantasy']) await Book.objects.create(title='Tom Sawyer', author="Twain, Mark", genre='Adventure') await Book.objects.create(title='War and Peace in Space', author="Tolstoy, Leo", genre='Fantasy') await Book.objects.create(title='Anna Karenina', author="Tolstoy, Leo", genre='Fiction') # delete accepts 
kwargs that will be used in filter # acting in same way as queryset.filter(**kwargs).delete() await Book.objects.delete(genre='Fantasy') # delete all fantasy books all_books = await Book.objects.all() assert len(all_books) == 2 ormar-0.12.2/docs_src/queries/docs006.py000066400000000000000000000027251444363446500177430ustar00rootroot00000000000000import databases import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Company(ormar.Model): class Meta: tablename = "companies" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) founded: int = ormar.Integer(nullable=True) class Car(ormar.Model): class Meta: tablename = "cars" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) manufacturer = ormar.ForeignKey(Company) name: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) gearbox_type: str = ormar.String(max_length=20, nullable=True) gears: int = ormar.Integer(nullable=True) aircon_type: str = ormar.String(max_length=20, nullable=True) # build some sample data toyota = await Company.objects.create(name="Toyota", founded=1937) await Car.objects.create(manufacturer=toyota, name="Corolla", year=2020, gearbox_type='Manual', gears=5, aircon_type='Manual') await Car.objects.create(manufacturer=toyota, name="Yaris", year=2019, gearbox_type='Manual', gears=5, aircon_type='Manual') await Car.objects.create(manufacturer=toyota, name="Supreme", year=2020, gearbox_type='Auto', gears=6, aircon_type='Auto') ormar-0.12.2/docs_src/queries/docs007.py000066400000000000000000000022241444363446500177360ustar00rootroot00000000000000import databases import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Owner(ormar.Model): class 
Meta: tablename = "owners" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Toy(ormar.Model): class Meta: tablename = "toys" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) owner: Owner = ormar.ForeignKey(Owner) # build some sample data aphrodite = await Owner.objects.create(name="Aphrodite") hermes = await Owner.objects.create(name="Hermes") zeus = await Owner.objects.create(name="Zeus") await Toy.objects.create(name="Toy 4", owner=zeus) await Toy.objects.create(name="Toy 5", owner=hermes) await Toy.objects.create(name="Toy 2", owner=aphrodite) await Toy.objects.create(name="Toy 1", owner=zeus) await Toy.objects.create(name="Toy 3", owner=aphrodite) await Toy.objects.create(name="Toy 6", owner=hermes) ormar-0.12.2/docs_src/queries/docs008.py000066400000000000000000000055671444363446500177540ustar00rootroot00000000000000import databases import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Company(ormar.Model): class Meta: tablename = "companies" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) founded: int = ormar.Integer(nullable=True) class Car(ormar.Model): class Meta: tablename = "cars" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) manufacturer = ormar.ForeignKey(Company) name: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) gearbox_type: str = ormar.String(max_length=20, nullable=True) gears: int = ormar.Integer(nullable=True) aircon_type: str = ormar.String(max_length=20, nullable=True) # build some sample data toyota = await Company.objects.create(name="Toyota", founded=1937) await Car.objects.create(manufacturer=toyota, name="Corolla", year=2020, 
gearbox_type='Manual', gears=5, aircon_type='Manual') await Car.objects.create(manufacturer=toyota, name="Yaris", year=2019, gearbox_type='Manual', gears=5, aircon_type='Manual') await Car.objects.create(manufacturer=toyota, name="Supreme", year=2020, gearbox_type='Auto', gears=6, aircon_type='Auto') # select manufacturer but only name - to include related models use notation {model_name}__{column} all_cars = await Car.objects.select_related('manufacturer').exclude_fields( ['year', 'gearbox_type', 'gears', 'aircon_type', 'company__founded']).all() for car in all_cars: # excluded columns will yield None assert all(getattr(car, x) is None for x in ['year', 'gearbox_type', 'gears', 'aircon_type']) # included column on related models will be available, pk column is always included # even if you do not include it in fields list assert car.manufacturer.name == 'Toyota' # also in the nested related models - you cannot exclude pk - it's always auto added assert car.manufacturer.founded is None # fields() can be called several times, building up the columns to select # models selected in select_related but with no columns in fields list implies all fields all_cars = await Car.objects.select_related('manufacturer').exclude_fields('year').exclude_fields( ['gear', 'gearbox_type']).all() # all fiels from company model are selected assert all_cars[0].manufacturer.name == 'Toyota' assert all_cars[0].manufacturer.founded == 1937 # cannot exclude mandatory model columns - company__name in this example - note usage of dict/set this time await Car.objects.select_related('manufacturer').exclude_fields([{'company': {'name'}}]).all() # will raise pydantic ValidationError as company.name is required ormar-0.12.2/docs_src/queries/docs009.py000066400000000000000000000036671444363446500177540ustar00rootroot00000000000000# 1. like in example above await Car.objects.select_related('manufacturer').fields(['id', 'name', 'manufacturer__name']).all() # 2. 
to mark a field as required use ellipsis await Car.objects.select_related('manufacturer').fields({'id': ..., 'name': ..., 'manufacturer': { 'name': ...} }).all() # 3. to include whole nested model use ellipsis await Car.objects.select_related('manufacturer').fields({'id': ..., 'name': ..., 'manufacturer': ... }).all() # 4. to specify fields at last nesting level you can also use set - equivalent to 2. above await Car.objects.select_related('manufacturer').fields({'id': ..., 'name': ..., 'manufacturer': {'name'} }).all() # 5. of course set can have multiple fields await Car.objects.select_related('manufacturer').fields({'id': ..., 'name': ..., 'manufacturer': {'name', 'founded'} }).all() # 6. you can include all nested fields but it will be equivalent of 3. above which is shorter await Car.objects.select_related('manufacturer').fields({'id': ..., 'name': ..., 'manufacturer': {'id', 'name', 'founded'} }).all() ormar-0.12.2/docs_src/relations/000077500000000000000000000000001444363446500165305ustar00rootroot00000000000000ormar-0.12.2/docs_src/relations/__init__.py000066400000000000000000000000001444363446500206270ustar00rootroot00000000000000ormar-0.12.2/docs_src/relations/docs001.py000066400000000000000000000023171444363446500202560ustar00rootroot00000000000000from typing import Optional, Dict, Union import databases import sqlalchemy import ormar database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Department(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Course(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) department: Optional[Union[Department, Dict]] = ormar.ForeignKey(Department) department = Department(name="Science") # set up a relation with actual Model instance course 
= Course(name="Math", completed=False, department=department) # set up relation with only related model pk value course2 = Course(name="Math II", completed=False, department=department.pk) # set up a relation with dictionary corresponding to related model course3 = Course(name="Math III", completed=False, department=department.dict()) # explicitly set up None course4 = Course(name="Math III", completed=False, department=None) ormar-0.12.2/docs_src/relations/docs002.py000066400000000000000000000020031444363446500202470ustar00rootroot00000000000000from typing import Optional, Union, List import databases import ormar import sqlalchemy database = databases.Database("sqlite:///db.sqlite") metadata = sqlalchemy.MetaData() class Author(ormar.Model): class Meta: tablename = "authors" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) first_name: str = ormar.String(max_length=80) last_name: str = ormar.String(max_length=80) class Category(ormar.Model): class Meta: tablename = "categories" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=40) class Post(ormar.Model): class Meta: tablename = "posts" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) categories: Optional[List[Category]] = ormar.ManyToMany(Category) author: Optional[Author] = ormar.ForeignKey(Author) ormar-0.12.2/docs_src/relations/docs003.py000066400000000000000000000007251444363446500202610ustar00rootroot00000000000000class Department(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Course(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) department: Optional[Union[Department, Dict]] = ormar.ForeignKey(Department) 
ormar-0.12.2/docs_src/relations/docs004.py000066400000000000000000000013501444363446500202550ustar00rootroot00000000000000class BaseMeta(ormar.ModelMeta): database = database metadata = metadata class Category(ormar.Model): class Meta(BaseMeta): tablename = "categories" id = ormar.Integer(primary_key=True) name = ormar.String(max_length=40) class PostCategory(ormar.Model): class Meta(BaseMeta): tablename = "posts_x_categories" id: int = ormar.Integer(primary_key=True) sort_order: int = ormar.Integer(nullable=True) param_name: str = ormar.String(default="Name", max_length=200) class Post(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) categories = ormar.ManyToMany(Category, through=PostCategory) ormar-0.12.2/docs_src/signals/000077500000000000000000000000001444363446500161705ustar00rootroot00000000000000ormar-0.12.2/docs_src/signals/__init__.py000066400000000000000000000000001444363446500202670ustar00rootroot00000000000000ormar-0.12.2/docs_src/signals/docs002.py000066400000000000000000000011071444363446500177130ustar00rootroot00000000000000from ormar import pre_update @pre_update(Album) async def before_update(sender, instance, **kwargs): if instance.play_count > 50 and not instance.is_best_seller: instance.is_best_seller = True # here album.play_count ans is_best_seller get default values album = await Album.objects.create(name="Venice") assert not album.is_best_seller assert album.play_count == 0 album.play_count = 30 # here a trigger is called but play_count is too low await album.update() assert not album.is_best_seller album.play_count = 60 await album.update() assert album.is_best_seller ormar-0.12.2/examples/000077500000000000000000000000001444363446500145475ustar00rootroot00000000000000ormar-0.12.2/examples/fastapi_quick_start.py000066400000000000000000000040671444363446500211700ustar00rootroot00000000000000from typing import List, Optional import databases import sqlalchemy import 
uvicorn from fastapi import FastAPI import ormar app = FastAPI() metadata = sqlalchemy.MetaData() database = databases.Database("sqlite:///test.db") app.state.database = database @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() class Category(ormar.Model): class Meta: tablename = "categories" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Item(ormar.Model): class Meta: tablename = "items" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) category: Optional[Category] = ormar.ForeignKey(Category, nullable=True) @app.get("/items/", response_model=List[Item]) async def get_items(): items = await Item.objects.select_related("category").all() return items @app.post("/items/", response_model=Item) async def create_item(item: Item): await item.save() return item @app.post("/categories/", response_model=Category) async def create_category(category: Category): await category.save() return category @app.put("/items/{item_id}") async def get_item(item_id: int, item: Item): item_db = await Item.objects.get(pk=item_id) return await item_db.update(**item.dict()) @app.delete("/items/{item_id}") async def delete_item(item_id: int, item: Item = None): if item: return {"deleted_rows": await item.delete()} item_db = await Item.objects.get(pk=item_id) return {"deleted_rows": await item_db.delete()} if __name__ == "__main__": # to play with API run the script and visit http://127.0.0.1:8000/docs uvicorn.run(app, host="127.0.0.1", port=8000) ormar-0.12.2/examples/script_from_readme.py000066400000000000000000000340371444363446500207740ustar00rootroot00000000000000from typing import Optional import 
databases import pydantic import ormar import sqlalchemy DATABASE_URL = "sqlite:///db.sqlite" database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() # note that this step is optional -> all ormar cares is a internal # class with name Meta and proper parameters, but this way you do not # have to repeat the same parameters if you use only one database class BaseMeta(ormar.ModelMeta): metadata = metadata database = database # Note that all type hints are optional # below is a perfectly valid model declaration # class Author(ormar.Model): # class Meta(BaseMeta): # tablename = "authors" # # id = ormar.Integer(primary_key=True) # <= notice no field types # name = ormar.String(max_length=100) class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Book(ormar.Model): class Meta(BaseMeta): tablename = "books" id: int = ormar.Integer(primary_key=True) author: Optional[Author] = ormar.ForeignKey(Author) title: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) # create the database # note that in production you should use migrations # note that this is not required if you connect to existing database engine = sqlalchemy.create_engine(DATABASE_URL) # just to be sure we clear the db before metadata.drop_all(engine) metadata.create_all(engine) # all functions below are divided into functionality categories # note how all functions are defined with async - hence can use await AND needs to # be awaited on their own async def create(): # Create some records to work with through QuerySet.create method. # Note that queryset is exposed on each Model's class as objects tolkien = await Author.objects.create(name="J.R.R. 
Tolkien") await Book.objects.create(author=tolkien, title="The Hobbit", year=1937) await Book.objects.create(author=tolkien, title="The Lord of the Rings", year=1955) await Book.objects.create(author=tolkien, title="The Silmarillion", year=1977) # alternative creation of object divided into 2 steps sapkowski = Author(name="Andrzej Sapkowski") # do some stuff await sapkowski.save() # or save() after initialization await Book(author=sapkowski, title="The Witcher", year=1990).save() await Book(author=sapkowski, title="The Tower of Fools", year=2002).save() # to read more about inserting data into the database # visit: https://collerek.github.io/ormar/queries/create/ async def read(): # Fetch an instance, without loading a foreign key relationship on it. # Django style book = await Book.objects.get(title="The Hobbit") # or python style book = await Book.objects.get(Book.title == "The Hobbit") book2 = await Book.objects.first() # first() fetch the instance with lower primary key value assert book == book2 # you can access all fields on loaded model assert book.title == "The Hobbit" assert book.year == 1937 # when no condition is passed to get() # it behaves as last() based on primary key column book3 = await Book.objects.get() assert book3.title == "The Tower of Fools" # When you have a relation, ormar always defines a related model for you # even when all you loaded is a foreign key value like in this example assert isinstance(book.author, Author) # primary key is populated from foreign key stored in books table assert book.author.pk == 1 # since the related model was not loaded all other fields are None assert book.author.name is None # Load the relationship from the database when you already have the related model # alternatively see joins section below await book.author.load() assert book.author.name == "J.R.R. 
Tolkien" # get all rows for given model authors = await Author.objects.all() assert len(authors) == 2 # to read more about reading data from the database # visit: https://collerek.github.io/ormar/queries/read/ async def update(): # read existing row from db tolkien = await Author.objects.get(name="J.R.R. Tolkien") assert tolkien.name == "J.R.R. Tolkien" tolkien_id = tolkien.id # change the selected property tolkien.name = "John Ronald Reuel Tolkien" # call update on a model instance await tolkien.update() # confirm that object was updated tolkien = await Author.objects.get(name="John Ronald Reuel Tolkien") assert tolkien.name == "John Ronald Reuel Tolkien" assert tolkien.id == tolkien_id # alternatively update data without loading await Author.objects.filter(name__contains="Tolkien").update(name="J.R.R. Tolkien") # to read more about updating data in the database # visit: https://collerek.github.io/ormar/queries/update/ async def delete(): silmarillion = await Book.objects.get(year=1977) # call delete() on instance await silmarillion.delete() # alternatively delete without loading await Book.objects.delete(title="The Tower of Fools") # note that when there is no record ormar raises NoMatch exception try: await Book.objects.get(year=1977) except ormar.NoMatch: print("No book from 1977!") # to read more about deleting data from the database # visit: https://collerek.github.io/ormar/queries/delete/ # note that despite the fact that record no longer exists in database # the object above is still accessible and you can use it (and i.e. save()) again. 
tolkien = silmarillion.author await Book.objects.create(author=tolkien, title="The Silmarillion", year=1977) async def joins(): # Tho join two models use select_related # Django style book = await Book.objects.select_related("author").get(title="The Hobbit") # Python style book = await Book.objects.select_related(Book.author).get( Book.title == "The Hobbit" ) # now the author is already prefetched assert book.author.name == "J.R.R. Tolkien" # By default you also get a second side of the relation # constructed as lowercase source model name +'s' (books in this case) # you can also provide custom name with parameter related_name # Django style author = await Author.objects.select_related("books").all(name="J.R.R. Tolkien") # Python style author = await Author.objects.select_related(Author.books).all( Author.name == "J.R.R. Tolkien" ) assert len(author[0].books) == 3 # for reverse and many to many relations you can also prefetch_related # that executes a separate query for each of related models # Django style author = await Author.objects.prefetch_related("books").get(name="J.R.R. Tolkien") # Python style author = await Author.objects.prefetch_related(Author.books).get( Author.name == "J.R.R. Tolkien" ) assert len(author.books) == 3 # to read more about relations # visit: https://collerek.github.io/ormar/relations/ # to read more about joins and subqueries # visit: https://collerek.github.io/ormar/queries/joins-and-subqueries/ async def filter_and_sort(): # to filter the query you can use filter() or pass key-value pars to # get(), all() etc. # to use special methods or access related model fields use double # underscore like to filter by the name of the author use author__name # Django style books = await Book.objects.all(author__name="J.R.R. Tolkien") # python style books = await Book.objects.all(Book.author.name == "J.R.R. 
Tolkien") assert len(books) == 3 # filter can accept special methods also separated with double underscore # to issue sql query ` where authors.name like "%tolkien%"` that is not # case sensitive (hence small t in Tolkien) # Django style books = await Book.objects.filter(author__name__icontains="tolkien").all() # python style books = await Book.objects.filter(Book.author.name.icontains("tolkien")).all() assert len(books) == 3 # to sort use order_by() function of queryset # to sort decreasing use hyphen before the field name # same as with filter you can use double underscores to access related fields # Django style books = ( await Book.objects.filter(author__name__icontains="tolkien") .order_by("-year") .all() ) # python style books = ( await Book.objects.filter(Book.author.name.icontains("tolkien")) .order_by(Book.year.desc()) .all() ) assert len(books) == 3 assert books[0].title == "The Silmarillion" assert books[2].title == "The Hobbit" # to read more about filtering and ordering # visit: https://collerek.github.io/ormar/queries/filter-and-sort/ async def subset_of_columns(): # to exclude some columns from loading when querying the database # you can use fileds() method hobbit = await Book.objects.fields(["title"]).get(title="The Hobbit") # note that fields not included in fields are empty (set to None) assert hobbit.year is None assert hobbit.author is None # selected field is there assert hobbit.title == "The Hobbit" # alternatively you can provide columns you want to exclude hobbit = await Book.objects.exclude_fields(["year"]).get(title="The Hobbit") # year is still not set assert hobbit.year is None # but author is back assert hobbit.author is not None # also you cannot exclude primary key column - it's always there # even if you EXPLICITLY exclude it it will be there # note that each model have a shortcut for primary_key column which is pk # and you can filter/access/set the values by this alias like below assert hobbit.pk is not None # note that you cannot 
exclude fields that are not nullable # (required) in model definition try: await Book.objects.exclude_fields(["title"]).get(title="The Hobbit") except pydantic.ValidationError: print("Cannot exclude non nullable field title") # to read more about selecting subset of columns # visit: https://collerek.github.io/ormar/queries/select-columns/ async def pagination(): # to limit number of returned rows use limit() books = await Book.objects.limit(1).all() assert len(books) == 1 assert books[0].title == "The Hobbit" # to offset number of returned rows use offset() books = await Book.objects.limit(1).offset(1).all() assert len(books) == 1 assert books[0].title == "The Lord of the Rings" # alternatively use paginate that combines both books = await Book.objects.paginate(page=2, page_size=2).all() assert len(books) == 2 # note that we removed one book of Sapkowski in delete() # and recreated The Silmarillion - by default when no order_by is set # ordering sorts by primary_key column assert books[0].title == "The Witcher" assert books[1].title == "The Silmarillion" # to read more about pagination and number of rows # visit: https://collerek.github.io/ormar/queries/pagination-and-rows-number/ async def aggregations(): # count: assert 2 == await Author.objects.count() # exists assert await Book.objects.filter(title="The Hobbit").exists() # maximum assert 1990 == await Book.objects.max(columns=["year"]) # minimum assert 1937 == await Book.objects.min(columns=["year"]) # average assert 1964.75 == await Book.objects.avg(columns=["year"]) # sum assert 7859 == await Book.objects.sum(columns=["year"]) # to read more about aggregated functions # visit: https://collerek.github.io/ormar/queries/aggregations/ async def raw_data(): # extract raw data in a form of dicts or tuples # note that this skips the validation(!) 
as models are # not created from parsed data # get list of objects as dicts assert await Book.objects.values() == [ {"id": 1, "author": 1, "title": "The Hobbit", "year": 1937}, {"id": 2, "author": 1, "title": "The Lord of the Rings", "year": 1955}, {"id": 4, "author": 2, "title": "The Witcher", "year": 1990}, {"id": 5, "author": 1, "title": "The Silmarillion", "year": 1977}, ] # get list of objects as tuples assert await Book.objects.values_list() == [ (1, 1, "The Hobbit", 1937), (2, 1, "The Lord of the Rings", 1955), (4, 2, "The Witcher", 1990), (5, 1, "The Silmarillion", 1977), ] # filter data - note how you always get a list assert await Book.objects.filter(title="The Hobbit").values() == [ {"id": 1, "author": 1, "title": "The Hobbit", "year": 1937} ] # select only wanted fields assert await Book.objects.filter(title="The Hobbit").values(["id", "title"]) == [ {"id": 1, "title": "The Hobbit"} ] # if you select only one column you could flatten it with values_list assert await Book.objects.values_list("title", flatten=True) == [ "The Hobbit", "The Lord of the Rings", "The Witcher", "The Silmarillion", ] # to read more about extracting raw values # visit: https://collerek.github.io/ormar/queries/aggregations/ async def with_connect(function): # note that for any other backend than sqlite you actually need to # connect to the database to perform db operations async with database: await function() # note that if you use framework like `fastapi` you shouldn't connect # in your endpoints but have a global connection pool # check https://collerek.github.io/ormar/fastapi/ and section with db connection # gather and execute all functions # note - normally import should be at the beginning of the file import asyncio # note that normally you use gather() function to run several functions # concurrently but we actually modify the data and we rely on the order of functions for func in [ create, read, update, delete, joins, filter_and_sort, subset_of_columns, pagination, 
aggregations, raw_data, ]: print(f"Executing: {func.__name__}") asyncio.run(with_connect(func)) # drop the database tables metadata.drop_all(engine) ormar-0.12.2/mkdocs.yml000066400000000000000000000053631444363446500147430ustar00rootroot00000000000000site_name: ormar site_description: A simple async ORM with fastapi in mind and pydantic validation. nav: - Overview: index.md - Installation: install.md - Models: - Definition: models/index.md - Inheritance: models/inheritance.md - Methods: models/methods.md - Migrations: models/migrations.md - Internals: models/internals.md - Fields: - Common parameters: fields/common-parameters.md - Fields types: fields/field-types.md - Pydantic only fields: fields/pydantic-fields.md - Fields encryption: fields/encryption.md - Relations: - relations/index.md - relations/postponed-annotations.md - relations/foreign-key.md - relations/many-to-many.md - relations/queryset-proxy.md - Queries: - queries/index.md - queries/create.md - queries/read.md - queries/update.md - queries/delete.md - queries/joins-and-subqueries.md - queries/filter-and-sort.md - queries/select-columns.md - queries/pagination-and-rows-number.md - queries/aggregations.md - Return raw data: queries/raw-data.md - Signals: signals.md - Transactions: transactions.md - Use with Fastapi: - Quick Start: fastapi/index.md - Using ormar in responses: fastapi/response.md - Using ormar in requests: fastapi/requests.md - Use with mypy: mypy.md - PyCharm plugin: plugin.md - Contributing: contributing.md - Release Notes: releases.md - Api (BETA): api/ repo_name: collerek/ormar repo_url: https://github.com/collerek/ormar theme: name: material highlightjs: true hljs_languages: - python palette: - media: "(prefers-color-scheme: light)" scheme: default primary: indigo accent: indigo toggle: icon: material/lightbulb name: Switch to light mode - media: "(prefers-color-scheme: dark)" scheme: slate primary: indigo accent: indigo toggle: icon: material/lightbulb-outline name: Switch to 
dark mode analytics: gtag: G-ZJWZYM5DNM markdown_extensions: - admonition - pymdownx.superfences - pymdownx.snippets: base_path: docs - pymdownx.inlinehilite - pymdownx.highlight: linenums: true plugins: - search - gen-files: scripts: - docs/gen_ref_pages.py - literate-nav: nav_file: SUMMARY.md - section-index - mkdocstrings: watch: - ormar handlers: python: selection: docstring_style: sphinx rendering: show_submodules: no extra: analytics: provider: google property: UA-72514911-3 extra_javascript: - https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/highlight.min.js - javascripts/config.js extra_css: - https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/styles/default.min.css ormar-0.12.2/ormar/000077500000000000000000000000001444363446500140515ustar00rootroot00000000000000ormar-0.12.2/ormar/__init__.py000066400000000000000000000063641444363446500161730ustar00rootroot00000000000000""" The `ormar` package is an async mini ORM for Python, with support for **Postgres, MySQL**, and **SQLite**. The main benefit of using `ormar` are: * getting an **async ORM that can be used with async frameworks** (fastapi, starlette etc.) * getting just **one model to maintain** - you don't have to maintain pydantic and other orm model (sqlalchemy, peewee, gino etc.) The goal was to create a simple ORM that can be **used directly (as request and response models) with `fastapi`** that bases it's data validation on pydantic. Ormar - apart form obvious ORM in name - get it's name from ormar in swedish which means snakes, and ormar(e) in italian which means cabinet. 
And what's a better name for python ORM than snakes cabinet :) """ try: from importlib.metadata import version # type: ignore except ImportError: # pragma: no cover from importlib_metadata import version # type: ignore from ormar.protocols import QuerySetProtocol, RelationProtocol # noqa: I100 from ormar.decorators import ( # noqa: I100 post_bulk_update, post_delete, post_relation_add, post_relation_remove, post_save, post_update, pre_delete, pre_relation_add, pre_relation_remove, pre_save, pre_update, property_field, ) from ormar.exceptions import ( # noqa: I100 ModelDefinitionError, MultipleMatches, NoMatch, ) from ormar.fields import ( BaseField, BigInteger, Boolean, DECODERS_MAP, Date, DateTime, Decimal, ENCODERS_MAP, EncryptBackends, Enum, Float, ForeignKey, ForeignKeyField, Integer, JSON, LargeBinary, ManyToMany, ManyToManyField, SQL_ENCODERS_MAP, SmallInteger, String, Text, Time, UUID, UniqueColumns, IndexColumns, CheckColumns, ReferentialAction, ) # noqa: I100 from ormar.models import ExcludableItems, Extra, Model from ormar.models.metaclass import ModelMeta from ormar.queryset import OrderAction, QuerySet, and_, or_ from ormar.relations import RelationType from ormar.signals import Signal class UndefinedType: # pragma no cover def __repr__(self) -> str: return "OrmarUndefined" Undefined = UndefinedType() __version__ = version("ormar") __all__ = [ "Integer", "BigInteger", "SmallInteger", "Boolean", "Time", "Text", "String", "JSON", "DateTime", "Date", "Decimal", "Enum", "Float", "ManyToMany", "Model", "Action", "ModelDefinitionError", "MultipleMatches", "NoMatch", "ForeignKey", "QuerySet", "RelationType", "Undefined", "UUID", "UniqueColumns", "IndexColumns", "CheckColumns", "ReferentialAction", "QuerySetProtocol", "RelationProtocol", "ModelMeta", "property_field", "post_bulk_update", "post_delete", "post_save", "post_update", "post_relation_add", "post_relation_remove", "pre_delete", "pre_save", "pre_update", "pre_relation_remove", "pre_relation_add", 
"Signal", "BaseField", "ManyToManyField", "ForeignKeyField", "OrderAction", "ExcludableItems", "and_", "or_", "EncryptBackends", "ENCODERS_MAP", "SQL_ENCODERS_MAP", "DECODERS_MAP", "LargeBinary", "Extra", ] ormar-0.12.2/ormar/decorators/000077500000000000000000000000001444363446500162165ustar00rootroot00000000000000ormar-0.12.2/ormar/decorators/__init__.py000066400000000000000000000014501444363446500203270ustar00rootroot00000000000000""" Module with all decorators that are exposed for users. Currently only: * property_field - exposing @property like function as field in Model.dict() * predefined signals decorators (pre/post + save/update/delete) """ from ormar.decorators.property_field import property_field from ormar.decorators.signals import ( post_bulk_update, post_delete, post_relation_add, post_relation_remove, post_save, post_update, pre_delete, pre_relation_add, pre_relation_remove, pre_save, pre_update, ) __all__ = [ "property_field", "post_bulk_update", "post_delete", "post_save", "post_update", "pre_delete", "pre_save", "pre_update", "post_relation_remove", "post_relation_add", "pre_relation_remove", "pre_relation_add", ] ormar-0.12.2/ormar/decorators/property_field.py000066400000000000000000000023601444363446500216200ustar00rootroot00000000000000import inspect from collections.abc import Callable from typing import Union from ormar.exceptions import ModelDefinitionError def property_field(func: Callable) -> Union[property, Callable]: """ Decorator to set a property like function on Model to be exposed as field in dict() and fastapi response. Although you can decorate a @property field like this and this will work, mypy validation will complain about this. Note that "fields" exposed like this do not go through validation. :raises ModelDefinitionError: if method has any other argument than self. 
:param func: decorated function to be exposed :type func: Callable :return: decorated function passed in func param, with set __property_field__ = True :rtype: Union[property, Callable] """ if isinstance(func, property): # pragma: no cover func.fget.__property_field__ = True else: arguments = list(inspect.signature(func).parameters.keys()) if len(arguments) > 1 or arguments[0] != "self": raise ModelDefinitionError( "property_field decorator can be used " "only on methods with no arguments" ) func.__dict__["__property_field__"] = True return func ormar-0.12.2/ormar/decorators/signals.py000066400000000000000000000144311444363446500202330ustar00rootroot00000000000000from typing import Callable, List, TYPE_CHECKING, Type, Union if TYPE_CHECKING: # pragma: no cover from ormar import Model def receiver( signal: str, senders: Union[Type["Model"], List[Type["Model"]]] ) -> Callable: """ Connect given function to all senders for given signal name. :param signal: name of the signal to register to :type signal: str :param senders: one or a list of "Model" classes that should have the signal receiver registered :type senders: Union[Type["Model"], List[Type["Model"]]] :return: returns the original function untouched :rtype: Callable """ def _decorator(func: Callable) -> Callable: """ Internal decorator that does all the registering. :param func: function to register as receiver :type func: Callable :return: untouched function already registered for given signal :rtype: Callable """ if not isinstance(senders, list): _senders = [senders] else: _senders = senders for sender in _senders: signals = getattr(sender.Meta.signals, signal) signals.connect(func) return func return _decorator def post_save(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable: """ Connect given function to all senders for post_save signal. 
:param senders: one or a list of "Model" classes that should have the signal receiver registered :type senders: Union[Type["Model"], List[Type["Model"]]] :return: returns the original function untouched :rtype: Callable """ return receiver(signal="post_save", senders=senders) def post_update(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable: """ Connect given function to all senders for post_update signal. :param senders: one or a list of "Model" classes that should have the signal receiver registered :type senders: Union[Type["Model"], List[Type["Model"]]] :return: returns the original function untouched :rtype: Callable """ return receiver(signal="post_update", senders=senders) def post_delete(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable: """ Connect given function to all senders for post_delete signal. :param senders: one or a list of "Model" classes that should have the signal receiver registered :type senders: Union[Type["Model"], List[Type["Model"]]] :return: returns the original function untouched :rtype: Callable """ return receiver(signal="post_delete", senders=senders) def pre_save(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable: """ Connect given function to all senders for pre_save signal. :param senders: one or a list of "Model" classes that should have the signal receiver registered :type senders: Union[Type["Model"], List[Type["Model"]]] :return: returns the original function untouched :rtype: Callable """ return receiver(signal="pre_save", senders=senders) def pre_update(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable: """ Connect given function to all senders for pre_update signal. 
:param senders: one or a list of "Model" classes that should have the signal receiver registered :type senders: Union[Type["Model"], List[Type["Model"]]] :return: returns the original function untouched :rtype: Callable """ return receiver(signal="pre_update", senders=senders) def pre_delete(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable: """ Connect given function to all senders for pre_delete signal. :param senders: one or a list of "Model" classes that should have the signal receiver registered :type senders: Union[Type["Model"], List[Type["Model"]]] :return: returns the original function untouched :rtype: Callable """ return receiver(signal="pre_delete", senders=senders) def pre_relation_add(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable: """ Connect given function to all senders for pre_relation_add signal. :param senders: one or a list of "Model" classes that should have the signal receiver registered :type senders: Union[Type["Model"], List[Type["Model"]]] :return: returns the original function untouched :rtype: Callable """ return receiver(signal="pre_relation_add", senders=senders) def post_relation_add(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable: """ Connect given function to all senders for post_relation_add signal. :param senders: one or a list of "Model" classes that should have the signal receiver registered :type senders: Union[Type["Model"], List[Type["Model"]]] :return: returns the original function untouched :rtype: Callable """ return receiver(signal="post_relation_add", senders=senders) def pre_relation_remove(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable: """ Connect given function to all senders for pre_relation_remove signal. 
:param senders: one or a list of "Model" classes that should have the signal receiver registered :type senders: Union[Type["Model"], List[Type["Model"]]] :return: returns the original function untouched :rtype: Callable """ return receiver(signal="pre_relation_remove", senders=senders) def post_relation_remove( senders: Union[Type["Model"], List[Type["Model"]]] ) -> Callable: """ Connect given function to all senders for post_relation_remove signal. :param senders: one or a list of "Model" classes that should have the signal receiver registered :type senders: Union[Type["Model"], List[Type["Model"]]] :return: returns the original function untouched :rtype: Callable """ return receiver(signal="post_relation_remove", senders=senders) def post_bulk_update(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable: """ Connect given function to all senders for post_bulk_update signal. :param senders: one or a list of "Model" classes that should have the signal receiver registered :type senders: Union[Type["Model"], List[Type["Model"]]] :return: returns the original function untouched :rtype: Callable """ return receiver(signal="post_bulk_update", senders=senders) ormar-0.12.2/ormar/exceptions.py000066400000000000000000000036211444363446500166060ustar00rootroot00000000000000""" Gathers all exceptions thrown by ormar. """ class AsyncOrmException(Exception): """ Base ormar Exception """ pass class ModelDefinitionError(AsyncOrmException): """ Raised for errors related to the model definition itself: * setting @property_field on method with arguments other than func(self) * defining a Field without required parameters * defining a model with more than one primary_key * defining a model without primary_key * setting primary_key column as pydantic_only """ pass class ModelError(AsyncOrmException): """ Raised for initialization of model with non-existing field keyword. 
""" pass class NoMatch(AsyncOrmException): """ Raised for database queries that has no matching result (empty result). """ pass class MultipleMatches(AsyncOrmException): """ Raised for database queries that should return one row (i.e. get, first etc.) but has multiple matching results in response. """ pass class QueryDefinitionError(AsyncOrmException): """ Raised for errors in query definition: * using contains or icontains filter with instance of the Model * using Queryset.update() without filter and setting each flag to True * using Queryset.delete() without filter and setting each flag to True """ pass class RelationshipInstanceError(AsyncOrmException): pass class ModelPersistenceError(AsyncOrmException): """ Raised for update of models without primary_key set (cannot retrieve from db) or for saving a model with relation to unsaved model (cannot extract fk value). """ pass class SignalDefinitionError(AsyncOrmException): """ Raised when non callable receiver is passed as signal callback. """ pass class ModelListEmptyError(AsyncOrmException): """ Raised for objects is empty when bulk_update """ pass ormar-0.12.2/ormar/fields/000077500000000000000000000000001444363446500153175ustar00rootroot00000000000000ormar-0.12.2/ormar/fields/__init__.py000066400000000000000000000030101444363446500174220ustar00rootroot00000000000000""" Module with classes and constructors for ormar Fields. Base Fields types (like String, Integer etc.) as well as relation Fields (ForeignKey, ManyToMany). 
Also a definition for custom CHAR based sqlalchemy UUID field """ from ormar.fields.base import BaseField from ormar.fields.constraints import IndexColumns, UniqueColumns, CheckColumns from ormar.fields.foreign_key import ForeignKey, ForeignKeyField from ormar.fields.many_to_many import ManyToMany, ManyToManyField from ormar.fields.model_fields import ( BigInteger, Boolean, Date, DateTime, Decimal, Enum, Float, Integer, JSON, LargeBinary, SmallInteger, String, Text, Time, UUID, ) from ormar.fields.parsers import DECODERS_MAP, ENCODERS_MAP, SQL_ENCODERS_MAP from ormar.fields.sqlalchemy_encrypted import EncryptBackend, EncryptBackends from ormar.fields.through_field import Through, ThroughField from ormar.fields.referential_actions import ReferentialAction __all__ = [ "Decimal", "BigInteger", "SmallInteger", "Boolean", "Date", "DateTime", "String", "JSON", "Integer", "Text", "Float", "Time", "UUID", "Enum", "ForeignKey", "ManyToMany", "ManyToManyField", "BaseField", "ForeignKeyField", "ThroughField", "Through", "EncryptBackends", "EncryptBackend", "DECODERS_MAP", "ENCODERS_MAP", "SQL_ENCODERS_MAP", "LargeBinary", "UniqueColumns", "IndexColumns", "CheckColumns", "ReferentialAction", ] ormar-0.12.2/ormar/fields/base.py000066400000000000000000000340151444363446500166060ustar00rootroot00000000000000import warnings from typing import Any, Dict, List, Optional, TYPE_CHECKING, Type, Union import sqlalchemy from pydantic import Json, typing from pydantic.fields import FieldInfo, Required, Undefined import ormar # noqa I101 from ormar import ModelDefinitionError from ormar.fields.sqlalchemy_encrypted import ( EncryptBackend, EncryptBackends, EncryptedString, ) if TYPE_CHECKING: # pragma no cover from ormar.models import Model from ormar.models import NewBaseModel class BaseField(FieldInfo): """ BaseField serves as a parent class for all basic Fields in ormar. It keeps all common parameters available for all fields as well as set of useful functions. 
All values are kept as class variables, ormar Fields are never instantiated. Subclasses pydantic.FieldInfo to keep the fields related to pydantic field types like ConstrainedStr """ def __init__(self, **kwargs: Any) -> None: self.__type__: type = kwargs.pop("__type__", None) self.__pydantic_type__: type = kwargs.pop("__pydantic_type__", None) self.__sample__: type = kwargs.pop("__sample__", None) self.related_name = kwargs.pop("related_name", None) self.column_type: sqlalchemy.Column = kwargs.pop("column_type", None) self.constraints: List = kwargs.pop("constraints", list()) self.name: str = kwargs.pop("name", None) self.db_alias: str = kwargs.pop("alias", None) self.primary_key: bool = kwargs.pop("primary_key", False) self.autoincrement: bool = kwargs.pop("autoincrement", False) self.nullable: bool = kwargs.pop("nullable", False) self.sql_nullable: bool = kwargs.pop("sql_nullable", False) self.index: bool = kwargs.pop("index", False) self.unique: bool = kwargs.pop("unique", False) self.pydantic_only: bool = kwargs.pop("pydantic_only", False) if self.pydantic_only: warnings.warn( "Parameter `pydantic_only` is deprecated and will " "be removed in one of the next releases.\n You can declare " "pydantic fields in a normal way. 
\n Check documentation: " "https://collerek.github.io/ormar/fields/pydantic-fields", DeprecationWarning, ) self.choices: typing.Sequence = kwargs.pop("choices", False) self.virtual: bool = kwargs.pop( "virtual", None ) # ManyToManyFields and reverse ForeignKeyFields self.is_multi: bool = kwargs.pop("is_multi", None) # ManyToManyField self.is_relation: bool = kwargs.pop( "is_relation", None ) # ForeignKeyField + subclasses self.is_through: bool = kwargs.pop("is_through", False) # ThroughFields self.through_relation_name = kwargs.pop("through_relation_name", None) self.through_reverse_relation_name = kwargs.pop( "through_reverse_relation_name", None ) self.skip_reverse: bool = kwargs.pop("skip_reverse", False) self.skip_field: bool = kwargs.pop("skip_field", False) self.owner: Type["Model"] = kwargs.pop("owner", None) self.to: Type["Model"] = kwargs.pop("to", None) self.through: Type["Model"] = kwargs.pop("through", None) self.self_reference: bool = kwargs.pop("self_reference", False) self.self_reference_primary: Optional[str] = kwargs.pop( "self_reference_primary", None ) self.orders_by: Optional[List[str]] = kwargs.pop("orders_by", None) self.related_orders_by: Optional[List[str]] = kwargs.pop( "related_orders_by", None ) self.encrypt_secret: str = kwargs.pop("encrypt_secret", None) self.encrypt_backend: EncryptBackends = kwargs.pop( "encrypt_backend", EncryptBackends.NONE ) self.encrypt_custom_backend: Optional[Type[EncryptBackend]] = kwargs.pop( "encrypt_custom_backend", None ) self.ormar_default: Any = kwargs.pop("default", None) self.server_default: Any = kwargs.pop("server_default", None) self.comment: str = kwargs.pop("comment", None) self.represent_as_base64_str: bool = kwargs.pop( "represent_as_base64_str", False ) for name, value in kwargs.items(): setattr(self, name, value) kwargs.update(self.get_pydantic_default()) super().__init__(**kwargs) def is_valid_uni_relation(self) -> bool: """ Checks if field is a relation definition but only for ForeignKey 
relation, so excludes ManyToMany fields, as well as virtual ForeignKey (second side of FK relation). Is used to define if a field is a db ForeignKey column that should be saved/populated when dealing with internal/own Model columns only. :return: result of the check :rtype: bool """ return not self.is_multi and not self.virtual def get_alias(self) -> str: """ Used to translate Model column names to database column names during db queries. :return: returns custom database column name if defined by user, otherwise field name in ormar/pydantic :rtype: str """ return self.db_alias if self.db_alias else self.name def get_pydantic_default(self) -> Dict: """ Generates base pydantic.FieldInfo with only default and optionally required to fix pydantic Json field being set to required=False. Used in an ormar Model Metaclass. :return: instance of base pydantic.FieldInfo :rtype: pydantic.FieldInfo """ base = self.default_value() if base is None: base = dict(default=None) if self.nullable else dict(default=Undefined) if self.__type__ == Json and base.get("default") is Undefined: base["default"] = Required return base def default_value(self, use_server: bool = False) -> Optional[Dict]: """ Returns a FieldInfo instance with populated default (static) or default_factory (function). If the field is a autoincrement primary key the default is None. Otherwise field have to has either default, or default_factory populated. If all default conditions fail None is returned. Used in converting to pydantic FieldInfo. 
:param use_server: flag marking if server_default should be treated as default value, default False :type use_server: bool :return: returns a call to pydantic.Field which is returning a FieldInfo instance :rtype: Optional[pydantic.FieldInfo] """ if self.is_auto_primary_key(): return dict(default=None) if self.has_default(use_server=use_server): default = ( self.ormar_default if self.ormar_default is not None else self.server_default ) if callable(default): return dict(default_factory=default) return dict(default=default) return None def get_default(self, use_server: bool = False) -> Any: # noqa CCR001 """ Return default value for a field. If the field is Callable the function is called and actual result is returned. Used to populate default_values for pydantic Model in ormar Model Metaclass. :param use_server: flag marking if server_default should be treated as default value, default False :type use_server: bool :return: default value for the field if set, otherwise implicit None :rtype: Any """ if self.has_default(): default = ( self.ormar_default if self.ormar_default is not None else (self.server_default if use_server else None) ) if callable(default): # pragma: no cover default = default() return default def has_default(self, use_server: bool = True) -> bool: """ Checks if the field has default value set. :param use_server: flag marking if server_default should be treated as default value, default False :type use_server: bool :return: result of the check if default value is set :rtype: bool """ return self.ormar_default is not None or ( self.server_default is not None and use_server ) def is_auto_primary_key(self) -> bool: """ Checks if field is first a primary key and if it, it's than check if it's set to autoincrement. Autoincrement primary_key is nullable/optional. 
:return: result of the check for primary key and autoincrement :rtype: bool """ if self.primary_key: return self.autoincrement return False def construct_constraints(self) -> List: """ Converts list of ormar constraints into sqlalchemy ForeignKeys. Has to be done dynamically as sqlalchemy binds ForeignKey to the table. And we need a new ForeignKey for subclasses of current model :return: List of sqlalchemy foreign keys - by default one. :rtype: List[sqlalchemy.schema.ForeignKey] """ constraints = [ sqlalchemy.ForeignKey( con.reference, ondelete=con.ondelete, onupdate=con.onupdate, name=f"fk_{self.owner.Meta.tablename}_{self.to.Meta.tablename}" f"_{self.to.get_column_alias(self.to.Meta.pkname)}_{self.name}", ) for con in self.constraints ] return constraints def get_column(self, name: str) -> sqlalchemy.Column: """ Returns definition of sqlalchemy.Column used in creation of sqlalchemy.Table. Populates name, column type constraints, as well as a number of parameters like primary_key, index, unique, nullable, default and server_default. :param name: name of the db column - used if alias is not set :type name: str :return: actual definition of the database column as sqlalchemy requires. :rtype: sqlalchemy.Column """ if self.encrypt_backend == EncryptBackends.NONE: column = sqlalchemy.Column( self.db_alias or name, self.column_type, *self.construct_constraints(), primary_key=self.primary_key, nullable=self.sql_nullable, index=self.index, unique=self.unique, default=self.ormar_default, server_default=self.server_default, comment=self.comment, ) else: column = self._get_encrypted_column(name=name) return column def _get_encrypted_column(self, name: str) -> sqlalchemy.Column: """ Returns EncryptedString column type instead of actual column. :param name: column name :type name: str :return: newly defined column :rtype: sqlalchemy.Column """ if self.primary_key or self.is_relation: raise ModelDefinitionError( "Primary key field and relations fields" "cannot be encrypted!" 
) column = sqlalchemy.Column( self.db_alias or name, EncryptedString( _field_type=self, encrypt_secret=self.encrypt_secret, encrypt_backend=self.encrypt_backend, encrypt_custom_backend=self.encrypt_custom_backend, ), nullable=self.nullable, index=self.index, unique=self.unique, default=self.ormar_default, server_default=self.server_default, ) return column def expand_relationship( self, value: Any, child: Union["Model", "NewBaseModel"], to_register: bool = True, ) -> Any: """ Function overwritten for relations, in basic field the value is returned as is. For relations the child model is first constructed (if needed), registered in relation and returned. For relation fields the value can be a pk value (Any type of field), dict (from Model) or actual instance/list of a "Model". :param value: a Model field value, returned untouched for non relation fields. :type value: Any :param child: a child Model to register :type child: Union["Model", "NewBaseModel"] :param to_register: flag if the relation should be set in RelationshipManager :type to_register: bool :return: returns untouched value for normal fields, expands only for relations :rtype: Any """ return value def set_self_reference_flag(self) -> None: """ Sets `self_reference` to True if field to and owner are same model. :return: None :rtype: None """ if self.owner is not None and ( self.owner == self.to or self.owner.Meta == self.to.Meta ): self.self_reference = True self.self_reference_primary = self.name def has_unresolved_forward_refs(self) -> bool: """ Verifies if the filed has any ForwardRefs that require updating before the model can be used. 
:return: result of the check :rtype: bool """ return False def evaluate_forward_ref(self, globalns: Any, localns: Any) -> None: """ Evaluates the ForwardRef to actual Field based on global and local namespaces :param globalns: global namespace :type globalns: Any :param localns: local namespace :type localns: Any :return: None :rtype: None """ def get_related_name(self) -> str: """ Returns name to use for reverse relation. It's either set as `related_name` or by default it's owner model. get_name + 's' :return: name of the related_name or default related name. :rtype: str """ return "" # pragma: no cover ormar-0.12.2/ormar/fields/constraints.py000066400000000000000000000014601444363446500202410ustar00rootroot00000000000000from typing import Any from sqlalchemy import Index, UniqueConstraint, CheckConstraint class UniqueColumns(UniqueConstraint): """ Subclass of sqlalchemy.UniqueConstraint. Used to avoid importing anything from sqlalchemy by user. """ class IndexColumns(Index): def __init__(self, *args: Any, name: str = None, **kw: Any) -> None: if not name: name = "TEMPORARY_NAME" super().__init__(name, *args, **kw) """ Subclass of sqlalchemy.Index. Used to avoid importing anything from sqlalchemy by user. """ class CheckColumns(CheckConstraint): """ Subclass of sqlalchemy.CheckConstraint. Used to avoid importing anything from sqlalchemy by user. Note that some databases do not actively support check constraints such as MySQL. 
""" ormar-0.12.2/ormar/fields/foreign_key.py000066400000000000000000000464451444363446500202070ustar00rootroot00000000000000import string import uuid from dataclasses import dataclass from random import choices from typing import ( Any, Dict, List, Optional, TYPE_CHECKING, Tuple, Type, Union, overload, ) import ormar # noqa I101 import sqlalchemy from ormar.exceptions import ModelDefinitionError, RelationshipInstanceError from ormar.fields.base import BaseField from ormar.fields.referential_actions import ReferentialAction from pydantic import BaseModel, create_model from pydantic.typing import ForwardRef, evaluate_forwardref if TYPE_CHECKING: # pragma no cover from ormar.models import Model, NewBaseModel, T from ormar.fields import ManyToManyField def create_dummy_instance(fk: Type["T"], pk: Any = None) -> "T": """ Ormar never returns you a raw data. So if you have a related field that has a value populated it will construct you a Model instance out of it. Creates a "fake" instance of passed Model from pk value. The instantiated Model has only pk value filled. To achieve this __pk_only__ flag has to be passed as it skips the validation. If the nested related Models are required they are set with -1 as pk value. :param fk: class of the related Model to which instance should be constructed :type fk: Model class :param pk: value of the primary_key column :type pk: Any :return: Model instance populated with only pk :rtype: Model """ init_dict = { **{fk.Meta.pkname: pk or -1, "__pk_only__": True}, **{ k: create_dummy_instance(v.to) for k, v in fk.Meta.model_fields.items() if v.is_relation and not v.nullable and not v.virtual }, } return fk(**init_dict) def create_dummy_model( base_model: Type["T"], pk_field: Union[BaseField, "ForeignKeyField", "ManyToManyField"], ) -> Type["BaseModel"]: """ Used to construct a dummy pydantic model for type hints and pydantic validation. Populates only pk field and set it to desired type. 
:param base_model: class of target dummy model :type base_model: Model class :param pk_field: ormar Field to be set on pydantic Model :type pk_field: Union[BaseField, "ForeignKeyField", "ManyToManyField"] :return: constructed dummy model :rtype: pydantic.BaseModel """ alias = ( "".join(choices(string.ascii_uppercase, k=6)) # + uuid.uuid4().hex[:4] ).lower() fields = {f"{pk_field.name}": (pk_field.__type__, None)} dummy_model = create_model( # type: ignore f"PkOnly{base_model.get_name(lower=False)}{alias}", __module__=base_model.__module__, **fields, # type: ignore ) return dummy_model def populate_fk_params_based_on_to_model( to: Type["T"], nullable: bool, onupdate: str = None, ondelete: str = None ) -> Tuple[Any, List, Any]: """ Based on target to model to which relation leads to populates the type of the pydantic field to use, ForeignKey constraint and type of the target column field. :param to: target related ormar Model :type to: Model class :param nullable: marks field as optional/ required :type nullable: bool :param onupdate: parameter passed to sqlalchemy.ForeignKey. How to treat child rows on update of parent (the one where FK is defined) model. :type onupdate: str :param ondelete: parameter passed to sqlalchemy.ForeignKey. How to treat child rows on delete of parent (the one where FK is defined) model. :type ondelete: str :return: tuple with target pydantic type, list of fk constraints and target col type :rtype: Tuple[Any, List, Any] """ fk_string = to.Meta.tablename + "." 
+ to.get_column_alias(to.Meta.pkname) to_field = to.Meta.model_fields[to.Meta.pkname] pk_only_model = create_dummy_model(to, to_field) __type__ = ( Union[to_field.__type__, to, pk_only_model] if not nullable else Optional[Union[to_field.__type__, to, pk_only_model]] ) constraints = [ ForeignKeyConstraint( reference=fk_string, ondelete=ondelete, onupdate=onupdate, name=None ) ] column_type = to_field.column_type return __type__, constraints, column_type def validate_not_allowed_fields(kwargs: Dict) -> None: """ Verifies if not allowed parameters are set on relation models. Usually they are omitted later anyway but this way it's explicitly notify the user that it's not allowed/ supported. :raises ModelDefinitionError: if any forbidden field is set :param kwargs: dict of kwargs to verify passed to relation field :type kwargs: Dict """ default = kwargs.pop("default", None) encrypt_secret = kwargs.pop("encrypt_secret", None) encrypt_backend = kwargs.pop("encrypt_backend", None) encrypt_custom_backend = kwargs.pop("encrypt_custom_backend", None) overwrite_pydantic_type = kwargs.pop("overwrite_pydantic_type", None) not_supported = [ default, encrypt_secret, encrypt_backend, encrypt_custom_backend, overwrite_pydantic_type, ] if any(x is not None for x in not_supported): raise ModelDefinitionError( f"Argument {next((x for x in not_supported if x is not None))} " f"is not supported " "on relation fields!" 
) def validate_referential_action( action: Optional[Union[ReferentialAction, str]], ) -> Optional[str]: """ Validation `onupdate` and `ondelete` action cast to a string value :raises ModelDefinitionError: if action is a not valid name string value :param action: referential action attribute or name string :type action: Optional[Union[ReferentialAction, str]] :rtype: Optional[str] """ if action is not None and not isinstance(action, ReferentialAction): try: action = ReferentialAction(action.upper()) except (ValueError, AttributeError): raise ModelDefinitionError(f"{action} ReferentialAction not supported.") return action.value if action is not None else None @dataclass class ForeignKeyConstraint: """ Internal container to store ForeignKey definitions used later to produce sqlalchemy.ForeignKeys """ reference: Union[str, sqlalchemy.Column] name: Optional[str] ondelete: Optional[str] onupdate: Optional[str] @overload def ForeignKey(to: Type["T"], **kwargs: Any) -> "T": # pragma: no cover ... @overload def ForeignKey(to: ForwardRef, **kwargs: Any) -> "Model": # pragma: no cover ... def ForeignKey( # type: ignore # noqa CFQ002 to: Union[Type["T"], "ForwardRef"], *, name: str = None, unique: bool = False, nullable: bool = True, related_name: str = None, virtual: bool = False, onupdate: Union[ReferentialAction, str] = None, ondelete: Union[ReferentialAction, str] = None, **kwargs: Any, ) -> "T": """ Despite a name it's a function that returns constructed ForeignKeyField. This function is actually used in model declaration (as ormar.ForeignKey(ToModel)). Accepts number of relation setting parameters as well as all BaseField ones. 
:param to: target related ormar Model :type to: Model class :param name: name of the database field - later called alias :type name: str :param unique: parameter passed to sqlalchemy.ForeignKey, unique flag :type unique: bool :param nullable: marks field as optional/ required :type nullable: bool :param related_name: name of reversed FK relation populated for you on to model :type related_name: str :param virtual: marks if relation is virtual. It is for reversed FK and auto generated FK on through model in Many2Many relations. :type virtual: bool :param onupdate: parameter passed to sqlalchemy.ForeignKey. How to treat child rows on update of parent (the one where FK is defined) model. :type onupdate: Union[ReferentialAction, str] :param ondelete: parameter passed to sqlalchemy.ForeignKey. How to treat child rows on delete of parent (the one where FK is defined) model. :type ondelete: Union[ReferentialAction, str] :param kwargs: all other args to be populated by BaseField :type kwargs: Any :return: ormar ForeignKeyField with relation to selected model :rtype: ForeignKeyField """ onupdate = validate_referential_action(action=onupdate) ondelete = validate_referential_action(action=ondelete) owner = kwargs.pop("owner", None) self_reference = kwargs.pop("self_reference", False) orders_by = kwargs.pop("orders_by", None) related_orders_by = kwargs.pop("related_orders_by", None) skip_reverse = kwargs.pop("skip_reverse", False) skip_field = kwargs.pop("skip_field", False) sql_nullable = kwargs.pop("sql_nullable", None) sql_nullable = nullable if sql_nullable is None else sql_nullable validate_not_allowed_fields(kwargs) if to.__class__ == ForwardRef: __type__ = to if not nullable else Optional[to] constraints: List = [] column_type = None else: __type__, constraints, column_type = populate_fk_params_based_on_to_model( to=to, # type: ignore nullable=nullable, ondelete=ondelete, onupdate=onupdate, ) namespace = dict( __type__=__type__, to=to, through=None, alias=name, 
name=kwargs.pop("real_name", None), nullable=nullable, sql_nullable=sql_nullable, constraints=constraints, unique=unique, column_type=column_type, related_name=related_name, virtual=virtual, primary_key=False, index=False, pydantic_only=False, default=None, server_default=None, onupdate=onupdate, ondelete=ondelete, owner=owner, self_reference=self_reference, is_relation=True, orders_by=orders_by, related_orders_by=related_orders_by, skip_reverse=skip_reverse, skip_field=skip_field, ) Field = type("ForeignKey", (ForeignKeyField, BaseField), {}) return Field(**namespace) class ForeignKeyField(BaseField): """ Actual class returned from ForeignKey function call and stored in model_fields. """ def __init__(self, **kwargs: Any) -> None: if TYPE_CHECKING: # pragma: no cover self.__type__: type self.to: Type["Model"] self.ondelete: str = kwargs.pop("ondelete", None) self.onupdate: str = kwargs.pop("onupdate", None) super().__init__(**kwargs) def get_source_related_name(self) -> str: """ Returns name to use for source relation name. For FK it's the same, differs for m2m fields. It's either set as `related_name` or by default it's owner model. get_name + 's' :return: name of the related_name or default related name. :rtype: str """ return self.get_related_name() def get_related_name(self) -> str: """ Returns name to use for reverse relation. It's either set as `related_name` or by default it's owner model. get_name + 's' :return: name of the related_name or default related name. :rtype: str """ return self.related_name or self.owner.get_name() + "s" def default_target_field_name(self) -> str: """ Returns default target model name on through model. :return: name of the field :rtype: str """ prefix = "from_" if self.self_reference else "" return self.through_reverse_relation_name or f"{prefix}{self.to.get_name()}" def default_source_field_name(self) -> str: """ Returns default target model name on through model. 
:return: name of the field :rtype: str """ prefix = "to_" if self.self_reference else "" return self.through_relation_name or f"{prefix}{self.owner.get_name()}" def evaluate_forward_ref(self, globalns: Any, localns: Any) -> None: """ Evaluates the ForwardRef to actual Field based on global and local namespaces :param globalns: global namespace :type globalns: Any :param localns: local namespace :type localns: Any :return: None :rtype: None """ if self.to.__class__ == ForwardRef: self.to = evaluate_forwardref( self.to, globalns, localns or None # type: ignore ) ( self.__type__, self.constraints, self.column_type, ) = populate_fk_params_based_on_to_model( to=self.to, nullable=self.nullable, ondelete=self.ondelete, onupdate=self.onupdate, ) def _extract_model_from_sequence( self, value: List, child: "Model", to_register: bool ) -> List["Model"]: """ Takes a list of Models and registers them on parent. Registration is mutual, so children have also reference to parent. Used in reverse FK relations. :param value: list of Model :type value: List :param child: child/ related Model :type child: Model :param to_register: flag if the relation should be set in RelationshipManager :type to_register: bool :return: list (if needed) registered Models :rtype: List["Model"] """ return [ self.expand_relationship( # type: ignore value=val, child=child, to_register=to_register ) for val in value ] def _register_existing_model( self, value: "Model", child: "Model", to_register: bool ) -> "Model": """ Takes already created instance and registers it for parent. Registration is mutual, so children have also reference to parent. Used in reverse FK relations and normal FK for single models. 
:param value: already instantiated Model :type value: Model :param child: child/ related Model :type child: Model :param to_register: flag if the relation should be set in RelationshipManager :type to_register: bool :return: (if needed) registered Model :rtype: Model """ if to_register: self.register_relation(model=value, child=child) return value def _construct_model_from_dict( self, value: dict, child: "Model", to_register: bool ) -> "Model": """ Takes a dictionary, creates a instance and registers it for parent. If dictionary contains only one field and it's a pk it is a __pk_only__ model. Registration is mutual, so children have also reference to parent. Used in normal FK for dictionaries. :param value: dictionary of a Model :type value: dict :param child: child/ related Model :type child: Model :param to_register: flag if the relation should be set in RelationshipManager :type to_register: bool :return: (if needed) registered Model :rtype: Model """ if len(value.keys()) == 1 and list(value.keys())[0] == self.to.Meta.pkname: value["__pk_only__"] = True model = self.to(**value) if to_register: self.register_relation(model=model, child=child) return model def _construct_model_from_pk( self, value: Any, child: "Model", to_register: bool ) -> "Model": """ Takes a pk value, creates a dummy instance and registers it for parent. Registration is mutual, so children have also reference to parent. Used in normal FK for dictionaries. 
:param value: value of a related pk / fk column :type value: Any :param child: child/ related Model :type child: Model :param to_register: flag if the relation should be set in RelationshipManager :type to_register: bool :return: (if needed) registered Model :rtype: Model """ if self.to.pk_type() == uuid.UUID and isinstance(value, str): # pragma: nocover value = uuid.UUID(value) if not isinstance(value, self.to.pk_type()): raise RelationshipInstanceError( f"Relationship error - ForeignKey {self.to.__name__} " f"is of type {self.to.pk_type()} " f"while {type(value)} passed as a parameter." ) model = create_dummy_instance(fk=self.to, pk=value) if to_register: self.register_relation(model=model, child=child) return model def register_relation(self, model: "Model", child: "Model") -> None: """ Registers relation between parent and child in relation manager. Relation manager is kep on each model (different instance). Used in Metaclass and sometimes some relations are missing (i.e. cloned Models in fastapi might miss one). :param model: parent model (with relation definition) :type model: Model class :param child: child model :type child: Model class """ model._orm.add(parent=model, child=child, field=self) def has_unresolved_forward_refs(self) -> bool: """ Verifies if the filed has any ForwardRefs that require updating before the model can be used. :return: result of the check :rtype: bool """ return self.to.__class__ == ForwardRef def expand_relationship( self, value: Any, child: Union["Model", "NewBaseModel"], to_register: bool = True, ) -> Optional[Union["Model", List["Model"]]]: """ For relations the child model is first constructed (if needed), registered in relation and returned. For relation fields the value can be a pk value (Any type of field), dict (from Model) or actual instance/list of a "Model". Selects the appropriate constructor based on a passed value. :param value: a Model field value, returned untouched for non relation fields. 
:type value: Any :param child: a child Model to register :type child: Union["Model", "NewBaseModel"] :param to_register: flag if the relation should be set in RelationshipManager :type to_register: bool :return: returns a Model or a list of Models :rtype: Optional[Union["Model", List["Model"]]] """ if value is None: return None if not self.virtual else [] constructors = { f"{self.to.__name__}": self._register_existing_model, "dict": self._construct_model_from_dict, "list": self._extract_model_from_sequence, } model = constructors.get( # type: ignore value.__class__.__name__, self._construct_model_from_pk )(value, child, to_register) return model def get_relation_name(self) -> str: # pragma: no cover """ Returns name of the relation, which can be a own name or through model names for m2m models :return: result of the check :rtype: bool """ return self.name def get_source_model(self) -> Type["Model"]: # pragma: no cover """ Returns model from which the relation comes -> either owner or through model :return: source model :rtype: Type["Model"] """ return self.owner ormar-0.12.2/ormar/fields/many_to_many.py000066400000000000000000000215661444363446500203750ustar00rootroot00000000000000import sys from typing import ( Any, List, Optional, TYPE_CHECKING, Tuple, Type, Union, cast, overload, ) from pydantic.typing import ForwardRef, evaluate_forwardref import ormar # noqa: I100 from ormar import ModelDefinitionError from ormar.fields import BaseField from ormar.fields.foreign_key import ForeignKeyField, validate_not_allowed_fields if TYPE_CHECKING: # pragma no cover from ormar.models import Model, T from ormar.relations.relation_proxy import RelationProxy if sys.version_info < (3, 7): ToType = Type["T"] else: ToType = Union[Type["T"], "ForwardRef"] REF_PREFIX = "#/components/schemas/" def forbid_through_relations(through: Type["Model"]) -> None: """ Verifies if the through model does not have relations. 
:param through: through Model to be checked :type through: Type['Model] """ if any(field.is_relation for field in through.Meta.model_fields.values()): raise ModelDefinitionError( f"Through Models cannot have explicit relations " f"defined. Remove the relations from Model " f"{through.get_name(lower=False)}" ) def populate_m2m_params_based_on_to_model( to: Type["Model"], nullable: bool ) -> Tuple[Any, Any]: """ Based on target to model to which relation leads to populates the type of the pydantic field to use and type of the target column field. :param to: target related ormar Model :type to: Model class :param nullable: marks field as optional/ required :type nullable: bool :return: Tuple[List, Any] :rtype: tuple with target pydantic type and target col type """ to_field = to.Meta.model_fields[to.Meta.pkname] __type__ = ( Union[to_field.__type__, to, List[to]] # type: ignore if not nullable else Optional[Union[to_field.__type__, to, List[to]]] # type: ignore ) column_type = to_field.column_type return __type__, column_type @overload def ManyToMany(to: Type["T"], **kwargs: Any) -> "RelationProxy[T]": # pragma: no cover ... @overload def ManyToMany(to: ForwardRef, **kwargs: Any) -> "RelationProxy": # pragma: no cover ... def ManyToMany( # type: ignore to: "ToType", through: Optional["ToType"] = None, *, name: str = None, unique: bool = False, virtual: bool = False, **kwargs: Any, ) -> "RelationProxy[T]": """ Despite a name it's a function that returns constructed ManyToManyField. This function is actually used in model declaration (as ormar.ManyToMany(ToModel, through=ThroughModel)). Accepts number of relation setting parameters as well as all BaseField ones. 
:param to: target related ormar Model :type to: Model class :param through: through model for m2m relation :type through: Model class :param name: name of the database field - later called alias :type name: str :param unique: parameter passed to sqlalchemy.ForeignKey, unique flag :type unique: bool :param virtual: marks if relation is virtual. It is for reversed FK and auto generated FK on through model in Many2Many relations. :type virtual: bool :param kwargs: all other args to be populated by BaseField :type kwargs: Any :return: ormar ManyToManyField with m2m relation to selected model :rtype: ManyToManyField """ related_name = kwargs.pop("related_name", None) nullable = kwargs.pop("nullable", True) owner = kwargs.pop("owner", None) self_reference = kwargs.pop("self_reference", False) orders_by = kwargs.pop("orders_by", None) related_orders_by = kwargs.pop("related_orders_by", None) skip_reverse = kwargs.pop("skip_reverse", False) skip_field = kwargs.pop("skip_field", False) through_relation_name = kwargs.pop("through_relation_name", None) through_reverse_relation_name = kwargs.pop("through_reverse_relation_name", None) if through is not None and through.__class__ != ForwardRef: forbid_through_relations(cast(Type["Model"], through)) validate_not_allowed_fields(kwargs) if to.__class__ == ForwardRef: __type__ = ( Union[to, List[to]] # type: ignore if not nullable else Optional[Union[to, List[to]]] # type: ignore ) column_type = None else: __type__, column_type = populate_m2m_params_based_on_to_model( to=to, nullable=nullable # type: ignore ) namespace = dict( __type__=__type__, to=to, through=through, alias=name, name=name, nullable=nullable, unique=unique, column_type=column_type, related_name=related_name, virtual=virtual, primary_key=False, index=False, pydantic_only=False, default=None, server_default=None, owner=owner, self_reference=self_reference, is_relation=True, is_multi=True, orders_by=orders_by, related_orders_by=related_orders_by, 
skip_reverse=skip_reverse, skip_field=skip_field, through_relation_name=through_relation_name, through_reverse_relation_name=through_reverse_relation_name, ) Field = type("ManyToMany", (ManyToManyField, BaseField), {}) return Field(**namespace) class ManyToManyField(ForeignKeyField, ormar.QuerySetProtocol, ormar.RelationProtocol): """ Actual class returned from ManyToMany function call and stored in model_fields. """ def __init__(self, **kwargs: Any) -> None: if TYPE_CHECKING: # pragma: no cover self.__type__: type self.to: Type["Model"] self.through: Type["Model"] super().__init__(**kwargs) def get_source_related_name(self) -> str: """ Returns name to use for source relation name. For FK it's the same, differs for m2m fields. It's either set as `related_name` or by default it's field name. :return: name of the related_name or default related name. :rtype: str """ return ( self.through.Meta.model_fields[ self.default_source_field_name() ].related_name or self.name ) def has_unresolved_forward_refs(self) -> bool: """ Verifies if the filed has any ForwardRefs that require updating before the model can be used. 
:return: result of the check :rtype: bool """ return self.to.__class__ == ForwardRef or self.through.__class__ == ForwardRef def evaluate_forward_ref(self, globalns: Any, localns: Any) -> None: """ Evaluates the ForwardRef to actual Field based on global and local namespaces :param globalns: global namespace :type globalns: Any :param localns: local namespace :type localns: Any :return: None :rtype: None """ if self.to.__class__ == ForwardRef: self.to = evaluate_forwardref( self.to, globalns, localns or None # type: ignore ) (self.__type__, self.column_type) = populate_m2m_params_based_on_to_model( to=self.to, nullable=self.nullable ) if self.through.__class__ == ForwardRef: self.through = evaluate_forwardref( self.through, globalns, localns or None # type: ignore ) forbid_through_relations(self.through) def get_relation_name(self) -> str: """ Returns name of the relation, which can be a own name or through model names for m2m models :return: result of the check :rtype: bool """ if self.self_reference and self.name == self.self_reference_primary: return self.default_source_field_name() return self.default_target_field_name() def get_source_model(self) -> Type["Model"]: """ Returns model from which the relation comes -> either owner or through model :return: source model :rtype: Type["Model"] """ return self.through def create_default_through_model(self) -> None: """ Creates default empty through model if no additional fields are required. 
""" owner_name = self.owner.get_name(lower=False) to_name = self.to.get_name(lower=False) class_name = f"{owner_name}{to_name}" table_name = f"{owner_name.lower()}s_{to_name.lower()}s" new_meta_namespace = { "tablename": table_name, "database": self.owner.Meta.database, "metadata": self.owner.Meta.metadata, } new_meta = type("Meta", (), new_meta_namespace) through_model = type( class_name, (ormar.Model,), {"Meta": new_meta, "id": ormar.Integer(name="id", primary_key=True)}, ) self.through = cast(Type["Model"], through_model) ormar-0.12.2/ormar/fields/model_fields.py000066400000000000000000000627111444363446500203260ustar00rootroot00000000000000import datetime import decimal import uuid from enum import Enum as E, EnumMeta from typing import Any, Optional, Set, TYPE_CHECKING, Type, TypeVar, Union, overload import pydantic import sqlalchemy import ormar # noqa I101 from ormar import ModelDefinitionError # noqa I101 from ormar.fields import sqlalchemy_uuid from ormar.fields.base import BaseField # noqa I101 from ormar.fields.sqlalchemy_encrypted import EncryptBackends try: from typing import Literal # type: ignore except ImportError: # pragma: no cover from typing_extensions import Literal # type: ignore def is_field_nullable( nullable: Optional[bool], default: Any, server_default: Any, pydantic_only: Optional[bool], ) -> bool: """ Checks if the given field should be nullable/ optional based on parameters given. 
:param nullable: flag explicit setting a column as nullable :type nullable: Optional[bool] :param default: value or function to be called as default in python :type default: Any :param server_default: function to be called as default by sql server :type server_default: Any :param pydantic_only: flag if fields should not be included in the sql table :type pydantic_only: Optional[bool] :return: result of the check :rtype: bool """ if nullable is None: return ( default is not None or server_default is not None or (pydantic_only is not None and pydantic_only) ) return nullable def is_auto_primary_key(primary_key: bool, autoincrement: bool) -> bool: """ Checks if field is an autoincrement pk -> if yes it's optional. :param primary_key: flag if field is a pk field :type primary_key: bool :param autoincrement: flag if field should be autoincrement :type autoincrement: bool :return: result of the check :rtype: bool """ return primary_key and autoincrement def convert_choices_if_needed( field_type: "Type", choices: Set, nullable: bool, scale: int = None, represent_as_str: bool = False, ) -> Set: """ Converts dates to isoformat as fastapi can check this condition in routes and the fields are not yet parsed. Converts enums to list of it's values. Converts uuids to strings. Converts decimal to float with given scale. 
:param field_type: type o the field :type field_type: Type :param choices: set of choices :type choices: Set :param scale: scale for decimals :type scale: int :param nullable: flag if field_nullable :type nullable: bool :param represent_as_str: flag for bytes fields :type represent_as_str: bool :param scale: scale for decimals :type scale: int :return: value, choices list :rtype: Tuple[Any, Set] """ choices = {o.value if isinstance(o, E) else o for o in choices} encoder = ormar.ENCODERS_MAP.get(field_type, lambda x: x) if field_type == decimal.Decimal: precision = scale choices = {encoder(o, precision) for o in choices} elif field_type == bytes: choices = {encoder(o, represent_as_str) for o in choices} elif encoder: choices = {encoder(o) for o in choices} if nullable: choices.add(None) return choices class ModelFieldFactory: """ Default field factory that construct Field classes and populated their values. """ _bases: Any = (BaseField,) _type: Any = None _sample: Any = None def __new__(cls, *args: Any, **kwargs: Any) -> BaseField: # type: ignore cls.validate(**kwargs) default = kwargs.pop("default", None) server_default = kwargs.pop("server_default", None) nullable = kwargs.pop("nullable", None) sql_nullable = kwargs.pop("sql_nullable", None) pydantic_only = kwargs.pop("pydantic_only", False) primary_key = kwargs.pop("primary_key", False) autoincrement = kwargs.pop("autoincrement", False) encrypt_secret = kwargs.pop("encrypt_secret", None) encrypt_backend = kwargs.pop("encrypt_backend", EncryptBackends.NONE) encrypt_custom_backend = kwargs.pop("encrypt_custom_backend", None) overwrite_pydantic_type = kwargs.pop("overwrite_pydantic_type", None) nullable = is_field_nullable( nullable, default, server_default, pydantic_only ) or is_auto_primary_key(primary_key, autoincrement) sql_nullable = ( False if primary_key else (nullable if sql_nullable is None else sql_nullable) ) choices = set(kwargs.pop("choices", [])) if choices: choices = convert_choices_if_needed( 
field_type=cls._type, choices=choices, nullable=nullable, scale=kwargs.get("scale", None), represent_as_str=kwargs.get("represent_as_base64_str", False), ) enum_class = kwargs.pop("enum_class", None) field_type = cls._type if enum_class is None else enum_class namespace = dict( __type__=field_type, __pydantic_type__=overwrite_pydantic_type if overwrite_pydantic_type is not None else field_type, __sample__=cls._sample, alias=kwargs.pop("name", None), name=None, primary_key=primary_key, default=default, server_default=server_default, nullable=nullable, sql_nullable=sql_nullable, index=kwargs.pop("index", False), unique=kwargs.pop("unique", False), pydantic_only=pydantic_only, autoincrement=autoincrement, column_type=cls.get_column_type( **kwargs, sql_nullable=sql_nullable, enum_class=enum_class ), choices=choices, encrypt_secret=encrypt_secret, encrypt_backend=encrypt_backend, encrypt_custom_backend=encrypt_custom_backend, **kwargs ) Field = type(cls.__name__, cls._bases, {}) return Field(**namespace) @classmethod def get_column_type(cls, **kwargs: Any) -> Any: # pragma no cover """ Return proper type of db column for given field type. Accepts required and optional parameters that each column type accepts. :param kwargs: key, value pairs of sqlalchemy options :type kwargs: Any :return: initialized column with proper options :rtype: sqlalchemy Column """ return None @classmethod def validate(cls, **kwargs: Any) -> None: # pragma no cover """ Used to validate if all required parameters on a given field type are set. :param kwargs: all params passed during construction :type kwargs: Any """ class String(ModelFieldFactory, str): """ String field factory that construct Field classes and populated their values. 
""" _type = str _sample = "string" def __new__( # type: ignore # noqa CFQ002 cls, *, max_length: int, min_length: int = None, regex: str = None, **kwargs: Any ) -> BaseField: # type: ignore kwargs = { **kwargs, **{ k: v for k, v in locals().items() if k not in ["cls", "__class__", "kwargs"] }, } return super().__new__(cls, **kwargs) @classmethod def get_column_type(cls, **kwargs: Any) -> Any: """ Return proper type of db column for given field type. Accepts required and optional parameters that each column type accepts. :param kwargs: key, value pairs of sqlalchemy options :type kwargs: Any :return: initialized column with proper options :rtype: sqlalchemy Column """ return sqlalchemy.String(length=kwargs.get("max_length")) @classmethod def validate(cls, **kwargs: Any) -> None: """ Used to validate if all required parameters on a given field type are set. :param kwargs: all params passed during construction :type kwargs: Any """ max_length = kwargs.get("max_length", None) if max_length <= 0: raise ModelDefinitionError( "Parameter max_length is required for field String" ) class Integer(ModelFieldFactory, int): """ Integer field factory that construct Field classes and populated their values. """ _type = int _sample = 0 def __new__( # type: ignore cls, *, minimum: int = None, maximum: int = None, multiple_of: int = None, **kwargs: Any ) -> BaseField: autoincrement = kwargs.pop("autoincrement", None) autoincrement = ( autoincrement if autoincrement is not None else kwargs.get("primary_key", False) ) kwargs = { **kwargs, **{ k: v for k, v in locals().items() if k not in ["cls", "__class__", "kwargs"] }, } kwargs["ge"] = kwargs["minimum"] kwargs["le"] = kwargs["maximum"] return super().__new__(cls, **kwargs) @classmethod def get_column_type(cls, **kwargs: Any) -> Any: """ Return proper type of db column for given field type. Accepts required and optional parameters that each column type accepts. 
:param kwargs: key, value pairs of sqlalchemy options :type kwargs: Any :return: initialized column with proper options :rtype: sqlalchemy Column """ return sqlalchemy.Integer() class Text(ModelFieldFactory, str): """ Text field factory that construct Field classes and populated their values. """ _type = str _sample = "text" def __new__(cls, **kwargs: Any) -> BaseField: # type: ignore kwargs = { **kwargs, **{ k: v for k, v in locals().items() if k not in ["cls", "__class__", "kwargs"] }, } return super().__new__(cls, **kwargs) @classmethod def get_column_type(cls, **kwargs: Any) -> Any: """ Return proper type of db column for given field type. Accepts required and optional parameters that each column type accepts. :param kwargs: key, value pairs of sqlalchemy options :type kwargs: Any :return: initialized column with proper options :rtype: sqlalchemy Column """ return sqlalchemy.Text() class Float(ModelFieldFactory, float): """ Float field factory that construct Field classes and populated their values. """ _type = float _sample = 0.0 def __new__( # type: ignore cls, *, minimum: float = None, maximum: float = None, multiple_of: int = None, **kwargs: Any ) -> BaseField: kwargs = { **kwargs, **{ k: v for k, v in locals().items() if k not in ["cls", "__class__", "kwargs"] }, } kwargs["ge"] = kwargs["minimum"] kwargs["le"] = kwargs["maximum"] return super().__new__(cls, **kwargs) @classmethod def get_column_type(cls, **kwargs: Any) -> Any: """ Return proper type of db column for given field type. Accepts required and optional parameters that each column type accepts. :param kwargs: key, value pairs of sqlalchemy options :type kwargs: Any :return: initialized column with proper options :rtype: sqlalchemy Column """ return sqlalchemy.Float() if TYPE_CHECKING: # pragma: nocover def Boolean(**kwargs: Any) -> bool: pass else: class Boolean(ModelFieldFactory, int): """ Boolean field factory that construct Field classes and populated their values. 
""" _type = bool _sample = True @classmethod def get_column_type(cls, **kwargs: Any) -> Any: """ Return proper type of db column for given field type. Accepts required and optional parameters that each column type accepts. :param kwargs: key, value pairs of sqlalchemy options :type kwargs: Any :return: initialized column with proper options :rtype: sqlalchemy Column """ return sqlalchemy.Boolean() class DateTime(ModelFieldFactory, datetime.datetime): """ DateTime field factory that construct Field classes and populated their values. """ _type = datetime.datetime _sample = "datetime" def __new__( # type: ignore # noqa CFQ002 cls, *, timezone: bool = False, **kwargs: Any ) -> BaseField: # type: ignore kwargs = { **kwargs, **{ k: v for k, v in locals().items() if k not in ["cls", "__class__", "kwargs"] }, } return super().__new__(cls, **kwargs) @classmethod def get_column_type(cls, **kwargs: Any) -> Any: """ Return proper type of db column for given field type. Accepts required and optional parameters that each column type accepts. :param kwargs: key, value pairs of sqlalchemy options :type kwargs: Any :return: initialized column with proper options :rtype: sqlalchemy Column """ return sqlalchemy.DateTime(timezone=kwargs.get("timezone", False)) class Date(ModelFieldFactory, datetime.date): """ Date field factory that construct Field classes and populated their values. """ _type = datetime.date _sample = "date" @classmethod def get_column_type(cls, **kwargs: Any) -> Any: """ Return proper type of db column for given field type. Accepts required and optional parameters that each column type accepts. :param kwargs: key, value pairs of sqlalchemy options :type kwargs: Any :return: initialized column with proper options :rtype: sqlalchemy Column """ return sqlalchemy.Date() class Time(ModelFieldFactory, datetime.time): """ Time field factory that construct Field classes and populated their values. 
""" _type = datetime.time _sample = "time" def __new__( # type: ignore # noqa CFQ002 cls, *, timezone: bool = False, **kwargs: Any ) -> BaseField: # type: ignore kwargs = { **kwargs, **{ k: v for k, v in locals().items() if k not in ["cls", "__class__", "kwargs"] }, } return super().__new__(cls, **kwargs) @classmethod def get_column_type(cls, **kwargs: Any) -> Any: """ Return proper type of db column for given field type. Accepts required and optional parameters that each column type accepts. :param kwargs: key, value pairs of sqlalchemy options :type kwargs: Any :return: initialized column with proper options :rtype: sqlalchemy Column """ return sqlalchemy.Time(timezone=kwargs.get("timezone", False)) class JSON(ModelFieldFactory, pydantic.Json): """ JSON field factory that construct Field classes and populated their values. """ _type = pydantic.Json _sample = '{"json": "json"}' @classmethod def get_column_type(cls, **kwargs: Any) -> Any: """ Return proper type of db column for given field type. Accepts required and optional parameters that each column type accepts. :param kwargs: key, value pairs of sqlalchemy options :type kwargs: Any :return: initialized column with proper options :rtype: sqlalchemy Column """ return sqlalchemy.JSON(none_as_null=kwargs.get("sql_nullable", False)) if TYPE_CHECKING: # pragma: nocover # noqa: C901 @overload def LargeBinary( # type: ignore max_length: int, *, represent_as_base64_str: Literal[True], **kwargs: Any ) -> str: ... @overload def LargeBinary( # type: ignore max_length: int, *, represent_as_base64_str: Literal[False], **kwargs: Any ) -> bytes: ... @overload def LargeBinary( max_length: int, represent_as_base64_str: Literal[False] = ..., **kwargs: Any ) -> bytes: ... def LargeBinary( max_length: int, represent_as_base64_str: bool = False, **kwargs: Any ) -> Union[str, bytes]: pass else: class LargeBinary(ModelFieldFactory, bytes): """ LargeBinary field factory that construct Field classes and populated their values. 
""" _type = bytes _sample = "bytes" def __new__( # type: ignore # noqa CFQ002 cls, *, max_length: int, represent_as_base64_str: bool = False, **kwargs: Any ) -> BaseField: # type: ignore kwargs = { **kwargs, **{ k: v for k, v in locals().items() if k not in ["cls", "__class__", "kwargs"] }, } return super().__new__(cls, **kwargs) @classmethod def get_column_type(cls, **kwargs: Any) -> Any: """ Return proper type of db column for given field type. Accepts required and optional parameters that each column type accepts. :param kwargs: key, value pairs of sqlalchemy options :type kwargs: Any :return: initialized column with proper options :rtype: sqlalchemy Column """ return sqlalchemy.LargeBinary(length=kwargs.get("max_length")) @classmethod def validate(cls, **kwargs: Any) -> None: """ Used to validate if all required parameters on a given field type are set. :param kwargs: all params passed during construction :type kwargs: Any """ max_length = kwargs.get("max_length", None) if max_length <= 0: raise ModelDefinitionError( "Parameter max_length is required for field LargeBinary" ) class BigInteger(Integer, int): """ BigInteger field factory that construct Field classes and populated their values. """ _type = int _sample = 0 def __new__( # type: ignore cls, *, minimum: int = None, maximum: int = None, multiple_of: int = None, **kwargs: Any ) -> BaseField: autoincrement = kwargs.pop("autoincrement", None) autoincrement = ( autoincrement if autoincrement is not None else kwargs.get("primary_key", False) ) kwargs = { **kwargs, **{ k: v for k, v in locals().items() if k not in ["cls", "__class__", "kwargs"] }, } kwargs["ge"] = kwargs["minimum"] kwargs["le"] = kwargs["maximum"] return super().__new__(cls, **kwargs) @classmethod def get_column_type(cls, **kwargs: Any) -> Any: """ Return proper type of db column for given field type. Accepts required and optional parameters that each column type accepts. 
:param kwargs: key, value pairs of sqlalchemy options :type kwargs: Any :return: initialized column with proper options :rtype: sqlalchemy Column """ return sqlalchemy.BigInteger() class SmallInteger(Integer, int): """ SmallInteger field factory that construct Field classes and populated their values. """ _type = int _sample = 0 def __new__( # type: ignore cls, *, minimum: int = None, maximum: int = None, multiple_of: int = None, **kwargs: Any ) -> BaseField: autoincrement = kwargs.pop("autoincrement", None) autoincrement = ( autoincrement if autoincrement is not None else kwargs.get("primary_key", False) ) kwargs = { **kwargs, **{ k: v for k, v in locals().items() if k not in ["cls", "__class__", "kwargs"] }, } kwargs["ge"] = kwargs["minimum"] kwargs["le"] = kwargs["maximum"] return super().__new__(cls, **kwargs) @classmethod def get_column_type(cls, **kwargs: Any) -> Any: """ Return proper type of db column for given field type. Accepts required and optional parameters that each column type accepts. :param kwargs: key, value pairs of sqlalchemy options :type kwargs: Any :return: initialized column with proper options :rtype: sqlalchemy Column """ return sqlalchemy.SmallInteger() class Decimal(ModelFieldFactory, decimal.Decimal): """ Decimal field factory that construct Field classes and populated their values. 
""" _type = decimal.Decimal _sample = 0.0 def __new__( # type: ignore # noqa CFQ002 cls, *, minimum: float = None, maximum: float = None, multiple_of: int = None, precision: int = None, scale: int = None, max_digits: int = None, decimal_places: int = None, **kwargs: Any ) -> BaseField: kwargs = { **kwargs, **{ k: v for k, v in locals().items() if k not in ["cls", "__class__", "kwargs"] }, } kwargs["ge"] = kwargs["minimum"] kwargs["le"] = kwargs["maximum"] if kwargs.get("max_digits"): kwargs["precision"] = kwargs["max_digits"] elif kwargs.get("precision"): kwargs["max_digits"] = kwargs["precision"] if kwargs.get("decimal_places"): kwargs["scale"] = kwargs["decimal_places"] elif kwargs.get("scale"): kwargs["decimal_places"] = kwargs["scale"] return super().__new__(cls, **kwargs) @classmethod def get_column_type(cls, **kwargs: Any) -> Any: """ Return proper type of db column for given field type. Accepts required and optional parameters that each column type accepts. :param kwargs: key, value pairs of sqlalchemy options :type kwargs: Any :return: initialized column with proper options :rtype: sqlalchemy Column """ precision = kwargs.get("precision") scale = kwargs.get("scale") return sqlalchemy.DECIMAL(precision=precision, scale=scale) @classmethod def validate(cls, **kwargs: Any) -> None: """ Used to validate if all required parameters on a given field type are set. :param kwargs: all params passed during construction :type kwargs: Any """ precision = kwargs.get("precision") scale = kwargs.get("scale") if precision is None or precision < 0 or scale is None or scale < 0: raise ModelDefinitionError( "Parameters scale and precision are required for field Decimal" ) class UUID(ModelFieldFactory, uuid.UUID): """ UUID field factory that construct Field classes and populated their values. 
""" _type = uuid.UUID _sample = "uuid" def __new__( # type: ignore # noqa CFQ002 cls, *, uuid_format: str = "hex", **kwargs: Any ) -> BaseField: kwargs = { **kwargs, **{ k: v for k, v in locals().items() if k not in ["cls", "__class__", "kwargs"] }, } return super().__new__(cls, **kwargs) @classmethod def get_column_type(cls, **kwargs: Any) -> Any: """ Return proper type of db column for given field type. Accepts required and optional parameters that each column type accepts. :param kwargs: key, value pairs of sqlalchemy options :type kwargs: Any :return: initialized column with proper options :rtype: sqlalchemy Column """ uuid_format = kwargs.get("uuid_format", "hex") return sqlalchemy_uuid.UUID(uuid_format=uuid_format) if TYPE_CHECKING: # pragma: nocover T = TypeVar("T", bound=E) def Enum(enum_class: Type[T], **kwargs: Any) -> T: pass else: class Enum(ModelFieldFactory): """ Enum field factory that construct Field classes and populated their values. """ _type = E _sample = None def __new__( # type: ignore # noqa CFQ002 cls, *, enum_class: Type[E], **kwargs: Any ) -> BaseField: kwargs = { **kwargs, **{ k: v for k, v in locals().items() if k not in ["cls", "__class__", "kwargs"] }, } return super().__new__(cls, **kwargs) @classmethod def validate(cls, **kwargs: Any) -> None: enum_class = kwargs.get("enum_class") if enum_class is None or not isinstance(enum_class, EnumMeta): raise ModelDefinitionError("Enum Field choices must be EnumType") @classmethod def get_column_type(cls, **kwargs: Any) -> Any: enum_cls = kwargs.get("enum_class") return sqlalchemy.Enum(enum_cls) ormar-0.12.2/ormar/fields/parsers.py000066400000000000000000000044061444363446500173540ustar00rootroot00000000000000import base64 import datetime import decimal import uuid from typing import Any, Callable, Dict, Optional, Union import pydantic from pydantic.datetime_parse import parse_date, parse_datetime, parse_time try: import orjson as json except ImportError: # pragma: no cover import json # type: 
ignore def parse_bool(value: str) -> bool: return value == "true" def encode_bool(value: bool) -> str: return "true" if value else "false" def encode_decimal(value: decimal.Decimal, precision: int = None) -> float: if precision: return ( round(float(value), precision) if isinstance(value, decimal.Decimal) else value ) return float(value) def encode_bytes(value: Union[str, bytes], represent_as_string: bool = False) -> bytes: if represent_as_string: return value if isinstance(value, bytes) else base64.b64decode(value) return value if isinstance(value, bytes) else value.encode("utf-8") def encode_json(value: Any) -> Optional[str]: if isinstance(value, (datetime.date, datetime.datetime, datetime.time)): value = value.isoformat() value = json.dumps(value) if not isinstance(value, str) else re_dump_value(value) value = value.decode("utf-8") if isinstance(value, bytes) else value return value def re_dump_value(value: str) -> Union[str, bytes]: """ Rw-dumps choices due to different string representation in orjson and json :param value: string to re-dump :type value: str :return: re-dumped choices :rtype: List[str] """ try: result: Union[str, bytes] = json.dumps(json.loads(value)) except json.JSONDecodeError: result = value return result ENCODERS_MAP: Dict[type, Callable] = { datetime.datetime: lambda x: x.isoformat(), datetime.date: lambda x: x.isoformat(), datetime.time: lambda x: x.isoformat(), pydantic.Json: encode_json, decimal.Decimal: encode_decimal, uuid.UUID: str, bytes: encode_bytes, } SQL_ENCODERS_MAP: Dict[type, Callable] = {bool: encode_bool, **ENCODERS_MAP} DECODERS_MAP = { bool: parse_bool, datetime.datetime: parse_datetime, datetime.date: parse_date, datetime.time: parse_time, pydantic.Json: json.loads, decimal.Decimal: decimal.Decimal, } ormar-0.12.2/ormar/fields/referential_actions.py000066400000000000000000000012351444363446500217120ustar00rootroot00000000000000""" Gathers all referential actions by ormar. 
""" from enum import Enum class ReferentialAction(Enum): """ Because the database management system(DBMS) enforces referential constraints, it must ensure data integrity if rows in a referenced table are to be deleted (or updated). If dependent rows in referencing tables still exist, those references have to be considered. SQL specifies 5 different referential actions that shall take place in such occurrences. """ CASCADE: str = "CASCADE" RESTRICT: str = "RESTRICT" SET_NULL: str = "SET NULL" SET_DEFAULT: str = "SET DEFAULT" DO_NOTHING: str = "NO ACTION" ormar-0.12.2/ormar/fields/sqlalchemy_encrypted.py000066400000000000000000000135601444363446500221150ustar00rootroot00000000000000# inspired by sqlalchemy-utils (https://github.com/kvesteri/sqlalchemy-utils) import abc import base64 from enum import Enum from typing import Any, Callable, Optional, TYPE_CHECKING, Type, Union import sqlalchemy.types as types from pydantic.utils import lenient_issubclass from sqlalchemy.engine import Dialect import ormar # noqa: I100, I202 from ormar import ModelDefinitionError # noqa: I202, I100 cryptography = None try: # pragma: nocover import cryptography # type: ignore from cryptography.fernet import Fernet from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes except ImportError: # pragma: nocover pass if TYPE_CHECKING: # pragma: nocover from ormar import BaseField class EncryptBackend(abc.ABC): def _refresh(self, key: Union[str, bytes]) -> None: if isinstance(key, str): key = key.encode() digest = hashes.Hash(hashes.SHA256(), backend=default_backend()) digest.update(key) engine_key = digest.finalize() self._initialize_backend(engine_key) @abc.abstractmethod def _initialize_backend(self, secret_key: bytes) -> None: # pragma: nocover pass @abc.abstractmethod def encrypt(self, value: Any) -> str: # pragma: nocover pass @abc.abstractmethod def decrypt(self, value: Any) -> str: # pragma: nocover pass class HashBackend(EncryptBackend): 
""" One-way hashing - in example for passwords, no way to decrypt the value! """ def _initialize_backend(self, secret_key: bytes) -> None: self.secret_key = base64.urlsafe_b64encode(secret_key) def encrypt(self, value: Any) -> str: if not isinstance(value, str): # pragma: nocover value = repr(value) value = value.encode() digest = hashes.Hash(hashes.SHA512(), backend=default_backend()) digest.update(self.secret_key) digest.update(value) hashed_value = digest.finalize() return hashed_value.hex() def decrypt(self, value: Any) -> str: if not isinstance(value, str): # pragma: nocover value = str(value) return value class FernetBackend(EncryptBackend): """ Two-way encryption, data stored in db are encrypted but decrypted during query. """ def _initialize_backend(self, secret_key: bytes) -> None: self.secret_key = base64.urlsafe_b64encode(secret_key) self.fernet = Fernet(self.secret_key) def encrypt(self, value: Any) -> str: if not isinstance(value, str): value = repr(value) value = value.encode() encrypted = self.fernet.encrypt(value) return encrypted.decode("utf-8") def decrypt(self, value: Any) -> str: if not isinstance(value, str): # pragma: nocover value = str(value) decrypted: Union[str, bytes] = self.fernet.decrypt(value.encode()) if not isinstance(decrypted, str): decrypted = decrypted.decode("utf-8") return decrypted class EncryptBackends(Enum): NONE = 0 FERNET = 1 HASH = 2 CUSTOM = 3 BACKENDS_MAP = { EncryptBackends.FERNET: FernetBackend, EncryptBackends.HASH: HashBackend, } class EncryptedString(types.TypeDecorator): """ Used to store encrypted values in a database """ impl = types.TypeEngine def __init__( self, encrypt_secret: Union[str, Callable], encrypt_backend: EncryptBackends = EncryptBackends.FERNET, encrypt_custom_backend: Type[EncryptBackend] = None, **kwargs: Any, ) -> None: _field_type = kwargs.pop("_field_type") super().__init__() if not cryptography: # pragma: nocover raise ModelDefinitionError( "In order to encrypt a column 'cryptography' is 
required!" ) backend = BACKENDS_MAP.get(encrypt_backend, encrypt_custom_backend) if not backend or not lenient_issubclass(backend, EncryptBackend): raise ModelDefinitionError("Wrong or no encrypt backend provided!") self.backend: EncryptBackend = backend() self._field_type: "BaseField" = _field_type self._underlying_type: Any = _field_type.column_type self._key: Union[str, Callable] = encrypt_secret type_ = self._field_type.__type__ if type_ is None: # pragma: nocover raise ModelDefinitionError( f"Improperly configured field " f"{self._field_type.name}" ) self.type_: Any = type_ def __repr__(self) -> str: # pragma: nocover return "TEXT()" def load_dialect_impl(self, dialect: Dialect) -> Any: return dialect.type_descriptor(types.TEXT()) def _refresh(self) -> None: key = self._key() if callable(self._key) else self._key self.backend._refresh(key) def process_bind_param(self, value: Any, dialect: Dialect) -> Optional[str]: if value is None: return value self._refresh() try: value = self._underlying_type.process_bind_param(value, dialect) except AttributeError: encoder = ormar.SQL_ENCODERS_MAP.get(self.type_, None) if encoder: value = encoder(value) # type: ignore encrypted_value = self.backend.encrypt(value) return encrypted_value def process_result_value(self, value: Any, dialect: Dialect) -> Any: if value is None: return value self._refresh() decrypted_value = self.backend.decrypt(value) try: return self._underlying_type.process_result_value(decrypted_value, dialect) except AttributeError: decoder = ormar.DECODERS_MAP.get(self.type_, None) if decoder: return decoder(decrypted_value) # type: ignore return self._field_type.__type__(decrypted_value) # type: ignore ormar-0.12.2/ormar/fields/sqlalchemy_uuid.py000066400000000000000000000026621444363446500210670ustar00rootroot00000000000000import uuid from typing import Any, Optional from sqlalchemy import CHAR from sqlalchemy.engine import Dialect from sqlalchemy.types import TypeDecorator class UUID(TypeDecorator): """ 
Platform-independent GUID type. Uses CHAR(36) if in a string mode, otherwise uses CHAR(32), to store UUID. For details for different methods check documentation of parent class. """ impl = CHAR def __init__(self, *args: Any, uuid_format: str = "hex", **kwargs: Any) -> None: super().__init__(*args, **kwargs) self.uuid_format = uuid_format def __repr__(self) -> str: # pragma: nocover if self.uuid_format == "string": return "CHAR(36)" return "CHAR(32)" def load_dialect_impl(self, dialect: Dialect) -> Any: return ( dialect.type_descriptor(CHAR(36)) if self.uuid_format == "string" else dialect.type_descriptor(CHAR(32)) ) def process_bind_param(self, value: uuid.UUID, dialect: Dialect) -> Optional[str]: if value is None: return value return str(value) if self.uuid_format == "string" else "%.32x" % value.int def process_result_value( self, value: Optional[str], dialect: Dialect ) -> Optional[uuid.UUID]: if value is None: return value if not isinstance(value, uuid.UUID): return uuid.UUID(value) return value # pragma: nocover ormar-0.12.2/ormar/fields/through_field.py000066400000000000000000000037741444363446500205270ustar00rootroot00000000000000import sys from typing import Any, TYPE_CHECKING, Type, Union from ormar.fields.base import BaseField from ormar.fields.foreign_key import ForeignKeyField if TYPE_CHECKING: # pragma no cover from ormar import Model from pydantic.typing import ForwardRef if sys.version_info < (3, 7): ToType = Type[Model] else: ToType = Union[Type[Model], ForwardRef] def Through( # noqa CFQ002 to: "ToType", *, name: str = None, related_name: str = None, **kwargs: Any ) -> Any: """ Despite a name it's a function that returns constructed ThroughField. It's a special field populated only for m2m relations. Accepts number of relation setting parameters as well as all BaseField ones. 
:param to: target related ormar Model :type to: Model class :param name: name of the database field - later called alias :type name: str :param related_name: name of reversed FK relation populated for you on to model :type related_name: str It is for reversed FK and auto generated FK on through model in Many2Many relations. :param kwargs: all other args to be populated by BaseField :type kwargs: Any :return: ormar ForeignKeyField with relation to selected model :rtype: ForeignKeyField """ nullable = kwargs.pop("nullable", False) owner = kwargs.pop("owner", None) namespace = dict( __type__=to, to=to, through=None, alias=name, name=kwargs.pop("real_name", None), related_name=related_name, virtual=True, owner=owner, nullable=nullable, unique=False, column_type=None, primary_key=False, index=False, pydantic_only=False, default=None, server_default=None, is_relation=True, is_through=True, ) Field = type("Through", (ThroughField, BaseField), {}) return Field(**namespace) class ThroughField(ForeignKeyField): """ Field class used to access ManyToMany model through model. """ ormar-0.12.2/ormar/models/000077500000000000000000000000001444363446500153345ustar00rootroot00000000000000ormar-0.12.2/ormar/models/__init__.py000066400000000000000000000011561444363446500174500ustar00rootroot00000000000000""" Definition of Model, it's parents NewBaseModel and mixins used by models. Also defines a Metaclass that handles all constructions and relations registration, ass well as vast number of helper functions for pydantic, sqlalchemy and relations. 
""" from ormar.models.newbasemodel import NewBaseModel # noqa I100 from ormar.models.model_row import ModelRow # noqa I100 from ormar.models.model import Model, T # noqa I100 from ormar.models.excludable import ExcludableItems # noqa I100 from ormar.models.utils import Extra # noqa I100 __all__ = ["NewBaseModel", "Model", "ModelRow", "ExcludableItems", "T", "Extra"] ormar-0.12.2/ormar/models/descriptors/000077500000000000000000000000001444363446500176755ustar00rootroot00000000000000ormar-0.12.2/ormar/models/descriptors/__init__.py000066400000000000000000000005261444363446500220110ustar00rootroot00000000000000from ormar.models.descriptors.descriptors import ( BytesDescriptor, JsonDescriptor, PkDescriptor, PropertyDescriptor, PydanticDescriptor, RelationDescriptor, ) __all__ = [ "PydanticDescriptor", "RelationDescriptor", "PropertyDescriptor", "PkDescriptor", "JsonDescriptor", "BytesDescriptor", ] ormar-0.12.2/ormar/models/descriptors/descriptors.py000066400000000000000000000110211444363446500226030ustar00rootroot00000000000000import base64 from typing import Any, TYPE_CHECKING, Type from ormar.fields.parsers import encode_json if TYPE_CHECKING: # pragma: no cover from ormar import Model class PydanticDescriptor: """ Pydantic descriptor simply delegates everything to pydantic model """ def __init__(self, name: str) -> None: self.name = name def __get__(self, instance: "Model", owner: Type["Model"]) -> Any: value = instance.__dict__.get(self.name, None) return value def __set__(self, instance: "Model", value: Any) -> None: instance._internal_set(self.name, value) instance.set_save_status(False) class JsonDescriptor: """ Json descriptor dumps/loads strings to actual data on write/read """ def __init__(self, name: str) -> None: self.name = name def __get__(self, instance: "Model", owner: Type["Model"]) -> Any: value = instance.__dict__.get(self.name, None) return value def __set__(self, instance: "Model", value: Any) -> None: value = encode_json(value) 
instance._internal_set(self.name, value) instance.set_save_status(False) class BytesDescriptor: """ Bytes descriptor converts strings to bytes on write and converts bytes to str if represent_as_base64_str flag is set, so the value can be dumped to json """ def __init__(self, name: str) -> None: self.name = name def __get__(self, instance: "Model", owner: Type["Model"]) -> Any: value = instance.__dict__.get(self.name, None) field = instance.Meta.model_fields[self.name] if ( value is not None and field.represent_as_base64_str and not isinstance(value, str) ): value = base64.b64encode(value).decode() return value def __set__(self, instance: "Model", value: Any) -> None: field = instance.Meta.model_fields[self.name] if isinstance(value, str): if field.represent_as_base64_str: value = base64.b64decode(value) else: value = value.encode("utf-8") instance._internal_set(self.name, value) instance.set_save_status(False) class PkDescriptor: """ As of now it's basically a copy of PydanticDescriptor but that will change in the future with multi column primary keys """ def __init__(self, name: str) -> None: self.name = name def __get__(self, instance: "Model", owner: Type["Model"]) -> Any: value = instance.__dict__.get(self.name, None) return value def __set__(self, instance: "Model", value: Any) -> None: instance._internal_set(self.name, value) instance.set_save_status(False) class RelationDescriptor: """ Relation descriptor expands the relation to initialize the related model before setting it to __dict__. Note that expanding also registers the related model in RelationManager. 
""" def __init__(self, name: str) -> None: self.name = name def __get__(self, instance: "Model", owner: Type["Model"]) -> Any: if self.name in instance._orm: return instance._orm.get(self.name) # type: ignore return None # pragma no cover def __set__(self, instance: "Model", value: Any) -> None: model = instance.Meta.model_fields[self.name].expand_relationship( value=value, child=instance ) if isinstance(instance.__dict__.get(self.name), list): # virtual foreign key or many to many # TODO: Fix double items in dict, no effect on real action just ugly repr instance.__dict__[self.name].append(model) else: # foreign key relation instance.__dict__[self.name] = model instance.set_save_status(False) class PropertyDescriptor: """ Property descriptor handles methods decorated with @property_field decorator. They are read only. """ def __init__(self, name: str, function: Any) -> None: self.name = name self.function = function def __get__(self, instance: "Model", owner: Type["Model"]) -> Any: if instance is None: return self if instance is not None and self.function is not None: bound = self.function.__get__(instance, instance.__class__) return bound() if callable(bound) else bound def __set__(self, instance: "Model", value: Any) -> None: # pragma: no cover # kept here so it's a data-descriptor and precedes __dict__ lookup pass ormar-0.12.2/ormar/models/excludable.py000066400000000000000000000233111444363446500200160ustar00rootroot00000000000000from dataclasses import dataclass, field from typing import Dict, List, Set, TYPE_CHECKING, Tuple, Type, Union from ormar.queryset.utils import get_relationship_alias_model_and_str if TYPE_CHECKING: # pragma: no cover from ormar import Model @dataclass class Excludable: """ Class that keeps sets of fields to exclude and include """ include: Set = field(default_factory=set) exclude: Set = field(default_factory=set) def get_copy(self) -> "Excludable": """ Return copy of self to avoid in place modifications :return: copy of self with 
copied sets :rtype: ormar.models.excludable.Excludable """ _copy = self.__class__() _copy.include = {x for x in self.include} _copy.exclude = {x for x in self.exclude} return _copy def set_values(self, value: Set, is_exclude: bool) -> None: """ Appends the data to include/exclude sets. :param value: set of values to add :type value: set :param is_exclude: flag if values are to be excluded or included :type is_exclude: bool """ prop = "exclude" if is_exclude else "include" current_value = getattr(self, prop) current_value.update(value) setattr(self, prop, current_value) def is_included(self, key: str) -> bool: """ Check if field in included (in set or set is {...}) :param key: key to check :type key: str :return: result of the check :rtype: bool """ return (... in self.include or key in self.include) if self.include else True def is_excluded(self, key: str) -> bool: """ Check if field in excluded (in set or set is {...}) :param key: key to check :type key: str :return: result of the check :rtype: bool """ return (... in self.exclude or key in self.exclude) if self.exclude else False class ExcludableItems: """ Keeps a dictionary of Excludables by alias + model_name keys to allow quick lookup by nested models without need to travers deeply nested dictionaries and passing include/exclude around """ def __init__(self) -> None: self.items: Dict[str, Excludable] = dict() @classmethod def from_excludable(cls, other: "ExcludableItems") -> "ExcludableItems": """ Copy passed ExcludableItems to avoid inplace modifications. 
:param other: other excludable items to be copied :type other: ormar.models.excludable.ExcludableItems :return: copy of other :rtype: ormar.models.excludable.ExcludableItems """ new_excludable = cls() for key, value in other.items.items(): new_excludable.items[key] = value.get_copy() return new_excludable def include_entry_count(self) -> int: """ Returns count of include items inside """ count = 0 for key in self.items.keys(): count += len(self.items[key].include) return count def get(self, model_cls: Type["Model"], alias: str = "") -> Excludable: """ Return Excludable for given model and alias. :param model_cls: target model to check :type model_cls: ormar.models.metaclass.ModelMetaclass :param alias: table alias from relation manager :type alias: str :return: Excludable for given model and alias :rtype: ormar.models.excludable.Excludable """ key = f"{alias + '_' if alias else ''}{model_cls.get_name(lower=True)}" excludable = self.items.get(key) if not excludable: excludable = Excludable() self.items[key] = excludable return excludable def build( self, items: Union[List[str], str, Tuple[str], Set[str], Dict], model_cls: Type["Model"], is_exclude: bool = False, ) -> None: """ Receives the one of the types of items and parses them as to achieve a end situation with one excludable per alias/model in relation. Each excludable has two sets of values - one to include, one to exclude. 
:param items: values to be included or excluded :type items: Union[List[str], str, Tuple[str], Set[str], Dict] :param model_cls: source model from which relations are constructed :type model_cls: ormar.models.metaclass.ModelMetaclass :param is_exclude: flag if items should be included or excluded :type is_exclude: bool """ if isinstance(items, str): items = {items} if isinstance(items, Dict): self._traverse_dict( values=items, source_model=model_cls, model_cls=model_cls, is_exclude=is_exclude, ) else: items = set(items) nested_items = set(x for x in items if "__" in x) items.difference_update(nested_items) self._set_excludes( items=items, model_name=model_cls.get_name(lower=True), is_exclude=is_exclude, ) if nested_items: self._traverse_list( values=nested_items, model_cls=model_cls, is_exclude=is_exclude ) def _set_excludes( self, items: Set, model_name: str, is_exclude: bool, alias: str = "" ) -> None: """ Sets set of values to be included or excluded for given key and model. :param items: items to include/exclude :type items: set :param model_name: name of model to construct key :type model_name: str :param is_exclude: flag if values should be included or excluded :type is_exclude: bool :param alias: :type alias: str """ key = f"{alias + '_' if alias else ''}{model_name}" excludable = self.items.get(key) if not excludable: excludable = Excludable() excludable.set_values(value=items, is_exclude=is_exclude) self.items[key] = excludable def _traverse_dict( # noqa: CFQ002 self, values: Dict, source_model: Type["Model"], model_cls: Type["Model"], is_exclude: bool, related_items: List = None, alias: str = "", ) -> None: """ Goes through dict of nested values and construct/update Excludables. 
:param values: items to include/exclude :type values: Dict :param source_model: source model from which relations are constructed :type source_model: ormar.models.metaclass.ModelMetaclass :param model_cls: model from which current relation is constructed :type model_cls: ormar.models.metaclass.ModelMetaclass :param is_exclude: flag if values should be included or excluded :type is_exclude: bool :param related_items: list of names of related fields chain :type related_items: List :param alias: alias of relation :type alias: str """ self_fields = set() related_items = related_items[:] if related_items else [] for key, value in values.items(): if value is ...: self_fields.add(key) elif isinstance(value, set): ( table_prefix, target_model, _, _, ) = get_relationship_alias_model_and_str( source_model=source_model, related_parts=related_items + [key] ) self._set_excludes( items=value, model_name=target_model.get_name(), is_exclude=is_exclude, alias=table_prefix, ) else: # dict related_items.append(key) ( table_prefix, target_model, _, _, ) = get_relationship_alias_model_and_str( source_model=source_model, related_parts=related_items ) self._traverse_dict( values=value, source_model=source_model, model_cls=target_model, is_exclude=is_exclude, related_items=related_items, alias=table_prefix, ) if self_fields: self._set_excludes( items=self_fields, model_name=model_cls.get_name(), is_exclude=is_exclude, alias=alias, ) def _traverse_list( self, values: Set[str], model_cls: Type["Model"], is_exclude: bool ) -> None: """ Goes through list of values and construct/update Excludables. 
:param values: items to include/exclude :type values: set :param model_cls: model from which current relation is constructed :type model_cls: ormar.models.metaclass.ModelMetaclass :param is_exclude: flag if values should be included or excluded :type is_exclude: bool """ # here we have only nested related keys for key in values: key_split = key.split("__") related_items, field_name = key_split[:-1], key_split[-1] (table_prefix, target_model, _, _) = get_relationship_alias_model_and_str( source_model=model_cls, related_parts=related_items ) self._set_excludes( items={field_name}, model_name=target_model.get_name(), is_exclude=is_exclude, alias=table_prefix, ) ormar-0.12.2/ormar/models/helpers/000077500000000000000000000000001444363446500167765ustar00rootroot00000000000000ormar-0.12.2/ormar/models/helpers/__init__.py000066400000000000000000000026171444363446500211150ustar00rootroot00000000000000from ormar.models.helpers.models import ( check_required_meta_parameters, extract_annotations_and_default_vals, meta_field_not_set, populate_default_options_values, ) from ormar.models.helpers.pydantic import ( get_potential_fields, get_pydantic_base_orm_config, get_pydantic_field, merge_or_generate_pydantic_config, remove_excluded_parent_fields, ) from ormar.models.helpers.relations import ( alias_manager, register_relation_in_alias_manager, ) from ormar.models.helpers.relations import expand_reverse_relationships from ormar.models.helpers.sqlalchemy import ( populate_meta_sqlalchemy_table_if_required, populate_meta_tablename_columns_and_pk, sqlalchemy_columns_from_model_fields, ) from ormar.models.helpers.validation import populate_choices_validators __all__ = [ "expand_reverse_relationships", "extract_annotations_and_default_vals", "populate_meta_tablename_columns_and_pk", "populate_meta_sqlalchemy_table_if_required", "populate_default_options_values", "alias_manager", "register_relation_in_alias_manager", "get_pydantic_field", "get_potential_fields", 
"get_pydantic_base_orm_config", "merge_or_generate_pydantic_config", "check_required_meta_parameters", "sqlalchemy_columns_from_model_fields", "populate_choices_validators", "meta_field_not_set", "remove_excluded_parent_fields", ] ormar-0.12.2/ormar/models/helpers/models.py000066400000000000000000000144061444363446500206400ustar00rootroot00000000000000import itertools import sqlite3 from typing import Any, Dict, List, TYPE_CHECKING, Tuple, Type import pydantic from pydantic.typing import ForwardRef import ormar # noqa: I100 from ormar.models.helpers.pydantic import populate_pydantic_default_values from ormar.models.utils import Extra if TYPE_CHECKING: # pragma no cover from ormar import Model from ormar.fields import BaseField def is_field_an_forward_ref(field: "BaseField") -> bool: """ Checks if field is a relation field and whether any of the referenced models are ForwardRefs that needs to be updated before proceeding. :param field: model field to verify :type field: Type[BaseField] :return: result of the check :rtype: bool """ return field.is_relation and ( field.to.__class__ == ForwardRef or field.through.__class__ == ForwardRef ) def populate_default_options_values( # noqa: CCR001 new_model: Type["Model"], model_fields: Dict ) -> None: """ Sets all optional Meta values to it's defaults and set model_fields that were already previously extracted. Here should live all options that are not overwritten/set for all models. 
Current options are: * constraints = [] * abstract = False :param new_model: newly constructed Model :type new_model: Model class :param model_fields: dict of model fields :type model_fields: Union[Dict[str, type], Dict] """ defaults = { "queryset_class": ormar.QuerySet, "constraints": [], "model_fields": model_fields, "abstract": False, "extra": Extra.forbid, "orders_by": [], "exclude_parent_fields": [], } for key, value in defaults.items(): if not hasattr(new_model.Meta, key): setattr(new_model.Meta, key, value) if any( is_field_an_forward_ref(field) for field in new_model.Meta.model_fields.values() ): new_model.Meta.requires_ref_update = True else: new_model.Meta.requires_ref_update = False new_model._json_fields = { name for name, field in new_model.Meta.model_fields.items() if field.__type__ == pydantic.Json } new_model._bytes_fields = { name for name, field in new_model.Meta.model_fields.items() if field.__type__ == bytes } new_model.__relation_map__ = None class Connection(sqlite3.Connection): def __init__(self, *args: Any, **kwargs: Any) -> None: # pragma: no cover super().__init__(*args, **kwargs) self.execute("PRAGMA foreign_keys=1;") def substitue_backend_pool_for_sqlite(new_model: Type["Model"]) -> None: """ Recreates Connection pool for sqlite3 with new factory that executes "PRAGMA foreign_keys=1; on initialization to enable foreign keys. :param new_model: newly declared ormar Model :type new_model: Model class """ backend = new_model.Meta.database._backend if ( backend._dialect.name == "sqlite" and "factory" not in backend._options ): # pragma: no cover backend._options["factory"] = Connection old_pool = backend._pool backend._pool = old_pool.__class__(backend._database_url, **backend._options) def check_required_meta_parameters(new_model: Type["Model"]) -> None: """ Verifies if ormar.Model has database and metadata set. 
Recreates Connection pool for sqlite3 :param new_model: newly declared ormar Model :type new_model: Model class """ if not hasattr(new_model.Meta, "database"): if not getattr(new_model.Meta, "abstract", False): raise ormar.ModelDefinitionError( f"{new_model.__name__} does not have database defined." ) else: substitue_backend_pool_for_sqlite(new_model=new_model) if not hasattr(new_model.Meta, "metadata"): if not getattr(new_model.Meta, "abstract", False): raise ormar.ModelDefinitionError( f"{new_model.__name__} does not have metadata defined." ) def extract_annotations_and_default_vals(attrs: Dict) -> Tuple[Dict, Dict]: """ Extracts annotations from class namespace dict and triggers extraction of ormar model_fields. :param attrs: namespace of the class created :type attrs: Dict :return: namespace of the class updated, dict of extracted model_fields :rtype: Tuple[Dict, Dict] """ key = "__annotations__" attrs[key] = attrs.get(key, {}) attrs, model_fields = populate_pydantic_default_values(attrs) return attrs, model_fields def group_related_list(list_: List) -> Dict: """ Translates the list of related strings into a dictionary. That way nested models are grouped to traverse them in a right order and to avoid repetition. 
Sample: ["people__houses", "people__cars__models", "people__cars__colors"] will become: {'people': {'houses': [], 'cars': ['models', 'colors']}} Result dictionary is sorted by length of the values and by key :param list_: list of related models used in select related :type list_: List[str] :return: list converted to dictionary to avoid repetition and group nested models :rtype: Dict[str, List] """ result_dict: Dict[str, Any] = dict() list_.sort(key=lambda x: x.split("__")[0]) grouped = itertools.groupby(list_, key=lambda x: x.split("__")[0]) for key, group in grouped: group_list = list(group) new = sorted( ["__".join(x.split("__")[1:]) for x in group_list if len(x.split("__")) > 1] ) if any("__" in x for x in new): result_dict[key] = group_related_list(new) else: result_dict.setdefault(key, []).extend(new) return dict(sorted(result_dict.items(), key=lambda item: len(item[1]))) def meta_field_not_set(model: Type["Model"], field_name: str) -> bool: """ Checks if field with given name is already present in model.Meta. Then check if it's set to something truthful (in practice meaning not None, as it's non or ormar Field only). 
:param model: newly constructed model :type model: Model class :param field_name: name of the ormar field :type field_name: str :return: result of the check :rtype: bool """ return not hasattr(model.Meta, field_name) or not getattr(model.Meta, field_name) ormar-0.12.2/ormar/models/helpers/pydantic.py000066400000000000000000000123041444363446500211630ustar00rootroot00000000000000import inspect from types import MappingProxyType from typing import Dict, Optional, TYPE_CHECKING, Tuple, Type, Union import pydantic from pydantic.fields import ModelField from pydantic.utils import lenient_issubclass from ormar.exceptions import ModelDefinitionError # noqa: I100, I202 from ormar.fields import BaseField if TYPE_CHECKING: # pragma no cover from ormar import Model from ormar.fields import ManyToManyField def create_pydantic_field( field_name: str, model: Type["Model"], model_field: "ManyToManyField" ) -> None: """ Registers pydantic field on through model that leads to passed model and is registered as field_name passed. Through model is fetched from through attributed on passed model_field. :param field_name: field name to register :type field_name: str :param model: type of field to register :type model: Model class :param model_field: relation field from which through model is extracted :type model_field: ManyToManyField class """ model_field.through.__fields__[field_name] = ModelField( name=field_name, type_=model, model_config=model.__config__, required=False, class_validators={}, ) def get_pydantic_field(field_name: str, model: Type["Model"]) -> "ModelField": """ Extracts field type and if it's required from Model model_fields by passed field_name. Returns a pydantic field with type of field_name field type. 
:param field_name: field name to fetch from Model and name of pydantic field :type field_name: str :param model: type of field to register :type model: Model class :return: newly created pydantic field :rtype: pydantic.ModelField """ type_ = model.Meta.model_fields[field_name].__type__ return ModelField( name=field_name, type_=type_, # type: ignore model_config=model.__config__, required=not model.Meta.model_fields[field_name].nullable, class_validators={}, ) def populate_pydantic_default_values(attrs: Dict) -> Tuple[Dict, Dict]: """ Extracts ormar fields from annotations (deprecated) and from namespace dictionary of the class. Fields declared on model are all subclasses of the BaseField class. Trigger conversion of ormar field into pydantic FieldInfo, which has all needed parameters saved. Overwrites the annotations of ormar fields to corresponding types declared on ormar fields (constructed dynamically for relations). Those annotations are later used by pydantic to construct it's own fields. :param attrs: current class namespace :type attrs: Dict :return: namespace of the class updated, dict of extracted model_fields :rtype: Tuple[Dict, Dict] """ model_fields = {} potential_fields = {} potential_fields.update(get_potential_fields(attrs)) for field_name, field in potential_fields.items(): field.name = field_name model_fields[field_name] = field default_type = ( field.__type__ if not field.nullable else Optional[field.__type__] ) overwrite_type = ( field.__pydantic_type__ if field.__type__ != field.__pydantic_type__ else None ) attrs["__annotations__"][field_name] = overwrite_type or default_type return attrs, model_fields def merge_or_generate_pydantic_config(attrs: Dict, name: str) -> None: """ Checks if the user provided pydantic Config, and if he did merges it with the default one. Updates the attrs in place with a new config. 
:rtype: None """ DefaultConfig = get_pydantic_base_orm_config() if "Config" in attrs: ProvidedConfig = attrs["Config"] if not inspect.isclass(ProvidedConfig): raise ModelDefinitionError( f"Config provided for class {name} has to be a class." ) class Config(ProvidedConfig, DefaultConfig): # type: ignore pass attrs["Config"] = Config else: attrs["Config"] = DefaultConfig def get_pydantic_base_orm_config() -> Type[pydantic.BaseConfig]: """ Returns empty pydantic Config with orm_mode set to True. :return: empty default config with orm_mode set. :rtype: pydantic Config """ class Config(pydantic.BaseConfig): orm_mode = True validate_assignment = True return Config def get_potential_fields(attrs: Union[Dict, MappingProxyType]) -> Dict: """ Gets all the fields in current class namespace that are Fields. :param attrs: current class namespace :type attrs: Dict :return: extracted fields that are ormar Fields :rtype: Dict """ return { k: v for k, v in attrs.items() if (lenient_issubclass(v, BaseField) or isinstance(v, BaseField)) } def remove_excluded_parent_fields(model: Type["Model"]) -> None: """ Removes pydantic fields that should be excluded from parent models :param model: :type model: Type["Model"] """ excludes = {*model.Meta.exclude_parent_fields} - {*model.Meta.model_fields.keys()} if excludes: model.__fields__ = { k: v for k, v in model.__fields__.items() if k not in excludes } ormar-0.12.2/ormar/models/helpers/related_names_validation.py000066400000000000000000000034751444363446500243760ustar00rootroot00000000000000from typing import Dict, List, Optional, TYPE_CHECKING, Type from pydantic.typing import ForwardRef import ormar # noqa: I100 if TYPE_CHECKING: # pragma no cover from ormar import Model def validate_related_names_in_relations( # noqa CCR001 model_fields: Dict, new_model: Type["Model"] ) -> None: """ Performs a validation of relation_names in relation fields. 
If multiple fields are leading to the same related model only one can have empty related_name param (populated by default as model.name.lower()+'s'). Also related_names have to be unique for given related model. :raises ModelDefinitionError: if validation of related_names fail :param model_fields: dictionary of declared ormar model fields :type model_fields: Dict[str, ormar.Field] :param new_model: :type new_model: Model class """ already_registered: Dict[str, List[Optional[str]]] = dict() for field in model_fields.values(): if field.is_relation: to_name = ( field.to.get_name() if not field.to.__class__ == ForwardRef else str(field.to) ) previous_related_names = already_registered.setdefault(to_name, []) if field.related_name in previous_related_names: raise ormar.ModelDefinitionError( f"Multiple fields declared on {new_model.get_name(lower=False)} " f"model leading to {field.to.get_name(lower=False)} model without " f"related_name property set. \nThere can be only one relation with " f"default/empty name: '{new_model.get_name() + 's'}'" f"\nTip: provide different related_name for FK and/or M2M fields" ) previous_related_names.append(field.related_name) ormar-0.12.2/ormar/models/helpers/relations.py000066400000000000000000000222551444363446500213560ustar00rootroot00000000000000from typing import TYPE_CHECKING, Type, cast import ormar from ormar import ForeignKey, ManyToMany from ormar.fields import Through from ormar.models.descriptors import RelationDescriptor from ormar.models.helpers.sqlalchemy import adjust_through_many_to_many_model from ormar.relations import AliasManager if TYPE_CHECKING: # pragma no cover from ormar import Model from ormar.fields import ManyToManyField, ForeignKeyField alias_manager = AliasManager() def register_relation_on_build(field: "ForeignKeyField") -> None: """ Registers ForeignKey relation in alias_manager to set a table_prefix. Registration include also reverse relation side to be able to join both sides. 
Relation is registered by model name and relation field name to allow for multiple relations between two Models that needs to have different aliases for proper sql joins. :param field: relation field :type field: ForeignKey class """ alias_manager.add_relation_type( source_model=field.owner, relation_name=field.name, reverse_name=field.get_source_related_name(), ) def register_many_to_many_relation_on_build(field: "ManyToManyField") -> None: """ Registers connection between through model and both sides of the m2m relation. Registration include also reverse relation side to be able to join both sides. Relation is registered by model name and relation field name to allow for multiple relations between two Models that needs to have different aliases for proper sql joins. By default relation name is a model.name.lower(). :param field: relation field :type field: ManyToManyField class """ alias_manager.add_relation_type( source_model=field.through, relation_name=field.default_source_field_name(), reverse_name=field.get_source_related_name(), ) alias_manager.add_relation_type( source_model=field.through, relation_name=field.default_target_field_name(), reverse_name=field.get_related_name(), ) def expand_reverse_relationship(model_field: "ForeignKeyField") -> None: """ If the reverse relation has not been set before it's set here. :param model_field: :type model_field: :return: None :rtype: None """ if reverse_field_not_already_registered(model_field=model_field): register_reverse_model_fields(model_field=model_field) def expand_reverse_relationships(model: Type["Model"]) -> None: """ Iterates through model_fields of given model and verifies if all reverse relation have been populated on related models. If the reverse relation has not been set before it's set here. 
:param model: model on which relation should be checked and registered :type model: Model class """ model_fields = list(model.Meta.model_fields.values()) for model_field in model_fields: if model_field.is_relation and not model_field.has_unresolved_forward_refs(): model_field = cast("ForeignKeyField", model_field) expand_reverse_relationship(model_field=model_field) def register_reverse_model_fields(model_field: "ForeignKeyField") -> None: """ Registers reverse ForeignKey field on related model. By default it's name.lower()+'s' of the model on which relation is defined. But if the related_model name is provided it's registered with that name. Autogenerated reverse fields also set related_name to the original field name. :param model_field: original relation ForeignKey field :type model_field: relation Field """ related_name = model_field.get_related_name() # TODO: Reverse relations does not register pydantic fields? if model_field.is_multi: model_field.to.Meta.model_fields[related_name] = ManyToMany( # type: ignore model_field.owner, through=model_field.through, name=related_name, virtual=True, related_name=model_field.name, owner=model_field.to, self_reference=model_field.self_reference, self_reference_primary=model_field.self_reference_primary, orders_by=model_field.related_orders_by, skip_field=model_field.skip_reverse, through_relation_name=model_field.through_reverse_relation_name, through_reverse_relation_name=model_field.through_relation_name, ) # register foreign keys on through model model_field = cast("ManyToManyField", model_field) register_through_shortcut_fields(model_field=model_field) adjust_through_many_to_many_model(model_field=model_field) else: model_field.to.Meta.model_fields[related_name] = ForeignKey( # type: ignore model_field.owner, real_name=related_name, virtual=True, related_name=model_field.name, owner=model_field.to, self_reference=model_field.self_reference, orders_by=model_field.related_orders_by, skip_field=model_field.skip_reverse, 
) if not model_field.skip_reverse: setattr(model_field.to, related_name, RelationDescriptor(name=related_name)) def register_through_shortcut_fields(model_field: "ManyToManyField") -> None: """ Registers m2m relation through shortcut on both ends of the relation. :param model_field: relation field defined in parent model :type model_field: ManyToManyField """ through_model = model_field.through through_name = through_model.get_name(lower=True) related_name = model_field.get_related_name() model_field.owner.Meta.model_fields[through_name] = Through( through_model, real_name=through_name, virtual=True, related_name=model_field.name, owner=model_field.owner, nullable=True, ) model_field.to.Meta.model_fields[through_name] = Through( through_model, real_name=through_name, virtual=True, related_name=related_name, owner=model_field.to, nullable=True, ) setattr(model_field.owner, through_name, RelationDescriptor(name=through_name)) setattr(model_field.to, through_name, RelationDescriptor(name=through_name)) def register_relation_in_alias_manager(field: "ForeignKeyField") -> None: """ Registers the relation (and reverse relation) in alias manager. The m2m relations require registration of through model between actual end models of the relation. 
Delegates the actual registration to: m2m - register_many_to_many_relation_on_build fk - register_relation_on_build :param field: relation field :type field: ForeignKey or ManyToManyField class """ if field.is_multi: if field.has_unresolved_forward_refs(): return field = cast("ManyToManyField", field) register_many_to_many_relation_on_build(field=field) elif field.is_relation and not field.is_through: if field.has_unresolved_forward_refs(): return register_relation_on_build(field=field) def verify_related_name_dont_duplicate( related_name: str, model_field: "ForeignKeyField" ) -> None: """ Verifies whether the used related_name (regardless of the fact if user defined or auto generated) is already used on related model, but is connected with other model than the one that we connect right now. :raises ModelDefinitionError: if name is already used but lead to different related model :param related_name: :type related_name: :param model_field: original relation ForeignKey field :type model_field: relation Field :return: None :rtype: None """ fk_field = model_field.to.Meta.model_fields.get(related_name) if not fk_field: # pragma: no cover return if fk_field.to != model_field.owner and fk_field.to.Meta != model_field.owner.Meta: raise ormar.ModelDefinitionError( f"Relation with related_name " f"'{related_name}' " f"leading to model " f"{model_field.to.get_name(lower=False)} " f"cannot be used on model " f"{model_field.owner.get_name(lower=False)} " f"because it's already used by model " f"{fk_field.to.get_name(lower=False)}" ) def reverse_field_not_already_registered(model_field: "ForeignKeyField") -> bool: """ Checks if child is already registered in parents pydantic fields. 
:raises ModelDefinitionError: if related name is already used but lead to different related model :param model_field: original relation ForeignKey field :type model_field: relation Field :return: result of the check :rtype: bool """ related_name = model_field.get_related_name() check_result = related_name not in model_field.to.Meta.model_fields check_result2 = model_field.owner.get_name() not in model_field.to.Meta.model_fields if not check_result: verify_related_name_dont_duplicate( related_name=related_name, model_field=model_field ) if not check_result2: verify_related_name_dont_duplicate( related_name=model_field.owner.get_name(), model_field=model_field ) return check_result and check_result2 ormar-0.12.2/ormar/models/helpers/sqlalchemy.py000066400000000000000000000304231444363446500215140ustar00rootroot00000000000000import logging from typing import Dict, List, Optional, TYPE_CHECKING, Tuple, Type, Union import sqlalchemy from pydantic.typing import ForwardRef import ormar # noqa: I100, I202 from ormar.models.descriptors import RelationDescriptor from ormar.models.helpers.pydantic import create_pydantic_field from ormar.models.helpers.related_names_validation import ( validate_related_names_in_relations, ) if TYPE_CHECKING: # pragma no cover from ormar import Model, ModelMeta, ManyToManyField, BaseField, ForeignKeyField from ormar.models import NewBaseModel def adjust_through_many_to_many_model(model_field: "ManyToManyField") -> None: """ Registers m2m relation on through model. Sets ormar.ForeignKey from through model to both child and parent models. Sets sqlalchemy.ForeignKey to both child and parent models. Sets pydantic fields with child and parent model types. 
:param model_field: relation field defined in parent model :type model_field: ManyToManyField """ parent_name = model_field.default_target_field_name() child_name = model_field.default_source_field_name() model_fields = model_field.through.Meta.model_fields model_fields[parent_name] = ormar.ForeignKey( # type: ignore model_field.to, real_name=parent_name, ondelete="CASCADE", owner=model_field.through, ) model_fields[child_name] = ormar.ForeignKey( # type: ignore model_field.owner, real_name=child_name, ondelete="CASCADE", owner=model_field.through, ) create_and_append_m2m_fk( model=model_field.to, model_field=model_field, field_name=parent_name ) create_and_append_m2m_fk( model=model_field.owner, model_field=model_field, field_name=child_name ) create_pydantic_field(parent_name, model_field.to, model_field) create_pydantic_field(child_name, model_field.owner, model_field) setattr(model_field.through, parent_name, RelationDescriptor(name=parent_name)) setattr(model_field.through, child_name, RelationDescriptor(name=child_name)) def create_and_append_m2m_fk( model: Type["Model"], model_field: "ManyToManyField", field_name: str ) -> None: """ Registers sqlalchemy Column with sqlalchemy.ForeignKey leading to the model. Newly created field is added to m2m relation through model Meta columns and table. :param field_name: name of the column to create :type field_name: str :param model: Model class to which FK should be created :type model: Model class :param model_field: field with ManyToMany relation :type model_field: ManyToManyField field """ pk_alias = model.get_column_alias(model.Meta.pkname) pk_column = next((col for col in model.Meta.columns if col.name == pk_alias), None) if pk_column is None: # pragma: no cover raise ormar.ModelDefinitionError( "ManyToMany relation cannot lead to field without pk" ) column = sqlalchemy.Column( field_name, pk_column.type, sqlalchemy.schema.ForeignKey( model.Meta.tablename + "." 
+ pk_alias, ondelete="CASCADE", onupdate="CASCADE", name=f"fk_{model_field.through.Meta.tablename}_{model.Meta.tablename}" f"_{field_name}_{pk_alias}", ), ) model_field.through.Meta.columns.append(column) model_field.through.Meta.table.append_column(column) def check_pk_column_validity( field_name: str, field: "BaseField", pkname: Optional[str] ) -> Optional[str]: """ Receives the field marked as primary key and verifies if the pkname was not already set (only one allowed per model) and if field is not marked as pydantic_only as it needs to be a database field. :raises ModelDefintionError: if pkname already set or field is pydantic_only :param field_name: name of field :type field_name: str :param field: ormar.Field :type field: BaseField :param pkname: already set pkname :type pkname: Optional[str] :return: name of the field that should be set as pkname :rtype: str """ if pkname is not None: raise ormar.ModelDefinitionError("Only one primary key column is allowed.") if field.pydantic_only: raise ormar.ModelDefinitionError("Primary key column cannot be pydantic only") return field_name def sqlalchemy_columns_from_model_fields( model_fields: Dict, new_model: Type["Model"] ) -> Tuple[Optional[str], List[sqlalchemy.Column]]: """ Iterates over declared on Model model fields and extracts fields that should be treated as database fields. If the model is empty it sets mandatory id field as primary key (used in through models in m2m relations). Triggers a validation of relation_names in relation fields. If multiple fields are leading to the same related model only one can have empty related_name param. Also related_names have to be unique. Trigger validation of primary_key - only one and required pk can be set, cannot be pydantic_only. Append fields to columns if it's not pydantic_only, virtual ForeignKey or ManyToMany field. Sets `owner` on each model_field as reference to newly created Model. 
:raises ModelDefinitionError: if validation of related_names fail, or pkname validation fails. :param model_fields: dictionary of declared ormar model fields :type model_fields: Dict[str, ormar.Field] :param new_model: :type new_model: Model class :return: pkname, list of sqlalchemy columns :rtype: Tuple[Optional[str], List[sqlalchemy.Column]] """ if len(model_fields.keys()) == 0: model_fields["id"] = ormar.Integer(name="id", primary_key=True) logging.warning( f"Table {new_model.Meta.tablename} had no fields so auto " "Integer primary key named `id` created." ) validate_related_names_in_relations(model_fields, new_model) return _process_fields(model_fields=model_fields, new_model=new_model) def _process_fields( model_fields: Dict, new_model: Type["Model"] ) -> Tuple[Optional[str], List[sqlalchemy.Column]]: """ Helper method. Populates pkname and columns. Trigger validation of primary_key - only one and required pk can be set, cannot be pydantic_only. Append fields to columns if it's not pydantic_only, virtual ForeignKey or ManyToMany field. Sets `owner` on each model_field as reference to newly created Model. :raises ModelDefinitionError: if validation of related_names fail, or pkname validation fails. :param model_fields: dictionary of declared ormar model fields :type model_fields: Dict[str, ormar.Field] :param new_model: :type new_model: Model class :return: pkname, list of sqlalchemy columns :rtype: Tuple[Optional[str], List[sqlalchemy.Column]] """ columns = [] pkname = None for field_name, field in model_fields.items(): field.owner = new_model if _is_through_model_not_set(field): field.create_default_through_model() if field.primary_key: pkname = check_pk_column_validity(field_name, field, pkname) if _is_db_field(field): columns.append(field.get_column(field.get_alias())) return pkname, columns def _is_through_model_not_set(field: "BaseField") -> bool: """ Alias to if check that verifies if through model was created. 
:param field: field to check :type field: "BaseField" :return: result of the check :rtype: bool """ return field.is_multi and not field.through and not field.to.__class__ == ForwardRef def _is_db_field(field: "BaseField") -> bool: """ Alias to if check that verifies if field should be included in database. :param field: field to check :type field: "BaseField" :return: result of the check :rtype: bool """ return not field.pydantic_only and not field.virtual and not field.is_multi def populate_meta_tablename_columns_and_pk( name: str, new_model: Type["Model"] ) -> Type["Model"]: """ Sets Model tablename if it's not already set in Meta. Default tablename if not present is class name lower + s (i.e. Bed becomes -> beds) Checks if Model's Meta have pkname and columns set. If not calls the sqlalchemy_columns_from_model_fields to populate columns from ormar.fields definitions. :raises ModelDefinitionError: if pkname is not present raises ModelDefinitionError. Each model has to have pk. :param name: name of the current Model :type name: str :param new_model: currently constructed Model :type new_model: ormar.models.metaclass.ModelMetaclass :return: Model with populated pkname and columns in Meta :rtype: ormar.models.metaclass.ModelMetaclass """ tablename = name.lower() + "s" new_model.Meta.tablename = ( new_model.Meta.tablename if hasattr(new_model.Meta, "tablename") else tablename ) pkname: Optional[str] if hasattr(new_model.Meta, "columns"): columns = new_model.Meta.columns pkname = new_model.Meta.pkname else: pkname, columns = sqlalchemy_columns_from_model_fields( new_model.Meta.model_fields, new_model ) if pkname is None: raise ormar.ModelDefinitionError("Table has to have a primary key.") new_model.Meta.columns = columns new_model.Meta.pkname = pkname if not new_model.Meta.orders_by: # by default we sort by pk name if other option not provided new_model.Meta.orders_by.append(pkname) return new_model def check_for_null_type_columns_from_forward_refs(meta: "ModelMeta") 
-> bool: """ Check is any column is of NUllType() meaning it's empty column from ForwardRef :param meta: Meta class of the Model without sqlalchemy table constructed :type meta: Model class Meta :return: result of the check :rtype: bool """ return not any( isinstance(col.type, sqlalchemy.sql.sqltypes.NullType) for col in meta.columns ) def populate_meta_sqlalchemy_table_if_required(meta: "ModelMeta") -> None: """ Constructs sqlalchemy table out of columns and parameters set on Meta class. It populates name, metadata, columns and constraints. :param meta: Meta class of the Model without sqlalchemy table constructed :type meta: Model class Meta """ if not hasattr(meta, "table") and check_for_null_type_columns_from_forward_refs( meta ): set_constraint_names(meta=meta) table = sqlalchemy.Table( meta.tablename, meta.metadata, *meta.columns, *meta.constraints ) meta.table = table def set_constraint_names(meta: "ModelMeta") -> None: """ Populates the names on IndexColumns and UniqueColumns and CheckColumns constraints. :param meta: Meta class of the Model without sqlalchemy table constructed :type meta: Model class Meta """ for constraint in meta.constraints: if isinstance(constraint, sqlalchemy.UniqueConstraint) and not constraint.name: constraint.name = ( f"uc_{meta.tablename}_" f'{"_".join([str(col) for col in constraint._pending_colargs])}' ) elif ( isinstance(constraint, sqlalchemy.Index) and constraint.name == "TEMPORARY_NAME" ): constraint.name = ( f"ix_{meta.tablename}_" f'{"_".join([col for col in constraint._pending_colargs])}' ) elif isinstance(constraint, sqlalchemy.CheckConstraint) and not constraint.name: sql_condition: str = str(constraint.sqltext).replace(" ", "_") constraint.name = f"check_{meta.tablename}_{sql_condition}" def update_column_definition( model: Union[Type["Model"], Type["NewBaseModel"]], field: "ForeignKeyField" ) -> None: """ Updates a column with a new type column based on updated parameters in FK fields. 
:param model: model on which columns needs to be updated :type model: Type["Model"] :param field: field with column definition that requires update :type field: ForeignKeyField :return: None :rtype: None """ columns = model.Meta.columns for ind, column in enumerate(columns): if column.name == field.get_alias(): new_column = field.get_column(field.get_alias()) columns[ind] = new_column break ormar-0.12.2/ormar/models/helpers/validation.py000066400000000000000000000251501444363446500215050ustar00rootroot00000000000000import base64 import decimal import numbers from typing import ( Any, Callable, Dict, List, Set, TYPE_CHECKING, Type, Union, ) try: import orjson as json except ImportError: # pragma: no cover import json # type: ignore # noqa: F401 import pydantic from pydantic.class_validators import make_generic_validator from pydantic.fields import ModelField, SHAPE_LIST import ormar # noqa: I100, I202 from ormar.models.helpers.models import meta_field_not_set from ormar.queryset.utils import translate_list_to_dict if TYPE_CHECKING: # pragma no cover from ormar import Model from ormar.fields import BaseField def check_if_field_has_choices(field: "BaseField") -> bool: """ Checks if given field has choices populated. A if it has one, a validator for this field needs to be attached. :param field: ormar field to check :type field: BaseField :return: result of the check :rtype: bool """ return hasattr(field, "choices") and bool(field.choices) def convert_value_if_needed(field: "BaseField", value: Any) -> Any: """ Converts dates to isoformat as fastapi can check this condition in routes and the fields are not yet parsed. Converts enums to list of it's values. Converts uuids to strings. Converts decimal to float with given scale. 
:param field: ormar field to check with choices :type field: BaseField :param value: current values of the model to verify :type value: Any :return: value, choices list :rtype: Any """ encoder = ormar.ENCODERS_MAP.get(field.__type__, lambda x: x) if field.__type__ == decimal.Decimal: precision = field.scale # type: ignore value = encoder(value, precision) elif field.__type__ == bytes: represent_as_string = field.represent_as_base64_str value = encoder(value, represent_as_string) elif encoder: value = encoder(value) return value def generate_validator(ormar_field: "BaseField") -> Callable: choices = ormar_field.choices def validate_choices(cls: type, value: Any, field: "ModelField") -> None: """ Validates if given value is in provided choices. :raises ValueError: If value is not in choices. :param field:field to validate :type field: BaseField :param value: value of the field :type value: Any """ adjusted_value = convert_value_if_needed(field=ormar_field, value=value) if adjusted_value is not ormar.Undefined and adjusted_value not in choices: raise ValueError( f"{field.name}: '{adjusted_value}' " f"not in allowed choices set:" f" {choices}" ) return value return validate_choices def generate_model_example(model: Type["Model"], relation_map: Dict = None) -> Dict: """ Generates example to be included in schema in fastapi. 
:param model: ormar.Model :type model: Type["Model"] :param relation_map: dict with relations to follow :type relation_map: Optional[Dict] :return: dict with example values :rtype: Dict[str, int] """ example: Dict[str, Any] = dict() relation_map = ( relation_map if relation_map is not None else translate_list_to_dict(model._iterate_related_models()) ) for name, field in model.Meta.model_fields.items(): populates_sample_fields_values( example=example, name=name, field=field, relation_map=relation_map ) to_exclude = {name for name in model.Meta.model_fields} pydantic_repr = generate_pydantic_example(pydantic_model=model, exclude=to_exclude) example.update(pydantic_repr) return example def populates_sample_fields_values( example: Dict[str, Any], name: str, field: "BaseField", relation_map: Dict = None ) -> None: """ Iterates the field and sets fields to sample values :param field: ormar field :type field: BaseField :param name: name of the field :type name: str :param example: example dict :type example: Dict[str, Any] :param relation_map: dict with relations to follow :type relation_map: Optional[Dict] """ if not field.is_relation: is_bytes_str = field.__type__ == bytes and field.represent_as_base64_str example[name] = field.__sample__ if not is_bytes_str else "string" elif isinstance(relation_map, dict) and name in relation_map: example[name] = get_nested_model_example( name=name, field=field, relation_map=relation_map ) def get_nested_model_example( name: str, field: "BaseField", relation_map: Dict ) -> Union[List, Dict]: """ Gets representation of nested model. 
:param name: name of the field to follow :type name: str :param field: ormar field :type field: BaseField :param relation_map: dict with relation map :type relation_map: Dict :return: nested model or list of nested model repr :rtype: Union[List, Dict] """ value = generate_model_example(field.to, relation_map=relation_map.get(name, {})) new_value: Union[List, Dict] = [value] if field.is_multi or field.virtual else value return new_value def generate_pydantic_example( pydantic_model: Type[pydantic.BaseModel], exclude: Set = None ) -> Dict: """ Generates dict with example. :param pydantic_model: model to parse :type pydantic_model: Type[pydantic.BaseModel] :param exclude: list of fields to exclude :type exclude: Optional[Set] :return: dict with fields and sample values :rtype: Dict """ example: Dict[str, Any] = dict() exclude = exclude or set() name_to_check = [name for name in pydantic_model.__fields__ if name not in exclude] for name in name_to_check: field = pydantic_model.__fields__[name] type_ = field.type_ if field.shape == SHAPE_LIST: example[name] = [get_pydantic_example_repr(type_)] else: example[name] = get_pydantic_example_repr(type_) return example def get_pydantic_example_repr(type_: Any) -> Any: """ Gets sample representation of pydantic field for example dict. :param type_: type of pydantic field :type type_: Any :return: representation to include in example :rtype: Any """ if issubclass(type_, (numbers.Number, decimal.Decimal)): return 0 if issubclass(type_, pydantic.BaseModel): return generate_pydantic_example(pydantic_model=type_) return "string" def overwrite_example_and_description( schema: Dict[str, Any], model: Type["Model"] ) -> None: """ Overwrites the example with properly nested children models. Overwrites the description if it's taken from ormar.Model. 
:param schema: schema of current model :type schema: Dict[str, Any] :param model: model class :type model: Type["Model"] """ schema["example"] = generate_model_example(model=model) if "Main base class of ormar Model." in schema.get("description", ""): schema["description"] = f"{model.__name__}" def overwrite_binary_format(schema: Dict[str, Any], model: Type["Model"]) -> None: """ Overwrites format of the field if it's a LargeBinary field with a flag to represent the field as base64 encoded string. :param schema: schema of current model :type schema: Dict[str, Any] :param model: model class :type model: Type["Model"] """ for field_id, prop in schema.get("properties", {}).items(): if ( field_id in model._bytes_fields and model.Meta.model_fields[field_id].represent_as_base64_str ): prop["format"] = "base64" if prop.get("enum"): prop["enum"] = [ base64.b64encode(choice).decode() for choice in prop.get("enum", []) ] def construct_modify_schema_function(fields_with_choices: List) -> Callable: """ Modifies the schema to include fields with choices validator. Those fields will be displayed in schema as Enum types with available choices values listed next to them. Note that schema extra has to be a function, otherwise it's called to soon before all the relations are expanded. :param fields_with_choices: list of fields with choices validation :type fields_with_choices: List :return: callable that will be run by pydantic to modify the schema :rtype: Callable """ def schema_extra(schema: Dict[str, Any], model: Type["Model"]) -> None: for field_id, prop in schema.get("properties", {}).items(): if field_id in fields_with_choices: prop["enum"] = list(model.Meta.model_fields[field_id].choices) prop["description"] = prop.get("description", "") + "An enumeration." 
overwrite_example_and_description(schema=schema, model=model) overwrite_binary_format(schema=schema, model=model) return staticmethod(schema_extra) # type: ignore def construct_schema_function_without_choices() -> Callable: """ Modifies model example and description if needed. Note that schema extra has to be a function, otherwise it's called to soon before all the relations are expanded. :return: callable that will be run by pydantic to modify the schema :rtype: Callable """ def schema_extra(schema: Dict[str, Any], model: Type["Model"]) -> None: overwrite_example_and_description(schema=schema, model=model) overwrite_binary_format(schema=schema, model=model) return staticmethod(schema_extra) # type: ignore def populate_choices_validators(model: Type["Model"]) -> None: # noqa CCR001 """ Checks if Model has any fields with choices set. If yes it adds choices validation into pre root validators. :param model: newly constructed Model :type model: Model class """ fields_with_choices = [] if not meta_field_not_set(model=model, field_name="model_fields"): if not hasattr(model, "_choices_fields"): model._choices_fields = set() for name, field in model.Meta.model_fields.items(): if check_if_field_has_choices(field) and name not in model._choices_fields: fields_with_choices.append(name) validator = make_generic_validator(generate_validator(field)) model.__fields__[name].validators.append(validator) model._choices_fields.add(name) if fields_with_choices: model.Config.schema_extra = construct_modify_schema_function( fields_with_choices=fields_with_choices ) else: model.Config.schema_extra = construct_schema_function_without_choices() ormar-0.12.2/ormar/models/metaclass.py000066400000000000000000000616371444363446500176770ustar00rootroot00000000000000from typing import ( Any, Dict, List, Optional, Set, TYPE_CHECKING, Tuple, Type, Union, cast, Callable, ) import databases import pydantic import sqlalchemy from sqlalchemy.sql.schema import ColumnCollectionConstraint import ormar 
# noqa I100 import ormar.fields.constraints from ormar.fields.constraints import UniqueColumns, IndexColumns, CheckColumns from ormar import ModelDefinitionError # noqa I100 from ormar.exceptions import ModelError from ormar.fields import BaseField from ormar.fields.foreign_key import ForeignKeyField from ormar.fields.many_to_many import ManyToManyField from ormar.models.descriptors import ( JsonDescriptor, PkDescriptor, PropertyDescriptor, PydanticDescriptor, RelationDescriptor, ) from ormar.models.descriptors.descriptors import BytesDescriptor from ormar.models.helpers import ( alias_manager, check_required_meta_parameters, expand_reverse_relationships, extract_annotations_and_default_vals, get_potential_fields, get_pydantic_field, merge_or_generate_pydantic_config, meta_field_not_set, populate_choices_validators, populate_default_options_values, populate_meta_sqlalchemy_table_if_required, populate_meta_tablename_columns_and_pk, register_relation_in_alias_manager, remove_excluded_parent_fields, sqlalchemy_columns_from_model_fields, ) from ormar.models.quick_access_views import quick_access_set from ormar.models.utils import Extra from ormar.queryset import FieldAccessor, QuerySet from ormar.relations.alias_manager import AliasManager from ormar.signals import Signal, SignalEmitter if TYPE_CHECKING: # pragma no cover from ormar import Model from ormar.models import T CONFIG_KEY = "Config" PARSED_FIELDS_KEY = "__parsed_fields__" class ModelMeta: """ Class used for type hinting. Users can subclass this one for convenience but it's not required. The only requirement is that ormar.Model has to have inner class with name Meta. 
""" tablename: str table: sqlalchemy.Table metadata: sqlalchemy.MetaData database: databases.Database columns: List[sqlalchemy.Column] constraints: List[ColumnCollectionConstraint] pkname: str model_fields: Dict[str, Union[BaseField, ForeignKeyField, ManyToManyField]] alias_manager: AliasManager property_fields: Set signals: SignalEmitter abstract: bool requires_ref_update: bool orders_by: List[str] exclude_parent_fields: List[str] extra: Extra queryset_class: Type[QuerySet] def add_cached_properties(new_model: Type["Model"]) -> None: """ Sets cached properties for both pydantic and ormar models. Quick access fields are fields grabbed in getattribute to skip all checks. Related fields and names are populated to None as they can change later. When children models are constructed they can modify parent to register itself. All properties here are used as "cache" to not recalculate them constantly. :param new_model: newly constructed Model :type new_model: Model class """ new_model._quick_access_fields = quick_access_set new_model._related_names = None new_model._through_names = None new_model._related_fields = None new_model._pydantic_fields = {name for name in new_model.__fields__} new_model._json_fields = set() new_model._bytes_fields = set() def add_property_fields(new_model: Type["Model"], attrs: Dict) -> None: # noqa: CCR001 """ Checks class namespace for properties or functions with __property_field__. If attribute have __property_field__ it was decorated with @property_field. Functions like this are exposed in dict() (therefore also fastapi result). Names of property fields are cached for quicker access / extraction. 
:param new_model: newly constructed model :type new_model: Model class :param attrs: :type attrs: Dict[str, str] """ props = set() for var_name, value in attrs.items(): if isinstance(value, property): value = value.fget field_config = getattr(value, "__property_field__", None) if field_config: props.add(var_name) if meta_field_not_set(model=new_model, field_name="property_fields"): new_model.Meta.property_fields = props else: new_model.Meta.property_fields = new_model.Meta.property_fields.union(props) def register_signals(new_model: Type["Model"]) -> None: # noqa: CCR001 """ Registers on model's SignalEmmiter and sets pre defined signals. Predefined signals are (pre/post) + (save/update/delete). Signals are emitted in both model own methods and in selected queryset ones. :param new_model: newly constructed model :type new_model: Model class """ if meta_field_not_set(model=new_model, field_name="signals"): signals = SignalEmitter() signals.pre_save = Signal() signals.pre_update = Signal() signals.pre_delete = Signal() signals.post_save = Signal() signals.post_update = Signal() signals.post_delete = Signal() signals.pre_relation_add = Signal() signals.post_relation_add = Signal() signals.pre_relation_remove = Signal() signals.post_relation_remove = Signal() signals.post_bulk_update = Signal() new_model.Meta.signals = signals def verify_constraint_names( base_class: "Model", model_fields: Dict, parent_value: List ) -> None: """ Verifies if redefined fields that are overwritten in subclasses did not remove any name of the column that is used in constraint as it will fail in sqlalchemy Table creation. 
:param base_class: one of the parent classes :type base_class: Model or model parent class :param model_fields: ormar fields in defined in current class :type model_fields: Dict[str, BaseField] :param parent_value: list of base class constraints :type parent_value: List """ new_aliases = {x.name: x.get_alias() for x in model_fields.values()} old_aliases = {x.name: x.get_alias() for x in base_class.Meta.model_fields.values()} old_aliases.update(new_aliases) constraints_columns = [x._pending_colargs for x in parent_value] for column_set in constraints_columns: if any(x not in old_aliases.values() for x in column_set): raise ModelDefinitionError( f"Column constraints " f"{column_set} " f"has column names " f"that are not in the model fields." f"\n Check columns redefined in subclasses " f"to verify that they have proper 'name' set." ) def get_constraint_copy( constraint: ColumnCollectionConstraint, ) -> Union[UniqueColumns, IndexColumns, CheckColumns]: """ Copy the constraint and unpacking it's values :raises ValueError: if non subclass of ColumnCollectionConstraint :param value: an instance of the ColumnCollectionConstraint class :type value: Instance of ColumnCollectionConstraint child :return: copy ColumnCollectionConstraint ormar constraints :rtype: Union[UniqueColumns, IndexColumns, CheckColumns] """ constraints = { sqlalchemy.UniqueConstraint: lambda x: UniqueColumns(*x._pending_colargs), sqlalchemy.Index: lambda x: IndexColumns(*x._pending_colargs), sqlalchemy.CheckConstraint: lambda x: CheckColumns(x.sqltext), } checks = (key if isinstance(constraint, key) else None for key in constraints) target_class = next((target for target in checks if target is not None), None) constructor: Optional[Callable] = constraints.get(target_class) if not constructor: raise ValueError(f"{constraint} must be a ColumnCollectionMixin!") return constructor(constraint) def update_attrs_from_base_meta( # noqa: CCR001 base_class: "Model", attrs: Dict, model_fields: Dict ) -> None: """ 
Updates Meta parameters in child from parent if needed. :param base_class: one of the parent classes :type base_class: Model or model parent class :param attrs: new namespace for class being constructed :type attrs: Dict :param model_fields: ormar fields in defined in current class :type model_fields: Dict[str, BaseField] """ params_to_update = ["metadata", "database", "constraints", "property_fields"] for param in params_to_update: current_value = attrs.get("Meta", {}).__dict__.get(param, ormar.Undefined) parent_value = ( base_class.Meta.__dict__.get(param) if hasattr(base_class, "Meta") else None ) if parent_value: if param == "constraints": verify_constraint_names( base_class=base_class, model_fields=model_fields, parent_value=parent_value, ) parent_value = [get_constraint_copy(value) for value in parent_value] if isinstance(current_value, list): current_value.extend(parent_value) else: setattr(attrs["Meta"], param, parent_value) def copy_and_replace_m2m_through_model( # noqa: CFQ002 field: ManyToManyField, field_name: str, table_name: str, parent_fields: Dict, attrs: Dict, meta: ModelMeta, base_class: Type["Model"], ) -> None: """ Clones class with Through model for m2m relations, appends child name to the name of the cloned class. Clones non foreign keys fields from parent model, the same with database columns. Modifies related_name with appending child table name after '_' For table name, the table name of child is appended after '_'. Removes the original sqlalchemy table from metadata if it was not removed. 
:param base_class: base class model :type base_class: Type["Model"] :param field: field with relations definition :type field: ManyToManyField :param field_name: name of the relation field :type field_name: str :param table_name: name of the table :type table_name: str :param parent_fields: dictionary of fields to copy to new models from parent :type parent_fields: Dict :param attrs: new namespace for class being constructed :type attrs: Dict :param meta: metaclass of currently created model :type meta: ModelMeta """ Field: Type[BaseField] = type( # type: ignore field.__class__.__name__, (ManyToManyField, BaseField), {} ) copy_field = Field(**dict(field.__dict__)) related_name = field.related_name + "_" + table_name copy_field.related_name = related_name # type: ignore through_class = field.through if not through_class: field.owner = base_class field.create_default_through_model() through_class = field.through new_meta: ormar.ModelMeta = type( # type: ignore "Meta", (), dict(through_class.Meta.__dict__) ) copy_name = through_class.__name__ + attrs.get("__name__", "") copy_through = type(copy_name, (ormar.Model,), {"Meta": new_meta}) new_meta.tablename += "_" + meta.tablename # create new table with copied columns but remove foreign keys # they will be populated later in expanding reverse relation if hasattr(new_meta, "table"): del new_meta.table new_meta.model_fields = { name: field for name, field in new_meta.model_fields.items() if not field.is_relation } _, columns = sqlalchemy_columns_from_model_fields( new_meta.model_fields, copy_through ) # type: ignore new_meta.columns = columns populate_meta_sqlalchemy_table_if_required(new_meta) copy_field.through = copy_through parent_fields[field_name] = copy_field if through_class.Meta.table in through_class.Meta.metadata: through_class.Meta.metadata.remove(through_class.Meta.table) def copy_data_from_parent_model( # noqa: CCR001 base_class: Type["Model"], curr_class: type, attrs: Dict, model_fields: Dict[str, 
Union[BaseField, ForeignKeyField, ManyToManyField]], ) -> Tuple[Dict, Dict]: """ Copy the key parameters [database, metadata, property_fields and constraints] and fields from parent models. Overwrites them if needed. Only abstract classes can be subclassed. Since relation fields requires different related_name for different children :raises ModelDefinitionError: if non abstract model is subclassed :param base_class: one of the parent classes :type base_class: Model or model parent class :param curr_class: current constructed class :type curr_class: Model or model parent class :param attrs: new namespace for class being constructed :type attrs: Dict :param model_fields: ormar fields in defined in current class :type model_fields: Dict[str, BaseField] :return: updated attrs and model_fields :rtype: Tuple[Dict, Dict] """ if attrs.get("Meta"): if model_fields and not base_class.Meta.abstract: # type: ignore raise ModelDefinitionError( f"{curr_class.__name__} cannot inherit " f"from non abstract class {base_class.__name__}" ) update_attrs_from_base_meta( base_class=base_class, # type: ignore attrs=attrs, model_fields=model_fields, ) parent_fields: Dict = dict() meta = attrs.get("Meta") if not meta: # pragma: no cover raise ModelDefinitionError( f"Model {curr_class.__name__} declared without Meta" ) table_name = ( meta.tablename if hasattr(meta, "tablename") and meta.tablename else attrs.get("__name__", "").lower() + "s" ) for field_name, field in base_class.Meta.model_fields.items(): if ( hasattr(meta, "exclude_parent_fields") and field_name in meta.exclude_parent_fields ): continue if field.is_multi: field = cast(ManyToManyField, field) copy_and_replace_m2m_through_model( field=field, field_name=field_name, table_name=table_name, parent_fields=parent_fields, attrs=attrs, meta=meta, base_class=base_class, # type: ignore ) elif field.is_relation and field.related_name: Field = type( # type: ignore field.__class__.__name__, (ForeignKeyField, BaseField), {} ) copy_field = 
Field(**dict(field.__dict__)) related_name = field.related_name + "_" + table_name copy_field.related_name = related_name # type: ignore parent_fields[field_name] = copy_field else: parent_fields[field_name] = field parent_fields.update(model_fields) # type: ignore model_fields = parent_fields return attrs, model_fields def extract_from_parents_definition( # noqa: CCR001 base_class: type, curr_class: type, attrs: Dict, model_fields: Dict[str, Union[BaseField, ForeignKeyField, ManyToManyField]], ) -> Tuple[Dict, Dict]: """ Extracts fields from base classes if they have valid ormar fields. If model was already parsed -> fields definitions need to be removed from class cause pydantic complains about field re-definition so after first child we need to extract from __parsed_fields__ not the class itself. If the class is parsed first time annotations and field definition is parsed from the class.__dict__. If the class is a ormar.Model it is skipped. :param base_class: one of the parent classes :type base_class: Model or model parent class :param curr_class: current constructed class :type curr_class: Model or model parent class :param attrs: new namespace for class being constructed :type attrs: Dict :param model_fields: ormar fields in defined in current class :type model_fields: Dict[str, BaseField] :return: updated attrs and model_fields :rtype: Tuple[Dict, Dict] """ if hasattr(base_class, "Meta"): base_class = cast(Type["Model"], base_class) return copy_data_from_parent_model( base_class=base_class, curr_class=curr_class, attrs=attrs, model_fields=model_fields, ) key = "__annotations__" if hasattr(base_class, PARSED_FIELDS_KEY): # model was already parsed -> fields definitions need to be removed from class # cause pydantic complains about field re-definition so after first child # we need to extract from __parsed_fields__ not the class itself new_attrs, new_model_fields = getattr(base_class, PARSED_FIELDS_KEY) new_fields = set(new_model_fields.keys()) model_fields = 
update_attrs_and_fields( attrs=attrs, new_attrs=new_attrs, model_fields=model_fields, new_model_fields=new_model_fields, new_fields=new_fields, ) return attrs, model_fields potential_fields = get_potential_fields(base_class.__dict__) if potential_fields: # parent model has ormar fields defined and was not parsed before new_attrs = {key: {k: v for k, v in base_class.__dict__.get(key, {}).items()}} new_attrs.update(potential_fields) new_fields = set(potential_fields.keys()) for name in new_fields: delattr(base_class, name) new_attrs, new_model_fields = extract_annotations_and_default_vals(new_attrs) setattr(base_class, PARSED_FIELDS_KEY, (new_attrs, new_model_fields)) model_fields = update_attrs_and_fields( attrs=attrs, new_attrs=new_attrs, model_fields=model_fields, new_model_fields=new_model_fields, new_fields=new_fields, ) return attrs, model_fields def update_attrs_and_fields( attrs: Dict, new_attrs: Dict, model_fields: Dict, new_model_fields: Dict, new_fields: Set, ) -> Dict: """ Updates __annotations__, values of model fields (so pydantic FieldInfos) as well as model.Meta.model_fields definitions from parents. :param attrs: new namespace for class being constructed :type attrs: Dict :param new_attrs: related of the namespace extracted from parent class :type new_attrs: Dict :param model_fields: ormar fields in defined in current class :type model_fields: Dict[str, BaseField] :param new_model_fields: ormar fields defined in parent classes :type new_model_fields: Dict[str, BaseField] :param new_fields: set of new fields names :type new_fields: Set[str] """ key = "__annotations__" attrs[key].update(new_attrs[key]) attrs.update({name: new_attrs[name] for name in new_fields}) updated_model_fields = {k: v for k, v in new_model_fields.items()} updated_model_fields.update(model_fields) return updated_model_fields def add_field_descriptor( name: str, field: "BaseField", new_model: Type["Model"] ) -> None: """ Sets appropriate descriptor for each model field. 
There are 5 main types of descriptors, for bytes, json, pure pydantic fields, and 2 ormar ones - one for relation and one for pk shortcut :param name: name of the field :type name: str :param field: model field to add descriptor for :type field: BaseField :param new_model: model with fields :type new_model: Type["Model] """ if field.is_relation: setattr(new_model, name, RelationDescriptor(name=name)) elif field.__type__ == pydantic.Json: setattr(new_model, name, JsonDescriptor(name=name)) elif field.__type__ == bytes: setattr(new_model, name, BytesDescriptor(name=name)) else: setattr(new_model, name, PydanticDescriptor(name=name)) class ModelMetaclass(pydantic.main.ModelMetaclass): def __new__( # type: ignore # noqa: CCR001 mcs: "ModelMetaclass", name: str, bases: Any, attrs: dict ) -> "ModelMetaclass": """ Metaclass used by ormar Models that performs configuration and build of ormar Models. Sets pydantic configuration. Extract model_fields and convert them to pydantic FieldInfo, updates class namespace. Extracts settings and fields from parent classes. Fetches methods decorated with @property_field decorator to expose them later in dict(). Construct parent pydantic Metaclass/ Model. 
If class has Meta class declared (so actual ormar Models) it also: * populate sqlalchemy columns, pkname and tables from model_fields * register reverse relationships on related models * registers all relations in alias manager that populates table_prefixes * exposes alias manager on each Model * creates QuerySet for each model and exposes it on a class :param name: name of current class :type name: str :param bases: base classes :type bases: Tuple :param attrs: class namespace :type attrs: Dict """ merge_or_generate_pydantic_config(attrs=attrs, name=name) attrs["__name__"] = name attrs, model_fields = extract_annotations_and_default_vals(attrs) for base in reversed(bases): mod = base.__module__ if mod.startswith("ormar.models.") or mod.startswith("pydantic."): continue attrs, model_fields = extract_from_parents_definition( base_class=base, curr_class=mcs, attrs=attrs, model_fields=model_fields ) new_model = super().__new__(mcs, name, bases, attrs) # type: ignore add_cached_properties(new_model) if hasattr(new_model, "Meta"): populate_default_options_values(new_model, model_fields) check_required_meta_parameters(new_model) add_property_fields(new_model, attrs) register_signals(new_model=new_model) populate_choices_validators(new_model) if not new_model.Meta.abstract: new_model = populate_meta_tablename_columns_and_pk(name, new_model) populate_meta_sqlalchemy_table_if_required(new_model.Meta) expand_reverse_relationships(new_model) # TODO: iterate only related fields for field_name, field in new_model.Meta.model_fields.items(): register_relation_in_alias_manager(field=field) add_field_descriptor( name=field_name, field=field, new_model=new_model ) if ( new_model.Meta.pkname and new_model.Meta.pkname not in attrs["__annotations__"] and new_model.Meta.pkname not in new_model.__fields__ ): field_name = new_model.Meta.pkname attrs["__annotations__"][field_name] = Optional[int] # type: ignore attrs[field_name] = None new_model.__fields__[field_name] = get_pydantic_field( 
field_name=field_name, model=new_model ) new_model.Meta.alias_manager = alias_manager for item in new_model.Meta.property_fields: function = getattr(new_model, item) setattr( new_model, item, PropertyDescriptor(name=item, function=function), ) new_model.pk = PkDescriptor(name=new_model.Meta.pkname) remove_excluded_parent_fields(new_model) return new_model @property def objects(cls: Type["T"]) -> "QuerySet[T]": # type: ignore if cls.Meta.requires_ref_update: raise ModelError( f"Model {cls.get_name()} has not updated " f"ForwardRefs. \nBefore using the model you " f"need to call update_forward_refs()." ) return cls.Meta.queryset_class(model_cls=cls) def __getattr__(self, item: str) -> Any: """ Returns FieldAccessors on access to model fields from a class, that way it can be used in python style filters and order_by. :param item: name of the field :type item: str :return: FieldAccessor for given field :rtype: FieldAccessor """ if item == "pk": item = self.Meta.pkname if item in object.__getattribute__(self, "Meta").model_fields: field = self.Meta.model_fields.get(item) if field.is_relation: return FieldAccessor( source_model=cast(Type["Model"], self), model=field.to, access_chain=item, ) return FieldAccessor( source_model=cast(Type["Model"], self), field=field, access_chain=item ) return object.__getattribute__(self, item) ormar-0.12.2/ormar/models/mixins/000077500000000000000000000000001444363446500166435ustar00rootroot00000000000000ormar-0.12.2/ormar/models/mixins/__init__.py000066400000000000000000000014171444363446500207570ustar00rootroot00000000000000""" Package contains functionalities divided by features. All mixins are combined into ModelTableProxy which is one of the parents of Model. The split into mixins was done to ease the maintainability of the proxy class, as it became quite complicated over time. 
""" from ormar.models.mixins.alias_mixin import AliasMixin from ormar.models.mixins.excludable_mixin import ExcludableMixin from ormar.models.mixins.merge_mixin import MergeModelMixin from ormar.models.mixins.prefetch_mixin import PrefetchQueryMixin from ormar.models.mixins.pydantic_mixin import PydanticMixin from ormar.models.mixins.save_mixin import SavePrepareMixin __all__ = [ "MergeModelMixin", "AliasMixin", "PrefetchQueryMixin", "SavePrepareMixin", "ExcludableMixin", "PydanticMixin", ] ormar-0.12.2/ormar/models/mixins/alias_mixin.py000066400000000000000000000053071444363446500215170ustar00rootroot00000000000000from typing import Dict, TYPE_CHECKING class AliasMixin: """ Used to translate field names into database column names. """ if TYPE_CHECKING: # pragma: no cover from ormar import ModelMeta Meta: ModelMeta @classmethod def get_column_alias(cls, field_name: str) -> str: """ Returns db alias (column name in db) for given ormar field. For fields without alias field name is returned. :param field_name: name of the field to get alias from :type field_name: str :return: alias (db name) if set, otherwise passed name :rtype: str """ field = cls.Meta.model_fields.get(field_name) return field.get_alias() if field is not None else field_name @classmethod def get_column_name_from_alias(cls, alias: str) -> str: """ Returns ormar field name for given db alias (column name in db). If field do not have alias it's returned as is. :param alias: :type alias: str :return: field name if set, otherwise passed alias (db name) :rtype: str """ for field_name, field in cls.Meta.model_fields.items(): if field.get_alias() == alias: return field_name return alias # if not found it's not an alias but actual name @classmethod def translate_columns_to_aliases(cls, new_kwargs: Dict) -> Dict: """ Translates dictionary of model fields changing field names into aliases. If field has no alias the field name remains intact. Only fields present in the dictionary are translated. 
:param new_kwargs: dict with fields names and their values :type new_kwargs: Dict :return: dict with aliases and their values :rtype: Dict """ for field_name, field in cls.Meta.model_fields.items(): if field_name in new_kwargs: new_kwargs[field.get_alias()] = new_kwargs.pop(field_name) return new_kwargs @classmethod def translate_aliases_to_columns(cls, new_kwargs: Dict) -> Dict: """ Translates dictionary of model fields changing aliases into field names. If field has no alias the alias is already a field name. Only fields present in the dictionary are translated. :param new_kwargs: dict with aliases and their values :type new_kwargs: Dict :return: dict with fields names and their values :rtype: Dict """ for field_name, field in cls.Meta.model_fields.items(): if field.get_alias() and field.get_alias() in new_kwargs: new_kwargs[field_name] = new_kwargs.pop(field.get_alias()) return new_kwargs ormar-0.12.2/ormar/models/mixins/excludable_mixin.py000066400000000000000000000173211444363446500225350ustar00rootroot00000000000000from typing import ( AbstractSet, Any, Dict, List, Mapping, Set, TYPE_CHECKING, Type, TypeVar, Union, cast, ) from ormar.models.excludable import ExcludableItems from ormar.models.mixins.relation_mixin import RelationMixin if TYPE_CHECKING: # pragma no cover from ormar import Model T = TypeVar("T", bound=Model) IntStr = Union[int, str] AbstractSetIntStr = AbstractSet[IntStr] MappingIntStrAny = Mapping[IntStr, Any] class ExcludableMixin(RelationMixin): """ Used to include/exclude given set of fields on models during load and dict() calls. """ if TYPE_CHECKING: # pragma: no cover from ormar import Model from ormar.models import ModelRow @staticmethod def get_child( items: Union[Set, Dict, None], key: str = None ) -> Union[Set, Dict, None]: """ Used to get nested dictionaries keys if they exists otherwise returns passed items. 
:param items: bag of items to include or exclude :type items: Union[Set, Dict, None] :param key: name of the child to extract :type key: str :return: child extracted from items if exists :rtype: Union[Set, Dict, None] """ if isinstance(items, dict): return items.get(key, {}) return items @staticmethod def _populate_pk_column( model: Union[Type["Model"], Type["ModelRow"]], columns: List[str], use_alias: bool = False, ) -> List[str]: """ Adds primary key column/alias (depends on use_alias flag) to list of column names that are selected. :param model: model on columns are selected :type model: Type["Model"] :param columns: list of columns names :type columns: List[str] :param use_alias: flag to set if aliases or field names should be used :type use_alias: bool :return: list of columns names with pk column in it :rtype: List[str] """ pk_alias = ( model.get_column_alias(model.Meta.pkname) if use_alias else model.Meta.pkname ) if pk_alias not in columns: columns.append(pk_alias) return columns @classmethod def own_table_columns( cls, model: Union[Type["Model"], Type["ModelRow"]], excludable: ExcludableItems, alias: str = "", use_alias: bool = False, add_pk_columns: bool = True, ) -> List[str]: """ Returns list of aliases or field names for given model. Aliases/names switch is use_alias flag. If provided only fields included in fields will be returned. If provided fields in exclude_fields will be excluded in return. Primary key field is always added and cannot be excluded (will be added anyway). 
:param add_pk_columns: flag if add primary key - always yes if ormar parses data :type add_pk_columns: bool :param alias: relation prefix :type alias: str :param excludable: structure of fields to include and exclude :type excludable: ExcludableItems :param model: model on columns are selected :type model: Type["Model"] :param use_alias: flag if aliases or field names should be used :type use_alias: bool :return: list of column field names or aliases :rtype: List[str] """ model_excludable = excludable.get(model_cls=model, alias=alias) # type: ignore columns = [ model.get_column_name_from_alias(col.name) if not use_alias else col.name for col in model.Meta.table.columns ] field_names = [ model.get_column_name_from_alias(col.name) for col in model.Meta.table.columns ] if model_excludable.include: columns = [ col for col, name in zip(columns, field_names) if model_excludable.is_included(name) ] if model_excludable.exclude: columns = [ col for col, name in zip(columns, field_names) if not model_excludable.is_excluded(name) ] # always has to return pk column for ormar to work if add_pk_columns: columns = cls._populate_pk_column( model=model, columns=columns, use_alias=use_alias ) return columns @classmethod def _update_excluded_with_related(cls, exclude: Union[Set, Dict, None]) -> Set: """ Used during generation of the dict(). To avoid cyclical references and max recurrence limit nested models have to exclude related models that are not mandatory. For a main model (not nested) only nullable related field names are added to exclusion, for nested models all related models are excluded. :param exclude: set/dict with fields to exclude :type exclude: Union[Set, Dict, None] :return: set or dict with excluded fields added. 
:rtype: Union[Set, Dict] """ exclude = exclude or set() related_set = cls.extract_related_names() if isinstance(exclude, set): exclude = {s for s in exclude} exclude = exclude.union(related_set) elif isinstance(exclude, dict): # relations are handled in ormar - take only own fields (ellipsis in dict) exclude = {k for k, v in exclude.items() if v is Ellipsis} exclude = exclude.union(related_set) return exclude @classmethod def _update_excluded_with_pks_and_through( cls, exclude: Set, exclude_primary_keys: bool, exclude_through_models: bool ) -> Set: """ Updates excluded names with name of pk column if exclude flag is set. :param exclude: set of names to exclude :type exclude: Set :param exclude_primary_keys: flag if the primary keys should be excluded :type exclude_primary_keys: bool :return: set updated with pk if flag is set :rtype: Set """ if exclude_primary_keys: exclude.add(cls.Meta.pkname) if exclude_through_models: exclude = exclude.union(cls.extract_through_names()) return exclude @classmethod def get_names_to_exclude(cls, excludable: ExcludableItems, alias: str) -> Set: """ Returns a set of models field names that should be explicitly excluded during model initialization. Those fields will be set to None to avoid ormar/pydantic setting default values on them. They should be returned as None in any case. Used in parsing data from database rows that construct Models by initializing them with dicts constructed from those db rows. 
:param alias: alias of current relation :type alias: str :param excludable: structure of fields to include and exclude :type excludable: ExcludableItems :return: set of field names that should be excluded :rtype: Set """ model = cast(Type["Model"], cls) model_excludable = excludable.get(model_cls=model, alias=alias) fields_names = cls.extract_db_own_fields() if model_excludable.include: fields_to_keep = model_excludable.include.intersection(fields_names) else: fields_to_keep = fields_names fields_to_exclude = fields_names - fields_to_keep if model_excludable.exclude: fields_to_exclude = fields_to_exclude.union( model_excludable.exclude.intersection(fields_names) ) fields_to_exclude = fields_to_exclude - {cls.Meta.pkname} return fields_to_exclude ormar-0.12.2/ormar/models/mixins/merge_mixin.py000066400000000000000000000140641444363446500215250ustar00rootroot00000000000000from typing import Dict, List, Optional, TYPE_CHECKING, cast import ormar from ormar.queryset.utils import translate_list_to_dict if TYPE_CHECKING: # pragma no cover from ormar import Model class MergeModelMixin: """ Used to merge models instances returned by database, but already initialized to ormar Models.keys Models can duplicate during joins when parent model has multiple child rows, in the end all parent (main) models should be unique. """ @classmethod def _recursive_add(cls, model_group: List["Model"]) -> List["Model"]: """ Instead of accumulating the model additions one by one, this recursively adds the models. E.G. 
[1, 2, 3, 4].accumulate_add() would give [3, 3, 4], then [6, 4], then [10] where this method looks like [1, 2, 3, 4].recursive_add() gives [[3], [7]], [10] It's the same number of adds, but it gives better O(N) performance on sublists """ if len(model_group) <= 1: return model_group added_values = [] iterable_group = iter(model_group) for model in iterable_group: next_model = next(iterable_group, None) if next_model is not None: combined = cls.merge_two_instances(next_model, model) else: combined = model added_values.append(combined) return cls._recursive_add(added_values) @classmethod def merge_instances_list(cls, result_rows: List["Model"]) -> List["Model"]: """ Merges a list of models into list of unique models. Models can duplicate during joins when parent model has multiple child rows, in the end all parent (main) models should be unique. :param result_rows: list of already initialized Models with child models populated, each instance is one row in db and some models can duplicate :type result_rows: List["Model"] :return: list of merged models where each main model is unique :rtype: List["Model"] """ merged_rows: List["Model"] = [] grouped_instances: Dict = {} for model in result_rows: grouped_instances.setdefault(model.pk, []).append(model) for group in grouped_instances.values(): model = cls._recursive_add(group)[0] merged_rows.append(model) return merged_rows @classmethod def merge_two_instances( cls, one: "Model", other: "Model", relation_map: Dict = None ) -> "Model": """ Merges current (other) Model and previous one (one) and returns the current Model instance with data merged from previous one. If needed it's calling itself recurrently and merges also children models. :param relation_map: map of models relations to follow :type relation_map: Dict :param one: previous model instance :type one: Model :param other: current model instance :type other: Model :return: current Model instance with data merged from previous one. 
:rtype: Model """ relation_map = ( relation_map if relation_map is not None else translate_list_to_dict(one._iterate_related_models()) ) for field_name in relation_map: current_field = getattr(one, field_name) other_value = getattr(other, field_name, []) if isinstance(current_field, list): value_to_set = cls._merge_items_lists( field_name=field_name, current_field=current_field, other_value=other_value, relation_map=relation_map, ) setattr(other, field_name, value_to_set) elif ( isinstance(current_field, ormar.Model) and isinstance(other_value, ormar.Model) and current_field.pk == other_value.pk ): setattr( other, field_name, cls.merge_two_instances( current_field, other_value, relation_map=one._skip_ellipsis( # type: ignore relation_map, field_name, default_return=dict() ), ), ) other.set_save_status(True) return other @classmethod def _merge_items_lists( cls, field_name: str, current_field: List, other_value: List, relation_map: Optional[Dict], ) -> List: """ Takes two list of nested models and process them going deeper according with the map. If model from one's list is in other -> they are merged with relations to follow passed from map. If one's model is not in other it's simply appended to the list. 
:param field_name: name of the current relation field :type field_name: str :param current_field: list of nested models from one model :type current_field: List[Model] :param other_value: list of nested models from other model :type other_value: List[Model] :param relation_map: map of relations to follow :type relation_map: Dict :return: merged list of models :rtype: List[Model] """ value_to_set = [x for x in other_value] for cur_field in current_field: if cur_field in other_value: old_value = next((x for x in other_value if x == cur_field), None) new_val = cls.merge_two_instances( cur_field, cast("Model", old_value), relation_map=cur_field._skip_ellipsis( # type: ignore relation_map, field_name, default_return=dict() ), ) value_to_set = [x for x in value_to_set if x != cur_field] + [new_val] else: value_to_set.append(cur_field) return value_to_set ormar-0.12.2/ormar/models/mixins/prefetch_mixin.py000066400000000000000000000111751444363446500222260ustar00rootroot00000000000000from typing import Callable, Dict, List, TYPE_CHECKING, Tuple, Type, cast from ormar.models.mixins.relation_mixin import RelationMixin if TYPE_CHECKING: # pragma: no cover from ormar.fields import ForeignKeyField, ManyToManyField class PrefetchQueryMixin(RelationMixin): """ Used in PrefetchQuery to extract ids and names of models to prefetch. """ if TYPE_CHECKING: # pragma no cover from ormar import Model get_name: Callable # defined in NewBaseModel @staticmethod def get_clause_target_and_filter_column_name( parent_model: Type["Model"], target_model: Type["Model"], reverse: bool, related: str, ) -> Tuple[Type["Model"], str]: """ Returns Model on which query clause should be performed and name of the column. 
:param parent_model: related model that the relation lead to :type parent_model: Type[Model] :param target_model: model on which query should be perfomed :type target_model: Type[Model] :param reverse: flag if the relation is reverse :type reverse: bool :param related: name of the relation field :type related: str :return: Model on which query clause should be performed and name of the column :rtype: Tuple[Type[Model], str] """ if reverse: field_name = parent_model.Meta.model_fields[related].get_related_name() field = target_model.Meta.model_fields[field_name] if field.is_multi: field = cast("ManyToManyField", field) field_name = field.default_target_field_name() sub_field = field.through.Meta.model_fields[field_name] return field.through, sub_field.get_alias() return target_model, field.get_alias() target_field = target_model.get_column_alias(target_model.Meta.pkname) return target_model, target_field @staticmethod def get_column_name_for_id_extraction( parent_model: Type["Model"], reverse: bool, related: str, use_raw: bool ) -> str: """ Returns name of the column that should be used to extract ids from model. Depending on the relation side it's either primary key column of parent model or field name specified by related parameter. :param parent_model: model from which id column should be extracted :type parent_model: Type[Model] :param reverse: flag if the relation is reverse :type reverse: bool :param related: name of the relation field :type related: str :param use_raw: flag if aliases or field names should be used :type use_raw: bool :return: :rtype: """ if reverse: column_name = parent_model.Meta.pkname return ( parent_model.get_column_alias(column_name) if use_raw else column_name ) column = parent_model.Meta.model_fields[related] return column.get_alias() if use_raw else column.name @classmethod def get_related_field_name(cls, target_field: "ForeignKeyField") -> str: """ Returns name of the relation field that should be used in prefetch query. 
This field is later used to register relation in prefetch query, populate relations dict, and populate nested model in prefetch query. :param target_field: relation field that should be used in prefetch :type target_field: Type[BaseField] :return: name of the field :rtype: str """ if target_field.is_multi: return cls.get_name() if target_field.virtual: return target_field.get_related_name() return target_field.to.Meta.pkname @classmethod def get_filtered_names_to_extract(cls, prefetch_dict: Dict) -> List: """ Returns list of related fields names that should be followed to prefetch related models from. List of models is translated into dict to assure each model is extracted only once in one query, that's why this function accepts prefetch_dict not list. Only relations from current model are returned. :param prefetch_dict: dictionary of fields to extract :type prefetch_dict: Dict :return: list of fields names to extract :rtype: List """ related_to_extract = [] if prefetch_dict and prefetch_dict is not Ellipsis: related_to_extract = [ related for related in cls.extract_related_names() if related in prefetch_dict ] return related_to_extract ormar-0.12.2/ormar/models/mixins/pydantic_mixin.py000066400000000000000000000125341444363446500222410ustar00rootroot00000000000000import copy import string from random import choices from typing import ( Any, Callable, Dict, List, Optional, Set, TYPE_CHECKING, Type, Union, cast, ) import pydantic from pydantic.fields import ModelField from ormar.models.mixins.relation_mixin import RelationMixin # noqa: I100, I202 from ormar.queryset.utils import translate_list_to_dict class PydanticMixin(RelationMixin): __cache__: Dict[str, Type[pydantic.BaseModel]] = {} if TYPE_CHECKING: # pragma: no cover __fields__: Dict[str, ModelField] _skip_ellipsis: Callable _get_not_excluded_fields: Callable @classmethod def get_pydantic( cls, *, include: Union[Set, Dict] = None, exclude: Union[Set, Dict] = None ) -> Type[pydantic.BaseModel]: """ Returns a 
pydantic model out of ormar model. Converts also nested ormar models into pydantic models. Can be used to fully exclude certain fields in fastapi response and requests. :param include: fields of own and nested models to include :type include: Union[Set, Dict, None] :param exclude: fields of own and nested models to exclude :type exclude: Union[Set, Dict, None] """ relation_map = translate_list_to_dict(cls._iterate_related_models()) return cls._convert_ormar_to_pydantic( include=include, exclude=exclude, relation_map=relation_map ) @classmethod def _convert_ormar_to_pydantic( cls, relation_map: Dict[str, Any], include: Union[Set, Dict] = None, exclude: Union[Set, Dict] = None, ) -> Type[pydantic.BaseModel]: if include and isinstance(include, Set): include = translate_list_to_dict(include) if exclude and isinstance(exclude, Set): exclude = translate_list_to_dict(exclude) fields_dict: Dict[str, Any] = dict() defaults: Dict[str, Any] = dict() fields_to_process = cls._get_not_excluded_fields( fields={*cls.Meta.model_fields.keys()}, include=include, exclude=exclude ) fields_to_process.sort( key=lambda x: list(cls.Meta.model_fields.keys()).index(x) ) cache_key = f"{cls.__name__}_{str(include)}_{str(exclude)}" if cache_key in cls.__cache__: return cls.__cache__[cache_key] for name in fields_to_process: field = cls._determine_pydantic_field_type( name=name, defaults=defaults, include=include, exclude=exclude, relation_map=relation_map, ) if field is not None: fields_dict[name] = field model = type( f"{cls.__name__}_{''.join(choices(string.ascii_uppercase, k=3))}", (pydantic.BaseModel,), {"__annotations__": fields_dict, **defaults}, ) model = cast(Type[pydantic.BaseModel], model) cls._copy_field_validators(model=model) cls.__cache__[cache_key] = model return model @classmethod def _determine_pydantic_field_type( cls, name: str, defaults: Dict, include: Union[Set, Dict, None], exclude: Union[Set, Dict, None], relation_map: Dict[str, Any], ) -> Any: field = 
cls.Meta.model_fields[name] target: Any = None if field.is_relation and name in relation_map: # type: ignore target = field.to._convert_ormar_to_pydantic( include=cls._skip_ellipsis(include, name), exclude=cls._skip_ellipsis(exclude, name), relation_map=cls._skip_ellipsis( relation_map, name, default_return=dict() ), ) if field.is_multi or field.virtual: target = List[target] # type: ignore elif not field.is_relation: defaults[name] = cls.__fields__[name].field_info target = field.__type__ if target is not None and field.nullable: target = Optional[target] return target @classmethod def _copy_field_validators(cls, model: Type[pydantic.BaseModel]) -> None: """ Copy field validators from ormar model to generated pydantic model. """ for field_name, field in model.__fields__.items(): if ( field_name not in cls.__fields__ or cls.Meta.model_fields[field_name].is_relation ): continue validators = cls.__fields__[field_name].validators already_attached = [ validator.__wrapped__ for validator in field.validators # type: ignore ] validators_to_copy = [ validator for validator in validators if validator.__wrapped__ not in already_attached # type: ignore ] field.validators.extend(copy.deepcopy(validators_to_copy)) class_validators = cls.__fields__[field_name].class_validators field.class_validators.update(copy.deepcopy(class_validators)) field.pre_validators = copy.deepcopy( cls.__fields__[field_name].pre_validators ) field.post_validators = copy.deepcopy( cls.__fields__[field_name].post_validators ) ormar-0.12.2/ormar/models/mixins/relation_mixin.py000066400000000000000000000146061444363446500222450ustar00rootroot00000000000000from typing import Callable, Dict, List, Optional, Set, TYPE_CHECKING, cast from ormar import BaseField, ForeignKeyField from ormar.models.traversible import NodeList class RelationMixin: """ Used to return relation fields/names etc. 
from given model """ if TYPE_CHECKING: # pragma no cover from ormar import ModelMeta Meta: ModelMeta __relation_map__: Optional[List[str]] _related_names: Optional[Set] _through_names: Optional[Set] _related_fields: Optional[List] get_name: Callable @classmethod def extract_db_own_fields(cls) -> Set: """ Returns only fields that are stored in the own database table, exclude all related fields. :return: set of model fields with relation fields excluded :rtype: Set """ related_names = cls.extract_related_names() self_fields = { name for name in cls.Meta.model_fields.keys() if name not in related_names } return self_fields @classmethod def extract_related_fields(cls) -> List["ForeignKeyField"]: """ Returns List of ormar Fields for all relations declared on a model. List is cached in cls._related_fields for quicker access. :return: list of related fields :rtype: List """ if cls._related_fields is not None: return cls._related_fields related_fields = [] for name in cls.extract_related_names().union(cls.extract_through_names()): related_fields.append(cast("ForeignKeyField", cls.Meta.model_fields[name])) cls._related_fields = related_fields return related_fields @classmethod def extract_through_names(cls) -> Set[str]: """ Extracts related fields through names which are shortcuts to through models. :return: set of related through fields names :rtype: Set """ if cls._through_names is not None: return cls._through_names related_names = set() for name, field in cls.Meta.model_fields.items(): if isinstance(field, BaseField) and field.is_through: related_names.add(name) cls._through_names = related_names return related_names @classmethod def extract_related_names(cls) -> Set[str]: """ Returns List of fields names for all relations declared on a model. List is cached in cls._related_names for quicker access. 
:return: set of related fields names :rtype: Set """ if cls._related_names is not None: return cls._related_names related_names = set() for name, field in cls.Meta.model_fields.items(): if ( isinstance(field, BaseField) and field.is_relation and not field.is_through and not field.skip_field ): related_names.add(name) cls._related_names = related_names return related_names @classmethod def _extract_db_related_names(cls) -> Set: """ Returns only fields that are stored in the own database table, exclude related fields that are not stored as foreign keys on given model. :return: set of model fields with non fk relation fields excluded :rtype: Set """ related_names = cls.extract_related_names() related_names = { name for name in related_names if cls.Meta.model_fields[name].is_valid_uni_relation() } return related_names @classmethod def _iterate_related_models( # noqa: CCR001 cls, node_list: NodeList = None, parsed_map: Dict = None, source_relation: str = None, recurrent: bool = False, ) -> List[str]: """ Iterates related models recursively to extract relation strings of nested not visited models. 
:return: list of relation strings to be passed to select_related :rtype: List[str] """ if not node_list: if cls.__relation_map__: return cls.__relation_map__ node_list = NodeList() parsed_map = dict() current_node = node_list.add(node_class=cls) else: current_node = node_list[-1] relations = sorted(cls.extract_related_names()) processed_relations: List[str] = [] for relation in relations: if not current_node.visited(relation): target_model = cls.Meta.model_fields[relation].to node_list.add( node_class=target_model, relation_name=relation, parent_node=current_node, ) relation_key = f"{cls.get_name()}_{relation}" parsed_map = cast(Dict, parsed_map) deep_relations = parsed_map.get(relation_key) if not deep_relations: deep_relations = target_model._iterate_related_models( source_relation=relation, node_list=node_list, recurrent=True, parsed_map=parsed_map, ) parsed_map[relation_key] = deep_relations processed_relations.extend(deep_relations) result = cls._get_final_relations(processed_relations, source_relation) if not recurrent: cls.__relation_map__ = result return result @staticmethod def _get_final_relations( processed_relations: List, source_relation: Optional[str] ) -> List[str]: """ Helper method to prefix nested relation strings with current source relation :param processed_relations: list of already processed relation str :type processed_relations: List[str] :param source_relation: name of the current relation :type source_relation: str :return: list of relation strings to be passed to select_related :rtype: List[str] """ if processed_relations: final_relations = [ f"{source_relation + '__' if source_relation else ''}{relation}" for relation in processed_relations ] else: final_relations = [source_relation] if source_relation else [] return final_relations ormar-0.12.2/ormar/models/mixins/save_mixin.py000066400000000000000000000361451444363446500213700ustar00rootroot00000000000000import base64 import uuid from enum import Enum from typing import ( Any, 
Callable, Collection, Dict, List, Optional, Set, TYPE_CHECKING, cast, ) import pydantic import ormar # noqa: I100, I202 from ormar.exceptions import ModelPersistenceError from ormar.fields.parsers import encode_json from ormar.models.mixins import AliasMixin from ormar.models.mixins.relation_mixin import RelationMixin if TYPE_CHECKING: # pragma: no cover from ormar import ForeignKeyField, Model class SavePrepareMixin(RelationMixin, AliasMixin): """ Used to prepare models to be saved in database """ if TYPE_CHECKING: # pragma: nocover _choices_fields: Optional[Set] _skip_ellipsis: Callable _json_fields: Set[str] _bytes_fields: Set[str] __fields__: Dict[str, pydantic.fields.ModelField] @classmethod def prepare_model_to_save(cls, new_kwargs: dict) -> dict: """ Combines all preparation methods before saving. Removes primary key for if it's nullable or autoincrement pk field, and it's set to None. Substitute related models with their primary key values as fk column. Populates the default values for field with default set and no value. Translate columns into aliases (db names). :param new_kwargs: dictionary of model that is about to be saved :type new_kwargs: Dict[str, str] :return: dictionary of model that is about to be saved :rtype: Dict[str, str] """ new_kwargs = cls._remove_pk_from_kwargs(new_kwargs) new_kwargs = cls._remove_not_ormar_fields(new_kwargs) new_kwargs = cls.substitute_models_with_pks(new_kwargs) new_kwargs = cls.populate_default_values(new_kwargs) new_kwargs = cls.reconvert_str_to_bytes(new_kwargs) new_kwargs = cls.translate_columns_to_aliases(new_kwargs) return new_kwargs @classmethod def prepare_model_to_update(cls, new_kwargs: dict) -> dict: """ Combines all preparation methods before updating. 
:param new_kwargs: dictionary of model that is about to be saved :type new_kwargs: Dict[str, str] :return: dictionary of model that is about to be updated :rtype: Dict[str, str] """ new_kwargs = cls.parse_non_db_fields(new_kwargs) new_kwargs = cls.substitute_models_with_pks(new_kwargs) new_kwargs = cls.reconvert_str_to_bytes(new_kwargs) new_kwargs = cls.dump_all_json_fields_to_str(new_kwargs) new_kwargs = cls.translate_columns_to_aliases(new_kwargs) new_kwargs = cls.translate_enum_columns(new_kwargs) return new_kwargs @classmethod def translate_enum_columns(cls, new_kwargs: dict) -> dict: for key, value in new_kwargs.items(): if isinstance(value, Enum): new_kwargs[key] = value.name return new_kwargs @classmethod def _remove_not_ormar_fields(cls, new_kwargs: dict) -> dict: """ Removes primary key for if it's nullable or autoincrement pk field, and it's set to None. :param new_kwargs: dictionary of model that is about to be saved :type new_kwargs: Dict[str, str] :return: dictionary of model that is about to be saved :rtype: Dict[str, str] """ ormar_fields = { k for k, v in cls.Meta.model_fields.items() if not v.pydantic_only } new_kwargs = {k: v for k, v in new_kwargs.items() if k in ormar_fields} return new_kwargs @classmethod def _remove_pk_from_kwargs(cls, new_kwargs: dict) -> dict: """ Removes primary key for if it's nullable or autoincrement pk field, and it's set to None. :param new_kwargs: dictionary of model that is about to be saved :type new_kwargs: Dict[str, str] :return: dictionary of model that is about to be saved :rtype: Dict[str, str] """ pkname = cls.Meta.pkname pk = cls.Meta.model_fields[pkname] if new_kwargs.get(pkname, ormar.Undefined) is None and ( pk.nullable or pk.autoincrement ): del new_kwargs[pkname] return new_kwargs @classmethod def parse_non_db_fields(cls, model_dict: Dict) -> Dict: """ Receives dictionary of model that is about to be saved and changes uuid fields to strings in bulk_update. 
:param model_dict: dictionary of model that is about to be saved :type model_dict: Dict :return: dictionary of model that is about to be saved :rtype: Dict """ for name, field in cls.Meta.model_fields.items(): if field.__type__ == uuid.UUID and name in model_dict: parsers = {"string": lambda x: str(x), "hex": lambda x: "%.32x" % x.int} uuid_format = field.column_type.uuid_format parser: Callable[..., Any] = parsers.get(uuid_format, lambda x: x) model_dict[name] = parser(model_dict[name]) return model_dict @classmethod def substitute_models_with_pks(cls, model_dict: Dict) -> Dict: # noqa CCR001 """ Receives dictionary of model that is about to be saved and changes all related models that are stored as foreign keys to their fk value. :param model_dict: dictionary of model that is about to be saved :type model_dict: Dict :return: dictionary of model that is about to be saved :rtype: Dict """ for field in cls.extract_related_names(): field_value = model_dict.get(field, None) if field_value is not None: target_field = cls.Meta.model_fields[field] target_pkname = target_field.to.Meta.pkname if isinstance(field_value, ormar.Model): # pragma: no cover pk_value = getattr(field_value, target_pkname) if not pk_value: raise ModelPersistenceError( f"You cannot save {field_value.get_name()} " f"model without pk set!" ) model_dict[field] = pk_value elif isinstance(field_value, (list, dict)) and field_value: if isinstance(field_value, list): model_dict[field] = [ target.get(target_pkname) for target in field_value ] else: model_dict[field] = field_value.get(target_pkname) else: model_dict.pop(field, None) return model_dict @classmethod def reconvert_str_to_bytes(cls, model_dict: Dict) -> Dict: """ Receives dictionary of model that is about to be saved and changes all bytes fields that are represented as strings back into bytes. 
:param model_dict: dictionary of model that is about to be saved :type model_dict: Dict :return: dictionary of model that is about to be saved :rtype: Dict """ bytes_base64_fields = { name for name, field in cls.Meta.model_fields.items() if field.represent_as_base64_str } for key, value in model_dict.items(): if key in cls._bytes_fields and isinstance(value, str): model_dict[key] = ( value.encode("utf-8") if key not in bytes_base64_fields else base64.b64decode(value) ) return model_dict @classmethod def dump_all_json_fields_to_str(cls, model_dict: Dict) -> Dict: """ Receives dictionary of model that is about to be saved and changes all json fields into strings :param model_dict: dictionary of model that is about to be saved :type model_dict: Dict :return: dictionary of model that is about to be saved :rtype: Dict """ for key, value in model_dict.items(): if key in cls._json_fields: model_dict[key] = encode_json(value) return model_dict @classmethod def populate_default_values(cls, new_kwargs: Dict) -> Dict: """ Receives dictionary of model that is about to be saved and populates the default value on the fields that have the default value set, but no actual value was passed by the user. :param new_kwargs: dictionary of model that is about to be saved :type new_kwargs: Dict :return: dictionary of model that is about to be saved :rtype: Dict """ for field_name, field in cls.Meta.model_fields.items(): if ( field_name not in new_kwargs and field.has_default(use_server=False) and not field.pydantic_only ): new_kwargs[field_name] = field.get_default() # clear fields with server_default set as None if ( field.server_default is not None and new_kwargs.get(field_name, None) is None ): new_kwargs.pop(field_name, None) return new_kwargs @classmethod def validate_choices(cls, new_kwargs: Dict) -> Dict: """ Receives dictionary of model that is about to be saved and validates the fields with choices set to see if the value is allowed. 
:param new_kwargs: dictionary of model that is about to be saved :type new_kwargs: Dict :return: dictionary of model that is about to be saved :rtype: Dict """ if not cls._choices_fields: return new_kwargs fields_to_check = [ field for field in cls.Meta.model_fields.values() if field.name in cls._choices_fields and field.name in new_kwargs ] for field in fields_to_check: if new_kwargs[field.name] not in field.choices: raise ValueError( f"{field.name}: '{new_kwargs[field.name]}' " f"not in allowed choices set:" f" {field.choices}" ) return new_kwargs @staticmethod async def _upsert_model( instance: "Model", save_all: bool, previous_model: Optional["Model"], relation_field: Optional["ForeignKeyField"], update_count: int, ) -> int: """ Method updates given instance if: * instance is not saved or * instance have no pk or * save_all=True flag is set and instance is not __pk_only__. If relation leading to instance is a ManyToMany also the through model is saved :param instance: current model to upsert :type instance: Model :param save_all: flag if all models should be saved or only not saved ones :type save_all: bool :param relation_field: field with relation :type relation_field: Optional[ForeignKeyField] :param previous_model: previous model from which method came :type previous_model: Model :param update_count: no of updated models :type update_count: int :return: no of updated models :rtype: int """ if ( save_all or not instance.pk or not instance.saved ) and not instance.__pk_only__: await instance.upsert(__force_save__=True) if relation_field and relation_field.is_multi: await instance._upsert_through_model( instance=instance, relation_field=relation_field, previous_model=cast("Model", previous_model), ) update_count += 1 return update_count @staticmethod async def _upsert_through_model( instance: "Model", previous_model: "Model", relation_field: "ForeignKeyField" ) -> None: """ Upsert through model for m2m relation. 
:param instance: current model to upsert :type instance: Model :param relation_field: field with relation :type relation_field: Optional[ForeignKeyField] :param previous_model: previous model from which method came :type previous_model: Model """ through_name = previous_model.Meta.model_fields[ relation_field.name ].through.get_name() through = getattr(instance, through_name) if through: through_dict = through.dict(exclude=through.extract_related_names()) else: through_dict = {} await getattr( previous_model, relation_field.name ).queryset_proxy.upsert_through_instance(instance, **through_dict) async def _update_relation_list( self, fields_list: Collection["ForeignKeyField"], follow: bool, save_all: bool, relation_map: Dict, update_count: int, ) -> int: """ Internal method used in save_related to follow deeper from related models and update numbers of updated related instances. :type save_all: flag if all models should be saved :type save_all: bool :param fields_list: list of ormar fields to follow and save :type fields_list: Collection["ForeignKeyField"] :param relation_map: map of relations to follow :type relation_map: Dict :param follow: flag to trigger deep save - by default only directly related models are saved with follow=True also related models of related models are saved :type follow: bool :param update_count: internal parameter for recursive calls - number of updated instances :type update_count: int :return: tuple of update count and visited :rtype: int """ for field in fields_list: values = self._get_field_values(name=field.name) for value in values: if follow: update_count = await value.save_related( follow=follow, save_all=save_all, relation_map=self._skip_ellipsis( # type: ignore relation_map, field.name, default_return={} ), update_count=update_count, previous_model=self, relation_field=field, ) else: update_count = await value._upsert_model( instance=value, save_all=save_all, previous_model=self, relation_field=field, update_count=update_count, ) 
return update_count def _get_field_values(self, name: str) -> List: """ Extract field values and ensures it is a list. :param name: name of the field :type name: str :return: list of values :rtype: List """ values = getattr(self, name) or [] if not isinstance(values, list): values = [values] return values ormar-0.12.2/ormar/models/model.py000066400000000000000000000312431444363446500170110ustar00rootroot00000000000000from typing import Any, Dict, List, Optional, Set, TYPE_CHECKING, TypeVar, Union import ormar.queryset # noqa I100 from ormar.exceptions import ModelPersistenceError, NoMatch from ormar.models import NewBaseModel # noqa I100 from ormar.models.metaclass import ModelMeta from ormar.models.model_row import ModelRow from ormar.queryset.utils import subtract_dict, translate_list_to_dict T = TypeVar("T", bound="Model") if TYPE_CHECKING: # pragma: no cover from ormar import ForeignKeyField class Model(ModelRow): __abstract__ = False if TYPE_CHECKING: # pragma nocover Meta: ModelMeta def __repr__(self) -> str: # pragma nocover _repr = { k: getattr(self, k) for k, v in self.Meta.model_fields.items() if not v.skip_field } return f"{self.__class__.__name__}({str(_repr)})" async def upsert(self: T, **kwargs: Any) -> T: """ Performs either a save or an update depending on the presence of the pk. If the pk field is filled it's an update, otherwise the save is performed. For save kwargs are ignored, used only in update if provided. :param kwargs: list of fields to update :type kwargs: Any :return: saved Model :rtype: Model """ force_save = kwargs.pop("__force_save__", False) if force_save: expr = self.Meta.table.select().where(self.pk_column == self.pk) row = await self.Meta.database.fetch_one(expr) if not row: return await self.save() return await self.update(**kwargs) if not self.pk: return await self.save() return await self.update(**kwargs) async def save(self: T) -> T: """ Performs a save of given Model instance. 
If primary key is already saved, db backend will throw integrity error. Related models are saved by pk number, reverse relation and many to many fields are not saved - use corresponding relations methods. If there are fields with server_default set and those fields are not already filled save will trigger also a second query to refreshed the fields populated server side. Does not recognize if model was previously saved. If you want to perform update or insert depending on the pk fields presence use upsert. Sends pre_save and post_save signals. Sets model save status to True. :return: saved Model :rtype: Model """ await self.signals.pre_save.send(sender=self.__class__, instance=self) self_fields = self._extract_model_db_fields() if not self.pk and self.Meta.model_fields[self.Meta.pkname].autoincrement: self_fields.pop(self.Meta.pkname, None) self_fields = self.populate_default_values(self_fields) self.update_from_dict( { k: v for k, v in self_fields.items() if k not in self.extract_related_names() } ) self_fields = self.translate_columns_to_aliases(self_fields) expr = self.Meta.table.insert() expr = expr.values(**self_fields) pk = await self.Meta.database.execute(expr) if pk and isinstance(pk, self.pk_type()): setattr(self, self.Meta.pkname, pk) self.set_save_status(True) # refresh server side defaults if any( field.server_default is not None for name, field in self.Meta.model_fields.items() if name not in self_fields ): await self.load() await self.signals.post_save.send(sender=self.__class__, instance=self) return self async def save_related( # noqa: CCR001, CFQ002 self, follow: bool = False, save_all: bool = False, relation_map: Dict = None, exclude: Union[Set, Dict] = None, update_count: int = 0, previous_model: "Model" = None, relation_field: Optional["ForeignKeyField"] = None, ) -> int: """ Triggers a upsert method on all related models if the instances are not already saved. By default saves only the directly related ones. 
If follow=True is set it saves also related models of related models. To not get stuck in an infinite loop as related models also keep a relation to parent model visited models set is kept. That way already visited models that are nested are saved, but the save do not follow them inside. So Model A -> Model B -> Model A -> Model C will save second Model A but will never follow into Model C. Nested relations of those kind need to be persisted manually. :param relation_field: field with relation leading to this model :type relation_field: Optional[ForeignKeyField] :param previous_model: previous model from which method came :type previous_model: Model :param exclude: items to exclude during saving of relations :type exclude: Union[Set, Dict] :param relation_map: map of relations to follow :type relation_map: Dict :param save_all: flag if all models should be saved or only not saved ones :type save_all: bool :param follow: flag to trigger deep save - by default only directly related models are saved with follow=True also related models of related models are saved :type follow: bool :param update_count: internal parameter for recursive calls - number of updated instances :type update_count: int :return: number of updated/saved models :rtype: int """ relation_map = ( relation_map if relation_map is not None else translate_list_to_dict(self._iterate_related_models()) ) if exclude and isinstance(exclude, Set): exclude = translate_list_to_dict(exclude) relation_map = subtract_dict(relation_map, exclude or {}) if relation_map: fields_to_visit = { field for field in self.extract_related_fields() if field.name in relation_map } pre_save = { field for field in fields_to_visit if not field.virtual and not field.is_multi } update_count = await self._update_relation_list( fields_list=pre_save, follow=follow, save_all=save_all, relation_map=relation_map, update_count=update_count, ) update_count = await self._upsert_model( instance=self, save_all=save_all, 
previous_model=previous_model, relation_field=relation_field, update_count=update_count, ) post_save = fields_to_visit - pre_save update_count = await self._update_relation_list( fields_list=post_save, follow=follow, save_all=save_all, relation_map=relation_map, update_count=update_count, ) else: update_count = await self._upsert_model( instance=self, save_all=save_all, previous_model=previous_model, relation_field=relation_field, update_count=update_count, ) return update_count async def update(self: T, _columns: List[str] = None, **kwargs: Any) -> T: """ Performs update of Model instance in the database. Fields can be updated before or you can pass them as kwargs. Sends pre_update and post_update signals. Sets model save status to True. :param _columns: list of columns to update, if None all are updated :type _columns: List :raises ModelPersistenceError: If the pk column is not set :param kwargs: list of fields to update as field=value pairs :type kwargs: Any :return: updated Model :rtype: Model """ if kwargs: self.update_from_dict(kwargs) if not self.pk: raise ModelPersistenceError( "You cannot update not saved model! Use save or upsert method." ) await self.signals.pre_update.send( sender=self.__class__, instance=self, passed_args=kwargs ) self_fields = self._extract_model_db_fields() self_fields.pop(self.get_column_name_from_alias(self.Meta.pkname)) if _columns: self_fields = {k: v for k, v in self_fields.items() if k in _columns} self_fields = self.translate_columns_to_aliases(self_fields) expr = self.Meta.table.update().values(**self_fields) expr = expr.where(self.pk_column == getattr(self, self.Meta.pkname)) await self.Meta.database.execute(expr) self.set_save_status(True) await self.signals.post_update.send(sender=self.__class__, instance=self) return self async def delete(self) -> int: """ Removes the Model instance from the database. Sends pre_delete and post_delete signals. Sets model save status to False. 
Note it does not delete the Model itself (python object). So you can delete and later save (since pk is deleted no conflict will arise) or update and the Model will be saved in database again. :return: number of deleted rows (for some backends) :rtype: int """ await self.signals.pre_delete.send(sender=self.__class__, instance=self) expr = self.Meta.table.delete() expr = expr.where(self.pk_column == (getattr(self, self.Meta.pkname))) result = await self.Meta.database.execute(expr) self.set_save_status(False) await self.signals.post_delete.send(sender=self.__class__, instance=self) return result async def load(self: T) -> T: """ Allow to refresh existing Models fields from database. Be careful as the related models can be overwritten by pk_only models in load. Does NOT refresh the related models fields if they were loaded before. :raises NoMatch: If given pk is not found in database. :return: reloaded Model :rtype: Model """ expr = self.Meta.table.select().where(self.pk_column == self.pk) row = await self.Meta.database.fetch_one(expr) if not row: # pragma nocover raise NoMatch("Instance was deleted from database and cannot be refreshed") kwargs = dict(row) kwargs = self.translate_aliases_to_columns(kwargs) self.update_from_dict(kwargs) self.set_save_status(True) return self async def load_all( self: T, follow: bool = False, exclude: Union[List, str, Set, Dict] = None, order_by: Union[List, str] = None, ) -> T: """ Allow to refresh existing Models fields from database. Performs refresh of the related models fields. By default, loads only self and the directly related ones. If follow=True is set it loads also related models of related models. To not get stuck in an infinite loop as related models also keep a relation to parent model visited models set is kept. That way already visited models that are nested are loaded, but the load do not follow them inside. 
So Model A -> Model B -> Model C -> Model A -> Model X will load second Model A but will never follow into Model X. Nested relations of those kind need to be loaded manually. :param order_by: columns by which models should be sorted :type order_by: Union[List, str] :raises NoMatch: If given pk is not found in database. :param exclude: related models to exclude :type exclude: Union[List, str, Set, Dict] :param follow: flag to trigger deep save - by default only directly related models are saved with follow=True also related models of related models are saved :type follow: bool :return: reloaded Model :rtype: Model """ relations = list(self.extract_related_names()) if follow: relations = self._iterate_related_models() queryset = self.__class__.objects if exclude: queryset = queryset.exclude_fields(exclude) if order_by: queryset = queryset.order_by(order_by) instance = await queryset.select_related(relations).get(pk=self.pk) self._orm.clear() self.update_from_dict(instance.dict()) return self ormar-0.12.2/ormar/models/model_row.py000066400000000000000000000355571444363446500177140ustar00rootroot00000000000000from typing import Any, Dict, List, Optional, TYPE_CHECKING, Tuple, Type, Union, cast try: from sqlalchemy.engine.result import ResultProxy # type: ignore except ImportError: # pragma: no cover from sqlalchemy.engine.result import Row as ResultProxy # type: ignore from ormar.models import NewBaseModel # noqa: I202 from ormar.models.excludable import ExcludableItems from ormar.models.helpers.models import group_related_list if TYPE_CHECKING: # pragma: no cover from ormar.fields import ForeignKeyField from ormar.models import Model class ModelRow(NewBaseModel): @classmethod def from_row( # noqa: CFQ002 cls, row: ResultProxy, source_model: Type["Model"], select_related: List = None, related_models: Any = None, related_field: "ForeignKeyField" = None, excludable: ExcludableItems = None, current_relation_str: str = "", proxy_source_model: Optional[Type["Model"]] = 
None, used_prefixes: List[str] = None, ) -> Optional["Model"]: """ Model method to convert raw sql row from database into ormar.Model instance. Traverses nested models if they were specified in select_related for query. Called recurrently and returns model instance if it's present in the row. Note that it's processing one row at a time, so if there are duplicates of parent row that needs to be joined/combined (like parent row in sql join with 2+ child rows) instances populated in this method are later combined in the QuerySet. Other method working directly on raw database results is in prefetch_query, where rows are populated in a different way as they do not have nested models in result. :param used_prefixes: list of already extracted prefixes :type used_prefixes: List[str] :param proxy_source_model: source model from which querysetproxy is constructed :type proxy_source_model: Optional[Type["ModelRow"]] :param excludable: structure of fields to include and exclude :type excludable: ExcludableItems :param current_relation_str: name of the relation field :type current_relation_str: str :param source_model: model on which relation was defined :type source_model: Type[Model] :param row: raw result row from the database :type row: ResultProxy :param select_related: list of names of related models fetched from database :type select_related: List :param related_models: list or dict of related models :type related_models: Union[List, Dict] :param related_field: field with relation declaration :type related_field: ForeignKeyField :return: returns model if model is populated from database :rtype: Optional[Model] """ item: Dict[str, Any] = {} select_related = select_related or [] related_models = related_models or [] table_prefix = "" used_prefixes = used_prefixes if used_prefixes is not None else [] excludable = excludable or ExcludableItems() if select_related: related_models = group_related_list(select_related) if related_field: table_prefix = cls._process_table_prefix( 
source_model=source_model, current_relation_str=current_relation_str, related_field=related_field, used_prefixes=used_prefixes, ) item = cls._populate_nested_models_from_row( item=item, row=row, related_models=related_models, excludable=excludable, current_relation_str=current_relation_str, source_model=source_model, # type: ignore proxy_source_model=proxy_source_model, # type: ignore table_prefix=table_prefix, used_prefixes=used_prefixes, ) item = cls.extract_prefixed_table_columns( item=item, row=row, table_prefix=table_prefix, excludable=excludable ) instance: Optional["Model"] = None if item.get(cls.Meta.pkname, None) is not None: item["__excluded__"] = cls.get_names_to_exclude( excludable=excludable, alias=table_prefix ) instance = cast("Model", cls(**item)) instance.set_save_status(True) return instance @classmethod def _process_table_prefix( cls, source_model: Type["Model"], current_relation_str: str, related_field: "ForeignKeyField", used_prefixes: List[str], ) -> str: """ :param source_model: model on which relation was defined :type source_model: Type[Model] :param current_relation_str: current relation string :type current_relation_str: str :param related_field: field with relation declaration :type related_field: "ForeignKeyField" :param used_prefixes: list of already extracted prefixes :type used_prefixes: List[str] :return: table_prefix to use :rtype: str """ if related_field.is_multi: previous_model = related_field.through else: previous_model = related_field.owner table_prefix = cls.Meta.alias_manager.resolve_relation_alias( from_model=previous_model, relation_name=related_field.name ) if not table_prefix or table_prefix in used_prefixes: manager = cls.Meta.alias_manager table_prefix = manager.resolve_relation_alias_after_complex( source_model=source_model, relation_str=current_relation_str, relation_field=related_field, ) used_prefixes.append(table_prefix) return table_prefix @classmethod def _populate_nested_models_from_row( # noqa: CFQ002 cls, 
item: dict, row: ResultProxy, source_model: Type["Model"], related_models: Any, excludable: ExcludableItems, table_prefix: str, used_prefixes: List[str], current_relation_str: str = None, proxy_source_model: Type["Model"] = None, ) -> dict: """ Traverses structure of related models and populates the nested models from the database row. Related models can be a list if only directly related models are to be populated, converted to dict if related models also have their own related models to be populated. Recurrently calls from_row method on nested instances and create nested instances. In the end those instances are added to the final model dictionary. :param proxy_source_model: source model from which querysetproxy is constructed :type proxy_source_model: Optional[Type["ModelRow"]] :param excludable: structure of fields to include and exclude :type excludable: ExcludableItems :param source_model: source model from which relation started :type source_model: Type[Model] :param current_relation_str: joined related parts into one string :type current_relation_str: str :param item: dictionary of already populated nested models, otherwise empty dict :type item: Dict :param row: raw result row from the database :type row: ResultProxy :param related_models: list or dict of related models :type related_models: Union[Dict, List] :return: dictionary with keys corresponding to model fields names and values are database values :rtype: Dict """ for related in related_models: field = cls.Meta.model_fields[related] field = cast("ForeignKeyField", field) model_cls = field.to model_excludable = excludable.get( model_cls=cast(Type["Model"], cls), alias=table_prefix ) if model_excludable.is_excluded(related): continue relation_str, remainder = cls._process_remainder_and_relation_string( related_models=related_models, current_relation_str=current_relation_str, related=related, ) child = model_cls.from_row( row, related_models=remainder, related_field=field, excludable=excludable, 
current_relation_str=relation_str, source_model=source_model, proxy_source_model=proxy_source_model, used_prefixes=used_prefixes, ) item[model_cls.get_column_name_from_alias(related)] = child if ( field.is_multi and child and not model_excludable.is_excluded(field.through.get_name()) ): cls._populate_through_instance( row=row, item=item, related=related, excludable=excludable, child=child, proxy_source_model=proxy_source_model, ) return item @staticmethod def _process_remainder_and_relation_string( related_models: Union[Dict, List], current_relation_str: Optional[str], related: str, ) -> Tuple[str, Optional[Union[Dict, List]]]: """ Process remainder models and relation string :param related_models: list or dict of related models :type related_models: Union[Dict, List] :param current_relation_str: current relation string :type current_relation_str: Optional[str] :param related: name of the relation :type related: str """ relation_str = ( "__".join([current_relation_str, related]) if current_relation_str else related ) remainder = None if isinstance(related_models, dict) and related_models[related]: remainder = related_models[related] return relation_str, remainder @classmethod def _populate_through_instance( # noqa: CFQ002 cls, row: ResultProxy, item: Dict, related: str, excludable: ExcludableItems, child: "Model", proxy_source_model: Optional[Type["Model"]], ) -> None: """ Populates the through model on reverse side of current query. Normally it's child class, unless the query is from queryset. 
:param row: row from db result :type row: ResultProxy :param item: parent item dict :type item: Dict :param related: current relation name :type related: str :param excludable: structure of fields to include and exclude :type excludable: ExcludableItems :param child: child item of parent :type child: "Model" :param proxy_source_model: source model from which querysetproxy is constructed :type proxy_source_model: Type["Model"] """ through_name = cls.Meta.model_fields[related].through.get_name() through_child = cls._create_through_instance( row=row, related=related, through_name=through_name, excludable=excludable ) if child.__class__ != proxy_source_model: setattr(child, through_name, through_child) else: item[through_name] = through_child child.set_save_status(True) @classmethod def _create_through_instance( cls, row: ResultProxy, through_name: str, related: str, excludable: ExcludableItems, ) -> "ModelRow": """ Initialize the through model from db row. Excluded all relation fields and other exclude/include set in excludable. 
:param row: loaded row from database :type row: sqlalchemy.engine.ResultProxy :param through_name: name of the through field :type through_name: str :param related: name of the relation :type related: str :param excludable: structure of fields to include and exclude :type excludable: ExcludableItems :return: initialized through model without relation :rtype: "ModelRow" """ model_cls = cls.Meta.model_fields[through_name].to table_prefix = cls.Meta.alias_manager.resolve_relation_alias( from_model=cls, relation_name=related ) # remove relations on through field model_excludable = excludable.get(model_cls=model_cls, alias=table_prefix) model_excludable.set_values( value=model_cls.extract_related_names(), is_exclude=True ) child_dict = model_cls.extract_prefixed_table_columns( item={}, row=row, excludable=excludable, table_prefix=table_prefix ) child_dict["__excluded__"] = model_cls.get_names_to_exclude( excludable=excludable, alias=table_prefix ) child = model_cls(**child_dict) # type: ignore return child @classmethod def extract_prefixed_table_columns( cls, item: dict, row: ResultProxy, table_prefix: str, excludable: ExcludableItems, ) -> Dict: """ Extracts own fields from raw sql result, using a given prefix. Prefix changes depending on the table's position in a join. If the table is a main table, there is no prefix. All joined tables have prefixes to allow duplicate column names, as well as duplicated joins to the same table from multiple different tables. Extracted fields populates the related dict later used to construct a Model. Used in Model.from_row and PrefetchQuery._populate_rows methods. 
:param excludable: structure of fields to include and exclude :type excludable: ExcludableItems :param item: dictionary of already populated nested models, otherwise empty dict :type item: Dict :param row: raw result row from the database :type row: sqlalchemy.engine.result.ResultProxy :param table_prefix: prefix of the table from AliasManager each pair of tables have own prefix (two of them depending on direction) - used in joins to allow multiple joins to the same table. :type table_prefix: str :return: dictionary with keys corresponding to model fields names and values are database values :rtype: Dict """ selected_columns = cls.own_table_columns( model=cls, excludable=excludable, alias=table_prefix, use_alias=False ) column_prefix = table_prefix + "_" if table_prefix else "" for column in cls.Meta.table.columns: alias = cls.get_column_name_from_alias(column.name) if alias not in item and alias in selected_columns: prefixed_name = f"{column_prefix}{column.name}" item[alias] = row[prefixed_name] return item ormar-0.12.2/ormar/models/modelproxy.py000066400000000000000000000006331444363446500201120ustar00rootroot00000000000000from ormar.models.mixins import ( ExcludableMixin, MergeModelMixin, PrefetchQueryMixin, PydanticMixin, SavePrepareMixin, ) class ModelTableProxy( PrefetchQueryMixin, MergeModelMixin, SavePrepareMixin, ExcludableMixin, PydanticMixin, ): """ Used to combine all mixins with different set of functionalities. One of the bases of the ormar Model class. 
""" ormar-0.12.2/ormar/models/newbasemodel.py000066400000000000000000001133171444363446500203610ustar00rootroot00000000000000import base64 import sys import warnings from typing import ( AbstractSet, Any, Callable, Dict, List, Mapping, MutableSequence, Optional, Set, TYPE_CHECKING, Tuple, Type, TypeVar, Union, cast, ) import databases import pydantic import sqlalchemy from pydantic import BaseModel import ormar # noqa I100 from ormar.exceptions import ModelError, ModelPersistenceError from ormar.fields import BaseField from ormar.fields.foreign_key import ForeignKeyField from ormar.fields.parsers import encode_json from ormar.models.helpers import register_relation_in_alias_manager from ormar.models.helpers.relations import expand_reverse_relationship from ormar.models.helpers.sqlalchemy import ( populate_meta_sqlalchemy_table_if_required, update_column_definition, ) from ormar.models.metaclass import ModelMeta, ModelMetaclass from ormar.models.modelproxy import ModelTableProxy from ormar.models.utils import Extra from ormar.queryset.utils import translate_list_to_dict from ormar.relations.alias_manager import AliasManager from ormar.relations.relation import Relation from ormar.relations.relation_manager import RelationsManager if TYPE_CHECKING: # pragma no cover from ormar.models import Model from ormar.signals import SignalEmitter T = TypeVar("T", bound="NewBaseModel") IntStr = Union[int, str] DictStrAny = Dict[str, Any] SetStr = Set[str] AbstractSetIntStr = AbstractSet[IntStr] MappingIntStrAny = Mapping[IntStr, Any] class NewBaseModel(pydantic.BaseModel, ModelTableProxy, metaclass=ModelMetaclass): """ Main base class of ormar Model. Inherits from pydantic BaseModel and has all mixins combined in ModelTableProxy. Constructed with ModelMetaclass which in turn also inherits pydantic metaclass. Abstracts away all internals and helper functions, so final Model class has only the logic concerned with database connection and data persistance. 
""" __slots__ = ( "_orm_id", "_orm_saved", "_orm", "_pk_column", "__pk_only__", "__cached_hash__", ) if TYPE_CHECKING: # pragma no cover pk: Any __model_fields__: Dict[str, BaseField] __table__: sqlalchemy.Table __pydantic_model__: Type[BaseModel] __pkname__: str __tablename__: str __metadata__: sqlalchemy.MetaData __database__: databases.Database __relation_map__: Optional[List[str]] __cached_hash__: Optional[int] _orm_relationship_manager: AliasManager _orm: RelationsManager _orm_id: int _orm_saved: bool _related_names: Optional[Set] _through_names: Optional[Set] _related_names_hash: str _choices_fields: Set _pydantic_fields: Set _quick_access_fields: Set _json_fields: Set _bytes_fields: Set Meta: ModelMeta # noinspection PyMissingConstructor def __init__(self, *args: Any, **kwargs: Any) -> None: # type: ignore """ Initializer that creates a new ormar Model that is also pydantic Model at the same time. Passed keyword arguments can be only field names and their corresponding values as those will be passed to pydantic validation that will complain if extra params are passed. If relations are defined each relation is expanded and children models are also initialized and validated. Relation from both sides is registered so you can access related models from both sides. Json fields are automatically loaded/dumped if needed. Models marked as abstract=True in internal Meta class cannot be initialized. Accepts also special __pk_only__ flag that indicates that Model is constructed only with primary key value (so no other fields, it's a child model on other Model), that causes skipping the validation, that's the only case when the validation can be skipped. Accepts also special __excluded__ parameter that contains a set of fields that should be explicitly set to None, as otherwise pydantic will try to populate them with their default values if default is set. 
:raises ModelError: if abstract model is initialized, model has ForwardRefs that has not been updated or unknown field is passed :param args: ignored args :type args: Any :param kwargs: keyword arguments - all fields values and some special params :type kwargs: Any """ self._verify_model_can_be_initialized() self._initialize_internal_attributes() pk_only = kwargs.pop("__pk_only__", False) object.__setattr__(self, "__pk_only__", pk_only) new_kwargs, through_tmp_dict = self._process_kwargs(kwargs) if not pk_only: values, fields_set, validation_error = pydantic.validate_model( self, new_kwargs # type: ignore ) if validation_error: raise validation_error else: fields_set = {self.Meta.pkname} values = new_kwargs object.__setattr__(self, "__dict__", values) object.__setattr__(self, "__fields_set__", fields_set) # add back through fields new_kwargs.update(through_tmp_dict) model_fields = object.__getattribute__(self, "Meta").model_fields # register the columns models after initialization for related in self.extract_related_names().union(self.extract_through_names()): model_fields[related].expand_relationship( new_kwargs.get(related), self, to_register=True ) if hasattr(self, "_init_private_attributes"): # introduced in pydantic 1.7 self._init_private_attributes() def __setattr__(self, name: str, value: Any) -> None: # noqa CCR001 """ Overwrites setattr in pydantic parent as otherwise descriptors are not called. 
:param name: name of the attribute to set :type name: str :param value: value of the attribute to set :type value: Any :return: None :rtype: None """ prev_hash = hash(self) if hasattr(self, name): object.__setattr__(self, name, value) else: # let pydantic handle errors for unknown fields super().__setattr__(name, value) # In this case, the hash could have changed, so update it if name == self.Meta.pkname or self.pk is None: object.__setattr__(self, "__cached_hash__", None) new_hash = hash(self) if prev_hash != new_hash: self._update_relation_cache(prev_hash, new_hash) def __getattr__(self, item: str) -> Any: """ Used only to silence mypy errors for Through models and reverse relations. Not used in real life as in practice calls are intercepted by RelationDescriptors :param item: name of attribute :type item: str :return: Any :rtype: Any """ return super().__getattribute__(item) def __getstate__(self) -> Dict[Any, Any]: state = super().__getstate__() self_dict = self.dict() state["__dict__"].update(**self_dict) return state def __setstate__(self, state: Dict[Any, Any]) -> None: relations = { k: v for k, v in state["__dict__"].items() if k in self.extract_related_names() } basic_state = { k: v for k, v in state["__dict__"].items() if k not in self.extract_related_names() } state["__dict__"] = basic_state super().__setstate__(state) self._initialize_internal_attributes() for name, value in relations.items(): setattr(self, name, value) def _update_relation_cache(self, prev_hash: int, new_hash: int) -> None: """ Update all relation proxy caches with different hash if we have changed :param prev_hash: The previous hash to update :type prev_hash: int :param new_hash: The hash to update to :type new_hash: int """ def _update_cache(relations: List[Relation], recurse: bool = True) -> None: for relation in relations: relation_proxy = relation.get() if hasattr(relation_proxy, "update_cache"): relation_proxy.update_cache(prev_hash, new_hash) # type: ignore elif recurse and 
hasattr(relation_proxy, "_orm"): _update_cache( relation_proxy._orm._relations.values(), # type: ignore recurse=False, ) _update_cache(list(self._orm._relations.values())) def _internal_set(self, name: str, value: Any) -> None: """ Delegates call to pydantic. :param name: name of param :type name: str :param value: value to set :type value: Any """ super().__setattr__(name, value) def _verify_model_can_be_initialized(self) -> None: """ Raises exception if model is abstract or has ForwardRefs in relation fields. :return: None :rtype: None """ if self.Meta.abstract: raise ModelError(f"You cannot initialize abstract model {self.get_name()}") if self.Meta.requires_ref_update: raise ModelError( f"Model {self.get_name()} has not updated " f"ForwardRefs. \nBefore using the model you " f"need to call update_forward_refs()." ) def _process_kwargs(self, kwargs: Dict) -> Tuple[Dict, Dict]: # noqa: CCR001 """ Initializes nested models. Removes property_fields Checks if field is in the model fields or pydatnic fields. Nullifies fields that should be excluded. Extracts through models from kwargs into temporary dict. 
:param kwargs: passed to init keyword arguments :type kwargs: Dict :return: modified kwargs :rtype: Tuple[Dict, Dict] """ property_fields = self.Meta.property_fields model_fields = self.Meta.model_fields pydantic_fields = set(self.__fields__.keys()) # remove property fields for prop_filed in property_fields: kwargs.pop(prop_filed, None) excluded: Set[str] = kwargs.pop("__excluded__", set()) if "pk" in kwargs: kwargs[self.Meta.pkname] = kwargs.pop("pk") # extract through fields through_tmp_dict = dict() for field_name in self.extract_through_names(): through_tmp_dict[field_name] = kwargs.pop(field_name, None) kwargs = self._remove_extra_parameters_if_they_should_be_ignored( kwargs=kwargs, model_fields=model_fields, pydantic_fields=pydantic_fields ) try: new_kwargs: Dict[str, Any] = { k: self._convert_to_bytes( k, self._convert_json( k, model_fields[k].expand_relationship(v, self, to_register=False) if k in model_fields else (v if k in pydantic_fields else model_fields[k]), ), ) for k, v in kwargs.items() } except KeyError as e: raise ModelError( f"Unknown field '{e.args[0]}' for model {self.get_name(lower=False)}" ) # explicitly set None to excluded fields # as pydantic populates them with default if set for field_to_nullify in excluded: new_kwargs[field_to_nullify] = None return new_kwargs, through_tmp_dict def _remove_extra_parameters_if_they_should_be_ignored( self, kwargs: Dict, model_fields: Dict, pydantic_fields: Set ) -> Dict: """ Removes the extra fields from kwargs if they should be ignored. 
:param kwargs: passed arguments :type kwargs: Dict :param model_fields: dictionary of model fields :type model_fields: Dict :param pydantic_fields: set of pydantic fields names :type pydantic_fields: Set :return: dict without extra fields :rtype: Dict """ if self.Meta.extra == Extra.ignore: kwargs = { k: v for k, v in kwargs.items() if k in model_fields or k in pydantic_fields } return kwargs def _initialize_internal_attributes(self) -> None: """ Initializes internal attributes during __init__() :rtype: None """ # object.__setattr__(self, "_orm_id", uuid.uuid4().hex) object.__setattr__(self, "_orm_saved", False) object.__setattr__(self, "_pk_column", None) object.__setattr__( self, "_orm", RelationsManager( related_fields=self.extract_related_fields(), owner=cast("Model", self) ), ) def __eq__(self, other: object) -> bool: """ Compares other model to this model. when == is called. :param other: other model to compare :type other: object :return: result of comparison :rtype: bool """ if isinstance(other, NewBaseModel): return self.__same__(other) return super().__eq__(other) # pragma no cover def __hash__(self) -> int: if getattr(self, "__cached_hash__", None) is not None: return self.__cached_hash__ or 0 if self.pk is not None: ret = hash(str(self.pk) + self.__class__.__name__) else: vals = { k: v for k, v in self.__dict__.items() if k not in self.extract_related_names() } ret = hash(str(vals) + self.__class__.__name__) object.__setattr__(self, "__cached_hash__", ret) return ret def __same__(self, other: "NewBaseModel") -> bool: """ Used by __eq__, compares other model to this model. 
Compares: * _orm_ids, * primary key values if it's set * dictionary of own fields (excluding relations) :param other: model to compare to :type other: NewBaseModel :return: result of comparison :rtype: bool """ if (self.pk is None and other.pk is not None) or ( self.pk is not None and other.pk is None ): return False else: return hash(self) == other.__hash__() def _copy_and_set_values( self: "NewBaseModel", values: "DictStrAny", fields_set: "SetStr", *, deep: bool ) -> "NewBaseModel": """ Overwrite related models values with dict representation to avoid infinite recursion through related fields. """ self_dict = values self_dict.update(self.dict(exclude_list=True)) return cast( "NewBaseModel", super()._copy_and_set_values( values=self_dict, fields_set=fields_set, deep=deep ), ) @classmethod def get_name(cls, lower: bool = True) -> str: """ Returns name of the Model class, by default lowercase. :param lower: flag if name should be set to lowercase :type lower: bool :return: name of the model :rtype: str """ name = cls.__name__ if lower: name = name.lower() return name @property def pk_column(self) -> sqlalchemy.Column: """ Retrieves primary key sqlalchemy column from models Meta.table. Each model has to have primary key. Only one primary key column is allowed. :return: primary key sqlalchemy column :rtype: sqlalchemy.Column """ if object.__getattribute__(self, "_pk_column") is not None: return object.__getattribute__(self, "_pk_column") pk_columns = self.Meta.table.primary_key.columns.values() pk_col = pk_columns[0] object.__setattr__(self, "_pk_column", pk_col) return pk_col @property def saved(self) -> bool: """Saved status of the model. 
Changed by setattr and loading from db""" return self._orm_saved @property def signals(self) -> "SignalEmitter": """Exposes signals from model Meta""" return self.Meta.signals @classmethod def pk_type(cls) -> Any: """Shortcut to models primary key field type""" return cls.Meta.model_fields[cls.Meta.pkname].__type__ @classmethod def db_backend_name(cls) -> str: """Shortcut to database dialect, cause some dialect require different treatment""" return cls.Meta.database._backend._dialect.name def remove(self, parent: "Model", name: str) -> None: """Removes child from relation with given name in RelationshipManager""" self._orm.remove_parent(self, parent, name) def set_save_status(self, status: bool) -> None: """Sets value of the save status""" object.__setattr__(self, "_orm_saved", status) @classmethod def get_properties( cls, include: Union[Set, Dict, None], exclude: Union[Set, Dict, None] ) -> Set[str]: """ Returns a set of names of functions/fields decorated with @property_field decorator. They are added to dictionary when called directly and therefore also are present in fastapi responses. :param include: fields to include :type include: Union[Set, Dict, None] :param exclude: fields to exclude :type exclude: Union[Set, Dict, None] :return: set of property fields names :rtype: Set[str] """ props = cls.Meta.property_fields if include: props = {prop for prop in props if prop in include} if exclude: props = {prop for prop in props if prop not in exclude} return props @classmethod def update_forward_refs(cls, **localns: Any) -> None: """ Processes fields that are ForwardRef and need to be evaluated into actual models. Expands relationships, register relation in alias manager and substitutes sqlalchemy columns with new ones with proper column type (null before). Populates Meta table of the Model which is left empty before. Sets self_reference flag on models that links to themselves. Calls the pydantic method to evaluate pydantic fields. 
:param localns: local namespace :type localns: Any :return: None :rtype: None """ globalns = sys.modules[cls.__module__].__dict__.copy() globalns.setdefault(cls.__name__, cls) fields_to_check = cls.Meta.model_fields.copy() for field in fields_to_check.values(): if field.has_unresolved_forward_refs(): field = cast(ForeignKeyField, field) field.evaluate_forward_ref(globalns=globalns, localns=localns) field.set_self_reference_flag() if field.is_multi and not field.through: field = cast(ormar.ManyToManyField, field) field.create_default_through_model() expand_reverse_relationship(model_field=field) register_relation_in_alias_manager(field=field) update_column_definition(model=cls, field=field) populate_meta_sqlalchemy_table_if_required(meta=cls.Meta) super().update_forward_refs(**localns) cls.Meta.requires_ref_update = False @staticmethod def _get_not_excluded_fields( fields: Union[List, Set], include: Optional[Dict], exclude: Optional[Dict] ) -> List: """ Returns related field names applying on them include and exclude set. :param include: fields to include :type include: Union[Set, Dict, None] :param exclude: fields to exclude :type exclude: Union[Set, Dict, None] :return: :rtype: List of fields with relations that is not excluded """ fields = [*fields] if not isinstance(fields, list) else fields if include: fields = [field for field in fields if field in include] if exclude: fields = [ field for field in fields if field not in exclude or ( exclude.get(field) is not Ellipsis and exclude.get(field) != {"__all__"} ) ] return fields @staticmethod def _extract_nested_models_from_list( relation_map: Dict, models: MutableSequence, include: Union[Set, Dict, None], exclude: Union[Set, Dict, None], exclude_primary_keys: bool, exclude_through_models: bool, ) -> List: """ Converts list of models into list of dictionaries. 
:param models: List of models :type models: List :param include: fields to include :type include: Union[Set, Dict, None] :param exclude: fields to exclude :type exclude: Union[Set, Dict, None] :return: list of models converted to dictionaries :rtype: List[Dict] """ result = [] for model in models: try: result.append( model.dict( relation_map=relation_map, include=include, exclude=exclude, exclude_primary_keys=exclude_primary_keys, exclude_through_models=exclude_through_models, ) ) except ReferenceError: # pragma no cover continue return result @classmethod def _skip_ellipsis( cls, items: Union[Set, Dict, None], key: str, default_return: Any = None ) -> Union[Set, Dict, None]: """ Helper to traverse the include/exclude dictionaries. In dict() Ellipsis should be skipped as it indicates all fields required and not the actual set/dict with fields names. :param items: current include/exclude value :type items: Union[Set, Dict, None] :param key: key for nested relations to check :type key: str :return: nested value of the items :rtype: Union[Set, Dict, None] """ result = cls.get_child(items, key) return result if result is not Ellipsis else default_return @staticmethod def _convert_all(items: Union[Set, Dict, None]) -> Union[Set, Dict, None]: """ Helper to convert __all__ pydantic special index to ormar which does not support index based exclusions. :param items: current include/exclude value :type items: Union[Set, Dict, None] """ if isinstance(items, dict) and "__all__" in items: return items.get("__all__") return items def _extract_nested_models( # noqa: CCR001, CFQ002 self, relation_map: Dict, dict_instance: Dict, include: Optional[Dict], exclude: Optional[Dict], exclude_primary_keys: bool, exclude_through_models: bool, exclude_list: bool, ) -> Dict: """ Traverse nested models and converts them into dictionaries. Calls itself recursively if needed. 
:param nested: flag if current instance is nested :type nested: bool :param dict_instance: current instance dict :type dict_instance: Dict :param include: fields to include :type include: Optional[Dict] :param exclude: fields to exclude :type exclude: Optional[Dict] :param exclude: whether to exclude lists :type exclude: bool :return: current model dict with child models converted to dictionaries :rtype: Dict """ fields = self._get_not_excluded_fields( fields=self.extract_related_names(), include=include, exclude=exclude ) for field in fields: if not relation_map or field not in relation_map: continue try: nested_model = getattr(self, field) if isinstance(nested_model, MutableSequence): if exclude_list: continue dict_instance[field] = self._extract_nested_models_from_list( relation_map=self._skip_ellipsis( # type: ignore relation_map, field, default_return=dict() ), models=nested_model, include=self._convert_all(self._skip_ellipsis(include, field)), exclude=self._convert_all(self._skip_ellipsis(exclude, field)), exclude_primary_keys=exclude_primary_keys, exclude_through_models=exclude_through_models, ) elif nested_model is not None: dict_instance[field] = nested_model.dict( relation_map=self._skip_ellipsis( relation_map, field, default_return=dict() ), include=self._convert_all(self._skip_ellipsis(include, field)), exclude=self._convert_all(self._skip_ellipsis(exclude, field)), exclude_primary_keys=exclude_primary_keys, exclude_through_models=exclude_through_models, ) else: dict_instance[field] = None except ReferenceError: dict_instance[field] = None return dict_instance def dict( # type: ignore # noqa A003 self, *, include: Union[Set, Dict] = None, exclude: Union[Set, Dict] = None, by_alias: bool = False, skip_defaults: bool = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, exclude_primary_keys: bool = False, exclude_through_models: bool = False, exclude_list: bool = False, relation_map: Dict = None, ) -> 
"DictStrAny": # noqa: A003' """ Generate a dictionary representation of the model, optionally specifying which fields to include or exclude. Nested models are also parsed to dictionaries. Additionally fields decorated with @property_field are also added. :param exclude_through_models: flag to exclude through models from dict :type exclude_through_models: bool :param exclude_primary_keys: flag to exclude primary keys from dict :type exclude_primary_keys: bool :param include: fields to include :type include: Union[Set, Dict, None] :param exclude: fields to exclude :type exclude: Union[Set, Dict, None] :param by_alias: flag to get values by alias - passed to pydantic :type by_alias: bool :param skip_defaults: flag to not set values - passed to pydantic :type skip_defaults: bool :param exclude_unset: flag to exclude not set values - passed to pydantic :type exclude_unset: bool :param exclude_defaults: flag to exclude default values - passed to pydantic :type exclude_defaults: bool :param exclude_none: flag to exclude None values - passed to pydantic :type exclude_none: bool :param exclude_list: flag to exclude lists of nested values models from dict :type exclude_list: bool :param relation_map: map of the relations to follow to avoid circural deps :type relation_map: Dict :return: :rtype: """ pydantic_exclude = self._update_excluded_with_related(exclude) pydantic_exclude = self._update_excluded_with_pks_and_through( exclude=pydantic_exclude, exclude_primary_keys=exclude_primary_keys, exclude_through_models=exclude_through_models, ) dict_instance = super().dict( include=include, exclude=pydantic_exclude, by_alias=by_alias, skip_defaults=skip_defaults, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, ) dict_instance = { k: self._convert_bytes_to_str(column_name=k, value=v) for k, v in dict_instance.items() } if include and isinstance(include, Set): include = translate_list_to_dict(include) if exclude and isinstance(exclude, Set): 
exclude = translate_list_to_dict(exclude) relation_map = ( relation_map if relation_map is not None else translate_list_to_dict(self._iterate_related_models()) ) pk_only = getattr(self, "__pk_only__", False) if relation_map and not pk_only: dict_instance = self._extract_nested_models( relation_map=relation_map, dict_instance=dict_instance, include=include, # type: ignore exclude=exclude, # type: ignore exclude_primary_keys=exclude_primary_keys, exclude_through_models=exclude_through_models, exclude_list=exclude_list, ) # include model properties as fields in dict if object.__getattribute__(self, "Meta").property_fields: props = self.get_properties(include=include, exclude=exclude) if props: dict_instance.update({prop: getattr(self, prop) for prop in props}) return dict_instance def json( # type: ignore # noqa A003 self, *, include: Union[Set, Dict] = None, exclude: Union[Set, Dict] = None, by_alias: bool = False, skip_defaults: bool = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, exclude_primary_keys: bool = False, exclude_through_models: bool = False, **dumps_kwargs: Any, ) -> str: """ Generate a JSON representation of the model, `include` and `exclude` arguments as per `dict()`. `encoder` is an optional function to supply as `default` to json.dumps(), other arguments as per `json.dumps()`. 
""" if skip_defaults is not None: # pragma: no cover warnings.warn( f'{self.__class__.__name__}.json(): "skip_defaults" is deprecated ' f'and replaced by "exclude_unset"', DeprecationWarning, ) exclude_unset = skip_defaults encoder = cast(Callable[[Any], Any], encoder or self.__json_encoder__) data = self.dict( include=include, exclude=exclude, by_alias=by_alias, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, exclude_primary_keys=exclude_primary_keys, exclude_through_models=exclude_through_models, ) if self.__custom_root_type__: # pragma: no cover data = data["__root__"] return self.__config__.json_dumps(data, default=encoder, **dumps_kwargs) @classmethod def construct( cls: Type["T"], _fields_set: Optional["SetStr"] = None, **values: Any ) -> "T": own_values = { k: v for k, v in values.items() if k not in cls.extract_related_names() } model = cls.__new__(cls) fields_values: Dict[str, Any] = {} for name, field in cls.__fields__.items(): if name in own_values: fields_values[name] = own_values[name] elif not field.required: fields_values[name] = field.get_default() fields_values.update(own_values) object.__setattr__(model, "__dict__", fields_values) model._initialize_internal_attributes() cls._construct_relations(model=model, values=values) if _fields_set is None: _fields_set = set(values.keys()) object.__setattr__(model, "__fields_set__", _fields_set) return model @classmethod def _construct_relations(cls: Type["T"], model: "T", values: Dict) -> None: present_relations = [ relation for relation in cls.extract_related_names() if relation in values ] for relation in present_relations: value_to_set = values[relation] if not isinstance(value_to_set, list): value_to_set = [value_to_set] relation_field = cls.Meta.model_fields[relation] relation_value = [ relation_field.expand_relationship(x, model, to_register=False) for x in value_to_set if x is not None ] for child in relation_value: model._orm.add( parent=cast("Model", 
child), child=cast("Model", model), field=cast("ForeignKeyField", relation_field), ) def update_from_dict(self, value_dict: Dict) -> "NewBaseModel": """ Updates self with values of fields passed in the dictionary. :param value_dict: dictionary of fields names and values :type value_dict: Dict :return: self :rtype: NewBaseModel """ for key, value in value_dict.items(): setattr(self, key, value) return self def _convert_to_bytes(self, column_name: str, value: Any) -> Union[str, Dict]: """ Converts value to bytes from string :param column_name: name of the field :type column_name: str :param value: value fo the field :type value: Any :return: converted value if needed, else original value :rtype: Any """ if column_name not in self._bytes_fields: return value field = self.Meta.model_fields[column_name] if not isinstance(value, bytes) and value is not None: if field.represent_as_base64_str: value = base64.b64decode(value) else: value = value.encode("utf-8") return value def _convert_bytes_to_str(self, column_name: str, value: Any) -> Union[str, Dict]: """ Converts value to str from bytes for represent_as_base64_str columns. :param column_name: name of the field :type column_name: str :param value: value fo the field :type value: Any :return: converted value if needed, else original value :rtype: Any """ if column_name not in self._bytes_fields: return value field = self.Meta.model_fields[column_name] if ( value is not None and not isinstance(value, str) and field.represent_as_base64_str ): return base64.b64encode(value).decode() return value def _convert_json(self, column_name: str, value: Any) -> Union[str, Dict, None]: """ Converts value to/from json if needed (for Json columns). 
:param column_name: name of the field :type column_name: str :param value: value fo the field :type value: Any :return: converted value if needed, else original value :rtype: Any """ if column_name not in self._json_fields: return value return encode_json(value) def _extract_own_model_fields(self) -> Dict: """ Returns a dictionary with field names and values for fields that are not relations fields (ForeignKey, ManyToMany etc.) :return: dictionary of fields names and values. :rtype: Dict """ related_names = self.extract_related_names() self_fields = {k: v for k, v in self.__dict__.items() if k not in related_names} return self_fields def _extract_model_db_fields(self) -> Dict: """ Returns a dictionary with field names and values for fields that are stored in current model's table. That includes own non-relational fields ang foreign key fields. :return: dictionary of fields names and values. :rtype: Dict """ # TODO: Cache this dictionary? self_fields = self._extract_own_model_fields() self_fields = { k: v for k, v in self_fields.items() if self.get_column_alias(k) in self.Meta.table.columns } for field in self._extract_db_related_names(): relation_field = self.Meta.model_fields[field] target_pk_name = relation_field.to.Meta.pkname target_field = getattr(self, field) self_fields[field] = getattr(target_field, target_pk_name, None) if not relation_field.nullable and not self_fields[field]: raise ModelPersistenceError( f"You cannot save {relation_field.to.get_name()} " f"model without pk set!" ) return self_fields def get_relation_model_id(self, target_field: "BaseField") -> Optional[int]: """ Returns an id of the relation side model to use in prefetch query. 
:param target_field: field with relation definition :type target_field: "BaseField" :return: value of pk if set :rtype: Optional[int] """ if target_field.virtual or target_field.is_multi: return self.pk related_name = target_field.name related_model = getattr(self, related_name) return None if not related_model else related_model.pk ormar-0.12.2/ormar/models/quick_access_views.py000066400000000000000000000035201444363446500215600ustar00rootroot00000000000000""" Contains set of fields/methods etc names that are used to bypass the checks in NewBaseModel __getattribute__ calls to speed the calls. """ quick_access_set = { "Config", "Meta", "__class__", "__config__", "__custom_root_type__", "__dict__", "__fields__", "__fields_set__", "__json_encoder__", "__pk_only__", "__post_root_validators__", "__pre_root_validators__", "__private_attributes__", "__same__", "_calculate_keys", "_choices_fields", "_convert_json", "_extract_db_related_names", "_extract_model_db_fields", "_extract_nested_models", "_extract_nested_models_from_list", "_extract_own_model_fields", "_extract_related_model_instead_of_field", "_get_not_excluded_fields", "_get_value", "_init_private_attributes", "_is_conversion_to_json_needed", "_iter", "_iterate_related_models", "_orm", "_orm_id", "_orm_saved", "_related_names", "_skip_ellipsis", "_update_and_follow", "_update_excluded_with_related_not_required", "_verify_model_can_be_initialized", "copy", "delete", "dict", "extract_related_names", "extract_related_fields", "extract_through_names", "update_from_dict", "get_child", "get_column_alias", "get_column_name_from_alias", "get_filtered_names_to_extract", "get_name", "get_properties", "get_related_field_name", "get_relation_model_id", "json", "keys", "load", "load_all", "pk_column", "pk_type", "populate_default_values", "prepare_model_to_save", "remove", "resolve_relation_field", "resolve_relation_name", "save", "save_related", "saved", "set_save_status", "signals", "translate_aliases_to_columns", 
"translate_columns_to_aliases", "update", "upsert", } ormar-0.12.2/ormar/models/traversible.py000066400000000000000000000073231444363446500202350ustar00rootroot00000000000000from typing import Any, List, Optional, TYPE_CHECKING, Type if TYPE_CHECKING: # pragma no cover from ormar.models.mixins.relation_mixin import RelationMixin class NodeList: """ Helper class that helps with iterating nested models """ def __init__(self) -> None: self.node_list: List["Node"] = [] def __getitem__(self, item: Any) -> Any: return self.node_list.__getitem__(item) def add( self, node_class: Type["RelationMixin"], relation_name: str = None, parent_node: "Node" = None, ) -> "Node": """ Adds new Node or returns the existing one :param node_class: Model in current node :type node_class: ormar.models.metaclass.ModelMetaclass :param relation_name: name of the current relation :type relation_name: str :param parent_node: parent node :type parent_node: Optional[Node] :return: returns new or already existing node :rtype: Node """ existing_node = self.find( relation_name=relation_name, node_class=node_class, parent_node=parent_node ) if not existing_node: current_node = Node( node_class=node_class, relation_name=relation_name, parent_node=parent_node, ) self.node_list.append(current_node) return current_node return existing_node # pragma: no cover def find( self, node_class: Type["RelationMixin"], relation_name: Optional[str] = None, parent_node: "Node" = None, ) -> Optional["Node"]: """ Searches for existing node with given parameters :param node_class: Model in current node :type node_class: ormar.models.metaclass.ModelMetaclass :param relation_name: name of the current relation :type relation_name: str :param parent_node: parent node :type parent_node: Optional[Node] :return: returns already existing node or None :rtype: Optional[Node] """ for node in self.node_list: if ( node.node_class == node_class and node.parent_node == parent_node and node.relation_name == relation_name ): return node 
# pragma: no cover return None class Node: def __init__( self, node_class: Type["RelationMixin"], relation_name: str = None, parent_node: "Node" = None, ) -> None: self.relation_name = relation_name self.node_class = node_class self.parent_node = parent_node self.visited_children: List["Node"] = [] if self.parent_node: self.parent_node.visited_children.append(self) def __repr__(self) -> str: # pragma: no cover return ( f"{self.node_class.get_name(lower=False)}, " f"relation:{self.relation_name}, " f"parent: {self.parent_node}" ) def visited(self, relation_name: str) -> bool: """ Checks if given relation was already visited. Relation was visited if it's name is in current node children. Relation was visited if one of the parent node had the same Model class :param relation_name: name of relation :type relation_name: str :return: result of the check :rtype: bool """ target_model = self.node_class.Meta.model_fields[relation_name].to if self.parent_node: node = self while node.parent_node: node = node.parent_node if node.node_class == target_model: return True return False ormar-0.12.2/ormar/models/utils.py000066400000000000000000000001341444363446500170440ustar00rootroot00000000000000from enum import Enum class Extra(str, Enum): ignore = "ignore" forbid = "forbid" ormar-0.12.2/ormar/protocols/000077500000000000000000000000001444363446500160755ustar00rootroot00000000000000ormar-0.12.2/ormar/protocols/__init__.py000066400000000000000000000002621444363446500202060ustar00rootroot00000000000000from ormar.protocols.queryset_protocol import QuerySetProtocol from ormar.protocols.relation_protocol import RelationProtocol __all__ = ["QuerySetProtocol", "RelationProtocol"] ormar-0.12.2/ormar/protocols/queryset_protocol.py000066400000000000000000000037101444363446500222520ustar00rootroot00000000000000from typing import Any, Dict, List, Optional, Sequence, Set, TYPE_CHECKING, Tuple, Union try: from typing import Protocol except ImportError: # pragma: nocover from typing_extensions 
import Protocol # type: ignore if TYPE_CHECKING: # noqa: C901; #pragma nocover from ormar import Model from ormar.relations.querysetproxy import QuerysetProxy class QuerySetProtocol(Protocol): # pragma: nocover def filter(self, **kwargs: Any) -> "QuerysetProxy": # noqa: A003, A001 ... def exclude(self, **kwargs: Any) -> "QuerysetProxy": # noqa: A003, A001 ... def select_related(self, related: Union[List, str]) -> "QuerysetProxy": ... def prefetch_related(self, related: Union[List, str]) -> "QuerysetProxy": ... async def exists(self) -> bool: ... async def count(self, distinct: bool = True) -> int: ... async def clear(self) -> int: ... def limit(self, limit_count: int) -> "QuerysetProxy": ... def offset(self, offset: int) -> "QuerysetProxy": ... async def first(self, **kwargs: Any) -> "Model": ... async def get(self, **kwargs: Any) -> "Model": ... async def all( # noqa: A003, A001 self, **kwargs: Any ) -> Sequence[Optional["Model"]]: ... async def create(self, **kwargs: Any) -> "Model": ... async def update(self, each: bool = False, **kwargs: Any) -> int: ... async def get_or_create( self, _defaults: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> Tuple["Model", bool]: ... async def update_or_create(self, **kwargs: Any) -> "Model": ... def fields(self, columns: Union[List, str, Set, Dict]) -> "QuerysetProxy": ... def exclude_fields(self, columns: Union[List, str, Set, Dict]) -> "QuerysetProxy": ... def order_by(self, columns: Union[List, str]) -> "QuerysetProxy": ... ormar-0.12.2/ormar/protocols/relation_protocol.py000066400000000000000000000006671444363446500222160ustar00rootroot00000000000000from typing import TYPE_CHECKING, Type, Union try: from typing import Protocol except ImportError: # pragma: nocover from typing_extensions import Protocol # type: ignore if TYPE_CHECKING: # pragma: nocover from ormar import Model class RelationProtocol(Protocol): # pragma: nocover def add(self, child: "Model") -> None: ... 
def remove(self, child: Union["Model", Type["Model"]]) -> None: ... ormar-0.12.2/ormar/py.typed000066400000000000000000000000001444363446500155360ustar00rootroot00000000000000ormar-0.12.2/ormar/queryset/000077500000000000000000000000001444363446500157325ustar00rootroot00000000000000ormar-0.12.2/ormar/queryset/__init__.py000066400000000000000000000013061444363446500200430ustar00rootroot00000000000000""" Contains QuerySet and different Query classes to allow for constructing of sql queries. """ from ormar.queryset.actions import FilterAction, OrderAction, SelectAction from ormar.queryset.clause import and_, or_ from ormar.queryset.field_accessor import FieldAccessor from ormar.queryset.queries import FilterQuery from ormar.queryset.queries import LimitQuery from ormar.queryset.queries import OffsetQuery from ormar.queryset.queries import OrderQuery from ormar.queryset.queryset import QuerySet __all__ = [ "QuerySet", "FilterQuery", "LimitQuery", "OffsetQuery", "OrderQuery", "FilterAction", "OrderAction", "SelectAction", "and_", "or_", "FieldAccessor", ] ormar-0.12.2/ormar/queryset/actions/000077500000000000000000000000001444363446500173725ustar00rootroot00000000000000ormar-0.12.2/ormar/queryset/actions/__init__.py000066400000000000000000000003631444363446500215050ustar00rootroot00000000000000from ormar.queryset.actions.filter_action import FilterAction from ormar.queryset.actions.order_action import OrderAction from ormar.queryset.actions.select_action import SelectAction __all__ = ["FilterAction", "OrderAction", "SelectAction"] ormar-0.12.2/ormar/queryset/actions/filter_action.py000066400000000000000000000121251444363446500225670ustar00rootroot00000000000000from typing import Any, TYPE_CHECKING, Type import sqlalchemy import ormar # noqa: I100, I202 from ormar.exceptions import QueryDefinitionError from ormar.queryset.actions.query_action import QueryAction if TYPE_CHECKING: # pragma: nocover from ormar import Model FILTER_OPERATORS = { "exact": "__eq__", "iexact": 
"ilike", "contains": "like", "icontains": "ilike", "startswith": "like", "istartswith": "ilike", "endswith": "like", "iendswith": "ilike", "isnull": "is_", "in": "in_", "gt": "__gt__", "gte": "__ge__", "lt": "__lt__", "lte": "__le__", } METHODS_TO_OPERATORS = { "__eq__": "exact", "__mod__": "contains", "__gt__": "gt", "__ge__": "gte", "__lt__": "lt", "__le__": "lte", "iexact": "iexact", "contains": "contains", "icontains": "icontains", "startswith": "startswith", "istartswith": "istartswith", "endswith": "endswith", "iendswith": "iendswith", "isnull": "isnull", "in": "in", } ESCAPE_CHARACTERS = ["%", "_"] class FilterAction(QueryAction): """ Filter Actions is populated by queryset when filter() is called. All required params are extracted but kept raw until actual filter clause value is required -> then the action is converted into text() clause. Extracted in order to easily change table prefixes on complex relations. """ def __init__(self, filter_str: str, value: Any, model_cls: Type["Model"]) -> None: super().__init__(query_str=filter_str, model_cls=model_cls) self.filter_value = value self._escape_characters_in_clause() def has_escaped_characters(self) -> bool: """Check if value is a string that contains characters to escape""" return isinstance(self.filter_value, str) and any( c for c in ESCAPE_CHARACTERS if c in self.filter_value ) def _split_value_into_parts(self, query_str: str) -> None: parts = query_str.split("__") if parts[-1] in FILTER_OPERATORS: self.operator = parts[-1] self.field_name = parts[-2] self.related_parts = parts[:-2] else: self.operator = "exact" self.field_name = parts[-1] self.related_parts = parts[:-1] def _escape_characters_in_clause(self) -> None: """ Escapes the special characters ["%", "_"] if needed. Adds `%` for `like` queries. 
:raises QueryDefinitionError: if contains or icontains is used with ormar model instance :return: escaped value and flag if escaping is needed :rtype: Tuple[Any, bool] """ self.has_escaped_character = False if self.operator in [ "contains", "icontains", "startswith", "istartswith", "endswith", "iendswith", ]: if isinstance(self.filter_value, ormar.Model): raise QueryDefinitionError( "You cannot use contains and icontains with instance of the Model" ) self.has_escaped_character = self.has_escaped_characters() if self.has_escaped_character: self._escape_chars() self._prefix_suffix_quote() def _escape_chars(self) -> None: """Actually replaces chars to escape in value""" for char in ESCAPE_CHARACTERS: self.filter_value = self.filter_value.replace(char, f"\\{char}") def _prefix_suffix_quote(self) -> None: """ Adds % to the beginning of the value if operator checks for containment and not starts with. Adds % to the end of the value if operator checks for containment and not end with. :return: :rtype: """ prefix = "%" if "start" not in self.operator else "" sufix = "%" if "end" not in self.operator else "" self.filter_value = f"{prefix}{self.filter_value}{sufix}" def get_text_clause(self) -> sqlalchemy.sql.expression.BinaryExpression: """ Escapes characters if it's required. Substitutes values of the models if value is a ormar Model with its pk value. Compiles the clause. 
:return: complied and escaped clause :rtype: sqlalchemy.sql.elements.TextClause """ if isinstance(self.filter_value, ormar.Model): self.filter_value = self.filter_value.pk op_attr = FILTER_OPERATORS[self.operator] if self.operator == "isnull": op_attr = "is_" if self.filter_value else "isnot" filter_value = None else: filter_value = self.filter_value if self.table_prefix: aliased_table = self.source_model.Meta.alias_manager.prefixed_table_name( self.table_prefix, self.column.table ) aliased_column = getattr(aliased_table.c, self.column.name) else: aliased_column = self.column clause = getattr(aliased_column, op_attr)(filter_value) if self.has_escaped_character: clause.modifiers["escape"] = "\\" return clause ormar-0.12.2/ormar/queryset/actions/order_action.py000066400000000000000000000103541444363446500224170ustar00rootroot00000000000000from typing import TYPE_CHECKING, Type import sqlalchemy from sqlalchemy import text from ormar.queryset.actions.query_action import QueryAction # noqa: I100, I202 if TYPE_CHECKING: # pragma: nocover from ormar import Model class OrderAction(QueryAction): """ Order Actions is populated by queryset when order_by() is called. All required params are extracted but kept raw until actual filter clause value is required -> then the action is converted into text() clause. Extracted in order to easily change table prefixes on complex relations. 
""" def __init__( self, order_str: str, model_cls: Type["Model"], alias: str = None ) -> None: self.direction: str = "" super().__init__(query_str=order_str, model_cls=model_cls) self.is_source_model_order = False if alias: self.table_prefix = alias if self.source_model == self.target_model and "__" not in self.related_str: self.is_source_model_order = True @property def field_alias(self) -> str: return self.target_model.get_column_alias(self.field_name) @property def is_postgres_bool(self) -> bool: dialect = self.target_model.Meta.database._backend._dialect.name field_type = self.target_model.Meta.model_fields[self.field_name].__type__ return dialect == "postgresql" and field_type == bool def get_field_name_text(self) -> str: """ Escapes characters if it's required. Substitutes values of the models if value is a ormar Model with its pk value. Compiles the clause. :return: complied and escaped clause :rtype: sqlalchemy.sql.elements.TextClause """ prefix = f"{self.table_prefix}_" if self.table_prefix else "" return f"{prefix}{self.table}" f".{self.field_alias}" def get_min_or_max(self) -> sqlalchemy.sql.expression.TextClause: """ Used in limit sub queries where you need to use aggregated functions in order to order by columns not included in group by. For postgres bool field it's using bool_or function as aggregates does not work with this type of columns. :return: min or max function to order :rtype: sqlalchemy.sql.elements.TextClause """ prefix = f"{self.table_prefix}_" if self.table_prefix else "" if self.direction == "": function = "min" if not self.is_postgres_bool else "bool_or" return text(f"{function}({prefix}{self.table}" f".{self.field_alias})") function = "max" if not self.is_postgres_bool else "bool_or" return text(f"{function}({prefix}{self.table}" f".{self.field_alias}) desc") def get_text_clause(self) -> sqlalchemy.sql.expression.TextClause: """ Escapes characters if it's required. 
Substitutes values of the models if value is a ormar Model with its pk value. Compiles the clause. :return: complied and escaped clause :rtype: sqlalchemy.sql.elements.TextClause """ prefix = f"{self.table_prefix}_" if self.table_prefix else "" table_name = self.table.name field_name = self.field_alias if not prefix: dialect = self.target_model.Meta.database._backend._dialect table_name = dialect.identifier_preparer.quote(table_name) field_name = dialect.identifier_preparer.quote(field_name) return text(f"{prefix}{table_name}" f".{field_name} {self.direction}") def _split_value_into_parts(self, order_str: str) -> None: if order_str.startswith("-"): self.direction = "desc" order_str = order_str[1:] parts = order_str.split("__") self.field_name = parts[-1] self.related_parts = parts[:-1] def check_if_filter_apply(self, target_model: Type["Model"], alias: str) -> bool: """ Checks filter conditions to find if they apply to current join. :param target_model: model which is now processed :type target_model: Type["Model"] :param alias: prefix of the relation :type alias: str :return: result of the check :rtype: bool """ return target_model == self.target_model and alias == self.table_prefix ormar-0.12.2/ormar/queryset/actions/query_action.py000066400000000000000000000062031444363446500224470ustar00rootroot00000000000000import abc from typing import Any, List, TYPE_CHECKING, Type import sqlalchemy from ormar.queryset.utils import get_relationship_alias_model_and_str # noqa: I202 if TYPE_CHECKING: # pragma: nocover from ormar import Model class QueryAction(abc.ABC): """ Base QueryAction class with common params for Filter and Order actions. 
""" def __init__(self, query_str: str, model_cls: Type["Model"]) -> None: self.query_str = query_str self.field_name: str = "" self.related_parts: List[str] = [] self.related_str: str = "" self.table_prefix = "" self.source_model = model_cls self.target_model = model_cls self.is_through = False self._split_value_into_parts(query_str) self._determine_filter_target_table() def __eq__(self, other: object) -> bool: # pragma: no cover if not isinstance(other, QueryAction): return False return self.query_str == other.query_str def __hash__(self) -> Any: return hash((self.table_prefix, self.query_str)) @abc.abstractmethod def _split_value_into_parts(self, query_str: str) -> None: # pragma: no cover """ Splits string into related parts and field_name :param query_str: query action string to split (i..e filter or order by) :type query_str: str """ pass @abc.abstractmethod def get_text_clause( self, ) -> sqlalchemy.sql.expression.TextClause: # pragma: no cover pass @property def table(self) -> sqlalchemy.Table: """Shortcut to sqlalchemy Table of filtered target model""" return self.target_model.Meta.table @property def column(self) -> sqlalchemy.Column: """Shortcut to sqlalchemy column of filtered target model""" aliased_name = self.target_model.get_column_alias(self.field_name) return self.target_model.Meta.table.columns[aliased_name] def update_select_related(self, select_related: List[str]) -> List[str]: """ Updates list of select related with related part included in the filter key. That way If you want to just filter by relation you do not have to provide select_related separately. 
:param select_related: list of relation join strings :type select_related: List[str] :return: list of relation joins with implied joins from filter added :rtype: List[str] """ select_related = select_related[:] if self.related_str and not any( rel.startswith(self.related_str) for rel in select_related ): select_related.append(self.related_str) return select_related def _determine_filter_target_table(self) -> None: """ Walks the relation to retrieve the actual model on which the clause should be constructed, extracts alias based on last relation leading to target model. """ ( self.table_prefix, self.target_model, self.related_str, self.is_through, ) = get_relationship_alias_model_and_str(self.source_model, self.related_parts) ormar-0.12.2/ormar/queryset/actions/select_action.py000066400000000000000000000034321444363446500225620ustar00rootroot00000000000000import decimal from typing import Any, Callable, TYPE_CHECKING, Type import sqlalchemy from ormar.queryset.actions.query_action import QueryAction # noqa: I202 if TYPE_CHECKING: # pragma: no cover from ormar import Model class SelectAction(QueryAction): """ Order Actions is populated by queryset when order_by() is called. All required params are extracted but kept raw until actual filter clause value is required -> then the action is converted into text() clause. Extracted in order to easily change table prefixes on complex relations. 
""" def __init__( self, select_str: str, model_cls: Type["Model"], alias: str = None ) -> None: super().__init__(query_str=select_str, model_cls=model_cls) if alias: # pragma: no cover self.table_prefix = alias def _split_value_into_parts(self, order_str: str) -> None: parts = order_str.split("__") self.field_name = parts[-1] self.related_parts = parts[:-1] @property def is_numeric(self) -> bool: return self.get_target_field_type() in [int, float, decimal.Decimal] def get_target_field_type(self) -> Any: return self.target_model.Meta.model_fields[self.field_name].__type__ def get_text_clause(self) -> sqlalchemy.sql.expression.TextClause: alias = f"{self.table_prefix}_" if self.table_prefix else "" return sqlalchemy.text(f"{alias}{self.field_name}") def apply_func( self, func: Callable, use_label: bool = True ) -> sqlalchemy.sql.expression.TextClause: result = func(self.get_text_clause()) if use_label: rel_prefix = f"{self.related_str}__" if self.related_str else "" result = result.label(f"{rel_prefix}{self.field_name}") return result ormar-0.12.2/ormar/queryset/clause.py000066400000000000000000000271041444363446500175640ustar00rootroot00000000000000import itertools from dataclasses import dataclass from enum import Enum from typing import Any, Generator, List, TYPE_CHECKING, Tuple, Type import sqlalchemy import ormar # noqa I100 from ormar.queryset.actions.filter_action import FilterAction from ormar.queryset.utils import get_relationship_alias_model_and_str if TYPE_CHECKING: # pragma no cover from ormar import Model class FilterType(Enum): AND = 1 OR = 2 class FilterGroup: """ Filter groups are used in complex queries condition to group and and or clauses in where condition """ def __init__( self, *args: Any, _filter_type: FilterType = FilterType.AND, _exclude: bool = False, **kwargs: Any, ) -> None: self.filter_type = _filter_type self.exclude = _exclude self._nested_groups: List["FilterGroup"] = list(args) self._resolved = False self.is_source_model_filter = 
False self._kwargs_dict = kwargs self.actions: List[FilterAction] = [] def __and__(self, other: "FilterGroup") -> "FilterGroup": return FilterGroup(self, other) def __or__(self, other: "FilterGroup") -> "FilterGroup": return FilterGroup(self, other, _filter_type=FilterType.OR) def __invert__(self) -> "FilterGroup": self.exclude = not self.exclude return self def resolve( self, model_cls: Type["Model"], select_related: List = None, filter_clauses: List = None, ) -> Tuple[List[FilterAction], List[str]]: """ Resolves the FilterGroups actions to use proper target model, replace complex relation prefixes if needed and nested groups also resolved. :param model_cls: model from which the query is run :type model_cls: Type["Model"] :param select_related: list of models to join :type select_related: List[str] :param filter_clauses: list of filter conditions :type filter_clauses: List[FilterAction] :return: list of filter conditions and select_related list :rtype: Tuple[List[FilterAction], List[str]] """ select_related = select_related if select_related is not None else [] filter_clauses = filter_clauses if filter_clauses is not None else [] qryclause = QueryClause( model_cls=model_cls, select_related=select_related, filter_clauses=filter_clauses, ) own_filter_clauses, select_related = qryclause.prepare_filter( _own_only=True, **self._kwargs_dict ) self.actions = own_filter_clauses filter_clauses = filter_clauses + own_filter_clauses self._resolved = True if self._nested_groups: for group in self._nested_groups: (filter_clauses, select_related) = group.resolve( model_cls=model_cls, select_related=select_related, filter_clauses=filter_clauses, ) return filter_clauses, select_related def _iter(self) -> Generator: """ Iterates all actions in a tree :return: generator yielding from own actions and nested groups :rtype: Generator """ for group in self._nested_groups: yield from group._iter() yield from self.actions def _get_text_clauses(self) -> 
List[sqlalchemy.sql.expression.TextClause]: """ Helper to return list of text queries from actions and nested groups :return: list of text queries from actions and nested groups :rtype: List[sqlalchemy.sql.elements.TextClause] """ return [x.get_text_clause() for x in self._nested_groups] + [ x.get_text_clause() for x in self.actions ] def get_text_clause(self) -> sqlalchemy.sql.expression.TextClause: """ Returns all own actions and nested groups conditions compiled and joined inside parentheses. Escapes characters if it's required. Substitutes values of the models if value is a ormar Model with its pk value. Compiles the clause. :return: complied and escaped clause :rtype: sqlalchemy.sql.elements.TextClause """ if self.filter_type == FilterType.AND: clause = sqlalchemy.sql.and_(*self._get_text_clauses()).self_group() else: clause = sqlalchemy.sql.or_(*self._get_text_clauses()).self_group() if self.exclude: clause = sqlalchemy.sql.not_(clause) return clause def or_(*args: FilterGroup, **kwargs: Any) -> FilterGroup: """ Construct or filter from nested groups and keyword arguments :param args: nested filter groups :type args: Tuple[FilterGroup] :param kwargs: fields names and proper value types :type kwargs: Any :return: FilterGroup ready to be resolved :rtype: ormar.queryset.clause.FilterGroup """ return FilterGroup(_filter_type=FilterType.OR, *args, **kwargs) def and_(*args: FilterGroup, **kwargs: Any) -> FilterGroup: """ Construct and filter from nested groups and keyword arguments :param args: nested filter groups :type args: Tuple[FilterGroup] :param kwargs: fields names and proper value types :type kwargs: Any :return: FilterGroup ready to be resolved :rtype: ormar.queryset.clause.FilterGroup """ return FilterGroup(_filter_type=FilterType.AND, *args, **kwargs) @dataclass class Prefix: source_model: Type["Model"] table_prefix: str model_cls: Type["Model"] relation_str: str is_through: bool @property def alias_key(self) -> str: source_model_name = 
self.source_model.get_name() return f"{source_model_name}_" f"{self.relation_str}" class QueryClause: """ Constructs FilterActions from strings passed as arguments """ def __init__( self, model_cls: Type["Model"], filter_clauses: List, select_related: List ) -> None: self._select_related = select_related[:] self.filter_clauses = filter_clauses[:] self.model_cls = model_cls self.table = self.model_cls.Meta.table def prepare_filter( # noqa: A003 self, _own_only: bool = False, **kwargs: Any ) -> Tuple[List[FilterAction], List[str]]: """ Main external access point that processes the clauses into sqlalchemy text clauses and updates select_related list with implicit related tables mentioned in select_related strings but not included in select_related. :param _own_only: :type _own_only: :param kwargs: key, value pair with column names and values :type kwargs: Any :return: Tuple with list of where clauses and updated select_related list :rtype: Tuple[List[sqlalchemy.sql.elements.TextClause], List[str]] """ if kwargs.get("pk"): pk_name = self.model_cls.get_column_alias(self.model_cls.Meta.pkname) kwargs[pk_name] = kwargs.pop("pk") filter_clauses, select_related = self._populate_filter_clauses( _own_only=_own_only, **kwargs ) return filter_clauses, select_related def _populate_filter_clauses( self, _own_only: bool, **kwargs: Any ) -> Tuple[List[FilterAction], List[str]]: """ Iterates all clauses and extracts used operator and field from related models if needed. Based on the chain of related names the target table is determined and the final clause is escaped if needed and compiled. 
:param kwargs: key, value pair with column names and values :type kwargs: Any :return: Tuple with list of where clauses and updated select_related list :rtype: Tuple[List[sqlalchemy.sql.elements.TextClause], List[str]] """ filter_clauses = self.filter_clauses own_filter_clauses = [] select_related = list(self._select_related) for key, value in kwargs.items(): filter_action = FilterAction( filter_str=key, value=value, model_cls=self.model_cls ) select_related = filter_action.update_select_related( select_related=select_related ) own_filter_clauses.append(filter_action) self._register_complex_duplicates(select_related) filter_clauses = self._switch_filter_action_prefixes( filter_clauses=filter_clauses + own_filter_clauses ) if _own_only: return own_filter_clauses, select_related return filter_clauses, select_related def _register_complex_duplicates(self, select_related: List[str]) -> None: """ Checks if duplicate aliases are presented which can happen in self relation or when two joins end with the same pair of models. If there are duplicates, the all duplicated joins are registered as source model and whole relation key (not just last relation name). :param select_related: list of relation strings :type select_related: List[str] :return: None :rtype: None """ prefixes = self._parse_related_prefixes(select_related=select_related) manager = self.model_cls.Meta.alias_manager filtered_prefixes = sorted(prefixes, key=lambda x: x.table_prefix) grouped = itertools.groupby(filtered_prefixes, key=lambda x: x.table_prefix) for _, group in grouped: sorted_group = sorted( group, key=lambda x: len(x.relation_str), reverse=True ) for prefix in sorted_group[:-1]: if prefix.alias_key not in manager: manager.add_alias(alias_key=prefix.alias_key) def _parse_related_prefixes(self, select_related: List[str]) -> List[Prefix]: """ Walks all relation strings and parses the target models and prefixes. 
:param select_related: list of relation strings :type select_related: List[str] :return: list of parsed prefixes :rtype: List[Prefix] """ prefixes: List[Prefix] = [] for related in select_related: prefix = Prefix( self.model_cls, *get_relationship_alias_model_and_str( self.model_cls, related.split("__") ), ) prefixes.append(prefix) return prefixes def _switch_filter_action_prefixes( self, filter_clauses: List[FilterAction] ) -> List[FilterAction]: """ Substitutes aliases for filter action if the complex key (whole relation str) is present in alias_manager. :param filter_clauses: raw list of actions :type filter_clauses: List[FilterAction] :return: list of actions with aliases changed if needed :rtype: List[FilterAction] """ for action in filter_clauses: if isinstance(action, FilterGroup): for action2 in action._iter(): self._verify_prefix_and_switch(action2) else: self._verify_prefix_and_switch(action) return filter_clauses def _verify_prefix_and_switch(self, action: "FilterAction") -> None: """ Helper to switch prefix to complex relation one if required :param action: action to switch prefix in :type action: ormar.queryset.actions.filter_action.FilterAction """ manager = self.model_cls.Meta.alias_manager new_alias = manager.resolve_relation_alias(self.model_cls, action.related_str) if "__" in action.related_str and new_alias: action.table_prefix = new_alias ormar-0.12.2/ormar/queryset/field_accessor.py000066400000000000000000000224521444363446500212560ustar00rootroot00000000000000from typing import Any, TYPE_CHECKING, Type, cast from ormar.queryset.actions import OrderAction from ormar.queryset.actions.filter_action import METHODS_TO_OPERATORS from ormar.queryset.clause import FilterGroup if TYPE_CHECKING: # pragma: no cover from ormar import BaseField, Model class FieldAccessor: """ Helper to access ormar fields directly from Model class also for nested models attributes. 
""" def __init__( self, source_model: Type["Model"], field: "BaseField" = None, model: Type["Model"] = None, access_chain: str = "", ) -> None: self._source_model = source_model self._field = field self._model = model self._access_chain = access_chain def __bool__(self) -> bool: """ Hack to avoid pydantic name check from parent model, returns false :return: False :rtype: bool """ return False def __getattr__(self, item: str) -> Any: """ Accessor return new accessor for each field and nested models. Thanks to that operator overload is possible to use in filter. :param item: attribute name :type item: str :return: FieldAccessor for field or nested model :rtype: ormar.queryset.field_accessor.FieldAccessor """ if ( object.__getattribute__(self, "_field") and item == object.__getattribute__(self, "_field").name ): return self._field if ( object.__getattribute__(self, "_model") and item in object.__getattribute__(self, "_model").Meta.model_fields ): field = cast("Model", self._model).Meta.model_fields[item] if field.is_relation: return FieldAccessor( source_model=self._source_model, model=field.to, access_chain=self._access_chain + f"__{item}", ) else: return FieldAccessor( source_model=self._source_model, field=field, access_chain=self._access_chain + f"__{item}", ) return object.__getattribute__(self, item) # pragma: no cover def _check_field(self) -> None: if not self._field: raise AttributeError( "Cannot filter by Model, you need to provide model name" ) def _select_operator(self, op: str, other: Any) -> FilterGroup: self._check_field() filter_kwg = {self._access_chain + f"__{METHODS_TO_OPERATORS[op]}": other} return FilterGroup(**filter_kwg) def __eq__(self, other: Any) -> FilterGroup: # type: ignore """ overloaded to work as sql `column = ` :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="__eq__", other=other) def __ge__(self, other: Any) -> 
FilterGroup: """ overloaded to work as sql `column >= ` :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="__ge__", other=other) def __gt__(self, other: Any) -> FilterGroup: """ overloaded to work as sql `column > ` :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="__gt__", other=other) def __le__(self, other: Any) -> FilterGroup: """ overloaded to work as sql `column <= ` :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="__le__", other=other) def __lt__(self, other: Any) -> FilterGroup: """ overloaded to work as sql `column < ` :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="__lt__", other=other) def __mod__(self, other: Any) -> FilterGroup: """ overloaded to work as sql `column LIKE '%%'` :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="__mod__", other=other) def __lshift__(self, other: Any) -> FilterGroup: """ overloaded to work as sql `column IN (, ,...)` :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="in", other=other) def __rshift__(self, other: Any) -> FilterGroup: """ overloaded to work as sql `column IS NULL` :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="isnull", other=True) def in_(self, 
other: Any) -> FilterGroup: """ works as sql `column IN (, ,...)` :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="in", other=other) def iexact(self, other: Any) -> FilterGroup: """ works as sql `column = ` case-insensitive :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="iexact", other=other) def contains(self, other: Any) -> FilterGroup: """ works as sql `column LIKE '%%'` :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="contains", other=other) def icontains(self, other: Any) -> FilterGroup: """ works as sql `column LIKE '%%'` case-insensitive :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="icontains", other=other) def startswith(self, other: Any) -> FilterGroup: """ works as sql `column LIKE '%'` :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="startswith", other=other) def istartswith(self, other: Any) -> FilterGroup: """ works as sql `column LIKE '%'` case-insensitive :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="istartswith", other=other) def endswith(self, other: Any) -> FilterGroup: """ works as sql `column LIKE '%'` :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="endswith", other=other) 
def iendswith(self, other: Any) -> FilterGroup: """ works as sql `column LIKE '%'` case-insensitive :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="iendswith", other=other) def isnull(self, other: Any) -> FilterGroup: """ works as sql `column IS NULL` or `IS NOT NULL` :param other: value to check agains operator :type other: str :return: FilterGroup for operator :rtype: ormar.queryset.clause.FilterGroup """ return self._select_operator(op="isnull", other=other) def asc(self) -> OrderAction: """ works as sql `column asc` :return: OrderGroup for operator :rtype: ormar.queryset.actions.OrderGroup """ return OrderAction(order_str=self._access_chain, model_cls=self._source_model) def desc(self) -> OrderAction: """ works as sql `column desc` :return: OrderGroup for operator :rtype: ormar.queryset.actions.OrderGroup """ return OrderAction( order_str="-" + self._access_chain, model_cls=self._source_model ) ormar-0.12.2/ormar/queryset/join.py000066400000000000000000000356341444363446500172560ustar00rootroot00000000000000from typing import Any, Dict, List, Optional, TYPE_CHECKING, Tuple, Type, cast import sqlalchemy from sqlalchemy import text import ormar # noqa I100 from ormar.exceptions import ModelDefinitionError, RelationshipInstanceError from ormar.relations import AliasManager if TYPE_CHECKING: # pragma no cover from ormar import Model, ManyToManyField from ormar.queryset import OrderAction from ormar.models.excludable import ExcludableItems class SqlJoin: def __init__( # noqa: CFQ002 self, used_aliases: List, select_from: sqlalchemy.sql.select, columns: List[sqlalchemy.Column], excludable: "ExcludableItems", order_columns: Optional[List["OrderAction"]], sorted_orders: Dict, main_model: Type["Model"], relation_name: str, relation_str: str, related_models: Any = None, own_alias: str = "", source_model: Type["Model"] = None, already_sorted: Dict 
= None, ) -> None: self.relation_name = relation_name self.related_models = related_models or [] self.select_from = select_from self.columns = columns self.excludable = excludable self.order_columns = order_columns self.sorted_orders = sorted_orders self.already_sorted = already_sorted or dict() self.main_model = main_model self.own_alias = own_alias self.used_aliases = used_aliases self.target_field = self.main_model.Meta.model_fields[self.relation_name] self._next_model: Optional[Type["Model"]] = None self._next_alias: Optional[str] = None self.relation_str = relation_str self.source_model = source_model @property def next_model(self) -> Type["Model"]: if not self._next_model: # pragma: nocover raise RelationshipInstanceError( "Cannot link to related table if relation.to model is not set." ) return self._next_model @next_model.setter def next_model(self, value: Type["Model"]) -> None: self._next_model = value @property def next_alias(self) -> str: if not self._next_alias: # pragma: nocover raise RelationshipInstanceError("Alias for given relation not found.") return self._next_alias @next_alias.setter def next_alias(self, value: str) -> None: self._next_alias = value @property def alias_manager(self) -> AliasManager: """ Shortcut for ormar's model AliasManager stored on Meta. :return: alias manager from model's Meta :rtype: AliasManager """ return self.main_model.Meta.alias_manager @property def to_table(self) -> sqlalchemy.Table: """ Shortcut to table name of the next model :return: name of the target table :rtype: str """ return self.next_model.Meta.table def _on_clause(self, previous_alias: str, from_clause: str, to_clause: str) -> text: """ Receives aliases and names of both ends of the join and combines them into one text clause used in joins. 
:param previous_alias: alias of previous table :type previous_alias: str :param from_clause: from table name :type from_clause: str :param to_clause: to table name :type to_clause: str :return: clause combining all strings :rtype: sqlalchemy.text """ left_part = f"{self.next_alias}_{to_clause}" if not previous_alias: dialect = self.main_model.Meta.database._backend._dialect table, column = from_clause.split(".") quotter = dialect.identifier_preparer.quote right_part = f"{quotter(table)}.{quotter(column)}" else: right_part = f"{previous_alias}_{from_clause}" return text(f"{left_part}={right_part}") def build_join(self) -> Tuple[List, sqlalchemy.sql.select, List, Dict]: """ Main external access point for building a join. Splits the join definition, updates fields and exclude_fields if needed, handles switching to through models for m2m relations, returns updated lists of used_aliases and sort_orders. :return: list of used aliases, select from, list of aliased columns, sort orders :rtype: Tuple[List[str], Join, List[TextClause], Dict] """ if self.target_field.is_multi: self._process_m2m_through_table() self.next_model = self.target_field.to self._forward_join() self._process_following_joins() return (self.used_aliases, self.select_from, self.columns, self.sorted_orders) def _forward_join(self) -> None: """ Process actual join. Registers complex relation join on encountering of the duplicated alias. 
""" self.next_alias = self.alias_manager.resolve_relation_alias( from_model=self.target_field.owner, relation_name=self.relation_name ) if self.next_alias not in self.used_aliases: self._process_join() else: if "__" in self.relation_str and self.source_model: relation_key = f"{self.source_model.get_name()}_{self.relation_str}" if relation_key not in self.alias_manager: self.next_alias = self.alias_manager.add_alias( alias_key=relation_key ) else: self.next_alias = self.alias_manager[relation_key] self._process_join() def _process_following_joins(self) -> None: """ Iterates through nested models to create subsequent joins. """ for related_name in self.related_models: remainder = None if ( isinstance(self.related_models, dict) and self.related_models[related_name] ): remainder = self.related_models[related_name] self._process_deeper_join(related_name=related_name, remainder=remainder) def _process_deeper_join(self, related_name: str, remainder: Any) -> None: """ Creates nested recurrent instance of SqlJoin for each nested join table, updating needed return params here as a side effect. 
Updated are: * self.used_aliases, * self.select_from, * self.columns, * self.sorted_orders, :param related_name: name of the relation to follow :type related_name: str :param remainder: deeper tables if there are more nested joins :type remainder: Any """ sql_join = SqlJoin( used_aliases=self.used_aliases, select_from=self.select_from, columns=self.columns, excludable=self.excludable, order_columns=self.order_columns, sorted_orders=self.sorted_orders, main_model=self.next_model, relation_name=related_name, related_models=remainder, relation_str="__".join([self.relation_str, related_name]), own_alias=self.next_alias, source_model=self.source_model or self.main_model, already_sorted=self.already_sorted, ) ( self.used_aliases, self.select_from, self.columns, self.sorted_orders, ) = sql_join.build_join() def _process_m2m_through_table(self) -> None: """ Process Through table of the ManyToMany relation so that source table is linked to the through table (one additional join) Replaces needed parameters like: * self.next_model, * self.next_alias, * self.relation_name, * self.own_alias, * self.target_field To point to through model """ new_part = self._process_m2m_related_name_change() self.next_model = self.target_field.through self._forward_join() self.relation_name = new_part self.own_alias = self.next_alias self.target_field = self.next_model.Meta.model_fields[self.relation_name] def _process_m2m_related_name_change(self, reverse: bool = False) -> str: """ Extracts relation name to link join through the Through model declared on relation field. Changes the same names in order_by queries if they are present. 
:param reverse: flag if it's on_clause lookup - use reverse fields :type reverse: bool :return: new relation name switched to through model field :rtype: str """ target_field = self.target_field is_primary_self_ref = ( target_field.self_reference and self.relation_name == target_field.self_reference_primary ) if (is_primary_self_ref and not reverse) or ( not is_primary_self_ref and reverse ): new_part = target_field.default_source_field_name() # type: ignore else: new_part = target_field.default_target_field_name() # type: ignore return new_part def _process_join(self) -> None: # noqa: CFQ002 """ Resolves to and from column names and table names. Produces on_clause. Performs actual join updating select_from parameter. Adds aliases of required column to list of columns to include in query. Updates the used aliases list directly. Process order_by causes for non m2m relations. """ to_key, from_key = self._get_to_and_from_keys() on_clause = self._on_clause( previous_alias=self.own_alias, from_clause=f"{self.target_field.owner.Meta.tablename}.{from_key}", to_clause=f"{self.to_table.name}.{to_key}", ) target_table = self.alias_manager.prefixed_table_name( self.next_alias, self.to_table ) self.select_from = sqlalchemy.sql.outerjoin( self.select_from, target_table, on_clause ) self._get_order_bys() self_related_fields = self.next_model.own_table_columns( model=self.next_model, excludable=self.excludable, alias=self.next_alias, use_alias=True, ) self.columns.extend( self.alias_manager.prefixed_columns( self.next_alias, target_table, self_related_fields ) ) self.used_aliases.append(self.next_alias) def _set_default_primary_key_order_by(self) -> None: for order_by in self.next_model.Meta.orders_by: clause = ormar.OrderAction( order_str=order_by, model_cls=self.next_model, alias=self.next_alias ) self.sorted_orders[clause] = clause.get_text_clause() def _verify_allowed_order_field(self, order_by: str) -> None: """ Verifies if proper field string is used. 
:param order_by: string with order by definition :type order_by: str """ parts = order_by.split("__") if len(parts) > 2 or parts[0] != self.target_field.through.get_name(): raise ModelDefinitionError( "You can order the relation only " "by related or link table columns!" ) def _get_alias_and_model(self, order_by: str) -> Tuple[str, Type["Model"]]: """ Returns proper model and alias to be applied in the clause. :param order_by: string with order by definition :type order_by: str :return: alias and model to be used in clause :rtype: Tuple[str, Type["Model"]] """ if self.target_field.is_multi and "__" in order_by: self._verify_allowed_order_field(order_by=order_by) alias = self.next_alias model = self.target_field.owner elif self.target_field.is_multi: alias = self.alias_manager.resolve_relation_alias( from_model=self.target_field.through, relation_name=cast( "ManyToManyField", self.target_field ).default_target_field_name(), ) model = self.target_field.to else: alias = self.alias_manager.resolve_relation_alias( from_model=self.target_field.owner, relation_name=self.target_field.name ) model = self.target_field.to return alias, model def _get_order_bys(self) -> None: # noqa: CCR001 """ Triggers construction of order bys if they are given. Otherwise by default each table is sorted by a primary key column asc. 
""" alias = self.next_alias current_table_sorted = False if f"{alias}_{self.next_model.get_name()}" in self.already_sorted: current_table_sorted = True if self.order_columns: for condition in self.order_columns: if condition.check_if_filter_apply( target_model=self.next_model, alias=alias ): current_table_sorted = True self.sorted_orders[condition] = condition.get_text_clause() self.already_sorted[ f"{self.next_alias}_{self.next_model.get_name()}" ] = condition if self.target_field.orders_by and not current_table_sorted: current_table_sorted = True for order_by in self.target_field.orders_by: alias, model = self._get_alias_and_model(order_by=order_by) clause = ormar.OrderAction( order_str=order_by, model_cls=model, alias=alias ) self.sorted_orders[clause] = clause.get_text_clause() self.already_sorted[f"{alias}_{model.get_name()}"] = clause if not current_table_sorted and not self.target_field.is_multi: self._set_default_primary_key_order_by() def _get_to_and_from_keys(self) -> Tuple[str, str]: """ Based on the relation type, name of the relation and previous models and parts stored in JoinParameters it resolves the current to and from keys, which are different for ManyToMany relation, ForeignKey and reverse related of relations. 
:return: to key and from key :rtype: Tuple[str, str] """ if self.target_field.is_multi: to_key = self._process_m2m_related_name_change(reverse=True) from_key = self.main_model.get_column_alias(self.main_model.Meta.pkname) elif self.target_field.virtual: to_field = self.target_field.get_related_name() to_key = self.target_field.to.get_column_alias(to_field) from_key = self.main_model.get_column_alias(self.main_model.Meta.pkname) else: to_key = self.target_field.to.get_column_alias( self.target_field.to.Meta.pkname ) from_key = self.main_model.get_column_alias(self.relation_name) return to_key, from_key ormar-0.12.2/ormar/queryset/queries/000077500000000000000000000000001444363446500174075ustar00rootroot00000000000000ormar-0.12.2/ormar/queryset/queries/__init__.py000066400000000000000000000007261444363446500215250ustar00rootroot00000000000000from ormar.queryset.queries.filter_query import FilterQuery from ormar.queryset.queries.limit_query import LimitQuery from ormar.queryset.queries.offset_query import OffsetQuery from ormar.queryset.queries.order_query import OrderQuery from ormar.queryset.queries.prefetch_query import PrefetchQuery from ormar.queryset.queries.query import Query __all__ = [ "FilterQuery", "LimitQuery", "OffsetQuery", "OrderQuery", "PrefetchQuery", "Query", ] ormar-0.12.2/ormar/queryset/queries/filter_query.py000066400000000000000000000021671444363446500225010ustar00rootroot00000000000000from typing import List import sqlalchemy from ormar.queryset.actions.filter_action import FilterAction class FilterQuery: """ Modifies the select query with given list of where/filter clauses. """ def __init__( self, filter_clauses: List[FilterAction], exclude: bool = False ) -> None: self.exclude = exclude self.filter_clauses = filter_clauses def apply(self, expr: sqlalchemy.sql.select) -> sqlalchemy.sql.select: """ Applies all filter clauses if set. 
:param expr: query to modify :type expr: sqlalchemy.sql.selectable.Select :return: modified query :rtype: sqlalchemy.sql.selectable.Select """ if self.filter_clauses: if len(self.filter_clauses) == 1: clause = self.filter_clauses[0].get_text_clause() else: clause = sqlalchemy.sql.and_( *[x.get_text_clause() for x in self.filter_clauses] ) clause = sqlalchemy.sql.not_(clause) if self.exclude else clause expr = expr.where(clause) return expr ormar-0.12.2/ormar/queryset/queries/limit_query.py000066400000000000000000000012111444363446500223170ustar00rootroot00000000000000from typing import Optional import sqlalchemy class LimitQuery: """ Modifies the select query with limit clause. """ def __init__(self, limit_count: Optional[int]) -> None: self.limit_count = limit_count def apply(self, expr: sqlalchemy.sql.select) -> sqlalchemy.sql.select: """ Applies the limit clause. :param expr: query to modify :type expr: sqlalchemy.sql.selectable.Select :return: modified query :rtype: sqlalchemy.sql.selectable.Select """ if self.limit_count is not None: expr = expr.limit(self.limit_count) return expr ormar-0.12.2/ormar/queryset/queries/offset_query.py000066400000000000000000000012031444363446500224700ustar00rootroot00000000000000from typing import Optional import sqlalchemy class OffsetQuery: """ Modifies the select query with offset if set """ def __init__(self, query_offset: Optional[int]) -> None: self.query_offset = query_offset def apply(self, expr: sqlalchemy.sql.select) -> sqlalchemy.sql.select: """ Applies the offset clause. 
:param expr: query to modify :type expr: sqlalchemy.sql.selectable.Select :return: modified query :rtype: sqlalchemy.sql.selectable.Select """ if self.query_offset: expr = expr.offset(self.query_offset) return expr ormar-0.12.2/ormar/queryset/queries/order_query.py000066400000000000000000000013651444363446500223260ustar00rootroot00000000000000from typing import Dict import sqlalchemy class OrderQuery: """ Modifies the select query with given list of order_by clauses. """ def __init__(self, sorted_orders: Dict) -> None: self.sorted_orders = sorted_orders def apply(self, expr: sqlalchemy.sql.select) -> sqlalchemy.sql.select: """ Applies all order_by clauses if set. :param expr: query to modify :type expr: sqlalchemy.sql.selectable.Select :return: modified query :rtype: sqlalchemy.sql.selectable.Select """ if self.sorted_orders: for order in list(self.sorted_orders.values()): if order is not None: expr = expr.order_by(order) return expr ormar-0.12.2/ormar/queryset/queries/prefetch_query.py000066400000000000000000000561311444363446500230140ustar00rootroot00000000000000from typing import Dict, List, Sequence, Set, TYPE_CHECKING, Tuple, Type, cast import ormar from ormar.queryset.clause import QueryClause from ormar.queryset.queries.query import Query from ormar.queryset.utils import extract_models_to_dict_of_lists, translate_list_to_dict if TYPE_CHECKING: # pragma: no cover from ormar import Model from ormar.fields import ForeignKeyField, BaseField from ormar.queryset import OrderAction from ormar.models.excludable import ExcludableItems def sort_models(models: List["Model"], orders_by: Dict) -> List["Model"]: """ Since prefetch query gets all related models by ids the sorting needs to happen in python. Since by default models are already sorted by id here we resort only if order_by parameters was set. 
:param models: list of models already fetched from db :type models: List[tests.test_prefetch_related.Division] :param orders_by: order by dictionary :type orders_by: Dict[str, str] :return: sorted list of models :rtype: List[tests.test_prefetch_related.Division] """ sort_criteria = [ (key, value) for key, value in orders_by.items() if isinstance(value, str) ] sort_criteria = sort_criteria[::-1] for criteria in sort_criteria: key_name, value = criteria if value == "desc": models.sort(key=lambda x: getattr(x, key_name), reverse=True) else: models.sort(key=lambda x: getattr(x, key_name)) return models def set_children_on_model( # noqa: CCR001 model: "Model", related: str, children: Dict, model_id: int, models: Dict, orders_by: Dict, ) -> None: """ Extract ids of child models by given relation id key value. Based on those ids the actual children model instances are fetched from already fetched data. If needed the child models are resorted according to passed orders_by dict. Also relation is registered as each child is set as parent related field name value. :param model: parent model instance :type model: Model :param related: name of the related field :type related: str :param children: dictionary of children ids/ related field value :type children: Dict[int, set] :param model_id: id of the model on which children should be set :type model_id: int :param models: dictionary of child models instances :type models: Dict :param orders_by: order_by dictionary :type orders_by: Dict """ for key, child_models in children.items(): if key == model_id: models_to_set = [models[child] for child in sorted(child_models)] if models_to_set: if orders_by and any(isinstance(x, str) for x in orders_by.values()): models_to_set = sort_models( models=models_to_set, orders_by=orders_by ) for child in models_to_set: setattr(model, related, child) class PrefetchQuery: """ Query used to fetch related models in subsequent queries. Each model is fetched only ones by the name of the relation. 
That means that for each prefetch_related entry next query is issued to database. """ def __init__( # noqa: CFQ002 self, model_cls: Type["Model"], excludable: "ExcludableItems", prefetch_related: List, select_related: List, orders_by: List["OrderAction"], ) -> None: self.model = model_cls self.database = self.model.Meta.database self._prefetch_related = prefetch_related self._select_related = select_related self.excludable = excludable self.already_extracted: Dict = dict() self.models: Dict = {} self.select_dict = translate_list_to_dict(self._select_related) self.orders_by = orders_by or [] # TODO: refactor OrderActions to use it instead of strings from it self.order_dict = translate_list_to_dict( [x.query_str for x in self.orders_by], is_order=True ) async def prefetch_related( self, models: Sequence["Model"], rows: List ) -> Sequence["Model"]: """ Main entry point for prefetch_query. Receives list of already initialized parent models with all children from select_related already populated. Receives also list of row sql result rows as it's quicker to extract ids that way instead of calling each model. Returns list with related models already prefetched and set. :param models: list of already instantiated models from main query :type models: List[Model] :param rows: row sql result of the main query before the prefetch :type rows: List[sqlalchemy.engine.result.RowProxy] :return: list of models with children prefetched :rtype: List[Model] """ self.models = extract_models_to_dict_of_lists( model_type=self.model, models=models, select_dict=self.select_dict ) self.models[self.model.get_name()] = models return await self._prefetch_related_models(models=models, rows=rows) def _extract_ids_from_raw_data( self, parent_model: Type["Model"], column_name: str ) -> Set: """ Iterates over raw rows and extract id values of relation columns by using prefixed column name. 
:param parent_model: ormar model class :type parent_model: Type[Model] :param column_name: name of the relation column which is a key column :type column_name: str :return: set of ids of related model that should be extracted :rtype: set """ list_of_ids = set() current_data = self.already_extracted.get(parent_model.get_name(), {}) table_prefix = current_data.get("prefix", "") column_name = (f"{table_prefix}_" if table_prefix else "") + column_name for row in current_data.get("raw", []): if row[column_name]: list_of_ids.add(row[column_name]) return list_of_ids def _extract_ids_from_preloaded_models( self, parent_model: Type["Model"], column_name: str ) -> Set: """ Extracts relation ids from already populated models if they were included in the original query before. :param parent_model: model from which related ids should be extracted :type parent_model: Type["Model"] :param column_name: name of the relation column which is a key column :type column_name: str :return: set of ids of related model that should be extracted :rtype: set """ list_of_ids = set() for model in self.models.get(parent_model.get_name(), []): child = getattr(model, column_name) if isinstance(child, ormar.Model): list_of_ids.add(child.pk) else: list_of_ids.add(child) return list_of_ids def _extract_required_ids( self, parent_model: Type["Model"], reverse: bool, related: str ) -> Set: """ Delegates extraction of the fields to either get ids from raw sql response or from already populated models. 
:param parent_model: model from which related ids should be extracted :type parent_model: Type["Model"] :param reverse: flag if the relation is reverse :type reverse: bool :param related: name of the field with relation :type related: str :return: set of ids of related model that should be extracted :rtype: set """ use_raw = parent_model.get_name() not in self.models column_name = parent_model.get_column_name_for_id_extraction( parent_model=parent_model, reverse=reverse, related=related, use_raw=use_raw ) if use_raw: return self._extract_ids_from_raw_data( parent_model=parent_model, column_name=column_name ) return self._extract_ids_from_preloaded_models( parent_model=parent_model, column_name=column_name ) def _get_filter_for_prefetch( self, parent_model: Type["Model"], target_model: Type["Model"], reverse: bool, related: str, ) -> List: """ Populates where clause with condition to return only models within the set of extracted ids. If there are no ids for relation the empty list is returned. 
:param parent_model: model from which related ids should be extracted :type parent_model: Type["Model"] :param target_model: model to which relation leads to :type target_model: Type["Model"] :param reverse: flag if the relation is reverse :type reverse: bool :param related: name of the field with relation :type related: str :return: :rtype: List[sqlalchemy.sql.elements.TextClause] """ ids = self._extract_required_ids( parent_model=parent_model, reverse=reverse, related=related ) if ids: ( clause_target, filter_column, ) = parent_model.get_clause_target_and_filter_column_name( parent_model=parent_model, target_model=target_model, reverse=reverse, related=related, ) qryclause = QueryClause( model_cls=clause_target, select_related=[], filter_clauses=[] ) kwargs = {f"{filter_column}__in": ids} filter_clauses, _ = qryclause.prepare_filter(_own_only=False, **kwargs) return filter_clauses return [] def _populate_nested_related( self, model: "Model", prefetch_dict: Dict, orders_by: Dict ) -> "Model": """ Populates all related models children of parent model that are included in prefetch query. 
:param model: ormar model instance :type model: Model :param prefetch_dict: dictionary of models to prefetch :type prefetch_dict: Dict :param orders_by: dictionary of order bys :type orders_by: Dict :return: model with children populated :rtype: Model """ related_to_extract = model.get_filtered_names_to_extract( prefetch_dict=prefetch_dict ) for related in related_to_extract: target_field = model.Meta.model_fields[related] target_field = cast("ForeignKeyField", target_field) target_model = target_field.to.get_name() model_id = model.get_relation_model_id(target_field=target_field) if model_id is None: # pragma: no cover continue field_name = model.get_related_field_name(target_field=target_field) children = self.already_extracted.get(target_model, {}).get(field_name, {}) models = self.already_extracted.get(target_model, {}).get("pk_models", {}) set_children_on_model( model=model, related=related, children=children, model_id=model_id, models=models, orders_by=orders_by.get(related, {}), ) return model async def _prefetch_related_models( self, models: Sequence["Model"], rows: List ) -> Sequence["Model"]: """ Main method of the query. Translates select nad prefetch list into dictionaries to avoid querying the same related models multiple times. Keeps the list of already extracted models. Extracts the related models from the database and later populate all children on each of the parent models from list. 
:param models: list of parent models from main query :type models: List[Model] :param rows: raw response from sql query :type rows: List[sqlalchemy.engine.result.RowProxy] :return: list of models with prefetch children populated :rtype: List[Model] """ self.already_extracted = {self.model.get_name(): {"raw": rows}} select_dict = translate_list_to_dict(self._select_related) prefetch_dict = translate_list_to_dict(self._prefetch_related) target_model = self.model orders_by = self.order_dict for related in prefetch_dict.keys(): await self._extract_related_models( related=related, target_model=target_model, prefetch_dict=prefetch_dict.get(related, {}), select_dict=select_dict.get(related, {}), excludable=self.excludable, orders_by=orders_by.get(related, {}), ) final_models = [] for model in models: final_models.append( self._populate_nested_related( model=model, prefetch_dict=prefetch_dict, orders_by=self.order_dict ) ) return models async def _extract_related_models( # noqa: CFQ002, CCR001 self, related: str, target_model: Type["Model"], prefetch_dict: Dict, select_dict: Dict, excludable: "ExcludableItems", orders_by: Dict, ) -> None: """ Constructs queries with required ids and extracts data with fields that should be included/excluded. Runs the queries against the database and populated dictionaries with ids and with actual extracted children models. Calls itself recurrently to extract deeper nested relations of related model. 
:param related: name of the relation :type related: str :param target_model: model to which relation leads to :type target_model: Type[Model] :param prefetch_dict: prefetch related list converted into dictionary :type prefetch_dict: Dict :param select_dict: select related list converted into dictionary :type select_dict: Dict :param fields: fields to include :type fields: Union[Set[Any], Dict[Any, Any], None] :param exclude_fields: fields to exclude :type exclude_fields: Union[Set[Any], Dict[Any, Any], None] :param orders_by: dictionary of order bys clauses :type orders_by: Dict :return: None :rtype: None """ target_field = target_model.Meta.model_fields[related] target_field = cast("ForeignKeyField", target_field) reverse = False if target_field.virtual or target_field.is_multi: reverse = True parent_model = target_model filter_clauses = self._get_filter_for_prefetch( parent_model=parent_model, target_model=target_field.to, reverse=reverse, related=related, ) if not filter_clauses: # related field is empty return already_loaded = select_dict is Ellipsis or related in select_dict if not already_loaded: # If not already loaded with select_related related_field_name = parent_model.get_related_field_name( target_field=target_field ) table_prefix, exclude_prefix, rows = await self._run_prefetch_query( target_field=target_field, excludable=excludable, filter_clauses=filter_clauses, related_field_name=related_field_name, ) else: rows = [] table_prefix = "" exclude_prefix = "" if prefetch_dict and prefetch_dict is not Ellipsis: for subrelated in prefetch_dict.keys(): await self._extract_related_models( related=subrelated, target_model=target_field.to, prefetch_dict=prefetch_dict.get(subrelated, {}), select_dict=self._get_select_related_if_apply( subrelated, select_dict ), excludable=excludable, orders_by=self._get_select_related_if_apply(subrelated, orders_by), ) if not already_loaded: self._populate_rows( rows=rows, parent_model=parent_model, target_field=target_field, 
table_prefix=table_prefix, exclude_prefix=exclude_prefix, excludable=excludable, prefetch_dict=prefetch_dict, orders_by=orders_by, ) else: self._update_already_loaded_rows( target_field=target_field, prefetch_dict=prefetch_dict, orders_by=orders_by, ) async def _run_prefetch_query( self, target_field: "BaseField", excludable: "ExcludableItems", filter_clauses: List, related_field_name: str, ) -> Tuple[str, str, List]: """ Actually runs the queries against the database and populates the raw response for given related model. Returns table prefix as it's later needed to eventually initialize the children models. :param target_field: ormar field with relation definition :type target_field: "BaseField" :param filter_clauses: list of clauses, actually one clause with ids of relation :type filter_clauses: List[sqlalchemy.sql.elements.TextClause] :return: table prefix and raw rows from sql response :rtype: Tuple[str, List] """ target_model = target_field.to target_name = target_model.get_name() select_related = [] query_target = target_model table_prefix = "" exclude_prefix = target_field.to.Meta.alias_manager.resolve_relation_alias( from_model=target_field.owner, relation_name=target_field.name ) if target_field.is_multi: query_target = target_field.through select_related = [target_name] table_prefix = target_field.to.Meta.alias_manager.resolve_relation_alias( from_model=query_target, relation_name=target_name ) exclude_prefix = table_prefix self.already_extracted.setdefault(target_name, {})["prefix"] = table_prefix model_excludable = excludable.get(model_cls=target_model, alias=exclude_prefix) if model_excludable.include and not model_excludable.is_included( related_field_name ): model_excludable.set_values({related_field_name}, is_exclude=False) qry = Query( model_cls=query_target, select_related=select_related, filter_clauses=filter_clauses, exclude_clauses=[], offset=None, limit_count=None, excludable=excludable, order_bys=None, limit_raw_sql=False, ) expr = 
qry.build_select_expression() # print(expr.compile(compile_kwargs={"literal_binds": True})) rows = await self.database.fetch_all(expr) self.already_extracted.setdefault(target_name, {}).update({"raw": rows}) return table_prefix, exclude_prefix, rows @staticmethod def _get_select_related_if_apply(related: str, select_dict: Dict) -> Dict: """ Extract nested related of select_related dictionary to extract models nested deeper on related model and already loaded in select related query. :param related: name of the relation :type related: str :param select_dict: dictionary of select related models in main query :type select_dict: Dict :return: dictionary with nested related of select related :rtype: Dict """ return ( select_dict.get(related, {}) if (select_dict and select_dict is not Ellipsis and related in select_dict) else {} ) def _update_already_loaded_rows( # noqa: CFQ002 self, target_field: "BaseField", prefetch_dict: Dict, orders_by: Dict ) -> None: """ Updates models that are already loaded, usually children of children. :param target_field: ormar field with relation definition :type target_field: "BaseField" :param prefetch_dict: dictionaries of related models to prefetch :type prefetch_dict: Dict :param orders_by: dictionary of order by clauses by model :type orders_by: Dict """ target_model = target_field.to for instance in self.models.get(target_model.get_name(), []): self._populate_nested_related( model=instance, prefetch_dict=prefetch_dict, orders_by=orders_by ) def _populate_rows( # noqa: CFQ002 self, rows: List, target_field: "ForeignKeyField", parent_model: Type["Model"], table_prefix: str, exclude_prefix: str, excludable: "ExcludableItems", prefetch_dict: Dict, orders_by: Dict, ) -> None: """ Instantiates children models extracted from given relation. Populates them with their own nested children if they are included in prefetch query. Sets the initialized models and ids of them under corresponding keys in already_extracted dictionary. 
Later those instances will be fetched by ids and set on the parent model after sorting if needed. :param excludable: structure of fields to include and exclude :type excludable: ExcludableItems :param rows: raw sql response from the prefetch query :type rows: List[sqlalchemy.engine.result.RowProxy] :param target_field: field with relation definition from parent model :type target_field: "BaseField" :param parent_model: model with relation definition :type parent_model: Type[Model] :param table_prefix: prefix of the target table from current relation :type table_prefix: str :param prefetch_dict: dictionaries of related models to prefetch :type prefetch_dict: Dict :param orders_by: dictionary of order by clauses by model :type orders_by: Dict """ target_model = target_field.to for row in rows: field_name = parent_model.get_related_field_name(target_field=target_field) item = target_model.extract_prefixed_table_columns( item={}, row=row, table_prefix=table_prefix, excludable=excludable ) item["__excluded__"] = target_model.get_names_to_exclude( excludable=excludable, alias=exclude_prefix ) instance = target_model(**item) instance = self._populate_nested_related( model=instance, prefetch_dict=prefetch_dict, orders_by=orders_by ) field_db_name = target_model.get_column_alias(field_name) models = self.already_extracted[target_model.get_name()].setdefault( "pk_models", {} ) if instance.pk not in models: models[instance.pk] = instance self.already_extracted[target_model.get_name()].setdefault( field_name, dict() ).setdefault(row[field_db_name], set()).add(instance.pk) ormar-0.12.2/ormar/queryset/queries/query.py000066400000000000000000000225661444363446500211410ustar00rootroot00000000000000from typing import Dict, List, Optional, TYPE_CHECKING, Tuple, Type, Union import sqlalchemy from sqlalchemy import Table, text from sqlalchemy.sql import Join import ormar # noqa I100 from ormar.models.helpers.models import group_related_list from ormar.queryset.queries import 
FilterQuery, LimitQuery, OffsetQuery, OrderQuery from ormar.queryset.actions.filter_action import FilterAction from ormar.queryset.join import SqlJoin if TYPE_CHECKING: # pragma no cover from ormar import Model from ormar.queryset import OrderAction from ormar.models.excludable import ExcludableItems class Query: def __init__( # noqa CFQ002 self, model_cls: Type["Model"], filter_clauses: List[FilterAction], exclude_clauses: List[FilterAction], select_related: List, limit_count: Optional[int], offset: Optional[int], excludable: "ExcludableItems", order_bys: Optional[List["OrderAction"]], limit_raw_sql: bool, ) -> None: self.query_offset = offset self.limit_count = limit_count self._select_related = select_related[:] self.filter_clauses = filter_clauses[:] self.exclude_clauses = exclude_clauses[:] self.excludable = excludable self.model_cls = model_cls self.table = self.model_cls.Meta.table self.used_aliases: List[str] = [] self.select_from: Union[Join, Table, List[str]] = [] self.columns = [sqlalchemy.Column] self.order_columns = order_bys self.sorted_orders: Dict[OrderAction, text] = {} self._init_sorted_orders() self.limit_raw_sql = limit_raw_sql def _init_sorted_orders(self) -> None: """ Initialize empty order_by dict to be populated later during the query call """ if self.order_columns: for clause in self.order_columns: self.sorted_orders[clause] = None def apply_order_bys_for_primary_model(self) -> None: # noqa: CCR001 """ Applies order_by queries on main model when it's used as a subquery. That way the subquery with limit and offset only on main model has proper sorting applied and correct models are fetched. 
""" current_table_sorted = False if self.order_columns: for clause in self.order_columns: if clause.is_source_model_order: current_table_sorted = True self.sorted_orders[clause] = clause.get_text_clause() if not current_table_sorted: self._apply_default_model_sorting() def _apply_default_model_sorting(self) -> None: """ Applies orders_by from model Meta class (if provided), if it was not provided it was filled by metaclass so it's always there and falls back to pk column """ for order_by in self.model_cls.Meta.orders_by: clause = ormar.OrderAction(order_str=order_by, model_cls=self.model_cls) self.sorted_orders[clause] = clause.get_text_clause() def _pagination_query_required(self) -> bool: """ Checks if limit or offset are set, the flag limit_sql_raw is not set and query has select_related applied. Otherwise we can limit/offset normally at the end of whole query. :return: result of the check :rtype: bool """ return bool( (self.limit_count or self.query_offset) and not self.limit_raw_sql and self._select_related ) def build_select_expression(self) -> Tuple[sqlalchemy.sql.select, List[str]]: """ Main entry point from outside (after proper initialization). Extracts columns list to fetch, construct all required joins for select related, then applies all conditional and sort clauses. Returns ready to run query with all joins and clauses. :return: ready to run query with all joins and clauses. 
:rtype: sqlalchemy.sql.selectable.Select """ self_related_fields = self.model_cls.own_table_columns( model=self.model_cls, excludable=self.excludable, use_alias=True ) self.columns = self.model_cls.Meta.alias_manager.prefixed_columns( "", self.table, self_related_fields ) self.apply_order_bys_for_primary_model() self.select_from = self.table related_models = group_related_list(self._select_related) for related in related_models: remainder = None if isinstance(related_models, dict) and related_models[related]: remainder = related_models[related] sql_join = SqlJoin( used_aliases=self.used_aliases, select_from=self.select_from, columns=self.columns, excludable=self.excludable, order_columns=self.order_columns, sorted_orders=self.sorted_orders, main_model=self.model_cls, relation_name=related, relation_str=related, related_models=remainder, ) ( self.used_aliases, self.select_from, self.columns, self.sorted_orders, ) = sql_join.build_join() if self._pagination_query_required(): limit_qry, on_clause = self._build_pagination_condition() self.select_from = sqlalchemy.sql.join( self.select_from, limit_qry, on_clause ) expr = sqlalchemy.sql.select(self.columns) expr = expr.select_from(self.select_from) expr = self._apply_expression_modifiers(expr) # print("\n", expr.compile(compile_kwargs={"literal_binds": True})) self._reset_query_parameters() return expr def _build_pagination_condition( self, ) -> Tuple[ sqlalchemy.sql.expression.TextClause, sqlalchemy.sql.expression.TextClause ]: """ In order to apply limit and offset on main table in join only (otherwise you can get only partially constructed main model if number of children exceeds the applied limit and select_related is used) Used also to change first and get() without argument behaviour. Needed only if limit or offset are set, the flag limit_sql_raw is not set and query has select_related applied. Otherwise we can limit/offset normally at the end of whole query. 
The condition is added to filters to filter out desired number of main model primary key values. Whole query is used to determine the values. """ pk_alias = self.model_cls.get_column_alias(self.model_cls.Meta.pkname) pk_aliased_name = f"{self.table.name}.{pk_alias}" qry_text = sqlalchemy.text(f"{pk_aliased_name}") maxes = {} for order in list(self.sorted_orders.keys()): if order is not None and order.get_field_name_text() != pk_aliased_name: aliased_col = order.get_field_name_text() # maxes[aliased_col] = order.get_text_clause() maxes[aliased_col] = order.get_min_or_max() elif order.get_field_name_text() == pk_aliased_name: maxes[pk_aliased_name] = order.get_text_clause() limit_qry = sqlalchemy.sql.select([qry_text]) limit_qry = limit_qry.select_from(self.select_from) limit_qry = FilterQuery(filter_clauses=self.filter_clauses).apply(limit_qry) limit_qry = FilterQuery( filter_clauses=self.exclude_clauses, exclude=True ).apply(limit_qry) limit_qry = limit_qry.group_by(qry_text) for order_by in maxes.values(): limit_qry = limit_qry.order_by(order_by) limit_qry = LimitQuery(limit_count=self.limit_count).apply(limit_qry) limit_qry = OffsetQuery(query_offset=self.query_offset).apply(limit_qry) limit_qry = limit_qry.alias("limit_query") on_clause = sqlalchemy.text( f"limit_query.{pk_alias}={self.table.name}.{pk_alias}" ) return limit_qry, on_clause def _apply_expression_modifiers( self, expr: sqlalchemy.sql.select ) -> sqlalchemy.sql.select: """ Receives the select query (might be join) and applies: * Filter clauses * Exclude filter clauses * Limit clauses * Offset clauses * Order by clauses Returns complete ready to run query. 
:param expr: select expression before clauses :type expr: sqlalchemy.sql.selectable.Select :return: expresion with all present clauses applied :rtype: sqlalchemy.sql.selectable.Select """ expr = FilterQuery(filter_clauses=self.filter_clauses).apply(expr) expr = FilterQuery(filter_clauses=self.exclude_clauses, exclude=True).apply( expr ) if not self._pagination_query_required(): expr = LimitQuery(limit_count=self.limit_count).apply(expr) expr = OffsetQuery(query_offset=self.query_offset).apply(expr) expr = OrderQuery(sorted_orders=self.sorted_orders).apply(expr) return expr def _reset_query_parameters(self) -> None: """ Although it should be created each time before the call we reset the key params anyway. """ self.select_from = [] self.columns = [] self.used_aliases = [] ormar-0.12.2/ormar/queryset/queryset.py000066400000000000000000001327701444363446500201770ustar00rootroot00000000000000import asyncio from typing import ( Any, Dict, Generic, List, Optional, Sequence, Set, TYPE_CHECKING, Tuple, Type, TypeVar, Union, cast, AsyncGenerator, ) import databases import sqlalchemy from sqlalchemy import bindparam try: from sqlalchemy.engine import LegacyRow except ImportError: # pragma: no cover if TYPE_CHECKING: class LegacyRow(dict): # type: ignore pass import ormar # noqa I100 from ormar import MultipleMatches, NoMatch from ormar.exceptions import ( ModelListEmptyError, ModelPersistenceError, QueryDefinitionError, ) from ormar.queryset import FieldAccessor, FilterQuery, SelectAction from ormar.queryset.actions.order_action import OrderAction from ormar.queryset.clause import FilterGroup, QueryClause from ormar.queryset.queries.prefetch_query import PrefetchQuery from ormar.queryset.queries.query import Query from ormar.queryset.reverse_alias_resolver import ReverseAliasResolver if TYPE_CHECKING: # pragma no cover from ormar import Model from ormar.models import T from ormar.models.metaclass import ModelMeta from ormar.models.excludable import ExcludableItems else: T = 
TypeVar("T", bound="Model") class QuerySet(Generic[T]): """ Main class to perform database queries, exposed on each model as objects attribute. """ def __init__( # noqa CFQ002 self, model_cls: Optional[Type["T"]] = None, filter_clauses: List = None, exclude_clauses: List = None, select_related: List = None, limit_count: int = None, offset: int = None, excludable: "ExcludableItems" = None, order_bys: List = None, prefetch_related: List = None, limit_raw_sql: bool = False, proxy_source_model: Optional[Type["Model"]] = None, ) -> None: self.proxy_source_model = proxy_source_model self.model_cls = model_cls self.filter_clauses = [] if filter_clauses is None else filter_clauses self.exclude_clauses = [] if exclude_clauses is None else exclude_clauses self._select_related = [] if select_related is None else select_related self._prefetch_related = [] if prefetch_related is None else prefetch_related self.limit_count = limit_count self.query_offset = offset self._excludable = excludable or ormar.ExcludableItems() self.order_bys = order_bys or [] self.limit_sql_raw = limit_raw_sql @property def model_meta(self) -> "ModelMeta": """ Shortcut to model class Meta set on QuerySet model. :return: Meta class of the model :rtype: model Meta class """ if not self.model_cls: # pragma nocover raise ValueError("Model class of QuerySet is not initialized") return self.model_cls.Meta @property def model(self) -> Type["T"]: """ Shortcut to model class set on QuerySet. 
:return: model class :rtype: Type[Model] """ if not self.model_cls: # pragma nocover raise ValueError("Model class of QuerySet is not initialized") return self.model_cls def rebuild_self( # noqa: CFQ002 self, filter_clauses: List = None, exclude_clauses: List = None, select_related: List = None, limit_count: int = None, offset: int = None, excludable: "ExcludableItems" = None, order_bys: List = None, prefetch_related: List = None, limit_raw_sql: bool = None, proxy_source_model: Optional[Type["Model"]] = None, ) -> "QuerySet": """ Method that returns new instance of queryset based on passed params, all not passed params are taken from current values. """ overwrites = { "select_related": "_select_related", "offset": "query_offset", "excludable": "_excludable", "prefetch_related": "_prefetch_related", "limit_raw_sql": "limit_sql_raw", } passed_args = locals() def replace_if_none(arg_name: str) -> Any: if passed_args.get(arg_name) is None: return getattr(self, overwrites.get(arg_name, arg_name)) return passed_args.get(arg_name) return self.__class__( model_cls=self.model_cls, filter_clauses=replace_if_none("filter_clauses"), exclude_clauses=replace_if_none("exclude_clauses"), select_related=replace_if_none("select_related"), limit_count=replace_if_none("limit_count"), offset=replace_if_none("offset"), excludable=replace_if_none("excludable"), order_bys=replace_if_none("order_bys"), prefetch_related=replace_if_none("prefetch_related"), limit_raw_sql=replace_if_none("limit_raw_sql"), proxy_source_model=replace_if_none("proxy_source_model"), ) async def _prefetch_related_models( self, models: List["T"], rows: List ) -> List["T"]: """ Performs prefetch query for selected models names. 
:param models: list of already parsed main Models from main query :type models: List[Model] :param rows: database rows from main query :type rows: List[sqlalchemy.engine.result.RowProxy] :return: list of models with prefetch models populated :rtype: List[Model] """ query = PrefetchQuery( model_cls=self.model, excludable=self._excludable, prefetch_related=self._prefetch_related, select_related=self._select_related, orders_by=self.order_bys, ) return await query.prefetch_related(models=models, rows=rows) # type: ignore async def _process_query_result_rows(self, rows: List) -> List["T"]: """ Process database rows and initialize ormar Model from each of the rows. :param rows: list of database rows from query result :type rows: List[sqlalchemy.engine.result.RowProxy] :return: list of models :rtype: List[Model] """ result_rows = [] for row in rows: result_rows.append( self.model.from_row( row=row, select_related=self._select_related, excludable=self._excludable, source_model=self.model, proxy_source_model=self.proxy_source_model, ) ) await asyncio.sleep(0) if result_rows: return self.model.merge_instances_list(result_rows) # type: ignore return cast(List["T"], result_rows) def _resolve_filter_groups( self, groups: Any ) -> Tuple[List[FilterGroup], List[str]]: """ Resolves filter groups to populate FilterAction params in group tree. 
:param groups: tuple of FilterGroups :type groups: Any :return: list of resolver groups :rtype: Tuple[List[FilterGroup], List[str]] """ filter_groups = [] select_related = self._select_related if groups: for group in groups: if not isinstance(group, FilterGroup): raise QueryDefinitionError( "Only ormar.and_ and ormar.or_ " "can be passed as filter positional" " arguments," "other values need to be passed by" "keyword arguments" ) _, select_related = group.resolve( model_cls=self.model, select_related=self._select_related, filter_clauses=self.filter_clauses, ) filter_groups.append(group) return filter_groups, select_related @staticmethod def check_single_result_rows_count(rows: Sequence[Optional["T"]]) -> None: """ Verifies if the result has one and only one row. :param rows: one element list of Models :type rows: List[Model] """ if not rows or rows[0] is None: raise NoMatch() if len(rows) > 1: raise MultipleMatches() @property def database(self) -> databases.Database: """ Shortcut to models database from Meta class. :return: database :rtype: databases.Database """ return self.model_meta.database @property def table(self) -> sqlalchemy.Table: """ Shortcut to models table from Meta class. :return: database table :rtype: sqlalchemy.Table """ return self.model_meta.table def build_select_expression( self, limit: int = None, offset: int = None, order_bys: List = None ) -> sqlalchemy.sql.select: """ Constructs the actual database query used in the QuerySet. If any of the params is not passed the QuerySet own value is used. 
:param limit: number to limit the query :type limit: int :param offset: number to offset by :type offset: int :param order_bys: list of order-by fields names :type order_bys: List :return: built sqlalchemy select expression :rtype: sqlalchemy.sql.selectable.Select """ qry = Query( model_cls=self.model, select_related=self._select_related, filter_clauses=self.filter_clauses, exclude_clauses=self.exclude_clauses, offset=offset or self.query_offset, excludable=self._excludable, order_bys=order_bys or self.order_bys, limit_raw_sql=self.limit_sql_raw, limit_count=limit if limit is not None else self.limit_count, ) exp = qry.build_select_expression() # print("\n", exp.compile(compile_kwargs={"literal_binds": True})) return exp def filter( # noqa: A003 self, *args: Any, _exclude: bool = False, **kwargs: Any ) -> "QuerySet[T]": """ Allows you to filter by any `Model` attribute/field as well as to fetch instances, with a filter across an FK relationship. You can use special filter suffix to change the filter operands: * exact - like `album__name__exact='Malibu'` (exact match) * iexact - like `album__name__iexact='malibu'` (exact match case insensitive) * contains - like `album__name__contains='Mal'` (sql like) * icontains - like `album__name__icontains='mal'` (sql like case insensitive) * in - like `album__name__in=['Malibu', 'Barclay']` (sql in) * isnull - like `album__name__isnull=True` (sql is null) (isnotnull `album__name__isnull=False` (sql is not null)) * gt - like `position__gt=3` (sql >) * gte - like `position__gte=3` (sql >=) * lt - like `position__lt=3` (sql <) * lte - like `position__lte=3` (sql <=) * startswith - like `album__name__startswith='Mal'` (exact start match) * istartswith - like `album__name__istartswith='mal'` (case insensitive) * endswith - like `album__name__endswith='ibu'` (exact end match) * iendswith - like `album__name__iendswith='IBU'` (case insensitive) Note that you can also use python style filters - check the docs! 
:param _exclude: flag if it should be exclude or filter :type _exclude: bool :param kwargs: fields names and proper value types :type kwargs: Any :return: filtered QuerySet :rtype: QuerySet """ filter_groups, select_related = self._resolve_filter_groups(groups=args) qryclause = QueryClause( model_cls=self.model, select_related=select_related, filter_clauses=self.filter_clauses, ) filter_clauses, select_related = qryclause.prepare_filter(**kwargs) filter_clauses = filter_clauses + filter_groups # type: ignore if _exclude: exclude_clauses = filter_clauses filter_clauses = self.filter_clauses else: exclude_clauses = self.exclude_clauses filter_clauses = filter_clauses return self.rebuild_self( filter_clauses=filter_clauses, exclude_clauses=exclude_clauses, select_related=select_related, ) def exclude(self, *args: Any, **kwargs: Any) -> "QuerySet[T]": # noqa: A003 """ Works exactly the same as filter and all modifiers (suffixes) are the same, but returns a *not* condition. So if you use `filter(name='John')` which is `where name = 'John'` in SQL, the `exclude(name='John')` equals to `where name <> 'John'` Note that all conditions are joined so if you pass multiple values it becomes a union of conditions. `exclude(name='John', age>=35)` will become `where not (name='John' and age>=35)` :param kwargs: fields names and proper value types :type kwargs: Any :return: filtered QuerySet :rtype: QuerySet """ return self.filter(_exclude=True, *args, **kwargs) def select_related(self, related: Union[List, str, FieldAccessor]) -> "QuerySet[T]": """ Allows to prefetch related models during the same query. **With `select_related` always only one query is run against the database**, meaning that one (sometimes complicated) join is generated and later nested models are processed in python. To fetch related model use `ForeignKey` names. To chain related `Models` relation use double underscores between names. 
:param related: list of relation field names, can be linked by '__' to nest :type related: Union[List, str] :return: QuerySet :rtype: QuerySet """ if not isinstance(related, list): related = [related] related = [ rel._access_chain if isinstance(rel, FieldAccessor) else rel for rel in related ] related = sorted(list(set(list(self._select_related) + related))) return self.rebuild_self(select_related=related) def select_all(self, follow: bool = False) -> "QuerySet[T]": """ By default adds only directly related models. If follow=True is set it adds also related models of related models. To not get stuck in an infinite loop as related models also keep a relation to parent model visited models set is kept. That way already visited models that are nested are loaded, but the load do not follow them inside. So Model A -> Model B -> Model C -> Model A -> Model X will load second Model A but will never follow into Model X. Nested relations of those kind need to be loaded manually. :param follow: flag to trigger deep save - by default only directly related models are saved with follow=True also related models of related models are saved :type follow: bool :return: reloaded Model :rtype: Model """ relations = list(self.model.extract_related_names()) if follow: relations = self.model._iterate_related_models() return self.rebuild_self(select_related=relations) def prefetch_related( self, related: Union[List, str, FieldAccessor] ) -> "QuerySet[T]": """ Allows to prefetch related models during query - but opposite to `select_related` each subsequent model is fetched in a separate database query. **With `prefetch_related` always one query per Model is run against the database**, meaning that you will have multiple queries executed one after another. To fetch related model use `ForeignKey` names. To chain related `Models` relation use double underscores between names. 
:param related: list of relation field names, can be linked by '__' to nest :type related: Union[List, str] :return: QuerySet :rtype: QuerySet """ if not isinstance(related, list): related = [related] related = [ rel._access_chain if isinstance(rel, FieldAccessor) else rel for rel in related ] related = list(set(list(self._prefetch_related) + related)) return self.rebuild_self(prefetch_related=related) def fields( self, columns: Union[List, str, Set, Dict], _is_exclude: bool = False ) -> "QuerySet[T]": """ With `fields()` you can select subset of model columns to limit the data load. Note that `fields()` and `exclude_fields()` works both for main models (on normal queries like `get`, `all` etc.) as well as `select_related` and `prefetch_related` models (with nested notation). You can select specified fields by passing a `str, List[str], Set[str] or dict` with nested definition. To include related models use notation `{related_name}__{column}[__{optional_next} etc.]`. `fields()` can be called several times, building up the columns to select. If you include related models into `select_related()` call but you won't specify columns for those models in fields - implies a list of all fields for those nested models. Mandatory fields cannot be excluded as it will raise `ValidationError`, to exclude a field it has to be nullable. Pk column cannot be excluded - it's always auto added even if not explicitly included. You can also pass fields to include as dictionary or set. To mark a field as included in a dictionary use it's name as key and ellipsis as value. To traverse nested models use nested dictionaries. To include fields at last level instead of nested dictionary a set can be used. To include whole nested model specify model related field name and ellipsis. 
:param _is_exclude: flag if it's exclude or include operation :type _is_exclude: bool :param columns: columns to include :type columns: Union[List, str, Set, Dict] :return: QuerySet :rtype: QuerySet """ excludable = ormar.ExcludableItems.from_excludable(self._excludable) excludable.build( items=columns, model_cls=self.model_cls, # type: ignore is_exclude=_is_exclude, ) return self.rebuild_self(excludable=excludable) def exclude_fields(self, columns: Union[List, str, Set, Dict]) -> "QuerySet[T]": """ With `exclude_fields()` you can select subset of model columns that will be excluded to limit the data load. It's the opposite of `fields()` method so check documentation above to see what options are available. Especially check above how you can pass also nested dictionaries and sets as a mask to exclude fields from whole hierarchy. Note that `fields()` and `exclude_fields()` works both for main models (on normal queries like `get`, `all` etc.) as well as `select_related` and `prefetch_related` models (with nested notation). Mandatory fields cannot be excluded as it will raise `ValidationError`, to exclude a field it has to be nullable. Pk column cannot be excluded - it's always auto added even if explicitly excluded. :param columns: columns to exclude :type columns: Union[List, str, Set, Dict] :return: QuerySet :rtype: QuerySet """ return self.fields(columns=columns, _is_exclude=True) def order_by(self, columns: Union[List, str, OrderAction]) -> "QuerySet[T]": """ With `order_by()` you can order the results from database based on your choice of fields. You can provide a string with field name or list of strings with fields names. Ordering in sql will be applied in order of names you provide in order_by. By default if you do not provide ordering `ormar` explicitly orders by all primary keys If you are sorting by nested models that causes that the result rows are unsorted by the main model `ormar` will combine those children rows into one main model. 
The main model will never duplicate in the result To order by main model field just provide a field name To sort on nested models separate field names with dunder '__'. You can sort this way across all relation types -> `ForeignKey`, reverse virtual FK and `ManyToMany` fields. To sort in descending order provide a hyphen in front of the field name :param columns: columns by which models should be sorted :type columns: Union[List, str] :return: QuerySet :rtype: QuerySet """ if not isinstance(columns, list): columns = [columns] orders_by = [ OrderAction(order_str=x, model_cls=self.model_cls) # type: ignore if not isinstance(x, OrderAction) else x for x in columns ] order_bys = self.order_bys + [x for x in orders_by if x not in self.order_bys] return self.rebuild_self(order_bys=order_bys) async def values( self, fields: Union[List, str, Set, Dict] = None, exclude_through: bool = False, _as_dict: bool = True, _flatten: bool = False, ) -> List: """ Return a list of dictionaries with column values in order of the fields passed or all fields from queried models. To filter for given row use filter/exclude methods before values, to limit number of rows use limit/offset or paginate before values. Note that it always return a list even for one row from database. 
:param exclude_through: flag if through models should be excluded :type exclude_through: bool :param _flatten: internal parameter to flatten one element tuples :type _flatten: bool :param _as_dict: internal parameter if return dict or tuples :type _as_dict: bool :param fields: field name or list of field names to extract from db :type fields: Union[List, str, Set, Dict] """ if fields: return await self.fields(columns=fields).values( _as_dict=_as_dict, _flatten=_flatten, exclude_through=exclude_through ) expr = self.build_select_expression() rows = await self.database.fetch_all(expr) if not rows: return [] alias_resolver = ReverseAliasResolver( select_related=self._select_related, excludable=self._excludable, model_cls=self.model_cls, # type: ignore exclude_through=exclude_through, ) column_map = alias_resolver.resolve_columns( columns_names=list(cast(LegacyRow, rows[0]).keys()) ) result = [ {column_map.get(k): v for k, v in dict(x).items() if k in column_map} for x in rows ] if _as_dict: return result if _flatten and self._excludable.include_entry_count() != 1: raise QueryDefinitionError( "You cannot flatten values_list if more than one field is selected!" ) tuple_result = [tuple(x.values()) for x in result] return tuple_result if not _flatten else [x[0] for x in tuple_result] async def values_list( self, fields: Union[List, str, Set, Dict] = None, flatten: bool = False, exclude_through: bool = False, ) -> List: """ Return a list of tuples with column values in order of the fields passed or all fields from queried models. When one field is passed you can flatten the list of tuples into list of values of that single field. To filter for given row use filter/exclude methods before values, to limit number of rows use limit/offset or paginate before values. Note that it always return a list even for one row from database. 
:param exclude_through: flag if through models should be excluded :type exclude_through: bool :param fields: field name or list of field names to extract from db :type fields: Union[str, List[str]] :param flatten: when one field is passed you can flatten the list of tuples :type flatten: bool """ return await self.values( fields=fields, exclude_through=exclude_through, _as_dict=False, _flatten=flatten, ) async def exists(self) -> bool: """ Returns a bool value to confirm if there are rows matching the given criteria (applied with `filter` and `exclude` if set). :return: result of the check :rtype: bool """ expr = self.build_select_expression() expr = sqlalchemy.exists(expr).select() return await self.database.fetch_val(expr) async def count(self, distinct: bool = True) -> int: """ Returns number of rows matching the given criteria (applied with `filter` and `exclude` if set before). If `distinct` is `True` (the default), this will return the number of primary rows selected. If `False`, the count will be the total number of rows returned (including extra rows for `one-to-many` or `many-to-many` left `select_related` table joins). `False` is the legacy (buggy) behavior for workflows that depend on it. 
:param distinct: flag if the primary table rows should be distinct or not :return: number of rows :rtype: int """ expr = self.build_select_expression().alias("subquery_for_count") expr = sqlalchemy.func.count().select().select_from(expr) if distinct: pk_column_name = self.model.get_column_alias(self.model_meta.pkname) expr_distinct = expr.group_by(pk_column_name).alias("subquery_for_group") expr = sqlalchemy.func.count().select().select_from(expr_distinct) return await self.database.fetch_val(expr) async def _query_aggr_function(self, func_name: str, columns: List) -> Any: func = getattr(sqlalchemy.func, func_name) select_actions = [ SelectAction(select_str=column, model_cls=self.model) for column in columns ] if func_name in ["sum", "avg"]: if any(not x.is_numeric for x in select_actions): raise QueryDefinitionError( "You can use sum and svg only with" "numeric types of columns" ) select_columns = [x.apply_func(func, use_label=True) for x in select_actions] expr = self.build_select_expression().alias(f"subquery_for_{func_name}") expr = sqlalchemy.select(select_columns).select_from(expr) # print("\n", expr.compile(compile_kwargs={"literal_binds": True})) result = await self.database.fetch_one(expr) return dict(result) if len(result) > 1 else result[0] # type: ignore async def max(self, columns: Union[str, List[str]]) -> Any: # noqa: A003 """ Returns max value of columns for rows matching the given criteria (applied with `filter` and `exclude` if set before). :return: max value of column(s) :rtype: Any """ if not isinstance(columns, list): columns = [columns] return await self._query_aggr_function(func_name="max", columns=columns) async def min(self, columns: Union[str, List[str]]) -> Any: # noqa: A003 """ Returns min value of columns for rows matching the given criteria (applied with `filter` and `exclude` if set before). 
:return: min value of column(s) :rtype: Any """ if not isinstance(columns, list): columns = [columns] return await self._query_aggr_function(func_name="min", columns=columns) async def sum(self, columns: Union[str, List[str]]) -> Any: # noqa: A003 """ Returns sum value of columns for rows matching the given criteria (applied with `filter` and `exclude` if set before). :return: sum value of columns :rtype: int """ if not isinstance(columns, list): columns = [columns] return await self._query_aggr_function(func_name="sum", columns=columns) async def avg(self, columns: Union[str, List[str]]) -> Any: """ Returns avg value of columns for rows matching the given criteria (applied with `filter` and `exclude` if set before). :return: avg value of columns :rtype: Union[int, float, List] """ if not isinstance(columns, list): columns = [columns] return await self._query_aggr_function(func_name="avg", columns=columns) async def update(self, each: bool = False, **kwargs: Any) -> int: """ Updates the model table after applying the filters from kwargs. You have to either pass a filter to narrow down a query or explicitly pass each=True flag to affect whole table. :param each: flag if whole table should be affected if no filter is passed :type each: bool :param kwargs: fields names and proper value types :type kwargs: Any :return: number of updated rows :rtype: int """ if not each and not (self.filter_clauses or self.exclude_clauses): raise QueryDefinitionError( "You cannot update without filtering the queryset first. 
" "If you want to update all rows use update(each=True, **kwargs)" ) self_fields = self.model.extract_db_own_fields().union( self.model.extract_related_names() ) updates = {k: v for k, v in kwargs.items() if k in self_fields} updates = self.model.validate_choices(updates) updates = self.model.translate_columns_to_aliases(updates) expr = FilterQuery(filter_clauses=self.filter_clauses).apply( self.table.update().values(**updates) ) expr = FilterQuery(filter_clauses=self.exclude_clauses, exclude=True).apply( expr ) return await self.database.execute(expr) async def delete(self, *args: Any, each: bool = False, **kwargs: Any) -> int: """ Deletes from the model table after applying the filters from kwargs. You have to either pass a filter to narrow down a query or explicitly pass each=True flag to affect whole table. :param each: flag if whole table should be affected if no filter is passed :type each: bool :param kwargs: fields names and proper value types :type kwargs: Any :return: number of deleted rows :rtype:int """ if kwargs or args: return await self.filter(*args, **kwargs).delete() if not each and not (self.filter_clauses or self.exclude_clauses): raise QueryDefinitionError( "You cannot delete without filtering the queryset first. " "If you want to delete all rows use delete(each=True)" ) expr = FilterQuery(filter_clauses=self.filter_clauses).apply( self.table.delete() ) expr = FilterQuery(filter_clauses=self.exclude_clauses, exclude=True).apply( expr ) return await self.database.execute(expr) def paginate(self, page: int, page_size: int = 20) -> "QuerySet[T]": """ You can paginate the result which is a combination of offset and limit clauses. Limit is set to page size and offset is set to (page-1) * page_size. 
:param page_size: numbers of items per page :type page_size: int :param page: page number :type page: int :return: QuerySet :rtype: QuerySet """ if page < 1 or page_size < 1: raise QueryDefinitionError("Page size and page have to be greater than 0.") limit_count = page_size query_offset = (page - 1) * page_size return self.rebuild_self(limit_count=limit_count, offset=query_offset) def limit(self, limit_count: int, limit_raw_sql: bool = None) -> "QuerySet[T]": """ You can limit the results to desired number of parent models. To limit the actual number of database query rows instead of number of main models use the `limit_raw_sql` parameter flag, and set it to `True`. :param limit_raw_sql: flag if raw sql should be limited :type limit_raw_sql: bool :param limit_count: number of models to limit :type limit_count: int :return: QuerySet :rtype: QuerySet """ limit_raw_sql = self.limit_sql_raw if limit_raw_sql is None else limit_raw_sql return self.rebuild_self(limit_count=limit_count, limit_raw_sql=limit_raw_sql) def offset(self, offset: int, limit_raw_sql: bool = None) -> "QuerySet[T]": """ You can also offset the results by desired number of main models. To offset the actual number of database query rows instead of number of main models use the `limit_raw_sql` parameter flag, and set it to `True`. :param limit_raw_sql: flag if raw sql should be offset :type limit_raw_sql: bool :param offset: numbers of models to offset :type offset: int :return: QuerySet :rtype: QuerySet """ limit_raw_sql = self.limit_sql_raw if limit_raw_sql is None else limit_raw_sql return self.rebuild_self(offset=offset, limit_raw_sql=limit_raw_sql) async def first(self, *args: Any, **kwargs: Any) -> "T": """ Gets the first row from the db ordered by primary key column ascending. :raises NoMatch: if no rows are returned :raises MultipleMatches: if more than 1 row is returned. 
        :param kwargs: fields names and proper value types
        :type kwargs: Any
        :return: returned model
        :rtype: Model
        """
        if kwargs or args:
            return await self.filter(*args, **kwargs).first()
        expr = self.build_select_expression(
            limit=1,
            order_bys=(
                [
                    OrderAction(
                        order_str=f"{self.model.Meta.pkname}",
                        model_cls=self.model_cls,  # type: ignore
                    )
                ]
                if not any([x.is_source_model_order for x in self.order_bys])
                else []
            )
            + self.order_bys,
        )
        rows = await self.database.fetch_all(expr)
        processed_rows = await self._process_query_result_rows(rows)
        if self._prefetch_related and processed_rows:
            processed_rows = await self._prefetch_related_models(processed_rows, rows)
        self.check_single_result_rows_count(processed_rows)
        return processed_rows[0]  # type: ignore

    async def get_or_none(self, *args: Any, **kwargs: Any) -> Optional["T"]:
        """
        Gets the first row from the db meeting the criteria set by kwargs.

        If no criteria set it will return the last row in db sorted by pk.

        Passing a criteria is actually calling filter(*args, **kwargs) method
        described below.

        If no match is found None will be returned.

        :param kwargs: fields names and proper value types
        :type kwargs: Any
        :return: returned model
        :rtype: Model
        """
        try:
            return await self.get(*args, **kwargs)
        except ormar.NoMatch:
            return None

    async def get(self, *args: Any, **kwargs: Any) -> "T":  # noqa: CCR001
        """
        Gets the first row from the db meeting the criteria set by kwargs.

        If no criteria set it will return the last row in db sorted by pk.

        Passing a criteria is actually calling filter(*args, **kwargs) method
        described below.

        :raises NoMatch: if no rows are returned
        :raises MultipleMatches: if more than 1 row is returned.
:param kwargs: fields names and proper value types :type kwargs: Any :return: returned model :rtype: Model """ if kwargs or args: return await self.filter(*args, **kwargs).get() if not self.filter_clauses: expr = self.build_select_expression( limit=1, order_bys=( [ OrderAction( order_str=f"-{self.model.Meta.pkname}", model_cls=self.model_cls, # type: ignore ) ] if not any([x.is_source_model_order for x in self.order_bys]) else [] ) + self.order_bys, ) else: expr = self.build_select_expression() rows = await self.database.fetch_all(expr) processed_rows = await self._process_query_result_rows(rows) if self._prefetch_related and processed_rows: processed_rows = await self._prefetch_related_models(processed_rows, rows) self.check_single_result_rows_count(processed_rows) return processed_rows[0] # type: ignore async def get_or_create( self, _defaults: Optional[Dict[str, Any]] = None, *args: Any, **kwargs: Any, ) -> Tuple["T", bool]: """ Combination of create and get methods. Tries to get a row meeting the criteria for kwargs and if `NoMatch` exception is raised it creates a new one with given kwargs and _defaults. Passing a criteria is actually calling filter(*args, **kwargs) method described below. :param kwargs: fields names and proper value types :type kwargs: Any :param _defaults: default values for creating object :type _defaults: Optional[Dict[str, Any]] :return: model instance and a boolean :rtype: Tuple("T", bool) """ try: return await self.get(*args, **kwargs), False except NoMatch: _defaults = _defaults or {} return await self.create(**{**kwargs, **_defaults}), True async def update_or_create(self, **kwargs: Any) -> "T": """ Updates the model, or in case there is no match in database creates a new one. 
:param kwargs: fields names and proper value types :type kwargs: Any :return: updated or created model :rtype: Model """ pk_name = self.model_meta.pkname if "pk" in kwargs: kwargs[pk_name] = kwargs.pop("pk") if pk_name not in kwargs or kwargs.get(pk_name) is None: return await self.create(**kwargs) model = await self.get(pk=kwargs[pk_name]) return await model.update(**kwargs) async def all(self, *args: Any, **kwargs: Any) -> List["T"]: # noqa: A003 """ Returns all rows from a database for given model for set filter options. Passing args and/or kwargs is a shortcut and equals to calling `filter(*args, **kwargs).all()`. If there are no rows meeting the criteria an empty list is returned. :param kwargs: fields names and proper value types :type kwargs: Any :return: list of returned models :rtype: List[Model] """ if kwargs or args: return await self.filter(*args, **kwargs).all() expr = self.build_select_expression() rows = await self.database.fetch_all(expr) result_rows = await self._process_query_result_rows(rows) if self._prefetch_related and result_rows: result_rows = await self._prefetch_related_models(result_rows, rows) return result_rows async def iterate( # noqa: A003 self, *args: Any, **kwargs: Any, ) -> AsyncGenerator["T", None]: """ Return async iterable generator for all rows from a database for given model. Passing args and/or kwargs is a shortcut and equals to calling `filter(*args, **kwargs).iterate()`. If there are no rows meeting the criteria an empty async generator is returned. 
:param kwargs: fields names and proper value types :type kwargs: Any :return: asynchronous iterable generator of returned models :rtype: AsyncGenerator[Model] """ if self._prefetch_related: raise QueryDefinitionError( "Prefetch related queries are not supported in iterators" ) if kwargs or args: async for result in self.filter(*args, **kwargs).iterate(): yield result return expr = self.build_select_expression() rows: list = [] last_primary_key = None pk_alias = self.model.get_column_alias(self.model_meta.pkname) async for row in self.database.iterate(query=expr): current_primary_key = row[pk_alias] if last_primary_key == current_primary_key or last_primary_key is None: last_primary_key = current_primary_key rows.append(row) continue yield (await self._process_query_result_rows(rows))[0] last_primary_key = current_primary_key rows = [row] if rows: yield (await self._process_query_result_rows(rows))[0] async def create(self, **kwargs: Any) -> "T": """ Creates the model instance, saves it in a database and returns the updates model (with pk populated if not passed and autoincrement is set). The allowed kwargs are `Model` fields names and proper value types. :param kwargs: fields names and proper value types :type kwargs: Any :return: created model :rtype: Model """ instance = self.model(**kwargs) instance = await instance.save() return instance async def bulk_create(self, objects: List["T"]) -> None: """ Performs a bulk create in one database session to speed up the process. Allows you to create multiple objects at once. A valid list of `Model` objects needs to be passed. Bulk operations do not send signals. :param objects: list of ormar models already initialized and ready to save. 
:type objects: List[Model] """ if not objects: raise ModelListEmptyError("Bulk create objects are empty!") ready_objects = [] for obj in objects: ready_objects.append(obj.prepare_model_to_save(obj.dict())) await asyncio.sleep(0) # Allow context switching to prevent blocking # don't use execute_many, as in databases it's executed in a loop # instead of using execute_many from drivers expr = self.table.insert().values(ready_objects) await self.database.execute(expr) for obj in objects: obj.set_save_status(True) async def bulk_update( # noqa: CCR001 self, objects: List["T"], columns: List[str] = None ) -> None: """ Performs bulk update in one database session to speed up the process. Allows you to update multiple instance at once. All `Models` passed need to have primary key column populated. You can also select which fields to update by passing `columns` list as a list of string names. Bulk operations do not send signals. :param objects: list of ormar models :type objects: List[Model] :param columns: list of columns to update :type columns: List[str] """ if not objects: raise ModelListEmptyError("Bulk update objects are empty!") ready_objects = [] pk_name = self.model_meta.pkname if not columns: columns = list( self.model.extract_db_own_fields().union( self.model.extract_related_names() ) ) if pk_name not in columns: columns.append(pk_name) columns = [self.model.get_column_alias(k) for k in columns] for obj in objects: new_kwargs = obj.dict() if new_kwargs.get(pk_name) is None: raise ModelPersistenceError( "You cannot update unsaved objects. " f"{self.model.__name__} has to have {pk_name} filled." 
) new_kwargs = obj.prepare_model_to_update(new_kwargs) ready_objects.append( {"new_" + k: v for k, v in new_kwargs.items() if k in columns} ) await asyncio.sleep(0) pk_column = self.model_meta.table.c.get(self.model.get_column_alias(pk_name)) pk_column_name = self.model.get_column_alias(pk_name) table_columns = [c.name for c in self.model_meta.table.c] expr = self.table.update().where( pk_column == bindparam("new_" + pk_column_name) ) expr = expr.values( **{ k: bindparam("new_" + k) for k in columns if k != pk_column_name and k in table_columns } ) # databases bind params only where query is passed as string # otherwise it just passes all data to values and results in unconsumed columns expr = str(expr) await self.database.execute_many(expr, ready_objects) for obj in objects: obj.set_save_status(True) await cast(Type["Model"], self.model_cls).Meta.signals.post_bulk_update.send( sender=self.model_cls, instances=objects # type: ignore ) ormar-0.12.2/ormar/queryset/reverse_alias_resolver.py000066400000000000000000000216151444363446500230560ustar00rootroot00000000000000from typing import Dict, List, TYPE_CHECKING, Type, cast if TYPE_CHECKING: # pragma: no cover from ormar import ForeignKeyField, Model from ormar.models.excludable import Excludable, ExcludableItems class ReverseAliasResolver: """ Class is used to reverse resolve table aliases into relation strings to parse raw data columns and replace table prefixes with full relation string """ def __init__( self, model_cls: Type["Model"], excludable: "ExcludableItems", select_related: List[str], exclude_through: bool = False, ) -> None: self.select_related = select_related self.model_cls = model_cls self.reversed_aliases = self.model_cls.Meta.alias_manager.reversed_aliases self.excludable = excludable self.exclude_through = exclude_through self._fields: Dict[str, "ForeignKeyField"] = dict() self._prefixes: Dict[str, str] = dict() self._previous_prefixes: List[str] = [""] self._resolved_names: Dict[str, str] = dict() 
def resolve_columns(self, columns_names: List[str]) -> Dict: """ Takes raw query prefixed column and resolves the prefixes to relation strings (relation names connected with dunders). :param columns_names: list of column names with prefixes from query :type columns_names: List[str] :return: dictionary of prefix: resolved names :rtype: Union[None, Dict[str, str]] """ self._create_prefixes_map() for column_name in columns_names: column_parts = column_name.split("_") potential_prefix = column_parts[0] if potential_prefix in self.reversed_aliases: self._resolve_column_with_prefix( column_name=column_name, prefix=potential_prefix ) else: allowed_columns = self.model_cls.own_table_columns( model=self.model_cls, excludable=self.excludable, add_pk_columns=False, ) if column_name in allowed_columns: self._resolved_names[column_name] = column_name return self._resolved_names def _resolve_column_with_prefix(self, column_name: str, prefix: str) -> None: """ Takes the prefixed column, checks if field should be excluded, and if not it proceeds to replace prefix of a table with full relation string. 
Sample: translates: "xsd12df_name" -> into: "posts__user__name" :param column_name: prefixed name of the column :type column_name: str :param prefix: extracted prefix :type prefix: str """ relation = self.reversed_aliases.get(prefix, None) relation_str = self._prefixes.get(relation, None) field = self._fields.get(relation, None) if relation_str is None or field is None: return is_through = field.is_multi and field.through.get_name() in relation_str if self._check_if_field_is_excluded( prefix=prefix, field=field, is_through=is_through ): return target_model = field.through if is_through else field.to allowed_columns = target_model.own_table_columns( model=target_model, excludable=self.excludable, alias=prefix, add_pk_columns=False, ) new_column_name = column_name.replace(f"{prefix}_", "") if new_column_name in allowed_columns: self._resolved_names[column_name] = column_name.replace( f"{prefix}_", f"{relation_str}__" ) def _check_if_field_is_excluded( self, prefix: str, field: "ForeignKeyField", is_through: bool ) -> bool: """ Checks if given relation is excluded in current query. Note that in contrary to other queryset methods here you can exclude the in-between models but keep the end columns, which does not make sense when parsing the raw data into models. So in relation category -> category_x_post -> post -> user you can exclude category_x_post and post models but can keep the user one. (in ormar model context that is not possible as if you would exclude through and post model there would be no way to reach user model). 
Exclusions happen on a model before the current one, so we need to move back in chain of model by one or by two (m2m relations have through model in between) :param prefix: table alias :type prefix: str :param field: field with relation :type field: ForeignKeyField :param is_through: flag if current table is a through table :type is_through: bool :return: result of the check :rtype: bool """ shift, field_name = 1, field.name if is_through: field_name = field.through.get_name() elif field.is_multi: shift = 2 previous_excludable = self._get_previous_excludable( prefix=prefix, field=field, shift=shift ) return previous_excludable.is_excluded(field_name) def _get_previous_excludable( self, prefix: str, field: "ForeignKeyField", shift: int = 1 ) -> "Excludable": """ Returns excludable related to model previous in chain of models. Used to check if current model should be excluded. :param prefix: prefix of a current table :type prefix: str :param field: field with relation :type field: ForeignKeyField :param shift: how many model back to go - for m2m it's 2 due to through models :type shift: int :return: excludable for previous model :rtype: Excludable """ if prefix not in self._previous_prefixes: self._previous_prefixes.append(prefix) previous_prefix_ind = self._previous_prefixes.index(prefix) previous_prefix = ( self._previous_prefixes[previous_prefix_ind - shift] if previous_prefix_ind > (shift - 1) else "" ) return self.excludable.get(field.owner, alias=previous_prefix) def _create_prefixes_map(self) -> None: """ Creates a map of alias manager aliases keys to relation strings. I.e in alias manager you can have alias user_roles: xas12ad This method will create entry user_roles: roles, where roles is a name of relation on user model. Will also keep the relation field in separate dictionary so we can later extract field names and owner models. 
""" for related in self.select_related: model_cls = self.model_cls related_split = related.split("__") related_str = "" for relation in related_split: previous_related_str = f"{related_str}__" if related_str else "" new_related_str = previous_related_str + relation field = model_cls.Meta.model_fields[relation] field = cast("ForeignKeyField", field) prefix_name = self._handle_through_fields_and_prefix( model_cls=model_cls, field=field, previous_related_str=previous_related_str, relation=relation, ) self._prefixes[prefix_name] = new_related_str self._fields[prefix_name] = field model_cls = field.to related_str = new_related_str def _handle_through_fields_and_prefix( self, model_cls: Type["Model"], field: "ForeignKeyField", previous_related_str: str, relation: str, ) -> str: """ Registers through models for m2m relations and switches prefix for the one linking from through model to target model. For other relations returns current model name + relation name as prefix. Nested relations are a chain of relation names with __ in between. 
:param model_cls: model of current relation :type model_cls: Type["Model"] :param field: field with relation :type field: ForeignKeyField :param previous_related_str: concatenated chain linked with "__" :type previous_related_str: str :param relation: name of the current relation in chain :type relation: str :return: name of prefix to populate :rtype: str """ prefix_name = f"{model_cls.get_name()}_{relation}" if field.is_multi: through_name = field.through.get_name() if not self.exclude_through: self._fields[prefix_name] = field new_through_str = previous_related_str + through_name self._prefixes[prefix_name] = new_through_str prefix_name = f"{through_name}_{field.default_target_field_name()}" return prefix_name ormar-0.12.2/ormar/queryset/utils.py000066400000000000000000000275441444363446500174600ustar00rootroot00000000000000import collections.abc import copy from typing import ( Any, Dict, List, Optional, Sequence, Set, TYPE_CHECKING, Tuple, Type, Union, ) if TYPE_CHECKING: # pragma no cover from ormar import Model, BaseField def check_node_not_dict_or_not_last_node( part: str, is_last: bool, current_level: Any ) -> bool: """ Checks if given name is not present in the current level of the structure. Checks if given name is not the last name in the split list of parts. Checks if the given name in current level is not a dictionary. All those checks verify if there is a need for deeper traversal. :param part: :type part: str :param is_last: flag to check if last element :type is_last: bool :param current_level: current level of the traversed structure :type current_level: Any :return: result of the check :rtype: bool """ return (part not in current_level and not is_last) or ( part in current_level and not isinstance(current_level[part], dict) ) def translate_list_to_dict( # noqa: CCR001 list_to_trans: Union[List, Set], is_order: bool = False ) -> Dict: """ Splits the list of strings by '__' and converts them to dictionary with nested models grouped by parent model. 
That way each model appears only once in the whole dictionary and children are grouped under parent name. Default required key ise Ellipsis like in pydantic. :param list_to_trans: input list :type list_to_trans: Union[List, Set] :param is_order: flag if change affects order_by clauses are they require special default value with sort order. :type is_order: bool :return: converted to dictionary input list :rtype: Dict """ new_dict: Dict = dict() for path in list_to_trans: current_level = new_dict parts = path.split("__") def_val: Any = ... if is_order: if parts[0][0] == "-": def_val = "desc" parts[0] = parts[0][1:] else: def_val = "asc" for ind, part in enumerate(parts): is_last = ind == len(parts) - 1 if check_node_not_dict_or_not_last_node( part=part, is_last=is_last, current_level=current_level ): current_level[part] = dict() elif part not in current_level: current_level[part] = def_val current_level = current_level[part] return new_dict def convert_set_to_required_dict(set_to_convert: set) -> Dict: """ Converts set to dictionary of required keys. Required key is Ellipsis. :param set_to_convert: set to convert to dict :type set_to_convert: set :return: set converted to dict of ellipsis :rtype: Dict """ new_dict = dict() for key in set_to_convert: new_dict[key] = Ellipsis return new_dict def update(current_dict: Any, updating_dict: Any) -> Dict: # noqa: CCR001 """ Update one dict with another but with regard for nested keys. That way nested sets are unionised, dicts updated and only other values are overwritten. 
:param current_dict: dict to update :type current_dict: Dict[str, ellipsis] :param updating_dict: dict with values to update :type updating_dict: Dict :return: combination of both dicts :rtype: Dict """ if current_dict is Ellipsis: current_dict = dict() for key, value in updating_dict.items(): if isinstance(value, collections.abc.Mapping): old_key = current_dict.get(key, {}) if isinstance(old_key, set): old_key = convert_set_to_required_dict(old_key) current_dict[key] = update(old_key, value) elif isinstance(value, set) and isinstance(current_dict.get(key), set): current_dict[key] = current_dict.get(key).union(value) else: current_dict[key] = value return current_dict def subtract_dict(current_dict: Any, updating_dict: Any) -> Dict: # noqa: CCR001 """ Update one dict with another but with regard for nested keys. That way nested sets are unionised, dicts updated and only other values are overwritten. :param current_dict: dict to update :type current_dict: Dict[str, ellipsis] :param updating_dict: dict with values to update :type updating_dict: Dict :return: combination of both dicts :rtype: Dict """ for key, value in updating_dict.items(): old_key = current_dict.get(key, {}) new_value: Optional[Union[Dict, Set]] = None if not old_key: continue if isinstance(value, set) and isinstance(old_key, set): new_value = old_key.difference(value) elif isinstance(value, (set, collections.abc.Mapping)) and isinstance( old_key, (set, collections.abc.Mapping) ): value = ( convert_set_to_required_dict(value) if not isinstance(value, collections.abc.Mapping) else value ) old_key = ( convert_set_to_required_dict(old_key) if not isinstance(old_key, collections.abc.Mapping) else old_key ) new_value = subtract_dict(old_key, value) if new_value: current_dict[key] = new_value else: current_dict.pop(key, None) return current_dict def update_dict_from_list(curr_dict: Dict, list_to_update: Union[List, Set]) -> Dict: """ Converts the list into dictionary and later performs special update, 
where nested keys that are sets or dicts are combined and not overwritten. :param curr_dict: dict to update :type curr_dict: Dict :param list_to_update: list with values to update the dict :type list_to_update: List[str] :return: updated dict :rtype: Dict """ updated_dict = copy.copy(curr_dict) dict_to_update = translate_list_to_dict(list_to_update) update(updated_dict, dict_to_update) return updated_dict def extract_nested_models( # noqa: CCR001 model: "Model", model_type: Type["Model"], select_dict: Dict, extracted: Dict ) -> None: """ Iterates over model relations and extracts all nested models from select_dict and puts them in corresponding list under relation name in extracted dict.keys Basically flattens all relation to dictionary of all related models, that can be used on several models and extract all of their children into dictionary of lists witch children models. Goes also into nested relations if needed (specified in select_dict). :param model: parent Model :type model: Model :param model_type: parent model class :type model_type: Type[Model] :param select_dict: dictionary of related models from select_related :type select_dict: Dict :param extracted: dictionary with already extracted models :type extracted: Dict """ follow = [rel for rel in model_type.extract_related_names() if rel in select_dict] for related in follow: child = getattr(model, related) if not child: continue target_model = model_type.Meta.model_fields[related].to if isinstance(child, list): extracted.setdefault(target_model.get_name(), []).extend(child) if select_dict[related] is not Ellipsis: for sub_child in child: extract_nested_models( sub_child, target_model, select_dict[related], extracted ) else: extracted.setdefault(target_model.get_name(), []).append(child) if select_dict[related] is not Ellipsis: extract_nested_models( child, target_model, select_dict[related], extracted ) def extract_models_to_dict_of_lists( model_type: Type["Model"], models: Sequence["Model"], select_dict: 
Dict, extracted: Dict = None, ) -> Dict: """ Receives a list of models and extracts all of the children and their children into dictionary of lists with children models, flattening the structure to one dict with all children models under their relation keys. :param model_type: parent model class :type model_type: Type[Model] :param models: list of models from which related models should be extracted. :type models: List[Model] :param select_dict: dictionary of related models from select_related :type select_dict: Dict :param extracted: dictionary with already extracted models :type extracted: Dict :return: dictionary of lists f related models :rtype: Dict """ if not extracted: extracted = dict() for model in models: extract_nested_models(model, model_type, select_dict, extracted) return extracted def get_relationship_alias_model_and_str( source_model: Type["Model"], related_parts: List ) -> Tuple[str, Type["Model"], str, bool]: """ Walks the relation to retrieve the actual model on which the clause should be constructed, extracts alias based on last relation leading to target model. 
:param related_parts: list of related names extracted from string :type related_parts: Union[List, List[str]] :param source_model: model from which relation starts :type source_model: Type[Model] :return: table prefix, target model and relation string :rtype: Tuple[str, Type["Model"], str] """ table_prefix = "" is_through = False target_model = source_model previous_model = target_model previous_models = [target_model] manager = target_model.Meta.alias_manager for relation in related_parts[:]: related_field = target_model.Meta.model_fields[relation] if related_field.is_through: (previous_model, relation, is_through) = _process_through_field( related_parts=related_parts, relation=relation, related_field=related_field, previous_model=previous_model, previous_models=previous_models, ) if related_field.is_multi: previous_model = related_field.through relation = related_field.default_target_field_name() # type: ignore table_prefix = manager.resolve_relation_alias( from_model=previous_model, relation_name=relation ) target_model = related_field.to previous_model = target_model if not is_through: previous_models.append(previous_model) relation_str = "__".join(related_parts) return table_prefix, target_model, relation_str, is_through def _process_through_field( related_parts: List, relation: Optional[str], related_field: "BaseField", previous_model: Type["Model"], previous_models: List[Type["Model"]], ) -> Tuple[Type["Model"], Optional[str], bool]: """ Helper processing through models as they need to be treated differently. 
:param related_parts: split relation string :type related_parts: List[str] :param relation: relation name :type relation: str :param related_field: field with relation declaration :type related_field: "ForeignKeyField" :param previous_model: model from which relation is coming :type previous_model: Type["Model"] :param previous_models: list of already visited models in relation chain :type previous_models: List[Type["Model"]] :return: previous_model, relation, is_through :rtype: Tuple[Type["Model"], str, bool] """ is_through = True related_parts.remove(relation) through_field = related_field.owner.Meta.model_fields[ related_field.related_name or "" ] if len(previous_models) > 1 and previous_models[-2] == through_field.to: previous_model = through_field.to relation = through_field.related_name else: relation = related_field.related_name return previous_model, relation, is_through ormar-0.12.2/ormar/relations/000077500000000000000000000000001444363446500160515ustar00rootroot00000000000000ormar-0.12.2/ormar/relations/__init__.py000066400000000000000000000007761444363446500201740ustar00rootroot00000000000000""" Package handles relations on models, returning related models on calls and exposing QuerySetProxy for m2m and reverse relations. 
""" from ormar.relations.alias_manager import AliasManager from ormar.relations.relation import Relation, RelationType from ormar.relations.relation_manager import RelationsManager from ormar.relations.utils import get_relations_sides_and_names __all__ = [ "AliasManager", "Relation", "RelationsManager", "RelationType", "get_relations_sides_and_names", ] ormar-0.12.2/ormar/relations/alias_manager.py000066400000000000000000000161721444363446500212150ustar00rootroot00000000000000import string import uuid from random import choices from typing import Any, Dict, List, TYPE_CHECKING, Type, Union import sqlalchemy from sqlalchemy import text if TYPE_CHECKING: # pragma: no cover from ormar import Model from ormar.models import ModelRow from ormar.fields import ForeignKeyField def get_table_alias() -> str: """ Creates a random string that is used to alias tables in joins. It's necessary that each relation has it's own aliases cause you can link to the same target tables from multiple fields on one model as well as from multiple different models in one join. :return: randomly generated alias :rtype: str """ alias = "".join(choices(string.ascii_uppercase, k=2)) + uuid.uuid4().hex[:4] return alias.lower() class AliasManager: """ Keep all aliases of relations between different tables. One global instance is shared between all models. """ def __init__(self) -> None: self._aliases_new: Dict[str, str] = dict() self._reversed_aliases: Dict[str, str] = dict() self._prefixed_tables: Dict[str, text] = dict() def __contains__(self, item: str) -> bool: return self._aliases_new.__contains__(item) def __getitem__(self, key: str) -> Any: return self._aliases_new.__getitem__(key) @property def reversed_aliases(self) -> Dict: """ Returns swapped key-value pairs from aliases where alias is the key. 
:return: dictionary of prefix to relation :rtype: Dict """ if self._reversed_aliases: return self._reversed_aliases reversed_aliases = {v: k for k, v in self._aliases_new.items()} self._reversed_aliases = reversed_aliases return self._reversed_aliases @staticmethod def prefixed_columns( alias: str, table: sqlalchemy.Table, fields: List = None ) -> List[text]: """ Creates a list of aliases sqlalchemy text clauses from string alias and sqlalchemy.Table. Optional list of fields to include can be passed to extract only those columns. List has to have sqlalchemy names of columns (ormar aliases) not the ormar ones. :param alias: alias of given table :type alias: str :param table: table from which fields should be aliased :type table: sqlalchemy.Table :param fields: fields to include :type fields: Optional[List[str]] :return: list of sqlalchemy text clauses with "column name as aliased name" :rtype: List[text] """ alias = f"{alias}_" if alias else "" aliased_fields = [f"{alias}{x}" for x in fields] if fields else [] all_columns = ( table.columns if not fields else [ col for col in table.columns if col.name in fields or col.name in aliased_fields ] ) return [column.label(f"{alias}{column.name}") for column in all_columns] def prefixed_table_name(self, alias: str, table: sqlalchemy.Table) -> text: """ Creates text clause with table name with aliased name. :param alias: alias of given table :type alias: str :param table: table :type table: sqlalchemy.Table :return: sqlalchemy text clause as "table_name aliased_name" :rtype: sqlalchemy text clause """ full_alias = f"{alias}_{table.name}" key = f"{full_alias}_{id(table)}" return self._prefixed_tables.setdefault(key, table.alias(full_alias)) def add_relation_type( self, source_model: Type["Model"], relation_name: str, reverse_name: str = None ) -> None: """ Registers the relations defined in ormar models. Given the relation it registers also the reverse side of this relation. Used by both ForeignKey and ManyToMany relations. 
Each relation is registered as Model name and relation name. Each alias registered has to be unique. Aliases are used to construct joins to assure proper links between tables. That way you can link to the same target tables from multiple fields on one model as well as from multiple different models in one join. :param source_model: model with relation defined :type source_model: source Model :param relation_name: name of the relation to define :type relation_name: str :param reverse_name: name of related_name fo given relation for m2m relations :type reverse_name: Optional[str] :return: none :rtype: None """ parent_key = f"{source_model.get_name()}_{relation_name}" if parent_key not in self._aliases_new: self.add_alias(parent_key) to_field = source_model.Meta.model_fields[relation_name] child_model = to_field.to child_key = f"{child_model.get_name()}_{reverse_name}" if child_key not in self._aliases_new: self.add_alias(child_key) def add_alias(self, alias_key: str) -> str: """ Adds alias to the dictionary of aliases under given key. :param alias_key: key of relation to generate alias for :type alias_key: str :return: generated alias :rtype: str """ alias = get_table_alias() self._aliases_new[alias_key] = alias return alias def resolve_relation_alias( self, from_model: Union[Type["Model"], Type["ModelRow"]], relation_name: str ) -> str: """ Given model and relation name returns the alias for this relation. 
:param from_model: model with relation defined :type from_model: source Model :param relation_name: name of the relation field :type relation_name: str :return: alias of the relation :rtype: str """ alias = self._aliases_new.get(f"{from_model.get_name()}_{relation_name}", "") return alias def resolve_relation_alias_after_complex( self, source_model: Union[Type["Model"], Type["ModelRow"]], relation_str: str, relation_field: "ForeignKeyField", ) -> str: """ Given source model and relation string returns the alias for this complex relation if it exists, otherwise fallback to normal relation from a relation field definition. :param relation_field: field with direct relation definition :type relation_field: "ForeignKeyField" :param source_model: model with query starts :type source_model: source Model :param relation_str: string with relation joins defined :type relation_str: str :return: alias of the relation :rtype: str """ alias = "" if relation_str and "__" in relation_str: alias = self.resolve_relation_alias( from_model=source_model, relation_name=relation_str ) if not alias: alias = self.resolve_relation_alias( from_model=relation_field.get_source_model(), relation_name=relation_field.get_relation_name(), ) return alias ormar-0.12.2/ormar/relations/querysetproxy.py000066400000000000000000000776101444363446500214210ustar00rootroot00000000000000from _weakref import CallableProxyType from typing import ( # noqa: I100, I201 Any, Dict, Generic, List, MutableSequence, Optional, Sequence, Set, TYPE_CHECKING, Tuple, Type, TypeVar, Union, cast, AsyncGenerator, ) import ormar # noqa: I100, I202 from ormar.exceptions import ModelPersistenceError, NoMatch, QueryDefinitionError if TYPE_CHECKING: # pragma no cover from ormar.relations import Relation from ormar.models import Model, T from ormar.queryset import QuerySet from ormar import OrderAction, RelationType else: T = TypeVar("T", bound="Model") class QuerysetProxy(Generic[T]): """ Exposes QuerySet methods on relations, but 
also handles creating and removing of through Models for m2m relations. """ if TYPE_CHECKING: # pragma no cover relation: "Relation" def __init__( self, relation: "Relation", to: Type["T"], type_: "RelationType", qryset: "QuerySet[T]" = None, ) -> None: self.relation: "Relation" = relation self._queryset: Optional["QuerySet[T]"] = qryset self.type_: "RelationType" = type_ self._owner: Union[CallableProxyType, "Model"] = self.relation.manager.owner self.related_field_name = self._owner.Meta.model_fields[ self.relation.field_name ].get_related_name() self.to: Type[T] = to self.related_field = to.Meta.model_fields[self.related_field_name] self.owner_pk_value = self._owner.pk self.through_model_name = ( self.related_field.through.get_name() if self.type_ == ormar.RelationType.MULTIPLE else "" ) @property def queryset(self) -> "QuerySet[T]": """ Returns queryset if it's set, AttributeError otherwise. :return: QuerySet :rtype: QuerySet """ if not self._queryset: raise AttributeError return self._queryset @queryset.setter def queryset(self, value: "QuerySet") -> None: """ Set's the queryset. Initialized in RelationProxy. :param value: QuerySet :type value: QuerySet """ self._queryset = value def _assign_child_to_parent(self, child: Optional["T"]) -> None: """ Registers child in parents RelationManager. :param child: child to register on parent side. :type child: Model """ if child: owner = self._owner rel_name = self.relation.field_name setattr(owner, rel_name, child) def _register_related(self, child: Union["T", Sequence[Optional["T"]]]) -> None: """ Registers child/ children in parents RelationManager. :param child: child or list of children models to register. 
:type child: Union[Model,List[Model]] """ if isinstance(child, list): for subchild in child: self._assign_child_to_parent(subchild) else: assert isinstance(child, ormar.Model) child = cast("T", child) self._assign_child_to_parent(child) def _clean_items_on_load(self) -> None: """ Cleans the current list of the related models. """ if isinstance(self.relation.related_models, MutableSequence): for item in self.relation.related_models[:]: self.relation.remove(item) async def create_through_instance(self, child: "T", **kwargs: Any) -> None: """ Crete a through model instance in the database for m2m relations. :param kwargs: dict of additional keyword arguments for through instance :type kwargs: Any :param child: child model instance :type child: Model """ model_cls = self.relation.through owner_column = self.related_field.default_target_field_name() # type: ignore child_column = self.related_field.default_source_field_name() # type: ignore rel_kwargs = {owner_column: self._owner.pk, child_column: child.pk} final_kwargs = {**rel_kwargs, **kwargs} if child.pk is None: raise ModelPersistenceError( f"You cannot save {child.get_name()} " f"model without primary key set! \n" f"Save the child model first." ) await model_cls(**final_kwargs).save() async def update_through_instance(self, child: "T", **kwargs: Any) -> None: """ Updates a through model instance in the database for m2m relations. 
:param kwargs: dict of additional keyword arguments for through instance :type kwargs: Any :param child: child model instance :type child: Model """ model_cls = self.relation.through owner_column = self.related_field.default_target_field_name() # type: ignore child_column = self.related_field.default_source_field_name() # type: ignore rel_kwargs = {owner_column: self._owner.pk, child_column: child.pk} through_model = await model_cls.objects.get(**rel_kwargs) await through_model.update(**kwargs) async def upsert_through_instance(self, child: "T", **kwargs: Any) -> None: """ Updates a through model instance in the database for m2m relations if it already exists, else creates one. :param kwargs: dict of additional keyword arguments for through instance :type kwargs: Any :param child: child model instance :type child: Model """ try: await self.update_through_instance(child=child, **kwargs) except NoMatch: await self.create_through_instance(child=child, **kwargs) async def delete_through_instance(self, child: "T") -> None: """ Removes through model instance from the database for m2m relations. :param child: child model instance :type child: Model """ queryset = ormar.QuerySet(model_cls=self.relation.through) # type: ignore owner_column = self.related_field.default_target_field_name() # type: ignore child_column = self.related_field.default_source_field_name() # type: ignore kwargs = {owner_column: self._owner, child_column: child} link_instance = await queryset.filter(**kwargs).get() # type: ignore await link_instance.delete() async def exists(self) -> bool: """ Returns a bool value to confirm if there are rows matching the given criteria (applied with `filter` and `exclude` if set). Actual call delegated to QuerySet. :return: result of the check :rtype: bool """ return await self.queryset.exists() async def count(self, distinct: bool = True) -> int: """ Returns number of rows matching the given criteria (applied with `filter` and `exclude` if set before). 
If `distinct` is `True` (the default), this will return the number of primary rows selected. If `False`, the count will be the total number of rows returned (including extra rows for `one-to-many` or `many-to-many` left `select_related` table joins). `False` is the legacy (buggy) behavior for workflows that depend on it. Actual call delegated to QuerySet. :param distinct: flag if the primary table rows should be distinct or not :return: number of rows :rtype: int """ return await self.queryset.count(distinct=distinct) async def max(self, columns: Union[str, List[str]]) -> Any: # noqa: A003 """ Returns max value of columns for rows matching the given criteria (applied with `filter` and `exclude` if set before). :return: max value of column(s) :rtype: Any """ return await self.queryset.max(columns=columns) async def min(self, columns: Union[str, List[str]]) -> Any: # noqa: A003 """ Returns min value of columns for rows matching the given criteria (applied with `filter` and `exclude` if set before). :return: min value of column(s) :rtype: Any """ return await self.queryset.min(columns=columns) async def sum(self, columns: Union[str, List[str]]) -> Any: # noqa: A003 """ Returns sum value of columns for rows matching the given criteria (applied with `filter` and `exclude` if set before). :return: sum value of columns :rtype: int """ return await self.queryset.sum(columns=columns) async def avg(self, columns: Union[str, List[str]]) -> Any: """ Returns avg value of columns for rows matching the given criteria (applied with `filter` and `exclude` if set before). :return: avg value of columns :rtype: Union[int, float, List] """ return await self.queryset.avg(columns=columns) async def clear(self, keep_reversed: bool = True) -> int: """ Removes all related models from given relation. Removes all through models for m2m relation. 
For reverse FK relations keep_reversed flag marks if the reversed models should be kept or deleted from the database too (False means that models will be deleted, and not only removed from relation). :param keep_reversed: flag if reverse models in reverse FK should be deleted or not, keep_reversed=False deletes them from database. :type keep_reversed: bool :return: number of deleted models :rtype: int """ if self.type_ == ormar.RelationType.MULTIPLE: queryset = ormar.QuerySet(model_cls=self.relation.through) # type: ignore owner_column = self._owner.get_name() else: queryset = ormar.QuerySet(model_cls=self.relation.to) # type: ignore owner_column = self.related_field_name kwargs = {owner_column: self._owner} self._clean_items_on_load() if keep_reversed and self.type_ == ormar.RelationType.REVERSE: update_kwrgs = {f"{owner_column}": None} return await queryset.filter(_exclude=False, **kwargs).update( each=False, **update_kwrgs ) return await queryset.delete(**kwargs) # type: ignore async def values( self, fields: Union[List, str, Set, Dict] = None, exclude_through: bool = False ) -> List: """ Return a list of dictionaries with column values in order of the fields passed or all fields from queried models. To filter for given row use filter/exclude methods before values, to limit number of rows use limit/offset or paginate before values. Note that it always return a list even for one row from database. :param exclude_through: flag if through models should be excluded :type exclude_through: bool :param fields: field name or list of field names to extract from db :type fields: Union[List, str, Set, Dict] """ return await self.queryset.values( fields=fields, exclude_through=exclude_through ) async def values_list( self, fields: Union[List, str, Set, Dict] = None, flatten: bool = False, exclude_through: bool = False, ) -> List: """ Return a list of tuples with column values in order of the fields passed or all fields from queried models. 
When one field is passed you can flatten the list of tuples into list of values of that single field. To filter for given row use filter/exclude methods before values, to limit number of rows use limit/offset or paginate before values. Note that it always return a list even for one row from database. :param exclude_through: flag if through models should be excluded :type exclude_through: bool :param fields: field name or list of field names to extract from db :type fields: Union[str, List[str]] :param flatten: when one field is passed you can flatten the list of tuples :type flatten: bool """ return await self.queryset.values( fields=fields, exclude_through=exclude_through, _as_dict=False, _flatten=flatten, ) async def first(self, *args: Any, **kwargs: Any) -> "T": """ Gets the first row from the db ordered by primary key column ascending. Actual call delegated to QuerySet. Passing args and/or kwargs is a shortcut and equals to calling `filter(*args, **kwargs).first()`. List of related models is cleared before the call. :param kwargs: :type kwargs: :return: :rtype: _asyncio.Future """ first = await self.queryset.first(*args, **kwargs) self._clean_items_on_load() self._register_related(first) return first async def get_or_none(self, *args: Any, **kwargs: Any) -> Optional["T"]: """ Get's the first row from the db meeting the criteria set by kwargs. If no criteria set it will return the last row in db sorted by pk. Passing args and/or kwargs is a shortcut and equals to calling `filter(*args, **kwargs).get_or_none()`. If not match is found None will be returned. :param kwargs: fields names and proper value types :type kwargs: Any :return: returned model :rtype: Model """ try: get = await self.queryset.get(*args, **kwargs) except ormar.NoMatch: return None self._clean_items_on_load() self._register_related(get) return get async def get(self, *args: Any, **kwargs: Any) -> "T": """ Get's the first row from the db meeting the criteria set by kwargs. 
If no criteria set it will return the last row in db sorted by pk. Passing args and/or kwargs is a shortcut and equals to calling `filter(*args, **kwargs).get()`. Actual call delegated to QuerySet. List of related models is cleared before the call. :raises NoMatch: if no rows are returned :raises MultipleMatches: if more than 1 row is returned. :param kwargs: fields names and proper value types :type kwargs: Any :return: returned model :rtype: Model """ get = await self.queryset.get(*args, **kwargs) self._clean_items_on_load() self._register_related(get) return get async def all(self, *args: Any, **kwargs: Any) -> List["T"]: # noqa: A003 """ Returns all rows from a database for given model for set filter options. Passing args and/or kwargs is a shortcut and equals to calling `filter(*args, **kwargs).all()`. If there are no rows meeting the criteria an empty list is returned. Actual call delegated to QuerySet. List of related models is cleared before the call. :param kwargs: fields names and proper value types :type kwargs: Any :return: list of returned models :rtype: List[Model] """ all_items = await self.queryset.all(*args, **kwargs) self._clean_items_on_load() self._register_related(all_items) return all_items async def iterate( # noqa: A003 self, *args: Any, **kwargs: Any, ) -> AsyncGenerator["T", None]: """ Return async iterable generator for all rows from a database for given model. Passing args and/or kwargs is a shortcut and equals to calling `filter(*args, **kwargs).iterate()`. If there are no rows meeting the criteria an empty async generator is returned. 
:param kwargs: fields names and proper value types :type kwargs: Any :return: asynchronous iterable generator of returned models :rtype: AsyncGenerator[Model] """ async for item in self.queryset.iterate(*args, **kwargs): yield item async def create(self, **kwargs: Any) -> "T": """ Creates the model instance, saves it in a database and returns the updates model (with pk populated if not passed and autoincrement is set). The allowed kwargs are `Model` fields names and proper value types. For m2m relation the through model is created automatically. Actual call delegated to QuerySet. :param kwargs: fields names and proper value types :type kwargs: Any :return: created model :rtype: Model """ through_kwargs = kwargs.pop(self.through_model_name, {}) if self.type_ == ormar.RelationType.REVERSE: kwargs[self.related_field_name] = self._owner created = await self.queryset.create(**kwargs) self._register_related(created) if self.type_ == ormar.RelationType.MULTIPLE: await self.create_through_instance(created, **through_kwargs) return created async def update(self, each: bool = False, **kwargs: Any) -> int: """ Updates the model table after applying the filters from kwargs. You have to either pass a filter to narrow down a query or explicitly pass each=True flag to affect whole table. :param each: flag if whole table should be affected if no filter is passed :type each: bool :param kwargs: fields names and proper value types :type kwargs: Any :return: number of updated rows :rtype: int """ # queryset proxy always have one filter for pk of parent model if ( not each and (len(self.queryset.filter_clauses) + len(self.queryset.exclude_clauses)) == 1 ): raise QueryDefinitionError( "You cannot update without filtering the queryset first. 
" "If you want to update all rows use update(each=True, **kwargs)" ) through_kwargs = kwargs.pop(self.through_model_name, {}) children = await self.queryset.all() for child in children: await child.update(**kwargs) # type: ignore if self.type_ == ormar.RelationType.MULTIPLE and through_kwargs: await self.update_through_instance( child=child, **through_kwargs # type: ignore ) return len(children) async def get_or_create( self, _defaults: Optional[Dict[str, Any]] = None, *args: Any, **kwargs: Any, ) -> Tuple["T", bool]: """ Combination of create and get methods. Tries to get a row meeting the criteria fro kwargs and if `NoMatch` exception is raised it creates a new one with given kwargs and _defaults. :param kwargs: fields names and proper value types :type kwargs: Any :param _defaults: default values for creating object :type _defaults: Optional[Dict[str, Any]] :return: model instance and a boolean :rtype: Tuple("T", bool) """ try: return await self.get(*args, **kwargs), False except NoMatch: _defaults = _defaults or {} return await self.create(**{**kwargs, **_defaults}), True async def update_or_create(self, **kwargs: Any) -> "T": """ Updates the model, or in case there is no match in database creates a new one. Actual call delegated to QuerySet. :param kwargs: fields names and proper value types :type kwargs: Any :return: updated or created model :rtype: Model """ pk_name = self.queryset.model_meta.pkname if "pk" in kwargs: kwargs[pk_name] = kwargs.pop("pk") if pk_name not in kwargs or kwargs.get(pk_name) is None: return await self.create(**kwargs) model = await self.queryset.get(pk=kwargs[pk_name]) return await model.update(**kwargs) def filter( # noqa: A003, A001 self, *args: Any, **kwargs: Any ) -> "QuerysetProxy[T]": """ Allows you to filter by any `Model` attribute/field as well as to fetch instances, with a filter across an FK relationship. 
You can use special filter suffix to change the filter operands: * exact - like `album__name__exact='Malibu'` (exact match) * iexact - like `album__name__iexact='malibu'` (exact match case insensitive) * contains - like `album__name__contains='Mal'` (sql like) * icontains - like `album__name__icontains='mal'` (sql like case insensitive) * in - like `album__name__in=['Malibu', 'Barclay']` (sql in) * isnull - like `album__name__isnull=True` (sql is null) (isnotnull `album__name__isnull=False` (sql is not null)) * gt - like `position__gt=3` (sql >) * gte - like `position__gte=3` (sql >=) * lt - like `position__lt=3` (sql <) * lte - like `position__lte=3` (sql <=) * startswith - like `album__name__startswith='Mal'` (exact start match) * istartswith - like `album__name__istartswith='mal'` (case insensitive) * endswith - like `album__name__endswith='ibu'` (exact end match) * iendswith - like `album__name__iendswith='IBU'` (case insensitive) Actual call delegated to QuerySet. :param kwargs: fields names and proper value types :type kwargs: Any :return: filtered QuerysetProxy :rtype: QuerysetProxy """ queryset = self.queryset.filter(*args, **kwargs) return self.__class__( relation=self.relation, type_=self.type_, to=self.to, qryset=queryset ) def exclude( self, *args: Any, **kwargs: Any ) -> "QuerysetProxy[T]": # noqa: A003, A001 """ Works exactly the same as filter and all modifiers (suffixes) are the same, but returns a *not* condition. So if you use `filter(name='John')` which is `where name = 'John'` in SQL, the `exclude(name='John')` equals to `where name <> 'John'` Note that all conditions are joined so if you pass multiple values it becomes a union of conditions. `exclude(name='John', age>=35)` will become `where not (name='John' and age>=35)` Actual call delegated to QuerySet. 
:param kwargs: fields names and proper value types :type kwargs: Any :return: filtered QuerysetProxy :rtype: QuerysetProxy """ queryset = self.queryset.exclude(*args, **kwargs) return self.__class__( relation=self.relation, type_=self.type_, to=self.to, qryset=queryset ) def select_all(self, follow: bool = False) -> "QuerysetProxy[T]": """ By default adds only directly related models. If follow=True is set it adds also related models of related models. To not get stuck in an infinite loop as related models also keep a relation to parent model visited models set is kept. That way already visited models that are nested are loaded, but the load do not follow them inside. So Model A -> Model B -> Model C -> Model A -> Model X will load second Model A but will never follow into Model X. Nested relations of those kind need to be loaded manually. :param follow: flag to trigger deep save - by default only directly related models are saved with follow=True also related models of related models are saved :type follow: bool :return: reloaded Model :rtype: Model """ queryset = self.queryset.select_all(follow=follow) return self.__class__( relation=self.relation, type_=self.type_, to=self.to, qryset=queryset ) def select_related(self, related: Union[List, str]) -> "QuerysetProxy[T]": """ Allows to prefetch related models during the same query. **With `select_related` always only one query is run against the database**, meaning that one (sometimes complicated) join is generated and later nested models are processed in python. To fetch related model use `ForeignKey` names. To chain related `Models` relation use double underscores between names. Actual call delegated to QuerySet. 
:param related: list of relation field names, can be linked by '__' to nest :type related: Union[List, str] :return: QuerysetProxy :rtype: QuerysetProxy """ queryset = self.queryset.select_related(related) return self.__class__( relation=self.relation, type_=self.type_, to=self.to, qryset=queryset ) def prefetch_related(self, related: Union[List, str]) -> "QuerysetProxy[T]": """ Allows to prefetch related models during query - but opposite to `select_related` each subsequent model is fetched in a separate database query. **With `prefetch_related` always one query per Model is run against the database**, meaning that you will have multiple queries executed one after another. To fetch related model use `ForeignKey` names. To chain related `Models` relation use double underscores between names. Actual call delegated to QuerySet. :param related: list of relation field names, can be linked by '__' to nest :type related: Union[List, str] :return: QuerysetProxy :rtype: QuerysetProxy """ queryset = self.queryset.prefetch_related(related) return self.__class__( relation=self.relation, type_=self.type_, to=self.to, qryset=queryset ) def paginate(self, page: int, page_size: int = 20) -> "QuerysetProxy[T]": """ You can paginate the result which is a combination of offset and limit clauses. Limit is set to page size and offset is set to (page-1) * page_size. Actual call delegated to QuerySet. :param page_size: numbers of items per page :type page_size: int :param page: page number :type page: int :return: QuerySet :rtype: QuerySet """ queryset = self.queryset.paginate(page=page, page_size=page_size) return self.__class__( relation=self.relation, type_=self.type_, to=self.to, qryset=queryset ) def limit(self, limit_count: int) -> "QuerysetProxy[T]": """ You can limit the results to desired number of parent models. Actual call delegated to QuerySet. 
:param limit_count: number of models to limit :type limit_count: int :return: QuerysetProxy :rtype: QuerysetProxy """ queryset = self.queryset.limit(limit_count) return self.__class__( relation=self.relation, type_=self.type_, to=self.to, qryset=queryset ) def offset(self, offset: int) -> "QuerysetProxy[T]": """ You can also offset the results by desired number of main models. Actual call delegated to QuerySet. :param offset: numbers of models to offset :type offset: int :return: QuerysetProxy :rtype: QuerysetProxy """ queryset = self.queryset.offset(offset) return self.__class__( relation=self.relation, type_=self.type_, to=self.to, qryset=queryset ) def fields(self, columns: Union[List, str, Set, Dict]) -> "QuerysetProxy[T]": """ With `fields()` you can select subset of model columns to limit the data load. Note that `fields()` and `exclude_fields()` works both for main models (on normal queries like `get`, `all` etc.) as well as `select_related` and `prefetch_related` models (with nested notation). You can select specified fields by passing a `str, List[str], Set[str] or dict` with nested definition. To include related models use notation `{related_name}__{column}[__{optional_next} etc.]`. `fields()` can be called several times, building up the columns to select. If you include related models into `select_related()` call but you won't specify columns for those models in fields - implies a list of all fields for those nested models. Mandatory fields cannot be excluded as it will raise `ValidationError`, to exclude a field it has to be nullable. Pk column cannot be excluded - it's always auto added even if not explicitly included. You can also pass fields to include as dictionary or set. To mark a field as included in a dictionary use it's name as key and ellipsis as value. To traverse nested models use nested dictionaries. To include fields at last level instead of nested dictionary a set can be used. 
To include whole nested model specify model related field name and ellipsis. Actual call delegated to QuerySet. :param columns: columns to include :type columns: Union[List, str, Set, Dict] :return: QuerysetProxy :rtype: QuerysetProxy """ queryset = self.queryset.fields(columns) return self.__class__( relation=self.relation, type_=self.type_, to=self.to, qryset=queryset ) def exclude_fields( self, columns: Union[List, str, Set, Dict] ) -> "QuerysetProxy[T]": """ With `exclude_fields()` you can select subset of model columns that will be excluded to limit the data load. It's the opposite of `fields()` method so check documentation above to see what options are available. Especially check above how you can pass also nested dictionaries and sets as a mask to exclude fields from whole hierarchy. Note that `fields()` and `exclude_fields()` works both for main models (on normal queries like `get`, `all` etc.) as well as `select_related` and `prefetch_related` models (with nested notation). Mandatory fields cannot be excluded as it will raise `ValidationError`, to exclude a field it has to be nullable. Pk column cannot be excluded - it's always auto added even if explicitly excluded. Actual call delegated to QuerySet. :param columns: columns to exclude :type columns: Union[List, str, Set, Dict] :return: QuerysetProxy :rtype: QuerysetProxy """ queryset = self.queryset.exclude_fields(columns=columns) return self.__class__( relation=self.relation, type_=self.type_, to=self.to, qryset=queryset ) def order_by(self, columns: Union[List, str, "OrderAction"]) -> "QuerysetProxy[T]": """ With `order_by()` you can order the results from database based on your choice of fields. You can provide a string with field name or list of strings with fields names. Ordering in sql will be applied in order of names you provide in order_by. 
By default if you do not provide ordering `ormar` explicitly orders by all primary keys If you are sorting by nested models that causes that the result rows are unsorted by the main model `ormar` will combine those children rows into one main model. The main model will never duplicate in the result To order by main model field just provide a field name To sort on nested models separate field names with dunder '__'. You can sort this way across all relation types -> `ForeignKey`, reverse virtual FK and `ManyToMany` fields. To sort in descending order provide a hyphen in front of the field name Actual call delegated to QuerySet. :param columns: columns by which models should be sorted :type columns: Union[List, str] :return: QuerysetProxy :rtype: QuerysetProxy """ queryset = self.queryset.order_by(columns) return self.__class__( relation=self.relation, type_=self.type_, to=self.to, qryset=queryset ) ormar-0.12.2/ormar/relations/relation.py000066400000000000000000000155111444363446500202430ustar00rootroot00000000000000from enum import Enum from typing import ( Generic, List, Optional, Set, TYPE_CHECKING, Type, TypeVar, Union, cast, ) import ormar # noqa I100 from ormar.exceptions import RelationshipInstanceError # noqa I100 from ormar.relations.relation_proxy import RelationProxy if TYPE_CHECKING: # pragma no cover from ormar.relations import RelationsManager from ormar.models import Model, NewBaseModel, T else: T = TypeVar("T", bound="Model") class RelationType(Enum): """ Different types of relations supported by ormar: * ForeignKey = PRIMARY * reverse ForeignKey = REVERSE * ManyToMany = MULTIPLE """ PRIMARY = 1 REVERSE = 2 MULTIPLE = 3 THROUGH = 4 class Relation(Generic[T]): """ Keeps related Models and handles adding/removing of the children. 
""" def __init__( self, manager: "RelationsManager", type_: RelationType, field_name: str, to: Type["T"], through: Type["Model"] = None, ) -> None: """ Initialize the Relation and keep the related models either as instances of passed Model, or as a RelationProxy which is basically a list of models with some special behavior, as it exposes QuerySetProxy and allows querying the related models already pre filtered by parent model. :param manager: reference to relation manager :type manager: RelationsManager :param type_: type of the relation :type type_: RelationType :param field_name: name of the relation field :type field_name: str :param to: model to which relation leads to :type to: Type[Model] :param through: model through which relation goes for m2m relations :type through: Type[Model] """ self.manager = manager self._owner: "Model" = manager.owner self._type: RelationType = type_ self._to_remove: Set = set() self.to: Type["T"] = to self._through = through self.field_name: str = field_name self.related_models: Optional[Union[RelationProxy, "Model"]] = ( RelationProxy(relation=self, type_=type_, to=to, field_name=field_name) if type_ in (RelationType.REVERSE, RelationType.MULTIPLE) else None ) def clear(self) -> None: if self._type in (RelationType.PRIMARY, RelationType.THROUGH): self.related_models = None self._owner.__dict__[self.field_name] = None elif self.related_models is not None: related_models = cast("RelationProxy", self.related_models) related_models._clear() self._owner.__dict__[self.field_name] = None @property def through(self) -> Type["Model"]: if not self._through: # pragma: no cover raise RelationshipInstanceError("Relation does not have through model!") return self._through def _clean_related(self) -> None: """ Removes dead weakrefs from RelationProxy. 
""" cleaned_data = [ x for i, x in enumerate(self.related_models) # type: ignore if i not in self._to_remove ] self.related_models = RelationProxy( relation=self, type_=self._type, to=self.to, field_name=self.field_name, data_=cleaned_data, ) relation_name = self.field_name self._owner.__dict__[relation_name] = cleaned_data self._to_remove = set() def _find_existing( self, child: Union["NewBaseModel", Type["NewBaseModel"]] ) -> Optional[int]: """ Find child model in RelationProxy if exists. :param child: child model to find :type child: Model :return: index of child in RelationProxy :rtype: Optional[ind] """ if not isinstance(self.related_models, RelationProxy): # pragma nocover raise ValueError("Cannot find existing models in parent relation type") if child not in self.related_models: return None else: # We need to clear the weakrefs that don't point to anything anymore # There's an assumption here that if some of the related models # went out of scope, then they all did, so we can just check the first one try: self.related_models[0].__repr__.__self__ return self.related_models.index(child) except ReferenceError: missing = self.related_models._get_list_of_missing_weakrefs() self._to_remove.update(missing) return self.related_models.index(child) def add(self, child: "Model") -> None: """ Adds child Model to relation, either sets child as related model or adds it to the list in RelationProxy depending on relation type. 
:param child: model to add to relation :type child: Model """ relation_name = self.field_name if self._type in (RelationType.PRIMARY, RelationType.THROUGH): self.related_models = child self._owner.__dict__[relation_name] = child else: if self._find_existing(child) is None: self.related_models.append(child) # type: ignore rel = self._owner.__dict__.get(relation_name, []) rel = rel or [] if not isinstance(rel, list): rel = [rel] rel.append(child) self._owner.__dict__[relation_name] = rel def remove(self, child: Union["NewBaseModel", Type["NewBaseModel"]]) -> None: """ Removes child Model from relation, either sets None as related model or removes it from the list in RelationProxy depending on relation type. :param child: model to remove from relation :type child: Model """ relation_name = self.field_name if self._type == RelationType.PRIMARY: if self.related_models == child: self.related_models = None del self._owner.__dict__[relation_name] else: position = self._find_existing(child) if position is not None: self.related_models.pop(position) # type: ignore del self._owner.__dict__[relation_name][position] def get(self) -> Optional[Union[List["Model"], "Model"]]: """ Return the related model or models from RelationProxy. 
:return: related model/models if set :rtype: Optional[Union[List[Model], Model]] """ if self._to_remove: self._clean_related() return self.related_models def __repr__(self) -> str: # pragma no cover if self._to_remove: self._clean_related() return str(self.related_models) ormar-0.12.2/ormar/relations/relation_manager.py000066400000000000000000000136321444363446500217370ustar00rootroot00000000000000from typing import Dict, List, Optional, Sequence, TYPE_CHECKING, Type, Union from weakref import proxy from ormar.relations.relation import Relation, RelationType from ormar.relations.utils import get_relations_sides_and_names if TYPE_CHECKING: # pragma no cover from ormar.models import NewBaseModel, Model from ormar.fields import ForeignKeyField, BaseField class RelationsManager: """ Manages relations on a Model, each Model has it's own instance. """ def __init__( self, related_fields: List["ForeignKeyField"] = None, owner: Optional["Model"] = None, ) -> None: self.owner = proxy(owner) self._related_fields = related_fields or [] self._related_names = [field.name for field in self._related_fields] self._relations: Dict[str, Relation] = dict() for field in self._related_fields: self._add_relation(field) def __contains__(self, item: str) -> bool: """ Checks if relation with given name is already registered. :param item: name of attribute :type item: str :return: result of the check :rtype: bool """ return item in self._related_names def clear(self) -> None: for relation in self._relations.values(): relation.clear() def get(self, name: str) -> Optional[Union["Model", Sequence["Model"]]]: """ Returns the related model/models if relation is set. Actual call is delegated to Relation instance registered under relation name. 
:param name: name of the relation :type name: str :return: related model or list of related models if set :rtype: Optional[Union[Model, List[Model]] """ relation = self._relations.get(name, None) if relation is not None: return relation.get() return None # pragma nocover @staticmethod def add(parent: "Model", child: "Model", field: "ForeignKeyField") -> None: """ Adds relation on both sides -> meaning on both child and parent models. One side of the relation is always weakref proxy to avoid circular refs. Based on the side from which relation is added and relation name actual names of parent and child relations are established. The related models are registered on both ends. :param parent: parent model on which relation should be registered :type parent: Model :param child: child model to register :type child: Model :param field: field with relation definition :type field: ForeignKeyField """ (parent, child, child_name, to_name) = get_relations_sides_and_names( field, parent, child ) # print('adding parent', parent.get_name(), child.get_name(), child_name) parent_relation = parent._orm._get(child_name) if parent_relation: parent_relation.add(child) # type: ignore # print('adding child', child.get_name(), parent.get_name(), to_name) child_relation = child._orm._get(to_name) if child_relation: child_relation.add(parent) def remove( self, name: str, child: Union["NewBaseModel", Type["NewBaseModel"]] ) -> None: """ Removes given child from relation with given name. Since you can have many relations between two models you need to pass a name of relation from which you want to remove the child. :param name: name of the relation :type name: str :param child: child to remove from relation :type child: Union[Model, Type[Model]] """ relation = self._get(name) if relation: relation.remove(child) @staticmethod def remove_parent( item: Union["NewBaseModel", Type["NewBaseModel"]], parent: "Model", name: str ) -> None: """ Removes given parent from relation with given name. 
Since you can have many relations between two models you need to pass a name of relation from which you want to remove the parent. :param item: model with parent registered :type item: Union[Model, Type[Model]] :param parent: parent Model :type parent: Model :param name: name of the relation :type name: str """ relation_name = item.Meta.model_fields[name].get_related_name() item._orm.remove(name, parent) parent._orm.remove(relation_name, item) def _get(self, name: str) -> Optional[Relation]: """ Returns the actual relation and not the related model(s). :param name: name of the relation :type name: str :return: Relation instance :rtype: ormar.relations.relation.Relation """ relation = self._relations.get(name, None) if relation is not None: return relation return None def _get_relation_type(self, field: "BaseField") -> RelationType: """ Returns type of the relation declared on a field. :param field: field with relation declaration :type field: BaseField :return: type of the relation defined on field :rtype: RelationType """ if field.is_multi: return RelationType.MULTIPLE if field.is_through: return RelationType.THROUGH return RelationType.PRIMARY if not field.virtual else RelationType.REVERSE def _add_relation(self, field: "BaseField") -> None: """ Registers relation in the manager. Adds Relation instance under field.name. 
:param field: field with relation declaration :type field: BaseField """ self._relations[field.name] = Relation( manager=self, type_=self._get_relation_type(field), field_name=field.name, to=field.to, through=getattr(field, "through", None), ) ormar-0.12.2/ormar/relations/relation_proxy.py000066400000000000000000000264061444363446500215110ustar00rootroot00000000000000from typing import ( Any, Dict, Generic, List, Optional, TYPE_CHECKING, Set, Type, TypeVar, ) from typing_extensions import SupportsIndex import ormar from ormar.exceptions import NoMatch, RelationshipInstanceError from ormar.relations.querysetproxy import QuerysetProxy if TYPE_CHECKING: # pragma no cover from ormar import Model, RelationType from ormar.models import T from ormar.relations import Relation from ormar.queryset import QuerySet else: T = TypeVar("T", bound="Model") class RelationProxy(Generic[T], List[T]): """ Proxy of the Relation that is a list with special methods. """ def __init__( self, relation: "Relation", type_: "RelationType", to: Type["T"], field_name: str, data_: Any = None, ) -> None: self.relation: "Relation[T]" = relation self.type_: "RelationType" = type_ self.field_name = field_name self._owner: "Model" = self.relation.manager.owner self.queryset_proxy: QuerysetProxy[T] = QuerysetProxy[T]( relation=self.relation, to=to, type_=type_ ) self._related_field_name: Optional[str] = None self._relation_cache: Dict[int, int] = {} validated_data = [] if data_ is not None: idx = 0 for d in data_: try: self._relation_cache[d.__hash__()] = idx validated_data.append(d) idx += 1 except ReferenceError: pass super().__init__(validated_data or ()) @property def related_field_name(self) -> str: """ On first access calculates the name of the related field, later stored in _related_field_name property. 
:return: name of the related field :rtype: str """ if self._related_field_name: return self._related_field_name owner_field = self._owner.Meta.model_fields[self.field_name] self._related_field_name = owner_field.get_related_name() return self._related_field_name def __getitem__(self, item: Any) -> "T": # type: ignore return super().__getitem__(item) def append(self, item: "T") -> None: """ Appends an item to the list in place :param item: The generic item of the list :type item: T """ idx = len(self) self._relation_cache[item.__hash__()] = idx super().append(item) def update_cache(self, prev_hash: int, new_hash: int) -> None: """ Updates the cache from the old hash to the new one. This maintains the index cache, which allows O(1) indexing and existence checks :param prev_hash: The hash to update :type prev_hash: int :param prev_hash: The new hash to update to :type new_hash: int """ try: idx = self._relation_cache.pop(prev_hash) self._relation_cache[new_hash] = idx except KeyError: pass def index(self, item: T, *args: Any) -> int: """ Gets the index of the item in the list :param item: The item to get the index of :type item: "T" """ return self._relation_cache[item.__hash__()] def _get_list_of_missing_weakrefs(self) -> Set[int]: """ Iterates through the list and checks for weakrefs. :return: The set of missing weakref indices :rtype: Set[int] """ to_remove = set() for ind, relation_child in enumerate(self[:]): try: relation_child.__repr__.__self__ # type: ignore except ReferenceError: # pragma no cover to_remove.add(ind) return to_remove def pop(self, index: SupportsIndex = 0) -> T: """ Pops the index off the list and returns it. By default, it pops off the element at index 0. This also clears the value from the relation cache. 
:param index: The index to pop :type index: SupportsIndex :return: The item at the provided index :rtype: "T" """ item = self[index] # Try to delete it, but do it a long way # if weakly-referenced thing doesn't exist try: self._relation_cache.pop(item.__hash__()) except ReferenceError: for hash_, idx in self._relation_cache.items(): if idx == index: self._relation_cache.pop(hash_) break index_int = int(index) for idx in range(index_int + 1, len(self)): self._relation_cache[self[idx].__hash__()] -= 1 return super().pop(index) def __contains__(self, item: object) -> bool: """ Checks whether the item exists in self. This relies on the relation cache, which is a hashmap of values in the list. It runs in O(1) time. :param item: The item to check if the list contains :type item: object """ try: return item.__hash__() in self._relation_cache except ReferenceError: return False def __getattribute__(self, item: str) -> Any: """ Since some QuerySetProxy methods overwrite builtin list methods we catch calls to them and delegate it to QuerySetProxy instead. :param item: name of attribute :type item: str :return: value of attribute :rtype: Any """ if item in ["count", "clear"]: self._initialize_queryset() return getattr(self.queryset_proxy, item) return super().__getattribute__(item) def __getattr__(self, item: str) -> Any: """ Delegates calls for non existing attributes to QuerySetProxy. :param item: name of attribute/method :type item: str :return: method from QuerySetProxy if exists :rtype: method """ self._initialize_queryset() return getattr(self.queryset_proxy, item) def _clear(self) -> None: self._relation_cache.clear() super().clear() def _initialize_queryset(self) -> None: """ Initializes the QuerySetProxy if not yet initialized. """ if not self._check_if_queryset_is_initialized(): self.queryset_proxy.queryset = self._set_queryset() def _check_if_queryset_is_initialized(self) -> bool: """ Checks if the QuerySetProxy is already set and ready. 
:return: result of the check :rtype: bool """ return ( hasattr(self.queryset_proxy, "queryset") and self.queryset_proxy.queryset is not None ) def _check_if_model_saved(self) -> None: """ Verifies if the parent model of the relation has been already saved. Otherwise QuerySetProxy cannot filter by parent primary key. """ pk_value = self._owner.pk if not pk_value: raise RelationshipInstanceError( "You cannot query relationships from unsaved model." ) def _set_queryset(self) -> "QuerySet[T]": """ Creates new QuerySet with relation model and pre filters it with currents parent model primary key, so all queries by definition are already related to the parent model only, without need for user to filter them. :return: initialized QuerySet :rtype: QuerySet """ related_field_name = self.related_field_name pkname = self._owner.get_column_alias(self._owner.Meta.pkname) self._check_if_model_saved() kwargs = {f"{related_field_name}__{pkname}": self._owner.pk} queryset = ( ormar.QuerySet( model_cls=self.relation.to, proxy_source_model=self._owner.__class__ ) .select_related(related_field_name) .filter(**kwargs) ) return queryset async def remove( # type: ignore self, item: "T", keep_reversed: bool = True ) -> None: """ Removes the related from relation with parent. Through models are automatically deleted for m2m relations. For reverse FK relations keep_reversed flag marks if the reversed models should be kept or deleted from the database too (False means that models will be deleted, and not only removed from relation). :param item: child to remove from relation :type item: Model :param keep_reversed: flag if the reversed model should be kept or deleted too :type keep_reversed: bool """ if item not in self: raise NoMatch( f"Object {self._owner.get_name()} has no " f"{item.get_name()} with given primary key!" 
) await self._owner.signals.pre_relation_remove.send( sender=self._owner.__class__, instance=self._owner, child=item, relation_name=self.field_name, ) index_to_remove = self._relation_cache[item.__hash__()] self.pop(index_to_remove) relation_name = self.related_field_name relation = item._orm._get(relation_name) # if relation is None: # pragma nocover # raise ValueError( # f"{self._owner.get_name()} does not have relation {relation_name}" # ) if relation: relation.remove(self._owner) self.relation.remove(item) if self.type_ == ormar.RelationType.MULTIPLE: await self.queryset_proxy.delete_through_instance(item) else: if keep_reversed: setattr(item, relation_name, None) await item.update() else: await item.delete() await self._owner.signals.post_relation_remove.send( sender=self._owner.__class__, instance=self._owner, child=item, relation_name=self.field_name, ) async def add(self, item: "T", **kwargs: Any) -> None: """ Adds child model to relation. For ManyToMany relations through instance is automatically created. 
:param kwargs: dict of additional keyword arguments for through instance :type kwargs: Any :param item: child to add to relation :type item: Model """ new_idx = len(self) relation_name = self.related_field_name await self._owner.signals.pre_relation_add.send( sender=self._owner.__class__, instance=self._owner, child=item, relation_name=self.field_name, passed_kwargs=kwargs, ) self._check_if_model_saved() if self.type_ == ormar.RelationType.MULTIPLE: await self.queryset_proxy.create_through_instance(item, **kwargs) setattr(self._owner, self.field_name, item) else: setattr(item, relation_name, self._owner) await item.upsert() self._relation_cache[item.__hash__()] = new_idx await self._owner.signals.post_relation_add.send( sender=self._owner.__class__, instance=self._owner, child=item, relation_name=self.field_name, passed_kwargs=kwargs, ) ormar-0.12.2/ormar/relations/utils.py000066400000000000000000000020431444363446500175620ustar00rootroot00000000000000from typing import TYPE_CHECKING, Tuple from weakref import proxy from ormar.fields.foreign_key import ForeignKeyField if TYPE_CHECKING: # pragma no cover from ormar import Model def get_relations_sides_and_names( to_field: ForeignKeyField, parent: "Model", child: "Model" ) -> Tuple["Model", "Model", str, str]: """ Determines the names of child and parent relations names, as well as changes one of the sides of the relation into weakref.proxy to model. 
:param to_field: field with relation definition :type to_field: ForeignKeyField :param parent: parent model :type parent: Model :param child: child model :type child: Model :return: parent, child, child_name, to_name :rtype: Tuple["Model", "Model", str, str] """ to_name = to_field.name child_name = to_field.get_related_name() if to_field.virtual: child_name, to_name = to_name, child_name child, parent = parent, proxy(child) else: child = proxy(child) return parent, child, child_name, to_name ormar-0.12.2/ormar/signals/000077500000000000000000000000001444363446500155115ustar00rootroot00000000000000ormar-0.12.2/ormar/signals/__init__.py000066400000000000000000000003721444363446500176240ustar00rootroot00000000000000""" Signals and SignalEmitter that gathers the signals on models Meta. Used to signal receivers functions about events, i.e. post_save, pre_delete etc. """ from ormar.signals.signal import Signal, SignalEmitter __all__ = ["Signal", "SignalEmitter"] ormar-0.12.2/ormar/signals/signal.py000066400000000000000000000065761444363446500173560ustar00rootroot00000000000000import asyncio import inspect from typing import Any, Callable, Dict, TYPE_CHECKING, Tuple, Type, Union from ormar.exceptions import SignalDefinitionError if TYPE_CHECKING: # pragma: no cover from ormar import Model def callable_accepts_kwargs(func: Callable) -> bool: """ Checks if function accepts **kwargs. 
:param func: function which signature needs to be checked :type func: function :return: result of the check :rtype: bool """ return any( p for p in inspect.signature(func).parameters.values() if p.kind == p.VAR_KEYWORD ) def make_id(target: Any) -> Union[int, Tuple[int, int]]: """ Creates id of a function or method to be used as key to store signal :param target: target which id we want :type target: Any :return: id of the target :rtype: int """ if hasattr(target, "__func__"): return id(target.__self__), id(target.__func__) return id(target) class Signal: """ Signal that notifies all receiver functions. In ormar used by models to send pre_save, post_save etc. signals. """ def __init__(self) -> None: self._receivers: Dict[Union[int, Tuple[int, int]], Callable] = {} def connect(self, receiver: Callable) -> None: """ Connects given receiver function to the signal. :raises SignalDefinitionError: if receiver is not callable or not accept **kwargs :param receiver: receiver function :type receiver: Callable """ if not callable(receiver): raise SignalDefinitionError("Signal receivers must be callable.") if not callable_accepts_kwargs(receiver): raise SignalDefinitionError( "Signal receivers must accept **kwargs argument." ) new_receiver_key = make_id(receiver) if new_receiver_key not in self._receivers: self._receivers[new_receiver_key] = receiver def disconnect(self, receiver: Callable) -> bool: """ Removes the receiver function from the signal. 
:param receiver: receiver function :type receiver: Callable :return: flag if receiver was removed :rtype: bool """ new_receiver_key = make_id(receiver) receiver_func: Union[Callable, None] = self._receivers.pop( new_receiver_key, None ) return True if receiver_func is not None else False async def send(self, sender: Type["Model"], **kwargs: Any) -> None: """ Notifies all receiver functions with given kwargs :param sender: model that sends the signal :type sender: Type["Model"] :param kwargs: arguments passed to receivers :type kwargs: Any """ receivers = [ receiver_func(sender=sender, **kwargs) for receiver_func in self._receivers.values() ] await asyncio.gather(*receivers) class SignalEmitter(dict): """ Emitter that registers the signals in internal dictionary. If signal with given name does not exist it's auto added on access. """ def __getattr__(self, item: str) -> Signal: return self.setdefault(item, Signal()) def __setattr__(self, key: str, value: Signal) -> None: if not isinstance(value, Signal): raise SignalDefinitionError(f"{value} is not valid signal") self[key] = value ormar-0.12.2/poetry.lock000066400000000000000000006034231444363446500151350ustar00rootroot00000000000000# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. [[package]] name = "aiomysql" version = "0.2.0" description = "MySQL driver for asyncio." optional = true python-versions = ">=3.7" files = [ {file = "aiomysql-0.2.0-py3-none-any.whl", hash = "sha256:b7c26da0daf23a5ec5e0b133c03d20657276e4eae9b73e040b72787f6f6ade0a"}, {file = "aiomysql-0.2.0.tar.gz", hash = "sha256:558b9c26d580d08b8c5fd1be23c5231ce3aeff2dadad989540fee740253deb67"}, ] [package.dependencies] PyMySQL = ">=1.0" [package.extras] rsa = ["PyMySQL[rsa] (>=1.0)"] sa = ["sqlalchemy (>=1.3,<1.4)"] [[package]] name = "aiopg" version = "1.4.0" description = "Postgres integration with asyncio." 
optional = true python-versions = ">=3.7" files = [ {file = "aiopg-1.4.0-py3-none-any.whl", hash = "sha256:aea46e8aff30b039cfa818e6db4752c97656e893fc75e5a5dc57355a9e9dedbd"}, {file = "aiopg-1.4.0.tar.gz", hash = "sha256:116253bef86b4d954116716d181e9a0294037f266718b2e1c9766af995639d71"}, ] [package.dependencies] async-timeout = ">=3.0,<5.0" psycopg2-binary = ">=2.9.5" [package.extras] sa = ["sqlalchemy[postgresql-psycopg2binary] (>=1.3,<1.5)"] [[package]] name = "aiosqlite" version = "0.19.0" description = "asyncio bridge to the standard sqlite3 module" optional = true python-versions = ">=3.7" files = [ {file = "aiosqlite-0.19.0-py3-none-any.whl", hash = "sha256:edba222e03453e094a3ce605db1b970c4b3376264e56f32e2a4959f948d66a96"}, {file = "aiosqlite-0.19.0.tar.gz", hash = "sha256:95ee77b91c8d2808bd08a59fbebf66270e9090c3d92ffbf260dc0db0b979577d"}, ] [package.dependencies] typing_extensions = {version = ">=4.0", markers = "python_version < \"3.8\""} [package.extras] dev = ["aiounittest (==1.4.1)", "attribution (==1.6.2)", "black (==23.3.0)", "coverage[toml] (==7.2.3)", "flake8 (==5.0.4)", "flake8-bugbear (==23.3.12)", "flit (==3.7.1)", "mypy (==1.2.0)", "ufmt (==2.1.0)", "usort (==1.0.6)"] docs = ["sphinx (==6.1.3)", "sphinx-mdinclude (==0.5.3)"] [[package]] name = "anyio" version = "3.7.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.7" files = [ {file = "anyio-3.7.0-py3-none-any.whl", hash = "sha256:eddca883c4175f14df8aedce21054bfca3adb70ffe76a9f607aef9d7fa2ea7f0"}, {file = "anyio-3.7.0.tar.gz", hash = "sha256:275d9973793619a5374e1c89a4f4ad3f4b0a5510a2b5b939444bee8f4c4d37ce"}, ] [package.dependencies] exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} idna = ">=2.8" sniffio = ">=1.1" typing-extensions = {version = "*", markers = "python_version < \"3.8\""} [package.extras] doc = ["Sphinx (>=6.1.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", 
"sphinx-rtd-theme", "sphinxcontrib-jquery"] test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] trio = ["trio (<0.22)"] [[package]] name = "asgi-lifespan" version = "2.1.0" description = "Programmatic startup/shutdown of ASGI apps." optional = false python-versions = ">=3.7" files = [ {file = "asgi-lifespan-2.1.0.tar.gz", hash = "sha256:5e2effaf0bfe39829cf2d64e7ecc47c7d86d676a6599f7afba378c31f5e3a308"}, {file = "asgi_lifespan-2.1.0-py3-none-any.whl", hash = "sha256:ed840706680e28428c01e14afb3875d7d76d3206f3d5b2f2294e059b5c23804f"}, ] [package.dependencies] sniffio = "*" [[package]] name = "astpretty" version = "2.1.0" description = "Pretty print the output of python stdlib `ast.parse`." optional = false python-versions = ">=3.6.1" files = [ {file = "astpretty-2.1.0-py2.py3-none-any.whl", hash = "sha256:f81f14b5636f7af81fadb1e3c09ca7702ce4615500d9cc6d6829befb2dec2e3c"}, {file = "astpretty-2.1.0.tar.gz", hash = "sha256:8a801fcda604ec741f010bb36d7cbadc3ec8a182ea6fb83e20ab663463e75ff6"}, ] [package.extras] typed = ["typed-ast"] [[package]] name = "async-timeout" version = "4.0.2" description = "Timeout context manager for asyncio programs" optional = true python-versions = ">=3.6" files = [ {file = "async-timeout-4.0.2.tar.gz", hash = "sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15"}, {file = "async_timeout-4.0.2-py3-none-any.whl", hash = "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"}, ] [package.dependencies] typing-extensions = {version = ">=3.6.5", markers = "python_version < \"3.8\""} [[package]] name = "asyncpg" version = "0.27.0" description = "An asyncio PostgreSQL driver" optional = true python-versions = ">=3.7.0" files = [ {file = "asyncpg-0.27.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fca608d199ffed4903dce1bcd97ad0fe8260f405c1c225bdf0002709132171c2"}, {file = 
"asyncpg-0.27.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:20b596d8d074f6f695c13ffb8646d0b6bb1ab570ba7b0cfd349b921ff03cfc1e"}, {file = "asyncpg-0.27.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7a6206210c869ebd3f4eb9e89bea132aefb56ff3d1b7dd7e26b102b17e27bbb1"}, {file = "asyncpg-0.27.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a7a94c03386bb95456b12c66026b3a87d1b965f0f1e5733c36e7229f8f137747"}, {file = "asyncpg-0.27.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:bfc3980b4ba6f97138b04f0d32e8af21d6c9fa1f8e6e140c07d15690a0a99279"}, {file = "asyncpg-0.27.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9654085f2b22f66952124de13a8071b54453ff972c25c59b5ce1173a4283ffd9"}, {file = "asyncpg-0.27.0-cp310-cp310-win32.whl", hash = "sha256:879c29a75969eb2722f94443752f4720d560d1e748474de54ae8dd230bc4956b"}, {file = "asyncpg-0.27.0-cp310-cp310-win_amd64.whl", hash = "sha256:ab0f21c4818d46a60ca789ebc92327d6d874d3b7ccff3963f7af0a21dc6cff52"}, {file = "asyncpg-0.27.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:18f77e8e71e826ba2d0c3ba6764930776719ae2b225ca07e014590545928b576"}, {file = "asyncpg-0.27.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c2232d4625c558f2aa001942cac1d7952aa9f0dbfc212f63bc754277769e1ef2"}, {file = "asyncpg-0.27.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9a3a4ff43702d39e3c97a8786314123d314e0f0e4dabc8367db5b665c93914de"}, {file = "asyncpg-0.27.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccddb9419ab4e1c48742457d0c0362dbdaeb9b28e6875115abfe319b29ee225d"}, {file = "asyncpg-0.27.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:768e0e7c2898d40b16d4ef7a0b44e8150db3dd8995b4652aa1fe2902e92c7df8"}, {file = "asyncpg-0.27.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:609054a1f47292a905582a1cfcca51a6f3f30ab9d822448693e66fdddde27920"}, {file = "asyncpg-0.27.0-cp311-cp311-win32.whl", hash = "sha256:8113e17cfe236dc2277ec844ba9b3d5312f61bd2fdae6d3ed1c1cdd75f6cf2d8"}, {file = "asyncpg-0.27.0-cp311-cp311-win_amd64.whl", hash = "sha256:bb71211414dd1eeb8d31ec529fe77cff04bf53efc783a5f6f0a32d84923f45cf"}, {file = "asyncpg-0.27.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4750f5cf49ed48a6e49c6e5aed390eee367694636c2dcfaf4a273ca832c5c43c"}, {file = "asyncpg-0.27.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:eca01eb112a39d31cc4abb93a5aef2a81514c23f70956729f42fb83b11b3483f"}, {file = "asyncpg-0.27.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5710cb0937f696ce303f5eed6d272e3f057339bb4139378ccecafa9ee923a71c"}, {file = "asyncpg-0.27.0-cp37-cp37m-win_amd64.whl", hash = "sha256:71cca80a056ebe19ec74b7117b09e650990c3ca535ac1c35234a96f65604192f"}, {file = "asyncpg-0.27.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4bb366ae34af5b5cabc3ac6a5347dfb6013af38c68af8452f27968d49085ecc0"}, {file = "asyncpg-0.27.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:16ba8ec2e85d586b4a12bcd03e8d29e3d99e832764d6a1d0b8c27dbbe4a2569d"}, {file = "asyncpg-0.27.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d20dea7b83651d93b1eb2f353511fe7fd554752844523f17ad30115d8b9c8cd6"}, {file = "asyncpg-0.27.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e56ac8a8237ad4adec97c0cd4728596885f908053ab725e22900b5902e7f8e69"}, {file = "asyncpg-0.27.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bf21ebf023ec67335258e0f3d3ad7b91bb9507985ba2b2206346de488267cad0"}, {file = "asyncpg-0.27.0-cp38-cp38-win32.whl", hash = "sha256:69aa1b443a182b13a17ff926ed6627af2d98f62f2fe5890583270cc4073f63bf"}, {file = "asyncpg-0.27.0-cp38-cp38-win_amd64.whl", hash = 
"sha256:62932f29cf2433988fcd799770ec64b374a3691e7902ecf85da14d5e0854d1ea"}, {file = "asyncpg-0.27.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fddcacf695581a8d856654bc4c8cfb73d5c9df26d5f55201722d3e6a699e9629"}, {file = "asyncpg-0.27.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7d8585707ecc6661d07367d444bbaa846b4e095d84451340da8df55a3757e152"}, {file = "asyncpg-0.27.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:975a320baf7020339a67315284a4d3bf7460e664e484672bd3e71dbd881bc692"}, {file = "asyncpg-0.27.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2232ebae9796d4600a7819fc383da78ab51b32a092795f4555575fc934c1c89d"}, {file = "asyncpg-0.27.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:88b62164738239f62f4af92567b846a8ef7cf8abf53eddd83650603de4d52163"}, {file = "asyncpg-0.27.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:eb4b2fdf88af4fb1cc569781a8f933d2a73ee82cd720e0cb4edabbaecf2a905b"}, {file = "asyncpg-0.27.0-cp39-cp39-win32.whl", hash = "sha256:8934577e1ed13f7d2d9cea3cc016cc6f95c19faedea2c2b56a6f94f257cea672"}, {file = "asyncpg-0.27.0-cp39-cp39-win_amd64.whl", hash = "sha256:1b6499de06fe035cf2fa932ec5617ed3f37d4ebbf663b655922e105a484a6af9"}, {file = "asyncpg-0.27.0.tar.gz", hash = "sha256:720986d9a4705dd8a40fdf172036f5ae787225036a7eb46e704c45aa8f62c054"}, ] [package.dependencies] typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""} [package.extras] dev = ["Cython (>=0.29.24,<0.30.0)", "Sphinx (>=4.1.2,<4.2.0)", "flake8 (>=5.0.4,<5.1.0)", "pytest (>=6.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)", "uvloop (>=0.15.3)"] docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] test = ["flake8 (>=5.0.4,<5.1.0)", "uvloop (>=0.15.3)"] [[package]] name = "attrs" version = "23.1.0" description = "Classes Without Boilerplate" 
optional = false python-versions = ">=3.7" files = [ {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, ] [package.dependencies] importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} [package.extras] cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] dev = ["attrs[docs,tests]", "pre-commit"] docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] tests = ["attrs[tests-no-zope]", "zope-interface"] tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] [[package]] name = "bandit" version = "1.7.5" description = "Security oriented static analyser for python code." optional = false python-versions = ">=3.7" files = [ {file = "bandit-1.7.5-py3-none-any.whl", hash = "sha256:75665181dc1e0096369112541a056c59d1c5f66f9bb74a8d686c3c362b83f549"}, {file = "bandit-1.7.5.tar.gz", hash = "sha256:bdfc739baa03b880c2d15d0431b31c658ffc348e907fe197e54e0389dd59e11e"}, ] [package.dependencies] colorama = {version = ">=0.3.9", markers = "platform_system == \"Windows\""} GitPython = ">=1.0.1" PyYAML = ">=5.3.1" rich = "*" stevedore = ">=1.20.0" [package.extras] test = ["beautifulsoup4 (>=4.8.0)", "coverage (>=4.5.4)", "fixtures (>=3.0.0)", "flake8 (>=4.0.0)", "pylint (==1.9.4)", "stestr (>=2.5.0)", "testscenarios (>=0.5.0)", "testtools (>=2.3.0)", "tomli (>=1.1.0)"] toml = ["tomli (>=1.1.0)"] yaml = ["PyYAML"] [[package]] name = "black" version = "23.3.0" description = "The uncompromising code formatter." 
optional = false python-versions = ">=3.7" files = [ {file = "black-23.3.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:0945e13506be58bf7db93ee5853243eb368ace1c08a24c65ce108986eac65915"}, {file = "black-23.3.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:67de8d0c209eb5b330cce2469503de11bca4085880d62f1628bd9972cc3366b9"}, {file = "black-23.3.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:7c3eb7cea23904399866c55826b31c1f55bbcd3890ce22ff70466b907b6775c2"}, {file = "black-23.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32daa9783106c28815d05b724238e30718f34155653d4d6e125dc7daec8e260c"}, {file = "black-23.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:35d1381d7a22cc5b2be2f72c7dfdae4072a3336060635718cc7e1ede24221d6c"}, {file = "black-23.3.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:a8a968125d0a6a404842fa1bf0b349a568634f856aa08ffaff40ae0dfa52e7c6"}, {file = "black-23.3.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c7ab5790333c448903c4b721b59c0d80b11fe5e9803d8703e84dcb8da56fec1b"}, {file = "black-23.3.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:a6f6886c9869d4daae2d1715ce34a19bbc4b95006d20ed785ca00fa03cba312d"}, {file = "black-23.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f3c333ea1dd6771b2d3777482429864f8e258899f6ff05826c3a4fcc5ce3f70"}, {file = "black-23.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:11c410f71b876f961d1de77b9699ad19f939094c3a677323f43d7a29855fe326"}, {file = "black-23.3.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:1d06691f1eb8de91cd1b322f21e3bfc9efe0c7ca1f0e1eb1db44ea367dff656b"}, {file = "black-23.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50cb33cac881766a5cd9913e10ff75b1e8eb71babf4c7104f2e9c52da1fb7de2"}, {file = "black-23.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e114420bf26b90d4b9daa597351337762b63039752bdf72bf361364c1aa05925"}, {file = 
"black-23.3.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:48f9d345675bb7fbc3dd85821b12487e1b9a75242028adad0333ce36ed2a6d27"}, {file = "black-23.3.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:714290490c18fb0126baa0fca0a54ee795f7502b44177e1ce7624ba1c00f2331"}, {file = "black-23.3.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:064101748afa12ad2291c2b91c960be28b817c0c7eaa35bec09cc63aa56493c5"}, {file = "black-23.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:562bd3a70495facf56814293149e51aa1be9931567474993c7942ff7d3533961"}, {file = "black-23.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:e198cf27888ad6f4ff331ca1c48ffc038848ea9f031a3b40ba36aced7e22f2c8"}, {file = "black-23.3.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:3238f2aacf827d18d26db07524e44741233ae09a584273aa059066d644ca7b30"}, {file = "black-23.3.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:f0bd2f4a58d6666500542b26354978218a9babcdc972722f4bf90779524515f3"}, {file = "black-23.3.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:92c543f6854c28a3c7f39f4d9b7694f9a6eb9d3c5e2ece488c327b6e7ea9b266"}, {file = "black-23.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a150542a204124ed00683f0db1f5cf1c2aaaa9cc3495b7a3b5976fb136090ab"}, {file = "black-23.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:6b39abdfb402002b8a7d030ccc85cf5afff64ee90fa4c5aebc531e3ad0175ddb"}, {file = "black-23.3.0-py3-none-any.whl", hash = "sha256:ec751418022185b0c1bb7d7736e6933d40bbb14c14a0abcf9123d1b159f98dd4"}, {file = "black-23.3.0.tar.gz", hash = "sha256:1c7b8d606e728a41ea1ccbd7264677e494e87cf630e399262ced92d4a8dac940"}, ] [package.dependencies] click = ">=8.0.0" mypy-extensions = ">=0.4.3" packaging = ">=22.0" pathspec = ">=0.9.0" platformdirs = ">=2" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""} 
typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} [package.extras] colorama = ["colorama (>=0.4.3)"] d = ["aiohttp (>=3.7.4)"] jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "cached-property" version = "1.5.2" description = "A decorator for caching properties in classes." optional = false python-versions = "*" files = [ {file = "cached-property-1.5.2.tar.gz", hash = "sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130"}, {file = "cached_property-1.5.2-py2.py3-none-any.whl", hash = "sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0"}, ] [[package]] name = "certifi" version = "2023.5.7" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ {file = "certifi-2023.5.7-py3-none-any.whl", hash = "sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716"}, {file = "certifi-2023.5.7.tar.gz", hash = "sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7"}, ] [[package]] name = "cffi" version = "1.15.1" description = "Foreign Function Interface for Python calling C code." 
optional = true python-versions = "*" files = [ {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, {file = 
"cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, {file = 
"cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, {file = 
"cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, {file = 
"cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, ] [package.dependencies] pycparser = "*" [[package]] name = "cfgv" version = "3.3.1" description = "Validate configuration and produce human readable error messages." 
optional = false python-versions = ">=3.6.1" files = [ {file = "cfgv-3.3.1-py2.py3-none-any.whl", hash = "sha256:c6a0883f3917a037485059700b9e75da2464e6c27051014ad85ba6aaa5884426"}, {file = "cfgv-3.3.1.tar.gz", hash = "sha256:f5a830efb9ce7a445376bb66ec94c638a9787422f96264c98edc6bdeed8ab736"}, ] [[package]] name = "charset-normalizer" version = "3.1.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7.0" files = [ {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, {file = 
"charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, {file = 
"charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, {file = 
"charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, {file = 
"charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, {file = 
"charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, ] [[package]] name = "click" version = "8.1.3" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" files = [ {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, ] [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} [[package]] name = "codecov" version = "2.1.13" description = "Hosted coverage reports for GitHub, Bitbucket and Gitlab" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "codecov-2.1.13-py2.py3-none-any.whl", hash = "sha256:c2ca5e51bba9ebb43644c43d0690148a55086f7f5e6fd36170858fa4206744d5"}, {file = "codecov-2.1.13.tar.gz", hash = "sha256:2362b685633caeaf45b9951a9b76ce359cd3581dd515b430c6c3f5dfb4d92a8c"}, ] [package.dependencies] coverage = "*" requests = ">=2.7.9" [[package]] name = "cognitive-complexity" 
version = "1.3.0" description = "Library to calculate Python functions cognitive complexity via code" optional = false python-versions = ">=3.6" files = [ {file = "cognitive_complexity-1.3.0.tar.gz", hash = "sha256:a0cfbd47dee0b19f4056f892389f501694b205c3af69fb703cc744541e03dde5"}, ] [package.dependencies] setuptools = "*" [[package]] name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] [[package]] name = "coverage" version = "7.2.7" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.7" files = [ {file = "coverage-7.2.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d39b5b4f2a66ccae8b7263ac3c8170994b65266797fb96cbbfd3fb5b23921db8"}, {file = "coverage-7.2.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d040ef7c9859bb11dfeb056ff5b3872436e3b5e401817d87a31e1750b9ae2fb"}, {file = "coverage-7.2.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba90a9563ba44a72fda2e85302c3abc71c5589cea608ca16c22b9804262aaeb6"}, {file = "coverage-7.2.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7d9405291c6928619403db1d10bd07888888ec1abcbd9748fdaa971d7d661b2"}, {file = "coverage-7.2.7-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31563e97dae5598556600466ad9beea39fb04e0229e61c12eaa206e0aa202063"}, {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ebba1cd308ef115925421d3e6a586e655ca5a77b5bf41e02eb0e4562a111f2d1"}, {file = 
"coverage-7.2.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:cb017fd1b2603ef59e374ba2063f593abe0fc45f2ad9abdde5b4d83bd922a353"}, {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62a5c7dad11015c66fbb9d881bc4caa5b12f16292f857842d9d1871595f4495"}, {file = "coverage-7.2.7-cp310-cp310-win32.whl", hash = "sha256:ee57190f24fba796e36bb6d3aa8a8783c643d8fa9760c89f7a98ab5455fbf818"}, {file = "coverage-7.2.7-cp310-cp310-win_amd64.whl", hash = "sha256:f75f7168ab25dd93110c8a8117a22450c19976afbc44234cbf71481094c1b850"}, {file = "coverage-7.2.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:06a9a2be0b5b576c3f18f1a241f0473575c4a26021b52b2a85263a00f034d51f"}, {file = "coverage-7.2.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5baa06420f837184130752b7c5ea0808762083bf3487b5038d68b012e5937dbe"}, {file = "coverage-7.2.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdec9e8cbf13a5bf63290fc6013d216a4c7232efb51548594ca3631a7f13c3a3"}, {file = "coverage-7.2.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52edc1a60c0d34afa421c9c37078817b2e67a392cab17d97283b64c5833f427f"}, {file = "coverage-7.2.7-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63426706118b7f5cf6bb6c895dc215d8a418d5952544042c8a2d9fe87fcf09cb"}, {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:afb17f84d56068a7c29f5fa37bfd38d5aba69e3304af08ee94da8ed5b0865833"}, {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:48c19d2159d433ccc99e729ceae7d5293fbffa0bdb94952d3579983d1c8c9d97"}, {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0e1f928eaf5469c11e886fe0885ad2bf1ec606434e79842a879277895a50942a"}, {file = "coverage-7.2.7-cp311-cp311-win32.whl", hash = "sha256:33d6d3ea29d5b3a1a632b3c4e4f4ecae24ef170b0b9ee493883f2df10039959a"}, {file = 
"coverage-7.2.7-cp311-cp311-win_amd64.whl", hash = "sha256:5b7540161790b2f28143191f5f8ec02fb132660ff175b7747b95dcb77ac26562"}, {file = "coverage-7.2.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f2f67fe12b22cd130d34d0ef79206061bfb5eda52feb6ce0dba0644e20a03cf4"}, {file = "coverage-7.2.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a342242fe22407f3c17f4b499276a02b01e80f861f1682ad1d95b04018e0c0d4"}, {file = "coverage-7.2.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:171717c7cb6b453aebac9a2ef603699da237f341b38eebfee9be75d27dc38e01"}, {file = "coverage-7.2.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49969a9f7ffa086d973d91cec8d2e31080436ef0fb4a359cae927e742abfaaa6"}, {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b46517c02ccd08092f4fa99f24c3b83d8f92f739b4657b0f146246a0ca6a831d"}, {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a3d33a6b3eae87ceaefa91ffdc130b5e8536182cd6dfdbfc1aa56b46ff8c86de"}, {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:976b9c42fb2a43ebf304fa7d4a310e5f16cc99992f33eced91ef6f908bd8f33d"}, {file = "coverage-7.2.7-cp312-cp312-win32.whl", hash = "sha256:8de8bb0e5ad103888d65abef8bca41ab93721647590a3f740100cd65c3b00511"}, {file = "coverage-7.2.7-cp312-cp312-win_amd64.whl", hash = "sha256:9e31cb64d7de6b6f09702bb27c02d1904b3aebfca610c12772452c4e6c21a0d3"}, {file = "coverage-7.2.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:58c2ccc2f00ecb51253cbe5d8d7122a34590fac9646a960d1430d5b15321d95f"}, {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d22656368f0e6189e24722214ed8d66b8022db19d182927b9a248a2a8a2f67eb"}, {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:a895fcc7b15c3fc72beb43cdcbdf0ddb7d2ebc959edac9cef390b0d14f39f8a9"}, {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e84606b74eb7de6ff581a7915e2dab7a28a0517fbe1c9239eb227e1354064dcd"}, {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0a5f9e1dbd7fbe30196578ca36f3fba75376fb99888c395c5880b355e2875f8a"}, {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:419bfd2caae268623dd469eff96d510a920c90928b60f2073d79f8fe2bbc5959"}, {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2aee274c46590717f38ae5e4650988d1af340fe06167546cc32fe2f58ed05b02"}, {file = "coverage-7.2.7-cp37-cp37m-win32.whl", hash = "sha256:61b9a528fb348373c433e8966535074b802c7a5d7f23c4f421e6c6e2f1697a6f"}, {file = "coverage-7.2.7-cp37-cp37m-win_amd64.whl", hash = "sha256:b1c546aca0ca4d028901d825015dc8e4d56aac4b541877690eb76490f1dc8ed0"}, {file = "coverage-7.2.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:54b896376ab563bd38453cecb813c295cf347cf5906e8b41d340b0321a5433e5"}, {file = "coverage-7.2.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3d376df58cc111dc8e21e3b6e24606b5bb5dee6024f46a5abca99124b2229ef5"}, {file = "coverage-7.2.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e330fc79bd7207e46c7d7fd2bb4af2963f5f635703925543a70b99574b0fea9"}, {file = "coverage-7.2.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e9d683426464e4a252bf70c3498756055016f99ddaec3774bf368e76bbe02b6"}, {file = "coverage-7.2.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d13c64ee2d33eccf7437961b6ea7ad8673e2be040b4f7fd4fd4d4d28d9ccb1e"}, {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b7aa5f8a41217360e600da646004f878250a0d6738bcdc11a0a39928d7dc2050"}, {file = 
"coverage-7.2.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8fa03bce9bfbeeef9f3b160a8bed39a221d82308b4152b27d82d8daa7041fee5"}, {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:245167dd26180ab4c91d5e1496a30be4cd721a5cf2abf52974f965f10f11419f"}, {file = "coverage-7.2.7-cp38-cp38-win32.whl", hash = "sha256:d2c2db7fd82e9b72937969bceac4d6ca89660db0a0967614ce2481e81a0b771e"}, {file = "coverage-7.2.7-cp38-cp38-win_amd64.whl", hash = "sha256:2e07b54284e381531c87f785f613b833569c14ecacdcb85d56b25c4622c16c3c"}, {file = "coverage-7.2.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:537891ae8ce59ef63d0123f7ac9e2ae0fc8b72c7ccbe5296fec45fd68967b6c9"}, {file = "coverage-7.2.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:06fb182e69f33f6cd1d39a6c597294cff3143554b64b9825d1dc69d18cc2fff2"}, {file = "coverage-7.2.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:201e7389591af40950a6480bd9edfa8ed04346ff80002cec1a66cac4549c1ad7"}, {file = "coverage-7.2.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f6951407391b639504e3b3be51b7ba5f3528adbf1a8ac3302b687ecababf929e"}, {file = "coverage-7.2.7-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f48351d66575f535669306aa7d6d6f71bc43372473b54a832222803eb956fd1"}, {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b29019c76039dc3c0fd815c41392a044ce555d9bcdd38b0fb60fb4cd8e475ba9"}, {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:81c13a1fc7468c40f13420732805a4c38a105d89848b7c10af65a90beff25250"}, {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:975d70ab7e3c80a3fe86001d8751f6778905ec723f5b110aed1e450da9d4b7f2"}, {file = "coverage-7.2.7-cp39-cp39-win32.whl", hash = "sha256:7ee7d9d4822c8acc74a5e26c50604dff824710bc8de424904c0982e25c39c6cb"}, {file = 
"coverage-7.2.7-cp39-cp39-win_amd64.whl", hash = "sha256:eb393e5ebc85245347950143969b241d08b52b88a3dc39479822e073a1a8eb27"}, {file = "coverage-7.2.7-pp37.pp38.pp39-none-any.whl", hash = "sha256:b7b4c971f05e6ae490fef852c218b0e79d4e52f79ef0c8475566584a8fb3e01d"}, {file = "coverage-7.2.7.tar.gz", hash = "sha256:924d94291ca674905fe9481f12294eb11f2d3d3fd1adb20314ba89e94f44ed59"}, ] [package.dependencies] tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} [package.extras] toml = ["tomli"] [[package]] name = "cryptography" version = "40.0.2" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = true python-versions = ">=3.6" files = [ {file = "cryptography-40.0.2-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:8f79b5ff5ad9d3218afb1e7e20ea74da5f76943ee5edb7f76e56ec5161ec782b"}, {file = "cryptography-40.0.2-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:05dc219433b14046c476f6f09d7636b92a1c3e5808b9a6536adf4932b3b2c440"}, {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4df2af28d7bedc84fe45bd49bc35d710aede676e2a4cb7fc6d103a2adc8afe4d"}, {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dcca15d3a19a66e63662dc8d30f8036b07be851a8680eda92d079868f106288"}, {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:a04386fb7bc85fab9cd51b6308633a3c271e3d0d3eae917eebab2fac6219b6d2"}, {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:adc0d980fd2760c9e5de537c28935cc32b9353baaf28e0814df417619c6c8c3b"}, {file = "cryptography-40.0.2-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:d5a1bd0e9e2031465761dfa920c16b0065ad77321d8a8c1f5ee331021fda65e9"}, {file = "cryptography-40.0.2-cp36-abi3-musllinux_1_1_x86_64.whl", hash = 
"sha256:a95f4802d49faa6a674242e25bfeea6fc2acd915b5e5e29ac90a32b1139cae1c"}, {file = "cryptography-40.0.2-cp36-abi3-win32.whl", hash = "sha256:aecbb1592b0188e030cb01f82d12556cf72e218280f621deed7d806afd2113f9"}, {file = "cryptography-40.0.2-cp36-abi3-win_amd64.whl", hash = "sha256:b12794f01d4cacfbd3177b9042198f3af1c856eedd0a98f10f141385c809a14b"}, {file = "cryptography-40.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:142bae539ef28a1c76794cca7f49729e7c54423f615cfd9b0b1fa90ebe53244b"}, {file = "cryptography-40.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:956ba8701b4ffe91ba59665ed170a2ebbdc6fc0e40de5f6059195d9f2b33ca0e"}, {file = "cryptography-40.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4f01c9863da784558165f5d4d916093737a75203a5c5286fde60e503e4276c7a"}, {file = "cryptography-40.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3daf9b114213f8ba460b829a02896789751626a2a4e7a43a28ee77c04b5e4958"}, {file = "cryptography-40.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48f388d0d153350f378c7f7b41497a54ff1513c816bcbbcafe5b829e59b9ce5b"}, {file = "cryptography-40.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c0764e72b36a3dc065c155e5b22f93df465da9c39af65516fe04ed3c68c92636"}, {file = "cryptography-40.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:cbaba590180cba88cb99a5f76f90808a624f18b169b90a4abb40c1fd8c19420e"}, {file = "cryptography-40.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7a38250f433cd41df7fcb763caa3ee9362777fdb4dc642b9a349721d2bf47404"}, {file = "cryptography-40.0.2.tar.gz", hash = "sha256:c33c0d32b8594fa647d2e01dbccc303478e16fdd7cf98652d5b3ed11aa5e5c99"}, ] [package.dependencies] cffi = ">=1.12" [package.extras] docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] pep8test = ["black", "check-manifest", "mypy", "ruff"] sdist = ["setuptools-rust 
(>=0.11.4)"] ssh = ["bcrypt (>=3.1.5)"] test = ["iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-shard (>=0.1.2)", "pytest-subtests", "pytest-xdist"] test-randomorder = ["pytest-randomly"] tox = ["tox"] [[package]] name = "databases" version = "0.6.2" description = "Async database support for Python." optional = false python-versions = ">=3.7" files = [ {file = "databases-0.6.2-py3-none-any.whl", hash = "sha256:ff4010136ac2bb9da2322a2ffda4ef9185ae1c365e5891e52924dd9499d33dc4"}, {file = "databases-0.6.2.tar.gz", hash = "sha256:b09c370ad7c2f64c7f4316c096e265dc2e28304732639889272390decda2f893"}, ] [package.dependencies] sqlalchemy = ">=1.4,<=1.4.41" [package.extras] aiomysql = ["aiomysql"] aiopg = ["aiopg"] aiosqlite = ["aiosqlite"] asyncmy = ["asyncmy"] asyncpg = ["asyncpg"] mysql = ["aiomysql"] postgresql = ["asyncpg"] sqlite = ["aiosqlite"] [[package]] name = "dataclasses" version = "0.6" description = "A backport of the dataclasses module for Python 3.6" optional = false python-versions = "*" files = [ {file = "dataclasses-0.6-py3-none-any.whl", hash = "sha256:454a69d788c7fda44efd71e259be79577822f5e3f53f029a22d08004e951dc9f"}, {file = "dataclasses-0.6.tar.gz", hash = "sha256:6988bd2b895eef432d562370bb707d540f32f7360ab13da45340101bc2307d84"}, ] [[package]] name = "distlib" version = "0.3.6" description = "Distribution utilities" optional = false python-versions = "*" files = [ {file = "distlib-0.3.6-py2.py3-none-any.whl", hash = "sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e"}, {file = "distlib-0.3.6.tar.gz", hash = "sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46"}, ] [[package]] name = "exceptiongroup" version = "1.1.1" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ {file = "exceptiongroup-1.1.1-py3-none-any.whl", hash = "sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e"}, {file = 
"exceptiongroup-1.1.1.tar.gz", hash = "sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"}, ] [package.extras] test = ["pytest (>=6)"] [[package]] name = "fastapi" version = "0.97.0" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.7" files = [ {file = "fastapi-0.97.0-py3-none-any.whl", hash = "sha256:95d757511c596409930bd20673358d4a4d709004edb85c5d24d6ffc48fabcbf2"}, {file = "fastapi-0.97.0.tar.gz", hash = "sha256:b53248ee45f64f19bb7600953696e3edf94b0f7de94df1e5433fc5c6136fa986"}, ] [package.dependencies] pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0" starlette = ">=0.27.0,<0.28.0" [package.extras] all = ["email-validator (>=1.1.1)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "python-multipart (>=0.0.5)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] [[package]] name = "filelock" version = "3.12.2" description = "A platform independent file lock." 
optional = false python-versions = ">=3.7" files = [ {file = "filelock-3.12.2-py3-none-any.whl", hash = "sha256:cbb791cdea2a72f23da6ac5b5269ab0a0d161e9ef0100e653b69049a7706d1ec"}, {file = "filelock-3.12.2.tar.gz", hash = "sha256:002740518d8aa59a26b0c76e10fb8c6e15eae825d34b6fdf670333fd7b938d81"}, ] [package.extras] docs = ["furo (>=2023.5.20)", "sphinx (>=7.0.1)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)"] testing = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "diff-cover (>=7.5)", "pytest (>=7.3.1)", "pytest-cov (>=4.1)", "pytest-mock (>=3.10)", "pytest-timeout (>=2.1)"] [[package]] name = "flake8" version = "3.9.2" description = "the modular source code checker: pep8 pyflakes and co" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" files = [ {file = "flake8-3.9.2-py2.py3-none-any.whl", hash = "sha256:bf8fd333346d844f616e8d47905ef3a3384edae6b4e9beb0c5101e25e3110907"}, {file = "flake8-3.9.2.tar.gz", hash = "sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b"}, ] [package.dependencies] importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} mccabe = ">=0.6.0,<0.7.0" pycodestyle = ">=2.7.0,<2.8.0" pyflakes = ">=2.3.0,<2.4.0" [[package]] name = "flake8-bandit" version = "3.0.0" description = "Automated security testing with bandit and flake8." 
optional = false python-versions = ">=3.6" files = [ {file = "flake8_bandit-3.0.0-py2.py3-none-any.whl", hash = "sha256:61b617f4f7cdaa0e2b1e6bf7b68afb2b619a227bb3e3ae00dd36c213bd17900a"}, {file = "flake8_bandit-3.0.0.tar.gz", hash = "sha256:54d19427e6a8d50322a7b02e1841c0a7c22d856975f3459803320e0e18e2d6a1"}, ] [package.dependencies] bandit = ">=1.7.3" flake8 = "*" flake8-polyfill = "*" pycodestyle = "*" [[package]] name = "flake8-black" version = "0.3.6" description = "flake8 plugin to call black as a code style validator" optional = false python-versions = ">=3.7" files = [ {file = "flake8-black-0.3.6.tar.gz", hash = "sha256:0dfbca3274777792a5bcb2af887a4cad72c72d0e86c94e08e3a3de151bb41c34"}, {file = "flake8_black-0.3.6-py3-none-any.whl", hash = "sha256:fe8ea2eca98d8a504f22040d9117347f6b367458366952862ac3586e7d4eeaca"}, ] [package.dependencies] black = ">=22.1.0" flake8 = ">=3" tomli = {version = "*", markers = "python_version < \"3.11\""} [package.extras] develop = ["build", "twine"] [[package]] name = "flake8-bugbear" version = "23.3.12" description = "A plugin for flake8 finding likely bugs and design problems in your program. Contains warnings that don't belong in pyflakes and pycodestyle." optional = false python-versions = ">=3.7" files = [ {file = "flake8-bugbear-23.3.12.tar.gz", hash = "sha256:e3e7f74c8a49ad3794a7183353026dabd68c74030d5f46571f84c1fb0eb79363"}, {file = "flake8_bugbear-23.3.12-py3-none-any.whl", hash = "sha256:beb5c7efcd7ccc2039ef66a77bb8db925e7be3531ff1cb4d0b7030d0e2113d72"}, ] [package.dependencies] attrs = ">=19.2.0" flake8 = ">=3.0.0" [package.extras] dev = ["coverage", "hypothesis", "hypothesmith (>=0.2)", "pre-commit", "pytest", "tox"] [[package]] name = "flake8-builtins" version = "2.1.0" description = "Check for python builtins being used as variables or parameters." 
optional = false python-versions = ">=3.7" files = [ {file = "flake8-builtins-2.1.0.tar.gz", hash = "sha256:12ff1ee96dd4e1f3141141ee6c45a5c7d3b3c440d0949e9b8d345c42b39c51d4"}, {file = "flake8_builtins-2.1.0-py3-none-any.whl", hash = "sha256:469e8f03d6d0edf4b1e62b6d5a97dce4598592c8a13ec8f0952e7a185eba50a1"}, ] [package.dependencies] flake8 = "*" [package.extras] test = ["pytest"] [[package]] name = "flake8-cognitive-complexity" version = "0.1.0" description = "An extension for flake8 that validates cognitive functions complexity" optional = false python-versions = ">=3.6" files = [ {file = "flake8_cognitive_complexity-0.1.0.tar.gz", hash = "sha256:f202df054e4f6ff182b659c261922b9c684628a47beb19cb0973c50d6a7831c1"}, ] [package.dependencies] cognitive_complexity = "*" setuptools = "*" [[package]] name = "flake8-expression-complexity" version = "0.0.11" description = "A flake8 extension that checks expressions complexity" optional = false python-versions = ">=3.7" files = [ {file = "flake8_expression_complexity-0.0.11-py3-none-any.whl", hash = "sha256:b56bac37f7dd5d3d102a7111c89f6579c2cbd897b868147794c9ed12aadc627c"}, {file = "flake8_expression_complexity-0.0.11.tar.gz", hash = "sha256:4dd8909fecbc20f53814cdcef9d0b04f61532764278d9b6e8026686812e96631"}, ] [package.dependencies] astpretty = "*" flake8 = "*" [[package]] name = "flake8-functions" version = "0.0.8" description = "A flake8 extension that checks functions" optional = false python-versions = "*" files = [ {file = "flake8_functions-0.0.8-py3-none-any.whl", hash = "sha256:e1a88aa634d1aff6973f8c9dd64f30ab2beaac661e52eea96929ccc7ee7f64df"}, {file = "flake8_functions-0.0.8.tar.gz", hash = "sha256:5446626673a9faecbf389fb411b90bdc87b002c387b72dc097b208e7a58f2a1c"}, ] [package.dependencies] mr-proper = "*" setuptools = "*" [[package]] name = "flake8-import-order" version = "0.18.2" description = "Flake8 and pylama plugin that checks the ordering of import statements." 
optional = false python-versions = "*" files = [ {file = "flake8-import-order-0.18.2.tar.gz", hash = "sha256:e23941f892da3e0c09d711babbb0c73bc735242e9b216b726616758a920d900e"}, {file = "flake8_import_order-0.18.2-py2.py3-none-any.whl", hash = "sha256:82ed59f1083b629b030ee9d3928d9e06b6213eb196fe745b3a7d4af2168130df"}, ] [package.dependencies] pycodestyle = "*" setuptools = "*" [[package]] name = "flake8-polyfill" version = "1.0.2" description = "Polyfill package for Flake8 plugins" optional = false python-versions = "*" files = [ {file = "flake8-polyfill-1.0.2.tar.gz", hash = "sha256:e44b087597f6da52ec6393a709e7108b2905317d0c0b744cdca6208e670d8eda"}, {file = "flake8_polyfill-1.0.2-py2.py3-none-any.whl", hash = "sha256:12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9"}, ] [package.dependencies] flake8 = "*" [[package]] name = "flake8-variables-names" version = "0.0.5" description = "A flake8 extension that helps to make more readable variables names" optional = false python-versions = ">=3.7" files = [ {file = "flake8_variables_names-0.0.5-py3-none-any.whl", hash = "sha256:e3277031696bbe10b5132b49938cde1d70fcae9561533b7bd7ab8e69cb27addb"}, {file = "flake8_variables_names-0.0.5.tar.gz", hash = "sha256:30133e14ee2300e13a60393a00f74d98110c76070ac67d1ab91606f02824a7e1"}, ] [[package]] name = "ghp-import" version = "2.1.0" description = "Copy your docs directly to the gh-pages branch." 
optional = false python-versions = "*" files = [ {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, ] [package.dependencies] python-dateutil = ">=2.8.1" [package.extras] dev = ["flake8", "markdown", "twine", "wheel"] [[package]] name = "gitdb" version = "4.0.10" description = "Git Object Database" optional = false python-versions = ">=3.7" files = [ {file = "gitdb-4.0.10-py3-none-any.whl", hash = "sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7"}, {file = "gitdb-4.0.10.tar.gz", hash = "sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a"}, ] [package.dependencies] smmap = ">=3.0.1,<6" [[package]] name = "gitpython" version = "3.1.31" description = "GitPython is a Python library used to interact with Git repositories" optional = false python-versions = ">=3.7" files = [ {file = "GitPython-3.1.31-py3-none-any.whl", hash = "sha256:f04893614f6aa713a60cbbe1e6a97403ef633103cdd0ef5eb6efe0deb98dbe8d"}, {file = "GitPython-3.1.31.tar.gz", hash = "sha256:8ce3bcf69adfdf7c7d503e78fd3b1c492af782d58893b650adb2ac8912ddd573"}, ] [package.dependencies] gitdb = ">=4.0.1,<5" typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""} [[package]] name = "greenlet" version = "2.0.2" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" files = [ {file = "greenlet-2.0.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:bdfea8c661e80d3c1c99ad7c3ff74e6e87184895bbaca6ee8cc61209f8b9b85d"}, {file = "greenlet-2.0.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:9d14b83fab60d5e8abe587d51c75b252bcc21683f24699ada8fb275d7712f5a9"}, {file = "greenlet-2.0.2-cp27-cp27m-win32.whl", hash = 
"sha256:6c3acb79b0bfd4fe733dff8bc62695283b57949ebcca05ae5c129eb606ff2d74"}, {file = "greenlet-2.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:283737e0da3f08bd637b5ad058507e578dd462db259f7f6e4c5c365ba4ee9343"}, {file = "greenlet-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d27ec7509b9c18b6d73f2f5ede2622441de812e7b1a80bbd446cb0633bd3d5ae"}, {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"}, {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"}, {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"}, {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d75209eed723105f9596807495d58d10b3470fa6732dd6756595e89925ce2470"}, {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a51c9751078733d88e013587b108f1b7a1fb106d402fb390740f002b6f6551a"}, {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"}, {file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"}, {file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"}, {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"}, {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"}, {file = 
"greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"}, {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:eff4eb9b7eb3e4d0cae3d28c283dc16d9bed6b193c2e1ace3ed86ce48ea8df19"}, {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5454276c07d27a740c5892f4907c86327b632127dd9abec42ee62e12427ff7e3"}, {file = "greenlet-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:7cafd1208fdbe93b67c7086876f061f660cfddc44f404279c1585bbf3cdc64c5"}, {file = "greenlet-2.0.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:910841381caba4f744a44bf81bfd573c94e10b3045ee00de0cbf436fe50673a6"}, {file = "greenlet-2.0.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:18a7f18b82b52ee85322d7a7874e676f34ab319b9f8cce5de06067384aa8ff43"}, {file = "greenlet-2.0.2-cp35-cp35m-win32.whl", hash = "sha256:03a8f4f3430c3b3ff8d10a2a86028c660355ab637cee9333d63d66b56f09d52a"}, {file = "greenlet-2.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:4b58adb399c4d61d912c4c331984d60eb66565175cdf4a34792cd9600f21b394"}, {file = "greenlet-2.0.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:703f18f3fda276b9a916f0934d2fb6d989bf0b4fb5a64825260eb9bfd52d78f0"}, {file = "greenlet-2.0.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:32e5b64b148966d9cccc2c8d35a671409e45f195864560829f395a54226408d3"}, {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dd11f291565a81d71dab10b7033395b7a3a5456e637cf997a6f33ebdf06f8db"}, {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0f72c9ddb8cd28532185f54cc1453f2c16fb417a08b53a855c4e6a418edd099"}, {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd021c754b162c0fb55ad5d6b9d960db667faad0fa2ff25bb6e1301b0b6e6a75"}, {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_aarch64.whl", 
hash = "sha256:3c9b12575734155d0c09d6c3e10dbd81665d5c18e1a7c6597df72fd05990c8cf"}, {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b9ec052b06a0524f0e35bd8790686a1da006bd911dd1ef7d50b77bfbad74e292"}, {file = "greenlet-2.0.2-cp36-cp36m-win32.whl", hash = "sha256:dbfcfc0218093a19c252ca8eb9aee3d29cfdcb586df21049b9d777fd32c14fd9"}, {file = "greenlet-2.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:9f35ec95538f50292f6d8f2c9c9f8a3c6540bbfec21c9e5b4b751e0a7c20864f"}, {file = "greenlet-2.0.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:d5508f0b173e6aa47273bdc0a0b5ba055b59662ba7c7ee5119528f466585526b"}, {file = "greenlet-2.0.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:f82d4d717d8ef19188687aa32b8363e96062911e63ba22a0cff7802a8e58e5f1"}, {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9c59a2120b55788e800d82dfa99b9e156ff8f2227f07c5e3012a45a399620b7"}, {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2780572ec463d44c1d3ae850239508dbeb9fed38e294c68d19a24d925d9223ca"}, {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:937e9020b514ceedb9c830c55d5c9872abc90f4b5862f89c0887033ae33c6f73"}, {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:36abbf031e1c0f79dd5d596bfaf8e921c41df2bdf54ee1eed921ce1f52999a86"}, {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:18e98fb3de7dba1c0a852731c3070cf022d14f0d68b4c87a19cc1016f3bb8b33"}, {file = "greenlet-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:3f6ea9bd35eb450837a3d80e77b517ea5bc56b4647f5502cd28de13675ee12f7"}, {file = "greenlet-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7492e2b7bd7c9b9916388d9df23fa49d9b88ac0640db0a5b4ecc2b653bf451e3"}, {file = "greenlet-2.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b864ba53912b6c3ab6bcb2beb19f19edd01a6bfcbdfe1f37ddd1778abfe75a30"}, {file = 
"greenlet-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba2956617f1c42598a308a84c6cf021a90ff3862eddafd20c3333d50f0edb45b"}, {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3a569657468b6f3fb60587e48356fe512c1754ca05a564f11366ac9e306526"}, {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8eab883b3b2a38cc1e050819ef06a7e6344d4a990d24d45bc6f2cf959045a45b"}, {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acd2162a36d3de67ee896c43effcd5ee3de247eb00354db411feb025aa319857"}, {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0bf60faf0bc2468089bdc5edd10555bab6e85152191df713e2ab1fcc86382b5a"}, {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0ef99cdbe2b682b9ccbb964743a6aca37905fda5e0452e5ee239b1654d37f2a"}, {file = "greenlet-2.0.2-cp38-cp38-win32.whl", hash = "sha256:b80f600eddddce72320dbbc8e3784d16bd3fb7b517e82476d8da921f27d4b249"}, {file = "greenlet-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:4d2e11331fc0c02b6e84b0d28ece3a36e0548ee1a1ce9ddde03752d9b79bba40"}, {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:88d9ab96491d38a5ab7c56dd7a3cc37d83336ecc564e4e8816dbed12e5aaefc8"}, {file = "greenlet-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:561091a7be172ab497a3527602d467e2b3fbe75f9e783d8b8ce403fa414f71a6"}, {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:971ce5e14dc5e73715755d0ca2975ac88cfdaefcaab078a284fea6cfabf866df"}, {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be4ed120b52ae4d974aa40215fcdfde9194d63541c7ded40ee12eb4dda57b76b"}, {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94c817e84245513926588caf1152e3b559ff794d505555211ca041f032abbb6b"}, {file = 
"greenlet-2.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1a819eef4b0e0b96bb0d98d797bef17dc1b4a10e8d7446be32d1da33e095dbb8"}, {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7efde645ca1cc441d6dc4b48c0f7101e8d86b54c8530141b09fd31cef5149ec9"}, {file = "greenlet-2.0.2-cp39-cp39-win32.whl", hash = "sha256:ea9872c80c132f4663822dd2a08d404073a5a9b5ba6155bea72fb2a79d1093b5"}, {file = "greenlet-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:db1a39669102a1d8d12b57de2bb7e2ec9066a6f2b3da35ae511ff93b01b5d564"}, {file = "greenlet-2.0.2.tar.gz", hash = "sha256:e7c8dc13af7db097bed64a051d2dd49e9f0af495c26995c00a9ee842690d34c0"}, ] [package.extras] docs = ["Sphinx", "docutils (<0.18)"] test = ["objgraph", "psutil"] [[package]] name = "griffe" version = "0.29.0" description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." optional = false python-versions = ">=3.7" files = [ {file = "griffe-0.29.0-py3-none-any.whl", hash = "sha256:e62ff34b04630c2382e2e277301cb2c29221fb09c04028e62ef35afccc64344b"}, {file = "griffe-0.29.0.tar.gz", hash = "sha256:6fc892aaa251b3761e3a8d2f5893758e1850ec5d81d4605c4557be0666202a0b"}, ] [package.dependencies] cached-property = {version = "*", markers = "python_version < \"3.8\""} colorama = ">=0.4" [[package]] name = "h11" version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.7" files = [ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, ] [package.dependencies] typing-extensions = {version = "*", markers = "python_version < \"3.8\""} [[package]] name = "httpcore" version = "0.17.2" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.7" files = [ {file = "httpcore-0.17.2-py3-none-any.whl", hash = "sha256:5581b9c12379c4288fe70f43c710d16060c10080617001e6b22a3b6dbcbefd36"}, {file = "httpcore-0.17.2.tar.gz", hash = "sha256:125f8375ab60036db632f34f4b627a9ad085048eef7cb7d2616fea0f739f98af"}, ] [package.dependencies] anyio = ">=3.0,<5.0" certifi = "*" h11 = ">=0.13,<0.15" sniffio = "==1.*" [package.extras] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] [[package]] name = "httpx" version = "0.24.1" description = "The next generation HTTP client." optional = false python-versions = ">=3.7" files = [ {file = "httpx-0.24.1-py3-none-any.whl", hash = "sha256:06781eb9ac53cde990577af654bd990a4949de37a28bdb4a230d434f3a30b9bd"}, {file = "httpx-0.24.1.tar.gz", hash = "sha256:5853a43053df830c20f8110c5e69fe44d035d850b2dfe795e196f00fdb774bdd"}, ] [package.dependencies] certifi = "*" httpcore = ">=0.15.0,<0.18.0" idna = "*" sniffio = "*" [package.extras] brotli = ["brotli", "brotlicffi"] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] [[package]] name = "identify" version = "2.5.24" description = "File identification library for Python" optional = false python-versions = ">=3.7" files = [ {file = "identify-2.5.24-py2.py3-none-any.whl", hash = "sha256:986dbfb38b1140e763e413e6feb44cd731faf72d1909543178aa79b0e258265d"}, {file = "identify-2.5.24.tar.gz", hash = "sha256:0aac67d5b4812498056d28a9a512a483f5085cc28640b02b258a59dac34301d4"}, ] [package.extras] license = ["ukkonen"] [[package]] name = "idna" version = "3.4" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" files = [ {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, ] [[package]] name = "importlib-metadata" 
version = "6.6.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.7" files = [ {file = "importlib_metadata-6.6.0-py3-none-any.whl", hash = "sha256:43dd286a2cd8995d5eaef7fee2066340423b818ed3fd70adf0bad5f1fac53fed"}, {file = "importlib_metadata-6.6.0.tar.gz", hash = "sha256:92501cdf9cc66ebd3e612f1b4f0c0765dfa42f0fa38ffb319b6bd84dd675d705"}, ] [package.dependencies] typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} zipp = ">=0.5" [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] [[package]] name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] [[package]] name = "jinja2" version = "3.1.2" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" files = [ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, ] [package.dependencies] MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] [[package]] name = "markdown" version = "3.3.7" description = "Python implementation of Markdown." 
optional = false python-versions = ">=3.6" files = [ {file = "Markdown-3.3.7-py3-none-any.whl", hash = "sha256:f5da449a6e1c989a4cea2631aa8ee67caa5a2ef855d551c88f9e309f4634c621"}, {file = "Markdown-3.3.7.tar.gz", hash = "sha256:cbb516f16218e643d8e0a95b309f77eb118cb138d39a4f27851e6a63581db874"}, ] [package.dependencies] importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} [package.extras] testing = ["coverage", "pyyaml"] [[package]] name = "markdown-it-py" version = "2.2.0" description = "Python port of markdown-it. Markdown parsing, done right!" optional = false python-versions = ">=3.7" files = [ {file = "markdown-it-py-2.2.0.tar.gz", hash = "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"}, {file = "markdown_it_py-2.2.0-py3-none-any.whl", hash = "sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30"}, ] [package.dependencies] mdurl = ">=0.1,<1.0" typing_extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} [package.extras] benchmarking = ["psutil", "pytest", "pytest-benchmark"] code-style = ["pre-commit (>=3.0,<4.0)"] compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] linkify = ["linkify-it-py (>=1,<3)"] plugins = ["mdit-py-plugins"] profiling = ["gprof2dot"] rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] [[package]] name = "markupsafe" version = "2.1.3" description = "Safely add untrusted strings to HTML/XML markup." 
optional = false python-versions = ">=3.7" files = [ {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, {file 
= "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, {file = 
"MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, ] [[package]] name = "mccabe" version = "0.6.1" description = "McCabe checker, plugin for flake8" optional = false python-versions = "*" files = [ {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = 
"sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, ] [[package]] name = "mdurl" version = "0.1.2" description = "Markdown URL utilities" optional = false python-versions = ">=3.7" files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, ] [[package]] name = "mergedeep" version = "1.3.4" description = "A deep merge function for 🐍." optional = false python-versions = ">=3.6" files = [ {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, ] [[package]] name = "mkdocs" version = "1.4.3" description = "Project documentation with Markdown." 
optional = false python-versions = ">=3.7" files = [ {file = "mkdocs-1.4.3-py3-none-any.whl", hash = "sha256:6ee46d309bda331aac915cd24aab882c179a933bd9e77b80ce7d2eaaa3f689dd"}, {file = "mkdocs-1.4.3.tar.gz", hash = "sha256:5955093bbd4dd2e9403c5afaf57324ad8b04f16886512a3ee6ef828956481c57"}, ] [package.dependencies] click = ">=7.0" colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} ghp-import = ">=1.0" importlib-metadata = {version = ">=4.3", markers = "python_version < \"3.10\""} jinja2 = ">=2.11.1" markdown = ">=3.2.1,<3.4" mergedeep = ">=1.3.4" packaging = ">=20.5" pyyaml = ">=5.1" pyyaml-env-tag = ">=0.1" typing-extensions = {version = ">=3.10", markers = "python_version < \"3.8\""} watchdog = ">=2.0" [package.extras] i18n = ["babel (>=2.9.0)"] min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-import (==1.0)", "importlib-metadata (==4.3)", "jinja2 (==2.11.1)", "markdown (==3.2.1)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "packaging (==20.5)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "typing-extensions (==3.10)", "watchdog (==2.0)"] [[package]] name = "mkdocs-autorefs" version = "0.4.1" description = "Automatically link across pages in MkDocs." 
optional = false python-versions = ">=3.7" files = [ {file = "mkdocs-autorefs-0.4.1.tar.gz", hash = "sha256:70748a7bd025f9ecd6d6feeba8ba63f8e891a1af55f48e366d6d6e78493aba84"}, {file = "mkdocs_autorefs-0.4.1-py3-none-any.whl", hash = "sha256:a2248a9501b29dc0cc8ba4c09f4f47ff121945f6ce33d760f145d6f89d313f5b"}, ] [package.dependencies] Markdown = ">=3.3" mkdocs = ">=1.1" [[package]] name = "mkdocs-gen-files" version = "0.5.0" description = "MkDocs plugin to programmatically generate documentation pages during the build" optional = false python-versions = ">=3.7" files = [ {file = "mkdocs_gen_files-0.5.0-py3-none-any.whl", hash = "sha256:7ac060096f3f40bd19039e7277dd3050be9a453c8ac578645844d4d91d7978ea"}, {file = "mkdocs_gen_files-0.5.0.tar.gz", hash = "sha256:4c7cf256b5d67062a788f6b1d035e157fc1a9498c2399be9af5257d4ff4d19bc"}, ] [package.dependencies] mkdocs = ">=1.0.3" [[package]] name = "mkdocs-literate-nav" version = "0.6.0" description = "MkDocs plugin to specify the navigation in Markdown instead of YAML" optional = false python-versions = ">=3.7" files = [ {file = "mkdocs_literate_nav-0.6.0-py3-none-any.whl", hash = "sha256:8c1b84714e5974da5e44e011ec0069275ae7647270c13a679662cf6ffce675a4"}, {file = "mkdocs_literate_nav-0.6.0.tar.gz", hash = "sha256:81ccbea18163ae8e10bd0bd39237fe70c32a1f2dff6c170779f5d52dd98a0470"}, ] [package.dependencies] mkdocs = ">=1.0.3" [[package]] name = "mkdocs-material" version = "9.1.16" description = "Documentation that simply works" optional = false python-versions = ">=3.7" files = [ {file = "mkdocs_material-9.1.16-py3-none-any.whl", hash = "sha256:f9e62558a6b01ffac314423cbc223d970c25fbc78999860226245b64e64d6751"}, {file = "mkdocs_material-9.1.16.tar.gz", hash = "sha256:1021bfea20f00a9423530c8c2ae9be3c78b80f5a527b3f822e6de3d872e5ab79"}, ] [package.dependencies] colorama = ">=0.4" jinja2 = ">=3.0" markdown = ">=3.2" mkdocs = ">=1.4.2" mkdocs-material-extensions = ">=1.1" pygments = ">=2.14" pymdown-extensions = ">=9.9.1" regex = 
">=2022.4.24" requests = ">=2.26" [[package]] name = "mkdocs-material-extensions" version = "1.1.1" description = "Extension pack for Python Markdown and MkDocs Material." optional = false python-versions = ">=3.7" files = [ {file = "mkdocs_material_extensions-1.1.1-py3-none-any.whl", hash = "sha256:e41d9f38e4798b6617ad98ca8f7f1157b1e4385ac1459ca1e4ea219b556df945"}, {file = "mkdocs_material_extensions-1.1.1.tar.gz", hash = "sha256:9c003da71e2cc2493d910237448c672e00cefc800d3d6ae93d2fc69979e3bd93"}, ] [[package]] name = "mkdocs-section-index" version = "0.3.5" description = "MkDocs plugin to allow clickable sections that lead to an index page" optional = false python-versions = ">=3.7" files = [ {file = "mkdocs_section_index-0.3.5-py3-none-any.whl", hash = "sha256:1f6359287b0a823d6297cf1cb6c0a49ed75851d0d1cea8b425b207a45ce10141"}, {file = "mkdocs_section_index-0.3.5.tar.gz", hash = "sha256:fa8b1ce0649326b1873c6460c1df2bb0c4825fd21e3dd416f13ec212d31edf12"}, ] [package.dependencies] mkdocs = ">=1.0.3" [[package]] name = "mkdocstrings" version = "0.22.0" description = "Automatic documentation from sources, for MkDocs." 
optional = false python-versions = ">=3.7" files = [ {file = "mkdocstrings-0.22.0-py3-none-any.whl", hash = "sha256:2d4095d461554ff6a778fdabdca3c00c468c2f1459d469f7a7f622a2b23212ba"}, {file = "mkdocstrings-0.22.0.tar.gz", hash = "sha256:82a33b94150ebb3d4b5c73bab4598c3e21468c79ec072eff6931c8f3bfc38256"}, ] [package.dependencies] importlib-metadata = {version = ">=4.6", markers = "python_version < \"3.10\""} Jinja2 = ">=2.11.1" Markdown = ">=3.3" MarkupSafe = ">=1.1" mkdocs = ">=1.2" mkdocs-autorefs = ">=0.3.1" mkdocstrings-python = {version = ">=0.5.2", optional = true, markers = "extra == \"python\""} pymdown-extensions = ">=6.3" typing-extensions = {version = ">=4.1", markers = "python_version < \"3.10\""} [package.extras] crystal = ["mkdocstrings-crystal (>=0.3.4)"] python = ["mkdocstrings-python (>=0.5.2)"] python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"] [[package]] name = "mkdocstrings-python" version = "1.1.2" description = "A Python handler for mkdocstrings." optional = false python-versions = ">=3.7" files = [ {file = "mkdocstrings_python-1.1.2-py3-none-any.whl", hash = "sha256:c2b652a850fec8e85034a9cdb3b45f8ad1a558686edc20ed1f40b4e17e62070f"}, {file = "mkdocstrings_python-1.1.2.tar.gz", hash = "sha256:f28bdcacb9bcdb44b6942a5642c1ea8b36870614d33e29e3c923e204a8d8ed61"}, ] [package.dependencies] griffe = ">=0.24" mkdocstrings = ">=0.20" [[package]] name = "mr-proper" version = "0.0.7" description = "Static Python code analyzer, that tries to check if functions in code are pure or not and why." 
optional = false python-versions = "*" files = [ {file = "mr_proper-0.0.7-py3-none-any.whl", hash = "sha256:74a1b60240c46f10ba518707ef72811a01e5c270da0a78b5dd2dd923d99fdb14"}, {file = "mr_proper-0.0.7.tar.gz", hash = "sha256:03b517b19e617537f711ce418b125e5f2efd82ec881539cdee83195c78c14a02"}, ] [package.dependencies] click = ">=7.1.2" setuptools = "*" stdlib-list = ">=0.5.0" typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""} [[package]] name = "mypy" version = "0.982" description = "Optional static typing for Python" optional = false python-versions = ">=3.7" files = [ {file = "mypy-0.982-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5085e6f442003fa915aeb0a46d4da58128da69325d8213b4b35cc7054090aed5"}, {file = "mypy-0.982-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:41fd1cf9bc0e1c19b9af13a6580ccb66c381a5ee2cf63ee5ebab747a4badeba3"}, {file = "mypy-0.982-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f793e3dd95e166b66d50e7b63e69e58e88643d80a3dcc3bcd81368e0478b089c"}, {file = "mypy-0.982-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86ebe67adf4d021b28c3f547da6aa2cce660b57f0432617af2cca932d4d378a6"}, {file = "mypy-0.982-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:175f292f649a3af7082fe36620369ffc4661a71005aa9f8297ea473df5772046"}, {file = "mypy-0.982-cp310-cp310-win_amd64.whl", hash = "sha256:8ee8c2472e96beb1045e9081de8e92f295b89ac10c4109afdf3a23ad6e644f3e"}, {file = "mypy-0.982-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:58f27ebafe726a8e5ccb58d896451dd9a662a511a3188ff6a8a6a919142ecc20"}, {file = "mypy-0.982-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6af646bd46f10d53834a8e8983e130e47d8ab2d4b7a97363e35b24e1d588947"}, {file = "mypy-0.982-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e7aeaa763c7ab86d5b66ff27f68493d672e44c8099af636d433a7f3fa5596d40"}, {file = "mypy-0.982-cp37-cp37m-win_amd64.whl", hash = 
"sha256:724d36be56444f569c20a629d1d4ee0cb0ad666078d59bb84f8f887952511ca1"}, {file = "mypy-0.982-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:14d53cdd4cf93765aa747a7399f0961a365bcddf7855d9cef6306fa41de01c24"}, {file = "mypy-0.982-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:26ae64555d480ad4b32a267d10cab7aec92ff44de35a7cd95b2b7cb8e64ebe3e"}, {file = "mypy-0.982-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6389af3e204975d6658de4fb8ac16f58c14e1bacc6142fee86d1b5b26aa52bda"}, {file = "mypy-0.982-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b35ce03a289480d6544aac85fa3674f493f323d80ea7226410ed065cd46f206"}, {file = "mypy-0.982-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c6e564f035d25c99fd2b863e13049744d96bd1947e3d3d2f16f5828864506763"}, {file = "mypy-0.982-cp38-cp38-win_amd64.whl", hash = "sha256:cebca7fd333f90b61b3ef7f217ff75ce2e287482206ef4a8b18f32b49927b1a2"}, {file = "mypy-0.982-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a705a93670c8b74769496280d2fe6cd59961506c64f329bb179970ff1d24f9f8"}, {file = "mypy-0.982-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:75838c649290d83a2b83a88288c1eb60fe7a05b36d46cbea9d22efc790002146"}, {file = "mypy-0.982-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:91781eff1f3f2607519c8b0e8518aad8498af1419e8442d5d0afb108059881fc"}, {file = "mypy-0.982-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaa97b9ddd1dd9901a22a879491dbb951b5dec75c3b90032e2baa7336777363b"}, {file = "mypy-0.982-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a692a8e7d07abe5f4b2dd32d731812a0175626a90a223d4b58f10f458747dd8a"}, {file = "mypy-0.982-cp39-cp39-win_amd64.whl", hash = "sha256:eb7a068e503be3543c4bd329c994103874fa543c1727ba5288393c21d912d795"}, {file = "mypy-0.982-py3-none-any.whl", hash = "sha256:1021c241e8b6e1ca5a47e4d52601274ac078a89845cfde66c6d5f769819ffa1d"}, {file = "mypy-0.982.tar.gz", hash = 
"sha256:85f7a343542dc8b1ed0a888cdd34dca56462654ef23aa673907305b260b3d746"}, ] [package.dependencies] mypy-extensions = ">=0.4.3" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""} typing-extensions = ">=3.10" [package.extras] dmypy = ["psutil (>=4.0)"] python2 = ["typed-ast (>=1.4.0,<2)"] reports = ["lxml"] [[package]] name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.5" files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] [[package]] name = "mysqlclient" version = "2.1.1" description = "Python interface to MySQL" optional = true python-versions = ">=3.5" files = [ {file = "mysqlclient-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:c1ed71bd6244993b526113cca3df66428609f90e4652f37eb51c33496d478b37"}, {file = "mysqlclient-2.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:c812b67e90082a840efb82a8978369e6e69fc62ce1bda4ca8f3084a9d862308b"}, {file = "mysqlclient-2.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:0d1cd3a5a4d28c222fa199002810e8146cffd821410b67851af4cc80aeccd97c"}, {file = "mysqlclient-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:b355c8b5a7d58f2e909acdbb050858390ee1b0e13672ae759e5e784110022994"}, {file = "mysqlclient-2.1.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:996924f3483fd36a34a5812210c69e71dea5a3d5978d01199b78b7f6d485c855"}, {file = "mysqlclient-2.1.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:dea88c8d3f5a5d9293dfe7f087c16dd350ceb175f2f6631c9cf4caf3e19b7a96"}, {file = "mysqlclient-2.1.1.tar.gz", hash = "sha256:828757e419fb11dd6c5ed2576ec92c3efaa93a0f7c39e263586d1ee779c3d782"}, ] [[package]] name = "nest-asyncio" version = 
"1.5.6" description = "Patch asyncio to allow nested event loops" optional = false python-versions = ">=3.5" files = [ {file = "nest_asyncio-1.5.6-py3-none-any.whl", hash = "sha256:b9a953fb40dceaa587d109609098db21900182b16440652454a146cffb06e8b8"}, {file = "nest_asyncio-1.5.6.tar.gz", hash = "sha256:d267cc1ff794403f7df692964d1d2a3fa9418ffea2a3f6859a439ff482fef290"}, ] [[package]] name = "nodeenv" version = "1.8.0" description = "Node.js virtual environment builder" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" files = [ {file = "nodeenv-1.8.0-py2.py3-none-any.whl", hash = "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec"}, {file = "nodeenv-1.8.0.tar.gz", hash = "sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2"}, ] [package.dependencies] setuptools = "*" [[package]] name = "orjson" version = "3.9.1" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = true python-versions = ">=3.7" files = [ {file = "orjson-3.9.1-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c4434b7b786fdc394b95d029fb99949d7c2b05bbd4bf5cb5e3906be96ffeee3b"}, {file = "orjson-3.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09faf14f74ed47e773fa56833be118e04aa534956f661eb491522970b7478e3b"}, {file = "orjson-3.9.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:503eb86a8d53a187fe66aa80c69295a3ca35475804da89a9547e4fce5f803822"}, {file = "orjson-3.9.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:20f2804b5a1dbd3609c086041bd243519224d47716efd7429db6c03ed28b7cc3"}, {file = "orjson-3.9.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fd828e0656615a711c4cc4da70f3cac142e66a6703ba876c20156a14e28e3fa"}, {file = "orjson-3.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ec53d648176f873203b9c700a0abacab33ca1ab595066e9d616f98cdc56f4434"}, {file = "orjson-3.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e186ae76b0d97c505500664193ddf508c13c1e675d9b25f1f4414a7606100da6"}, {file = "orjson-3.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d4edee78503016f4df30aeede0d999b3cb11fb56f47e9db0e487bce0aaca9285"}, {file = "orjson-3.9.1-cp310-none-win_amd64.whl", hash = "sha256:a4cc5d21e68af982d9a2528ac61e604f092c60eed27aef3324969c68f182ec7e"}, {file = "orjson-3.9.1-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:761b6efd33c49de20dd73ce64cc59da62c0dab10aa6015f582680e0663cc792c"}, {file = "orjson-3.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31229f9d0b8dc2ef7ee7e4393f2e4433a28e16582d4b25afbfccc9d68dc768f8"}, {file = "orjson-3.9.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0b7ab18d55ecb1de543d452f0a5f8094b52282b916aa4097ac11a4c79f317b86"}, {file = "orjson-3.9.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db774344c39041f4801c7dfe03483df9203cbd6c84e601a65908e5552228dd25"}, {file = "orjson-3.9.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ae47ef8c0fe89c4677db7e9e1fb2093ca6e66c3acbee5442d84d74e727edad5e"}, {file = "orjson-3.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:103952c21575b9805803c98add2eaecd005580a1e746292ed2ec0d76dd3b9746"}, {file = "orjson-3.9.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2cb0121e6f2c9da3eddf049b99b95fef0adf8480ea7cb544ce858706cdf916eb"}, {file = "orjson-3.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:24d4ddaa2876e657c0fd32902b5c451fd2afc35159d66a58da7837357044b8c2"}, {file = "orjson-3.9.1-cp311-none-win_amd64.whl", hash = "sha256:0b53b5f72cf536dd8aa4fc4c95e7e09a7adb119f8ff8ee6cc60f735d7740ad6a"}, {file = 
"orjson-3.9.1-cp37-cp37m-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d4b68d01a506242316a07f1d2f29fb0a8b36cee30a7c35076f1ef59dce0890c1"}, {file = "orjson-3.9.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9dd4abe6c6fd352f00f4246d85228f6a9847d0cc14f4d54ee553718c225388f"}, {file = "orjson-3.9.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9e20bca5e13041e31ceba7a09bf142e6d63c8a7467f5a9c974f8c13377c75af2"}, {file = "orjson-3.9.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8ae0467d01eb1e4bcffef4486d964bfd1c2e608103e75f7074ed34be5df48cc"}, {file = "orjson-3.9.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:06f6ab4697fab090517f295915318763a97a12ee8186054adf21c1e6f6abbd3d"}, {file = "orjson-3.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8515867713301fa065c58ec4c9053ba1a22c35113ab4acad555317b8fd802e50"}, {file = "orjson-3.9.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:393d0697d1dfa18d27d193e980c04fdfb672c87f7765b87952f550521e21b627"}, {file = "orjson-3.9.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d96747662d3666f79119e5d28c124e7d356c7dc195cd4b09faea4031c9079dc9"}, {file = "orjson-3.9.1-cp37-none-win_amd64.whl", hash = "sha256:6d173d3921dd58a068c88ec22baea7dbc87a137411501618b1292a9d6252318e"}, {file = "orjson-3.9.1-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d1c2b0b4246c992ce2529fc610a446b945f1429445ece1c1f826a234c829a918"}, {file = "orjson-3.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19f70ba1f441e1c4bb1a581f0baa092e8b3e3ce5b2aac2e1e090f0ac097966da"}, {file = "orjson-3.9.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:375d65f002e686212aac42680aed044872c45ee4bc656cf63d4a215137a6124a"}, {file = 
"orjson-3.9.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4751cee4a7b1daeacb90a7f5adf2170ccab893c3ab7c5cea58b45a13f89b30b3"}, {file = "orjson-3.9.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78d9a2a4b2302d5ebc3695498ebc305c3568e5ad4f3501eb30a6405a32d8af22"}, {file = "orjson-3.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46b4facc32643b2689dfc292c0c463985dac4b6ab504799cf51fc3c6959ed668"}, {file = "orjson-3.9.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ec7c8a0f1bf35da0d5fd14f8956f3b82a9a6918a3c6963d718dfd414d6d3b604"}, {file = "orjson-3.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d3a40b0fbe06ccd4d6a99e523d20b47985655bcada8d1eba485b1b32a43e4904"}, {file = "orjson-3.9.1-cp38-none-win_amd64.whl", hash = "sha256:402f9d3edfec4560a98880224ec10eba4c5f7b4791e4bc0d4f4d8df5faf2a006"}, {file = "orjson-3.9.1-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:49c0d78dcd34626e2e934f1192d7c052b94e0ecadc5f386fd2bda6d2e03dadf5"}, {file = "orjson-3.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:125f63e56d38393daa0a1a6dc6fedefca16c538614b66ea5997c3bd3af35ef26"}, {file = "orjson-3.9.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:08927970365d2e1f3ce4894f9ff928a7b865d53f26768f1bbdd85dd4fee3e966"}, {file = "orjson-3.9.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f9a744e212d4780ecd67f4b6b128b2e727bee1df03e7059cddb2dfe1083e7dc4"}, {file = "orjson-3.9.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d1dbf36db7240c61eec98c8d21545d671bce70be0730deb2c0d772e06b71af3"}, {file = "orjson-3.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80a1e384626f76b66df615f7bb622a79a25c166d08c5d2151ffd41f24c4cc104"}, {file = "orjson-3.9.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:15d28872fb055bf17ffca913826e618af61b2f689d2b170f72ecae1a86f80d52"}, {file = "orjson-3.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1e4d905338f9ef32c67566929dfbfbb23cc80287af8a2c38930fb0eda3d40b76"}, {file = "orjson-3.9.1-cp39-none-win_amd64.whl", hash = "sha256:48a27da6c7306965846565cc385611d03382bbd84120008653aa2f6741e2105d"}, {file = "orjson-3.9.1.tar.gz", hash = "sha256:db373a25ec4a4fccf8186f9a72a1b3442837e40807a736a815ab42481e83b7d0"}, ] [[package]] name = "packaging" version = "23.1" description = "Core utilities for Python packages" optional = false python-versions = ">=3.7" files = [ {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, ] [[package]] name = "pathspec" version = "0.11.1" description = "Utility library for gitignore style pattern matching of file paths." optional = false python-versions = ">=3.7" files = [ {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"}, {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, ] [[package]] name = "pbr" version = "5.11.1" description = "Python Build Reasonableness" optional = false python-versions = ">=2.6" files = [ {file = "pbr-5.11.1-py2.py3-none-any.whl", hash = "sha256:567f09558bae2b3ab53cb3c1e2e33e726ff3338e7bae3db5dc954b3a44eef12b"}, {file = "pbr-5.11.1.tar.gz", hash = "sha256:aefc51675b0b533d56bb5fd1c8c6c0522fe31896679882e1c4c63d5e4a0fccb3"}, ] [[package]] name = "platformdirs" version = "3.6.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
optional = false python-versions = ">=3.7" files = [ {file = "platformdirs-3.6.0-py3-none-any.whl", hash = "sha256:ffa199e3fbab8365778c4a10e1fbf1b9cd50707de826eb304b50e57ec0cc8d38"}, {file = "platformdirs-3.6.0.tar.gz", hash = "sha256:57e28820ca8094678b807ff529196506d7a21e17156cb1cddb3e74cebce54640"}, ] [package.dependencies] typing-extensions = {version = ">=4.6.3", markers = "python_version < \"3.8\""} [package.extras] docs = ["furo (>=2023.5.20)", "proselint (>=0.13)", "sphinx (>=7.0.1)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)"] test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.3.1)", "pytest-cov (>=4.1)", "pytest-mock (>=3.10)"] [[package]] name = "pluggy" version = "1.0.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.6" files = [ {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, ] [package.dependencies] importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} [package.extras] dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] [[package]] name = "pre-commit" version = "2.21.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." 
optional = false python-versions = ">=3.7" files = [ {file = "pre_commit-2.21.0-py2.py3-none-any.whl", hash = "sha256:e2f91727039fc39a92f58a588a25b87f936de6567eed4f0e673e0507edc75bad"}, {file = "pre_commit-2.21.0.tar.gz", hash = "sha256:31ef31af7e474a8d8995027fefdfcf509b5c913ff31f2015b4ec4beb26a6f658"}, ] [package.dependencies] cfgv = ">=2.0.0" identify = ">=1.0.0" importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} nodeenv = ">=0.11.1" pyyaml = ">=5.1" virtualenv = ">=20.10.0" [[package]] name = "psycopg2-binary" version = "2.9.6" description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true python-versions = ">=3.6" files = [ {file = "psycopg2-binary-2.9.6.tar.gz", hash = "sha256:1f64dcfb8f6e0c014c7f55e51c9759f024f70ea572fbdef123f85318c297947c"}, {file = "psycopg2_binary-2.9.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d26e0342183c762de3276cca7a530d574d4e25121ca7d6e4a98e4f05cb8e4df7"}, {file = "psycopg2_binary-2.9.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c48d8f2db17f27d41fb0e2ecd703ea41984ee19362cbce52c097963b3a1b4365"}, {file = "psycopg2_binary-2.9.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffe9dc0a884a8848075e576c1de0290d85a533a9f6e9c4e564f19adf8f6e54a7"}, {file = "psycopg2_binary-2.9.6-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a76e027f87753f9bd1ab5f7c9cb8c7628d1077ef927f5e2446477153a602f2c"}, {file = "psycopg2_binary-2.9.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6460c7a99fc939b849431f1e73e013d54aa54293f30f1109019c56a0b2b2ec2f"}, {file = "psycopg2_binary-2.9.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae102a98c547ee2288637af07393dd33f440c25e5cd79556b04e3fca13325e5f"}, {file = "psycopg2_binary-2.9.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9972aad21f965599ed0106f65334230ce826e5ae69fda7cbd688d24fa922415e"}, {file = 
"psycopg2_binary-2.9.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7a40c00dbe17c0af5bdd55aafd6ff6679f94a9be9513a4c7e071baf3d7d22a70"}, {file = "psycopg2_binary-2.9.6-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:cacbdc5839bdff804dfebc058fe25684cae322987f7a38b0168bc1b2df703fb1"}, {file = "psycopg2_binary-2.9.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7f0438fa20fb6c7e202863e0d5ab02c246d35efb1d164e052f2f3bfe2b152bd0"}, {file = "psycopg2_binary-2.9.6-cp310-cp310-win32.whl", hash = "sha256:b6c8288bb8a84b47e07013bb4850f50538aa913d487579e1921724631d02ea1b"}, {file = "psycopg2_binary-2.9.6-cp310-cp310-win_amd64.whl", hash = "sha256:61b047a0537bbc3afae10f134dc6393823882eb263088c271331602b672e52e9"}, {file = "psycopg2_binary-2.9.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:964b4dfb7c1c1965ac4c1978b0f755cc4bd698e8aa2b7667c575fb5f04ebe06b"}, {file = "psycopg2_binary-2.9.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afe64e9b8ea66866a771996f6ff14447e8082ea26e675a295ad3bdbffdd72afb"}, {file = "psycopg2_binary-2.9.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15e2ee79e7cf29582ef770de7dab3d286431b01c3bb598f8e05e09601b890081"}, {file = "psycopg2_binary-2.9.6-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfa74c903a3c1f0d9b1c7e7b53ed2d929a4910e272add6700c38f365a6002820"}, {file = "psycopg2_binary-2.9.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b83456c2d4979e08ff56180a76429263ea254c3f6552cd14ada95cff1dec9bb8"}, {file = "psycopg2_binary-2.9.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0645376d399bfd64da57148694d78e1f431b1e1ee1054872a5713125681cf1be"}, {file = "psycopg2_binary-2.9.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e99e34c82309dd78959ba3c1590975b5d3c862d6f279f843d47d26ff89d7d7e1"}, {file = "psycopg2_binary-2.9.6-cp311-cp311-musllinux_1_1_i686.whl", hash = 
"sha256:4ea29fc3ad9d91162c52b578f211ff1c931d8a38e1f58e684c45aa470adf19e2"}, {file = "psycopg2_binary-2.9.6-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:4ac30da8b4f57187dbf449294d23b808f8f53cad6b1fc3623fa8a6c11d176dd0"}, {file = "psycopg2_binary-2.9.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e78e6e2a00c223e164c417628572a90093c031ed724492c763721c2e0bc2a8df"}, {file = "psycopg2_binary-2.9.6-cp311-cp311-win32.whl", hash = "sha256:1876843d8e31c89c399e31b97d4b9725a3575bb9c2af92038464231ec40f9edb"}, {file = "psycopg2_binary-2.9.6-cp311-cp311-win_amd64.whl", hash = "sha256:b4b24f75d16a89cc6b4cdff0eb6a910a966ecd476d1e73f7ce5985ff1328e9a6"}, {file = "psycopg2_binary-2.9.6-cp36-cp36m-win32.whl", hash = "sha256:498807b927ca2510baea1b05cc91d7da4718a0f53cb766c154c417a39f1820a0"}, {file = "psycopg2_binary-2.9.6-cp36-cp36m-win_amd64.whl", hash = "sha256:0d236c2825fa656a2d98bbb0e52370a2e852e5a0ec45fc4f402977313329174d"}, {file = "psycopg2_binary-2.9.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:34b9ccdf210cbbb1303c7c4db2905fa0319391bd5904d32689e6dd5c963d2ea8"}, {file = "psycopg2_binary-2.9.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84d2222e61f313c4848ff05353653bf5f5cf6ce34df540e4274516880d9c3763"}, {file = "psycopg2_binary-2.9.6-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30637a20623e2a2eacc420059be11527f4458ef54352d870b8181a4c3020ae6b"}, {file = "psycopg2_binary-2.9.6-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8122cfc7cae0da9a3077216528b8bb3629c43b25053284cc868744bfe71eb141"}, {file = "psycopg2_binary-2.9.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38601cbbfe600362c43714482f43b7c110b20cb0f8172422c616b09b85a750c5"}, {file = "psycopg2_binary-2.9.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c7e62ab8b332147a7593a385d4f368874d5fe4ad4e341770d4983442d89603e3"}, {file = 
"psycopg2_binary-2.9.6-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2ab652e729ff4ad76d400df2624d223d6e265ef81bb8aa17fbd63607878ecbee"}, {file = "psycopg2_binary-2.9.6-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:c83a74b68270028dc8ee74d38ecfaf9c90eed23c8959fca95bd703d25b82c88e"}, {file = "psycopg2_binary-2.9.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d4e6036decf4b72d6425d5b29bbd3e8f0ff1059cda7ac7b96d6ac5ed34ffbacd"}, {file = "psycopg2_binary-2.9.6-cp37-cp37m-win32.whl", hash = "sha256:a8c28fd40a4226b4a84bdf2d2b5b37d2c7bd49486b5adcc200e8c7ec991dfa7e"}, {file = "psycopg2_binary-2.9.6-cp37-cp37m-win_amd64.whl", hash = "sha256:51537e3d299be0db9137b321dfb6a5022caaab275775680e0c3d281feefaca6b"}, {file = "psycopg2_binary-2.9.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cf4499e0a83b7b7edcb8dabecbd8501d0d3a5ef66457200f77bde3d210d5debb"}, {file = "psycopg2_binary-2.9.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7e13a5a2c01151f1208d5207e42f33ba86d561b7a89fca67c700b9486a06d0e2"}, {file = "psycopg2_binary-2.9.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e0f754d27fddcfd74006455b6e04e6705d6c31a612ec69ddc040a5468e44b4e"}, {file = "psycopg2_binary-2.9.6-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d57c3fd55d9058645d26ae37d76e61156a27722097229d32a9e73ed54819982a"}, {file = "psycopg2_binary-2.9.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71f14375d6f73b62800530b581aed3ada394039877818b2d5f7fc77e3bb6894d"}, {file = "psycopg2_binary-2.9.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:441cc2f8869a4f0f4bb408475e5ae0ee1f3b55b33f350406150277f7f35384fc"}, {file = "psycopg2_binary-2.9.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:65bee1e49fa6f9cf327ce0e01c4c10f39165ee76d35c846ade7cb0ec6683e303"}, {file = "psycopg2_binary-2.9.6-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:af335bac6b666cc6aea16f11d486c3b794029d9df029967f9938a4bed59b6a19"}, {file = "psycopg2_binary-2.9.6-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:cfec476887aa231b8548ece2e06d28edc87c1397ebd83922299af2e051cf2827"}, {file = "psycopg2_binary-2.9.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:65c07febd1936d63bfde78948b76cd4c2a411572a44ac50719ead41947d0f26b"}, {file = "psycopg2_binary-2.9.6-cp38-cp38-win32.whl", hash = "sha256:4dfb4be774c4436a4526d0c554af0cc2e02082c38303852a36f6456ece7b3503"}, {file = "psycopg2_binary-2.9.6-cp38-cp38-win_amd64.whl", hash = "sha256:02c6e3cf3439e213e4ee930308dc122d6fb4d4bea9aef4a12535fbd605d1a2fe"}, {file = "psycopg2_binary-2.9.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e9182eb20f41417ea1dd8e8f7888c4d7c6e805f8a7c98c1081778a3da2bee3e4"}, {file = "psycopg2_binary-2.9.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8a6979cf527e2603d349a91060f428bcb135aea2be3201dff794813256c274f1"}, {file = "psycopg2_binary-2.9.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8338a271cb71d8da40b023a35d9c1e919eba6cbd8fa20a54b748a332c355d896"}, {file = "psycopg2_binary-2.9.6-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e3ed340d2b858d6e6fb5083f87c09996506af483227735de6964a6100b4e6a54"}, {file = "psycopg2_binary-2.9.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f81e65376e52f03422e1fb475c9514185669943798ed019ac50410fb4c4df232"}, {file = "psycopg2_binary-2.9.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfb13af3c5dd3a9588000910178de17010ebcccd37b4f9794b00595e3a8ddad3"}, {file = "psycopg2_binary-2.9.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4c727b597c6444a16e9119386b59388f8a424223302d0c06c676ec8b4bc1f963"}, {file = "psycopg2_binary-2.9.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4d67fbdaf177da06374473ef6f7ed8cc0a9dc640b01abfe9e8a2ccb1b1402c1f"}, {file = 
"psycopg2_binary-2.9.6-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0892ef645c2fabb0c75ec32d79f4252542d0caec1d5d949630e7d242ca4681a3"}, {file = "psycopg2_binary-2.9.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:02c0f3757a4300cf379eb49f543fb7ac527fb00144d39246ee40e1df684ab514"}, {file = "psycopg2_binary-2.9.6-cp39-cp39-win32.whl", hash = "sha256:c3dba7dab16709a33a847e5cd756767271697041fbe3fe97c215b1fc1f5c9848"}, {file = "psycopg2_binary-2.9.6-cp39-cp39-win_amd64.whl", hash = "sha256:f6a88f384335bb27812293fdb11ac6aee2ca3f51d3c7820fe03de0a304ab6249"}, ] [[package]] name = "py-cpuinfo" version = "9.0.0" description = "Get CPU info with pure Python" optional = false python-versions = "*" files = [ {file = "py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690"}, {file = "py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5"}, ] [[package]] name = "pycodestyle" version = "2.7.0" description = "Python style guide checker" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "pycodestyle-2.7.0-py2.py3-none-any.whl", hash = "sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068"}, {file = "pycodestyle-2.7.0.tar.gz", hash = "sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef"}, ] [[package]] name = "pycparser" version = "2.21" description = "C parser in Python" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, ] [[package]] name = "pydantic" version = "1.10.8" description = "Data validation and settings management using python type hints" optional = false python-versions = ">=3.7" files 
= [ {file = "pydantic-1.10.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1243d28e9b05003a89d72e7915fdb26ffd1d39bdd39b00b7dbe4afae4b557f9d"}, {file = "pydantic-1.10.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0ab53b609c11dfc0c060d94335993cc2b95b2150e25583bec37a49b2d6c6c3f"}, {file = "pydantic-1.10.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9613fadad06b4f3bc5db2653ce2f22e0de84a7c6c293909b48f6ed37b83c61f"}, {file = "pydantic-1.10.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df7800cb1984d8f6e249351139667a8c50a379009271ee6236138a22a0c0f319"}, {file = "pydantic-1.10.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0c6fafa0965b539d7aab0a673a046466d23b86e4b0e8019d25fd53f4df62c277"}, {file = "pydantic-1.10.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e82d4566fcd527eae8b244fa952d99f2ca3172b7e97add0b43e2d97ee77f81ab"}, {file = "pydantic-1.10.8-cp310-cp310-win_amd64.whl", hash = "sha256:ab523c31e22943713d80d8d342d23b6f6ac4b792a1e54064a8d0cf78fd64e800"}, {file = "pydantic-1.10.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:666bdf6066bf6dbc107b30d034615d2627e2121506c555f73f90b54a463d1f33"}, {file = "pydantic-1.10.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:35db5301b82e8661fa9c505c800d0990bc14e9f36f98932bb1d248c0ac5cada5"}, {file = "pydantic-1.10.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90c1e29f447557e9e26afb1c4dbf8768a10cc676e3781b6a577841ade126b85"}, {file = "pydantic-1.10.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93e766b4a8226e0708ef243e843105bf124e21331694367f95f4e3b4a92bbb3f"}, {file = "pydantic-1.10.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:88f195f582851e8db960b4a94c3e3ad25692c1c1539e2552f3df7a9e972ef60e"}, {file = "pydantic-1.10.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:34d327c81e68a1ecb52fe9c8d50c8a9b3e90d3c8ad991bfc8f953fb477d42fb4"}, {file = "pydantic-1.10.8-cp311-cp311-win_amd64.whl", hash = "sha256:d532bf00f381bd6bc62cabc7d1372096b75a33bc197a312b03f5838b4fb84edd"}, {file = "pydantic-1.10.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7d5b8641c24886d764a74ec541d2fc2c7fb19f6da2a4001e6d580ba4a38f7878"}, {file = "pydantic-1.10.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b1f6cb446470b7ddf86c2e57cd119a24959af2b01e552f60705910663af09a4"}, {file = "pydantic-1.10.8-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c33b60054b2136aef8cf190cd4c52a3daa20b2263917c49adad20eaf381e823b"}, {file = "pydantic-1.10.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1952526ba40b220b912cdc43c1c32bcf4a58e3f192fa313ee665916b26befb68"}, {file = "pydantic-1.10.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:bb14388ec45a7a0dc429e87def6396f9e73c8c77818c927b6a60706603d5f2ea"}, {file = "pydantic-1.10.8-cp37-cp37m-win_amd64.whl", hash = "sha256:16f8c3e33af1e9bb16c7a91fc7d5fa9fe27298e9f299cff6cb744d89d573d62c"}, {file = "pydantic-1.10.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1ced8375969673929809d7f36ad322934c35de4af3b5e5b09ec967c21f9f7887"}, {file = "pydantic-1.10.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:93e6bcfccbd831894a6a434b0aeb1947f9e70b7468f274154d03d71fabb1d7c6"}, {file = "pydantic-1.10.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:191ba419b605f897ede9892f6c56fb182f40a15d309ef0142212200a10af4c18"}, {file = "pydantic-1.10.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:052d8654cb65174d6f9490cc9b9a200083a82cf5c3c5d3985db765757eb3b375"}, {file = "pydantic-1.10.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ceb6a23bf1ba4b837d0cfe378329ad3f351b5897c8d4914ce95b85fba96da5a1"}, {file = 
"pydantic-1.10.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f2e754d5566f050954727c77f094e01793bcb5725b663bf628fa6743a5a9108"}, {file = "pydantic-1.10.8-cp38-cp38-win_amd64.whl", hash = "sha256:6a82d6cda82258efca32b40040228ecf43a548671cb174a1e81477195ed3ed56"}, {file = "pydantic-1.10.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e59417ba8a17265e632af99cc5f35ec309de5980c440c255ab1ca3ae96a3e0e"}, {file = "pydantic-1.10.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:84d80219c3f8d4cad44575e18404099c76851bc924ce5ab1c4c8bb5e2a2227d0"}, {file = "pydantic-1.10.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e4148e635994d57d834be1182a44bdb07dd867fa3c2d1b37002000646cc5459"}, {file = "pydantic-1.10.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12f7b0bf8553e310e530e9f3a2f5734c68699f42218bf3568ef49cd9b0e44df4"}, {file = "pydantic-1.10.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:42aa0c4b5c3025483240a25b09f3c09a189481ddda2ea3a831a9d25f444e03c1"}, {file = "pydantic-1.10.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17aef11cc1b997f9d574b91909fed40761e13fac438d72b81f902226a69dac01"}, {file = "pydantic-1.10.8-cp39-cp39-win_amd64.whl", hash = "sha256:66a703d1983c675a6e0fed8953b0971c44dba48a929a2000a493c3772eb61a5a"}, {file = "pydantic-1.10.8-py3-none-any.whl", hash = "sha256:7456eb22ed9aaa24ff3e7b4757da20d9e5ce2a81018c1b3ebd81a0b88a18f3b2"}, {file = "pydantic-1.10.8.tar.gz", hash = "sha256:1410275520dfa70effadf4c21811d755e7ef9bb1f1d077a21958153a92c8d9ca"}, ] [package.dependencies] typing-extensions = ">=4.2.0" [package.extras] dotenv = ["python-dotenv (>=0.10.4)"] email = ["email-validator (>=1.0.3)"] [[package]] name = "pyflakes" version = "2.3.1" description = "passive checker of Python programs" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "pyflakes-2.3.1-py2.py3-none-any.whl", hash = 
"sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3"}, {file = "pyflakes-2.3.1.tar.gz", hash = "sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db"}, ] [[package]] name = "pygments" version = "2.15.1" description = "Pygments is a syntax highlighting package written in Python." optional = false python-versions = ">=3.7" files = [ {file = "Pygments-2.15.1-py3-none-any.whl", hash = "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"}, {file = "Pygments-2.15.1.tar.gz", hash = "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c"}, ] [package.extras] plugins = ["importlib-metadata"] [[package]] name = "pymdown-extensions" version = "10.0.1" description = "Extension pack for Python Markdown." optional = false python-versions = ">=3.7" files = [ {file = "pymdown_extensions-10.0.1-py3-none-any.whl", hash = "sha256:ae66d84013c5d027ce055693e09a4628b67e9dec5bce05727e45b0918e36f274"}, {file = "pymdown_extensions-10.0.1.tar.gz", hash = "sha256:b44e1093a43b8a975eae17b03c3a77aad4681b3b56fce60ce746dbef1944c8cb"}, ] [package.dependencies] markdown = ">=3.2" pyyaml = "*" [[package]] name = "pymysql" version = "1.0.3" description = "Pure Python MySQL Driver" optional = true python-versions = ">=3.7" files = [ {file = "PyMySQL-1.0.3-py3-none-any.whl", hash = "sha256:89fc6ae41c0aeb6e1f7710cdd623702ea2c54d040565767a78b00a5ebb12f4e5"}, {file = "PyMySQL-1.0.3.tar.gz", hash = "sha256:3dda943ef3694068a75d69d071755dbecacee1adf9a1fc5b206830d2b67d25e8"}, ] [package.extras] ed25519 = ["PyNaCl (>=1.4.0)"] rsa = ["cryptography"] [[package]] name = "pytest" version = "7.3.2" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.7" files = [ {file = "pytest-7.3.2-py3-none-any.whl", hash = "sha256:cdcbd012c9312258922f8cd3f1b62a6580fdced17db6014896053d47cddf9295"}, {file = "pytest-7.3.2.tar.gz", hash = 
"sha256:ee990a3cc55ba808b80795a79944756f315c67c12b56abd3ac993a7b8c17030b"}, ] [package.dependencies] colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} iniconfig = "*" packaging = "*" pluggy = ">=0.12,<2.0" tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] [[package]] name = "pytest-asyncio" version = "0.21.0" description = "Pytest support for asyncio" optional = false python-versions = ">=3.7" files = [ {file = "pytest-asyncio-0.21.0.tar.gz", hash = "sha256:2b38a496aef56f56b0e87557ec313e11e1ab9276fc3863f6a7be0f1d0e415e1b"}, {file = "pytest_asyncio-0.21.0-py3-none-any.whl", hash = "sha256:f2b3366b7cd501a4056858bd39349d5af19742aed2d81660b7998b6341c7eb9c"}, ] [package.dependencies] pytest = ">=7.0.0" typing-extensions = {version = ">=3.7.2", markers = "python_version < \"3.8\""} [package.extras] docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"] [[package]] name = "pytest-benchmark" version = "4.0.0" description = "A ``pytest`` fixture for benchmarking code. It will group the tests into rounds that are calibrated to the chosen timer." 
optional = false python-versions = ">=3.7" files = [ {file = "pytest-benchmark-4.0.0.tar.gz", hash = "sha256:fb0785b83efe599a6a956361c0691ae1dbb5318018561af10f3e915caa0048d1"}, {file = "pytest_benchmark-4.0.0-py3-none-any.whl", hash = "sha256:fdb7db64e31c8b277dff9850d2a2556d8b60bcb0ea6524e36e28ffd7c87f71d6"}, ] [package.dependencies] py-cpuinfo = "*" pytest = ">=3.8" [package.extras] aspect = ["aspectlib"] elasticsearch = ["elasticsearch"] histogram = ["pygal", "pygaljs"] [[package]] name = "pytest-cov" version = "4.1.0" description = "Pytest plugin for measuring coverage." optional = false python-versions = ">=3.7" files = [ {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"}, {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"}, ] [package.dependencies] coverage = {version = ">=5.2.1", extras = ["toml"]} pytest = ">=4.6" [package.extras] testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] [[package]] name = "python-dateutil" version = "2.8.2" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, ] [package.dependencies] six = ">=1.5" [[package]] name = "pyyaml" version = "6.0" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.6" files = [ {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, 
{file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, {file = 
"PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, ] [[package]] name = "pyyaml-env-tag" version = "0.1" description = "A custom YAML tag for referencing environment variables in YAML files. 
" optional = false python-versions = ">=3.6" files = [ {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"}, {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"}, ] [package.dependencies] pyyaml = "*" [[package]] name = "regex" version = "2023.6.3" description = "Alternative regular expression module, to replace re." optional = false python-versions = ">=3.6" files = [ {file = "regex-2023.6.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:824bf3ac11001849aec3fa1d69abcb67aac3e150a933963fb12bda5151fe1bfd"}, {file = "regex-2023.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:05ed27acdf4465c95826962528f9e8d41dbf9b1aa8531a387dee6ed215a3e9ef"}, {file = "regex-2023.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b49c764f88a79160fa64f9a7b425620e87c9f46095ef9c9920542ab2495c8bc"}, {file = "regex-2023.6.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8e3f1316c2293e5469f8f09dc2d76efb6c3982d3da91ba95061a7e69489a14ef"}, {file = "regex-2023.6.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:43e1dd9d12df9004246bacb79a0e5886b3b6071b32e41f83b0acbf293f820ee8"}, {file = "regex-2023.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4959e8bcbfda5146477d21c3a8ad81b185cd252f3d0d6e4724a5ef11c012fb06"}, {file = "regex-2023.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:af4dd387354dc83a3bff67127a124c21116feb0d2ef536805c454721c5d7993d"}, {file = "regex-2023.6.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2239d95d8e243658b8dbb36b12bd10c33ad6e6933a54d36ff053713f129aa536"}, {file = "regex-2023.6.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:890e5a11c97cf0d0c550eb661b937a1e45431ffa79803b942a057c4fb12a2da2"}, {file = "regex-2023.6.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a8105e9af3b029f243ab11ad47c19b566482c150c754e4c717900a798806b222"}, {file = "regex-2023.6.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:25be746a8ec7bc7b082783216de8e9473803706723b3f6bef34b3d0ed03d57e2"}, {file = "regex-2023.6.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:3676f1dd082be28b1266c93f618ee07741b704ab7b68501a173ce7d8d0d0ca18"}, {file = "regex-2023.6.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:10cb847aeb1728412c666ab2e2000ba6f174f25b2bdc7292e7dd71b16db07568"}, {file = "regex-2023.6.3-cp310-cp310-win32.whl", hash = "sha256:dbbbfce33cd98f97f6bffb17801b0576e653f4fdb1d399b2ea89638bc8d08ae1"}, {file = "regex-2023.6.3-cp310-cp310-win_amd64.whl", hash = "sha256:c5f8037000eb21e4823aa485149f2299eb589f8d1fe4b448036d230c3f4e68e0"}, {file = "regex-2023.6.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c123f662be8ec5ab4ea72ea300359023a5d1df095b7ead76fedcd8babbedf969"}, {file = "regex-2023.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9edcbad1f8a407e450fbac88d89e04e0b99a08473f666a3f3de0fd292badb6aa"}, {file = "regex-2023.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcba6dae7de533c876255317c11f3abe4907ba7d9aa15d13e3d9710d4315ec0e"}, {file = "regex-2023.6.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29cdd471ebf9e0f2fb3cac165efedc3c58db841d83a518b082077e612d3ee5df"}, {file = "regex-2023.6.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12b74fbbf6cbbf9dbce20eb9b5879469e97aeeaa874145517563cca4029db65c"}, {file = "regex-2023.6.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c29ca1bd61b16b67be247be87390ef1d1ef702800f91fbd1991f5c4421ebae8"}, {file = 
"regex-2023.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77f09bc4b55d4bf7cc5eba785d87001d6757b7c9eec237fe2af57aba1a071d9"}, {file = "regex-2023.6.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ea353ecb6ab5f7e7d2f4372b1e779796ebd7b37352d290096978fea83c4dba0c"}, {file = "regex-2023.6.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:10590510780b7541969287512d1b43f19f965c2ece6c9b1c00fc367b29d8dce7"}, {file = "regex-2023.6.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e2fbd6236aae3b7f9d514312cdb58e6494ee1c76a9948adde6eba33eb1c4264f"}, {file = "regex-2023.6.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:6b2675068c8b56f6bfd5a2bda55b8accbb96c02fd563704732fd1c95e2083461"}, {file = "regex-2023.6.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74419d2b50ecb98360cfaa2974da8689cb3b45b9deff0dcf489c0d333bcc1477"}, {file = "regex-2023.6.3-cp311-cp311-win32.whl", hash = "sha256:fb5ec16523dc573a4b277663a2b5a364e2099902d3944c9419a40ebd56a118f9"}, {file = "regex-2023.6.3-cp311-cp311-win_amd64.whl", hash = "sha256:09e4a1a6acc39294a36b7338819b10baceb227f7f7dbbea0506d419b5a1dd8af"}, {file = "regex-2023.6.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0654bca0cdf28a5956c83839162692725159f4cda8d63e0911a2c0dc76166525"}, {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:463b6a3ceb5ca952e66550a4532cef94c9a0c80dc156c4cc343041951aec1697"}, {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87b2a5bb5e78ee0ad1de71c664d6eb536dc3947a46a69182a90f4410f5e3f7dd"}, {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6343c6928282c1f6a9db41f5fd551662310e8774c0e5ebccb767002fcf663ca9"}, {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6192d5af2ccd2a38877bfef086d35e6659566a335b1492786ff254c168b1693"}, 
{file = "regex-2023.6.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74390d18c75054947e4194019077e243c06fbb62e541d8817a0fa822ea310c14"}, {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:742e19a90d9bb2f4a6cf2862b8b06dea5e09b96c9f2df1779e53432d7275331f"}, {file = "regex-2023.6.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:8abbc5d54ea0ee80e37fef009e3cec5dafd722ed3c829126253d3e22f3846f1e"}, {file = "regex-2023.6.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:c2b867c17a7a7ae44c43ebbeb1b5ff406b3e8d5b3e14662683e5e66e6cc868d3"}, {file = "regex-2023.6.3-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:d831c2f8ff278179705ca59f7e8524069c1a989e716a1874d6d1aab6119d91d1"}, {file = "regex-2023.6.3-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:ee2d1a9a253b1729bb2de27d41f696ae893507c7db224436abe83ee25356f5c1"}, {file = "regex-2023.6.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:61474f0b41fe1a80e8dfa70f70ea1e047387b7cd01c85ec88fa44f5d7561d787"}, {file = "regex-2023.6.3-cp36-cp36m-win32.whl", hash = "sha256:0b71e63226e393b534105fcbdd8740410dc6b0854c2bfa39bbda6b0d40e59a54"}, {file = "regex-2023.6.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bbb02fd4462f37060122e5acacec78e49c0fbb303c30dd49c7f493cf21fc5b27"}, {file = "regex-2023.6.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b862c2b9d5ae38a68b92e215b93f98d4c5e9454fa36aae4450f61dd33ff48487"}, {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:976d7a304b59ede34ca2921305b57356694f9e6879db323fd90a80f865d355a3"}, {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:83320a09188e0e6c39088355d423aa9d056ad57a0b6c6381b300ec1a04ec3d16"}, {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:9427a399501818a7564f8c90eced1e9e20709ece36be701f394ada99890ea4b3"}, {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7178bbc1b2ec40eaca599d13c092079bf529679bf0371c602edaa555e10b41c3"}, {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:837328d14cde912af625d5f303ec29f7e28cdab588674897baafaf505341f2fc"}, {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2d44dc13229905ae96dd2ae2dd7cebf824ee92bc52e8cf03dcead37d926da019"}, {file = "regex-2023.6.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d54af539295392611e7efbe94e827311eb8b29668e2b3f4cadcfe6f46df9c777"}, {file = "regex-2023.6.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:7117d10690c38a622e54c432dfbbd3cbd92f09401d622902c32f6d377e2300ee"}, {file = "regex-2023.6.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bb60b503ec8a6e4e3e03a681072fa3a5adcbfa5479fa2d898ae2b4a8e24c4591"}, {file = "regex-2023.6.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:65ba8603753cec91c71de423a943ba506363b0e5c3fdb913ef8f9caa14b2c7e0"}, {file = "regex-2023.6.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:271f0bdba3c70b58e6f500b205d10a36fb4b58bd06ac61381b68de66442efddb"}, {file = "regex-2023.6.3-cp37-cp37m-win32.whl", hash = "sha256:9beb322958aaca059f34975b0df135181f2e5d7a13b84d3e0e45434749cb20f7"}, {file = "regex-2023.6.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fea75c3710d4f31389eed3c02f62d0b66a9da282521075061ce875eb5300cf23"}, {file = "regex-2023.6.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8f56fcb7ff7bf7404becdfc60b1e81a6d0561807051fd2f1860b0d0348156a07"}, {file = "regex-2023.6.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d2da3abc88711bce7557412310dfa50327d5769a31d1c894b58eb256459dc289"}, {file = 
"regex-2023.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a99b50300df5add73d307cf66abea093304a07eb017bce94f01e795090dea87c"}, {file = "regex-2023.6.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5708089ed5b40a7b2dc561e0c8baa9535b77771b64a8330b684823cfd5116036"}, {file = "regex-2023.6.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:687ea9d78a4b1cf82f8479cab23678aff723108df3edeac098e5b2498879f4a7"}, {file = "regex-2023.6.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d3850beab9f527f06ccc94b446c864059c57651b3f911fddb8d9d3ec1d1b25d"}, {file = "regex-2023.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8915cc96abeb8983cea1df3c939e3c6e1ac778340c17732eb63bb96247b91d2"}, {file = "regex-2023.6.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:841d6e0e5663d4c7b4c8099c9997be748677d46cbf43f9f471150e560791f7ff"}, {file = "regex-2023.6.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9edce5281f965cf135e19840f4d93d55b3835122aa76ccacfd389e880ba4cf82"}, {file = "regex-2023.6.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b956231ebdc45f5b7a2e1f90f66a12be9610ce775fe1b1d50414aac1e9206c06"}, {file = "regex-2023.6.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:36efeba71c6539d23c4643be88295ce8c82c88bbd7c65e8a24081d2ca123da3f"}, {file = "regex-2023.6.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:cf67ca618b4fd34aee78740bea954d7c69fdda419eb208c2c0c7060bb822d747"}, {file = "regex-2023.6.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b4598b1897837067a57b08147a68ac026c1e73b31ef6e36deeeb1fa60b2933c9"}, {file = "regex-2023.6.3-cp38-cp38-win32.whl", hash = "sha256:f415f802fbcafed5dcc694c13b1292f07fe0befdb94aa8a52905bd115ff41e88"}, {file = "regex-2023.6.3-cp38-cp38-win_amd64.whl", hash = 
"sha256:d4f03bb71d482f979bda92e1427f3ec9b220e62a7dd337af0aa6b47bf4498f72"}, {file = "regex-2023.6.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccf91346b7bd20c790310c4147eee6ed495a54ddb6737162a36ce9dbef3e4751"}, {file = "regex-2023.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b28f5024a3a041009eb4c333863d7894d191215b39576535c6734cd88b0fcb68"}, {file = "regex-2023.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0bb18053dfcfed432cc3ac632b5e5e5c5b7e55fb3f8090e867bfd9b054dbcbf"}, {file = "regex-2023.6.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a5bfb3004f2144a084a16ce19ca56b8ac46e6fd0651f54269fc9e230edb5e4a"}, {file = "regex-2023.6.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c6b48d0fa50d8f4df3daf451be7f9689c2bde1a52b1225c5926e3f54b6a9ed1"}, {file = "regex-2023.6.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:051da80e6eeb6e239e394ae60704d2b566aa6a7aed6f2890a7967307267a5dc6"}, {file = "regex-2023.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4c3b7fa4cdaa69268748665a1a6ff70c014d39bb69c50fda64b396c9116cf77"}, {file = "regex-2023.6.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:457b6cce21bee41ac292d6753d5e94dcbc5c9e3e3a834da285b0bde7aa4a11e9"}, {file = "regex-2023.6.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:aad51907d74fc183033ad796dd4c2e080d1adcc4fd3c0fd4fd499f30c03011cd"}, {file = "regex-2023.6.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0385e73da22363778ef2324950e08b689abdf0b108a7d8decb403ad7f5191938"}, {file = "regex-2023.6.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c6a57b742133830eec44d9b2290daf5cbe0a2f1d6acee1b3c7b1c7b2f3606df7"}, {file = "regex-2023.6.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = 
"sha256:3e5219bf9e75993d73ab3d25985c857c77e614525fac9ae02b1bebd92f7cecac"}, {file = "regex-2023.6.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e5087a3c59eef624a4591ef9eaa6e9a8d8a94c779dade95d27c0bc24650261cd"}, {file = "regex-2023.6.3-cp39-cp39-win32.whl", hash = "sha256:20326216cc2afe69b6e98528160b225d72f85ab080cbdf0b11528cbbaba2248f"}, {file = "regex-2023.6.3-cp39-cp39-win_amd64.whl", hash = "sha256:bdff5eab10e59cf26bc479f565e25ed71a7d041d1ded04ccf9aee1d9f208487a"}, {file = "regex-2023.6.3.tar.gz", hash = "sha256:72d1a25bf36d2050ceb35b517afe13864865268dfb45910e2e17a84be6cbfeb0"}, ] [[package]] name = "requests" version = "2.31.0" description = "Python HTTP for Humans." optional = false python-versions = ">=3.7" files = [ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, ] [package.dependencies] certifi = ">=2017.4.17" charset-normalizer = ">=2,<4" idna = ">=2.5,<4" urllib3 = ">=1.21.1,<3" [package.extras] socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "rich" version = "13.4.2" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.7.0" files = [ {file = "rich-13.4.2-py3-none-any.whl", hash = "sha256:8f87bc7ee54675732fa66a05ebfe489e27264caeeff3728c945d25971b6485ec"}, {file = "rich-13.4.2.tar.gz", hash = "sha256:d653d6bccede5844304c605d5aac802c7cf9621efd700b46c7ec2b51ea914898"}, ] [package.dependencies] markdown-it-py = ">=2.2.0" pygments = ">=2.13.0,<3.0.0" typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} [package.extras] jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "setuptools" version = "67.8.0" description = "Easily download, build, install, upgrade, and uninstall 
Python packages" optional = false python-versions = ">=3.7" files = [ {file = "setuptools-67.8.0-py3-none-any.whl", hash = "sha256:5df61bf30bb10c6f756eb19e7c9f3b473051f48db77fddbe06ff2ca307df9a6f"}, {file = "setuptools-67.8.0.tar.gz", hash = "sha256:62642358adc77ffa87233bc4d2354c4b2682d214048f500964dbe760ccedf102"}, ] [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] [[package]] name = "smmap" version = "5.0.0" description = "A pure Python implementation of a sliding window memory map manager" optional = false python-versions = ">=3.6" files = [ {file = "smmap-5.0.0-py3-none-any.whl", hash = 
"sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94"}, {file = "smmap-5.0.0.tar.gz", hash = "sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"}, ] [[package]] name = "sniffio" version = "1.3.0" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" files = [ {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, ] [[package]] name = "sqlalchemy" version = "1.4.41" description = "Database Abstraction Library" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ {file = "SQLAlchemy-1.4.41-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:13e397a9371ecd25573a7b90bd037db604331cf403f5318038c46ee44908c44d"}, {file = "SQLAlchemy-1.4.41-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2d6495f84c4fd11584f34e62f9feec81bf373787b3942270487074e35cbe5330"}, {file = "SQLAlchemy-1.4.41-cp27-cp27m-win32.whl", hash = "sha256:e570cfc40a29d6ad46c9aeaddbdcee687880940a3a327f2c668dd0e4ef0a441d"}, {file = "SQLAlchemy-1.4.41-cp27-cp27m-win_amd64.whl", hash = "sha256:5facb7fd6fa8a7353bbe88b95695e555338fb038ad19ceb29c82d94f62775a05"}, {file = "SQLAlchemy-1.4.41-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:f37fa70d95658763254941ddd30ecb23fc4ec0c5a788a7c21034fc2305dab7cc"}, {file = "SQLAlchemy-1.4.41-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:361f6b5e3f659e3c56ea3518cf85fbdae1b9e788ade0219a67eeaaea8a4e4d2a"}, {file = "SQLAlchemy-1.4.41-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0990932f7cca97fece8017414f57fdd80db506a045869d7ddf2dda1d7cf69ecc"}, {file = 
"SQLAlchemy-1.4.41-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cd767cf5d7252b1c88fcfb58426a32d7bd14a7e4942497e15b68ff5d822b41ad"}, {file = "SQLAlchemy-1.4.41-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5102fb9ee2c258a2218281adcb3e1918b793c51d6c2b4666ce38c35101bb940e"}, {file = "SQLAlchemy-1.4.41-cp310-cp310-win32.whl", hash = "sha256:2082a2d2fca363a3ce21cfa3d068c5a1ce4bf720cf6497fb3a9fc643a8ee4ddd"}, {file = "SQLAlchemy-1.4.41-cp310-cp310-win_amd64.whl", hash = "sha256:e4b12e3d88a8fffd0b4ca559f6d4957ed91bd4c0613a4e13846ab8729dc5c251"}, {file = "SQLAlchemy-1.4.41-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:90484a2b00baedad361402c257895b13faa3f01780f18f4a104a2f5c413e4536"}, {file = "SQLAlchemy-1.4.41-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b67fc780cfe2b306180e56daaa411dd3186bf979d50a6a7c2a5b5036575cbdbb"}, {file = "SQLAlchemy-1.4.41-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ad2b727fc41c7f8757098903f85fafb4bf587ca6605f82d9bf5604bd9c7cded"}, {file = "SQLAlchemy-1.4.41-cp311-cp311-win32.whl", hash = "sha256:59bdc291165b6119fc6cdbc287c36f7f2859e6051dd923bdf47b4c55fd2f8bd0"}, {file = "SQLAlchemy-1.4.41-cp311-cp311-win_amd64.whl", hash = "sha256:d2e054aed4645f9b755db85bc69fc4ed2c9020c19c8027976f66576b906a74f1"}, {file = "SQLAlchemy-1.4.41-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:4ba7e122510bbc07258dc42be6ed45997efdf38129bde3e3f12649be70683546"}, {file = "SQLAlchemy-1.4.41-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0dcf127bb99458a9d211e6e1f0f3edb96c874dd12f2503d4d8e4f1fd103790b"}, {file = "SQLAlchemy-1.4.41-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:e16c2be5cb19e2c08da7bd3a87fed2a0d4e90065ee553a940c4fc1a0fb1ab72b"}, {file = "SQLAlchemy-1.4.41-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5ebeeec5c14533221eb30bad716bc1fd32f509196318fb9caa7002c4a364e4c"}, {file = "SQLAlchemy-1.4.41-cp36-cp36m-win32.whl", hash = "sha256:3e2ef592ac3693c65210f8b53d0edcf9f4405925adcfc031ff495e8d18169682"}, {file = "SQLAlchemy-1.4.41-cp36-cp36m-win_amd64.whl", hash = "sha256:eb30cf008850c0a26b72bd1b9be6730830165ce049d239cfdccd906f2685f892"}, {file = "SQLAlchemy-1.4.41-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:c23d64a0b28fc78c96289ffbd0d9d1abd48d267269b27f2d34e430ea73ce4b26"}, {file = "SQLAlchemy-1.4.41-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8eb8897367a21b578b26f5713833836f886817ee2ffba1177d446fa3f77e67c8"}, {file = "SQLAlchemy-1.4.41-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:14576238a5f89bcf504c5f0a388d0ca78df61fb42cb2af0efe239dc965d4f5c9"}, {file = "SQLAlchemy-1.4.41-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:639e1ae8d48b3c86ffe59c0daa9a02e2bfe17ca3d2b41611b30a0073937d4497"}, {file = "SQLAlchemy-1.4.41-cp37-cp37m-win32.whl", hash = "sha256:0005bd73026cd239fc1e8ccdf54db58b6193be9a02b3f0c5983808f84862c767"}, {file = "SQLAlchemy-1.4.41-cp37-cp37m-win_amd64.whl", hash = "sha256:5323252be2bd261e0aa3f33cb3a64c45d76829989fa3ce90652838397d84197d"}, {file = "SQLAlchemy-1.4.41-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:05f0de3a1dc3810a776275763764bb0015a02ae0f698a794646ebc5fb06fad33"}, {file = "SQLAlchemy-1.4.41-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0002e829142b2af00b4eaa26c51728f3ea68235f232a2e72a9508a3116bd6ed0"}, {file = 
"SQLAlchemy-1.4.41-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:22ff16cedab5b16a0db79f1bc99e46a6ddececb60c396562e50aab58ddb2871c"}, {file = "SQLAlchemy-1.4.41-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccfd238f766a5bb5ee5545a62dd03f316ac67966a6a658efb63eeff8158a4bbf"}, {file = "SQLAlchemy-1.4.41-cp38-cp38-win32.whl", hash = "sha256:58bb65b3274b0c8a02cea9f91d6f44d0da79abc993b33bdedbfec98c8440175a"}, {file = "SQLAlchemy-1.4.41-cp38-cp38-win_amd64.whl", hash = "sha256:ce8feaa52c1640de9541eeaaa8b5fb632d9d66249c947bb0d89dd01f87c7c288"}, {file = "SQLAlchemy-1.4.41-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:199a73c31ac8ea59937cc0bf3dfc04392e81afe2ec8a74f26f489d268867846c"}, {file = "SQLAlchemy-1.4.41-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676d51c9f6f6226ae8f26dc83ec291c088fe7633269757d333978df78d931ab"}, {file = "SQLAlchemy-1.4.41-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:036d8472356e1d5f096c5e0e1a7e0f9182140ada3602f8fff6b7329e9e7cfbcd"}, {file = "SQLAlchemy-1.4.41-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2307495d9e0ea00d0c726be97a5b96615035854972cc538f6e7eaed23a35886c"}, {file = "SQLAlchemy-1.4.41-cp39-cp39-win32.whl", hash = "sha256:9c56e19780cd1344fcd362fd6265a15f48aa8d365996a37fab1495cae8fcd97d"}, {file = "SQLAlchemy-1.4.41-cp39-cp39-win_amd64.whl", hash = "sha256:f5fa526d027d804b1f85cdda1eb091f70bde6fb7d87892f6dd5a48925bc88898"}, {file = "SQLAlchemy-1.4.41.tar.gz", hash = "sha256:0292f70d1797e3c54e862e6f30ae474014648bc9c723e14a2fda730adb0a9791"}, ] [package.dependencies] greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_machine == \"AMD64\" or 
platform_machine == \"amd64\" or platform_machine == \"x86_64\" or platform_machine == \"ppc64le\" or platform_machine == \"aarch64\")"} importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} [package.extras] aiomysql = ["aiomysql", "greenlet (!=0.4.17)"] aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"] asyncio = ["greenlet (!=0.4.17)"] asyncmy = ["asyncmy (>=0.2.3,!=0.2.4)", "greenlet (!=0.4.17)"] mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2)"] mssql = ["pyodbc"] mssql-pymssql = ["pymssql"] mssql-pyodbc = ["pyodbc"] mypy = ["mypy (>=0.910)", "sqlalchemy2-stubs"] mysql = ["mysqlclient (>=1.4.0)", "mysqlclient (>=1.4.0,<2)"] mysql-connector = ["mysql-connector-python"] oracle = ["cx-oracle (>=7)", "cx-oracle (>=7,<8)"] postgresql = ["psycopg2 (>=2.7)"] postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] postgresql-pg8000 = ["pg8000 (>=1.16.6,!=1.29.0)"] postgresql-psycopg2binary = ["psycopg2-binary"] postgresql-psycopg2cffi = ["psycopg2cffi"] pymysql = ["pymysql", "pymysql (<1)"] sqlcipher = ["sqlcipher3-binary"] [[package]] name = "starlette" version = "0.27.0" description = "The little ASGI library that shines." optional = false python-versions = ">=3.7" files = [ {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"}, {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"}, ] [package.dependencies] anyio = ">=3.4.0,<5" typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} [package.extras] full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"] [[package]] name = "stdlib-list" version = "0.8.0" description = "A list of Python Standard Libraries (2.6-7, 3.2-9)." 
optional = false python-versions = "*" files = [ {file = "stdlib-list-0.8.0.tar.gz", hash = "sha256:a1e503719720d71e2ed70ed809b385c60cd3fb555ba7ec046b96360d30b16d9f"}, {file = "stdlib_list-0.8.0-py3-none-any.whl", hash = "sha256:2ae0712a55b68f3fbbc9e58d6fa1b646a062188f49745b495f94d3310a9fdd3e"}, ] [package.extras] develop = ["sphinx"] [[package]] name = "stevedore" version = "3.5.2" description = "Manage dynamic plugins for Python applications" optional = false python-versions = ">=3.6" files = [ {file = "stevedore-3.5.2-py3-none-any.whl", hash = "sha256:fa2630e3d0ad3e22d4914aff2501445815b9a4467a6edc49387c667a38faf5bf"}, {file = "stevedore-3.5.2.tar.gz", hash = "sha256:cf99f41fc0d5a4f185ca4d3d42b03be9011b0a1ec1a4ea1a282be1b4b306dcc2"}, ] [package.dependencies] importlib-metadata = {version = ">=1.7.0", markers = "python_version < \"3.8\""} pbr = ">=2.0.0,<2.1.0 || >2.1.0" [[package]] name = "tomli" version = "2.0.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.7" files = [ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] [[package]] name = "typed-ast" version = "1.5.4" description = "a fork of Python 2 and 3 ast modules with type comment support" optional = false python-versions = ">=3.6" files = [ {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"}, {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"}, {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"}, {file = 
"typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"}, {file = "typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"}, {file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"}, {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"}, {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"}, {file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"}, {file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"}, {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"}, {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"}, {file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"}, {file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"}, {file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"}, {file = 
"typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"}, {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"}, {file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"}, {file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"}, {file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"}, {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"}, {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"}, {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"}, {file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"}, ] [[package]] name = "types-aiofiles" version = "23.1.0.4" description = "Typing stubs for aiofiles" optional = false python-versions = "*" files = [ {file = "types-aiofiles-23.1.0.4.tar.gz", hash = "sha256:89a58cd0ae93b37a22c323c22d250bd65dde6833f6c6f3c1824784e56f47109f"}, {file = "types_aiofiles-23.1.0.4-py3-none-any.whl", hash = "sha256:65a862f0d36e6b8e1b2df601ba7aeeb2eba8e2f9764ba9d0989bdd498ed8c857"}, ] [[package]] name = "types-cryptography" version = "3.3.23.2" description = "Typing stubs for cryptography" optional = false python-versions = "*" files = [ {file = 
"types-cryptography-3.3.23.2.tar.gz", hash = "sha256:09cc53f273dd4d8c29fa7ad11fefd9b734126d467960162397bc5e3e604dea75"}, {file = "types_cryptography-3.3.23.2-py3-none-any.whl", hash = "sha256:b965d548f148f8e87f353ccf2b7bd92719fdf6c845ff7cedf2abb393a0643e4f"}, ] [[package]] name = "types-enum34" version = "1.1.8" description = "Typing stubs for enum34" optional = false python-versions = "*" files = [ {file = "types-enum34-1.1.8.tar.gz", hash = "sha256:6f9c769641d06d73a55e11c14d38ac76fcd37eb545ce79cebb6eec9d50a64110"}, {file = "types_enum34-1.1.8-py3-none-any.whl", hash = "sha256:05058c7a495f6bfaaca0be4aeac3cce5cdd80a2bad2aab01fd49a20bf4a0209d"}, ] [[package]] name = "types-ipaddress" version = "1.0.8" description = "Typing stubs for ipaddress" optional = false python-versions = "*" files = [ {file = "types-ipaddress-1.0.8.tar.gz", hash = "sha256:a03df3be5935e50ba03fa843daabff539a041a28e73e0fce2c5705bee54d3841"}, {file = "types_ipaddress-1.0.8-py3-none-any.whl", hash = "sha256:4933b74da157ba877b1a705d64f6fa7742745e9ffd65e51011f370c11ebedb55"}, ] [[package]] name = "types-orjson" version = "3.6.2" description = "Typing stubs for orjson" optional = false python-versions = "*" files = [ {file = "types-orjson-3.6.2.tar.gz", hash = "sha256:cf9afcc79a86325c7aff251790338109ed6f6b1bab09d2d4262dd18c85a3c638"}, {file = "types_orjson-3.6.2-py3-none-any.whl", hash = "sha256:22ee9a79236b6b0bfb35a0684eded62ad930a88a56797fa3c449b026cf7dbfe4"}, ] [[package]] name = "types-pkg-resources" version = "0.1.3" description = "Typing stubs for pkg_resources" optional = false python-versions = "*" files = [ {file = "types-pkg_resources-0.1.3.tar.gz", hash = "sha256:834a9b8d3dbea343562fd99d5d3359a726f6bf9d3733bccd2b4f3096fbab9dae"}, {file = "types_pkg_resources-0.1.3-py2.py3-none-any.whl", hash = "sha256:0cb9972cee992249f93fff1a491bf2dc3ce674e5a1926e27d4f0866f7d9b6d9c"}, ] [[package]] name = "types-pymysql" version = "1.0.19.7" description = "Typing stubs for PyMySQL" optional = false 
python-versions = "*" files = [ {file = "types-PyMySQL-1.0.19.7.tar.gz", hash = "sha256:8ea7083a3bd37c4e77f38d8d93e488fba3bb03e3d3d41cc52cd92110375ed517"}, {file = "types_PyMySQL-1.0.19.7-py3-none-any.whl", hash = "sha256:672f0e3e5070da2dc58448d289b878c5c99febb37c9f2cbe309779301ae914fb"}, ] [[package]] name = "types-requests" version = "2.31.0.1" description = "Typing stubs for requests" optional = false python-versions = "*" files = [ {file = "types-requests-2.31.0.1.tar.gz", hash = "sha256:3de667cffa123ce698591de0ad7db034a5317457a596eb0b4944e5a9d9e8d1ac"}, {file = "types_requests-2.31.0.1-py3-none-any.whl", hash = "sha256:afb06ef8f25ba83d59a1d424bd7a5a939082f94b94e90ab5e6116bd2559deaa3"}, ] [package.dependencies] types-urllib3 = "*" [[package]] name = "types-toml" version = "0.10.8.6" description = "Typing stubs for toml" optional = false python-versions = "*" files = [ {file = "types-toml-0.10.8.6.tar.gz", hash = "sha256:6d3ac79e36c9ee593c5d4fb33a50cca0e3adceb6ef5cff8b8e5aef67b4c4aaf2"}, {file = "types_toml-0.10.8.6-py3-none-any.whl", hash = "sha256:de7b2bb1831d6f7a4b554671ffe5875e729753496961b3e9b202745e4955dafa"}, ] [[package]] name = "types-ujson" version = "5.8.0.0" description = "Typing stubs for ujson" optional = false python-versions = "*" files = [ {file = "types-ujson-5.8.0.0.tar.gz", hash = "sha256:2856a8d0883475bec80c1ac772c006ea691215cc55ded21647b5c87c86249cbb"}, {file = "types_ujson-5.8.0.0-py3-none-any.whl", hash = "sha256:481c27a7bc758fc94de330dcd885ba2fbf5879dd3dfd1c7b6b46f5b98d41ca85"}, ] [[package]] name = "types-urllib3" version = "1.26.25.13" description = "Typing stubs for urllib3" optional = false python-versions = "*" files = [ {file = "types-urllib3-1.26.25.13.tar.gz", hash = "sha256:3300538c9dc11dad32eae4827ac313f5d986b8b21494801f1bf97a1ac6c03ae5"}, {file = "types_urllib3-1.26.25.13-py3-none-any.whl", hash = "sha256:5dbd1d2bef14efee43f5318b5d36d805a489f6600252bb53626d4bfafd95e27c"}, ] [[package]] name = "typing-extensions" version = 
"4.6.3" description = "Backported and Experimental Type Hints for Python 3.7+" optional = false python-versions = ">=3.7" files = [ {file = "typing_extensions-4.6.3-py3-none-any.whl", hash = "sha256:88a4153d8505aabbb4e13aacb7c486c2b4a33ca3b3f807914a9b4c844c471c26"}, {file = "typing_extensions-4.6.3.tar.gz", hash = "sha256:d91d5919357fe7f681a9f2b5b4cb2a5f1ef0a1e9f59c4d8ff0d3491e05c0ffd5"}, ] [[package]] name = "urllib3" version = "2.0.3" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.7" files = [ {file = "urllib3-2.0.3-py3-none-any.whl", hash = "sha256:48e7fafa40319d358848e1bc6809b208340fafe2096f1725d05d67443d0483d1"}, {file = "urllib3-2.0.3.tar.gz", hash = "sha256:bee28b5e56addb8226c96f7f13ac28cb4c301dd5ea8a6ca179c0b9835e032825"}, ] [package.extras] brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] [[package]] name = "virtualenv" version = "20.23.1" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ {file = "virtualenv-20.23.1-py3-none-any.whl", hash = "sha256:34da10f14fea9be20e0fd7f04aba9732f84e593dac291b757ce42e3368a39419"}, {file = "virtualenv-20.23.1.tar.gz", hash = "sha256:8ff19a38c1021c742148edc4f81cb43d7f8c6816d2ede2ab72af5b84c749ade1"}, ] [package.dependencies] distlib = ">=0.3.6,<1" filelock = ">=3.12,<4" importlib-metadata = {version = ">=6.6", markers = "python_version < \"3.8\""} platformdirs = ">=3.5.1,<4" [package.extras] docs = ["furo (>=2023.5.20)", "proselint (>=0.13)", "sphinx (>=7.0.1)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.3.1)", 
"pytest-env (>=0.8.1)", "pytest-freezer (>=0.4.6)", "pytest-mock (>=3.10)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=67.8)", "time-machine (>=2.9)"] [[package]] name = "watchdog" version = "3.0.0" description = "Filesystem events monitoring" optional = false python-versions = ">=3.7" files = [ {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:336adfc6f5cc4e037d52db31194f7581ff744b67382eb6021c868322e32eef41"}, {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a70a8dcde91be523c35b2bf96196edc5730edb347e374c7de7cd20c43ed95397"}, {file = "watchdog-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:adfdeab2da79ea2f76f87eb42a3ab1966a5313e5a69a0213a3cc06ef692b0e96"}, {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2b57a1e730af3156d13b7fdddfc23dea6487fceca29fc75c5a868beed29177ae"}, {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7ade88d0d778b1b222adebcc0927428f883db07017618a5e684fd03b83342bd9"}, {file = "watchdog-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7e447d172af52ad204d19982739aa2346245cc5ba6f579d16dac4bfec226d2e7"}, {file = "watchdog-3.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9fac43a7466eb73e64a9940ac9ed6369baa39b3bf221ae23493a9ec4d0022674"}, {file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8ae9cda41fa114e28faf86cb137d751a17ffd0316d1c34ccf2235e8a84365c7f"}, {file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:25f70b4aa53bd743729c7475d7ec41093a580528b100e9a8c5b5efe8899592fc"}, {file = "watchdog-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4f94069eb16657d2c6faada4624c39464f65c05606af50bb7902e036e3219be3"}, {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7c5f84b5194c24dd573fa6472685b2a27cc5a17fe5f7b6fd40345378ca6812e3"}, {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:3aa7f6a12e831ddfe78cdd4f8996af9cf334fd6346531b16cec61c3b3c0d8da0"}, {file = "watchdog-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:233b5817932685d39a7896b1090353fc8efc1ef99c9c054e46c8002561252fb8"}, {file = "watchdog-3.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:13bbbb462ee42ec3c5723e1205be8ced776f05b100e4737518c67c8325cf6100"}, {file = "watchdog-3.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8f3ceecd20d71067c7fd4c9e832d4e22584318983cabc013dbf3f70ea95de346"}, {file = "watchdog-3.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c9d8c8ec7efb887333cf71e328e39cffbf771d8f8f95d308ea4125bf5f90ba64"}, {file = "watchdog-3.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0e06ab8858a76e1219e68c7573dfeba9dd1c0219476c5a44d5333b01d7e1743a"}, {file = "watchdog-3.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:d00e6be486affb5781468457b21a6cbe848c33ef43f9ea4a73b4882e5f188a44"}, {file = "watchdog-3.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:c07253088265c363d1ddf4b3cdb808d59a0468ecd017770ed716991620b8f77a"}, {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:5113334cf8cf0ac8cd45e1f8309a603291b614191c9add34d33075727a967709"}, {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:51f90f73b4697bac9c9a78394c3acbbd331ccd3655c11be1a15ae6fe289a8c83"}, {file = "watchdog-3.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:ba07e92756c97e3aca0912b5cbc4e5ad802f4557212788e72a72a47ff376950d"}, {file = "watchdog-3.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:d429c2430c93b7903914e4db9a966c7f2b068dd2ebdd2fa9b9ce094c7d459f33"}, {file = "watchdog-3.0.0-py3-none-win32.whl", hash = "sha256:3ed7c71a9dccfe838c2f0b6314ed0d9b22e77d268c67e015450a29036a81f60f"}, {file = "watchdog-3.0.0-py3-none-win_amd64.whl", hash = "sha256:4c9956d27be0bb08fc5f30d9d0179a855436e655f046d288e2bcc11adfae893c"}, {file = "watchdog-3.0.0-py3-none-win_ia64.whl", hash = 
"sha256:5d9f3a10e02d7371cd929b5d8f11e87d4bad890212ed3901f9b4d68767bee759"}, {file = "watchdog-3.0.0.tar.gz", hash = "sha256:4d98a320595da7a7c5a18fc48cb633c2e73cda78f93cac2ef42d42bf609a33f9"}, ] [package.extras] watchmedo = ["PyYAML (>=3.10)"] [[package]] name = "yappi" version = "1.4.0" description = "Yet Another Python Profiler" optional = false python-versions = "*" files = [ {file = "yappi-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a6bce588d03240d8c05aa734d97d69c595ac382644701eaaca2421f6e37c9e"}, {file = "yappi-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d80262ef4bf8ebd7c81e37832b41fe3b0b74621a24eb853b0444e06b01a44a1a"}, {file = "yappi-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8791dbdf17673fb14a6cff150a8b2c85a5e40c455eebb37a62ea4dc74c077408"}, {file = "yappi-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:57f9d3a88b822e3727505cf0a59e4b1038de4cd34555749bdc65ac258a58ca23"}, {file = "yappi-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f3fb92fe0ea47142275fbe6e5d1daa9685c2e25bfd6a9478c2669e8828b3abf8"}, {file = "yappi-1.4.0-cp310-cp310-win32.whl", hash = "sha256:52b82a8ec9d5e86e828fe35821a8482c94ca1dec8a278bb8001d21f2c8af98a8"}, {file = "yappi-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:a5767d79a44d47a34be469d798ddc56cff251394af1f4fde2463de9359a8c38e"}, {file = "yappi-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d8bc404a3201ec9dc93ab669a700b4f3736bbe3a029e85dc046f278541b83f74"}, {file = "yappi-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:733b4088a54996e7811dca94de633ffe4b906b6e6b8147c31913b674ae6e90cc"}, {file = "yappi-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a5cd0ed067b4499fa45f08e78e0caf9154bc5ae28eca90167107b1fcfa741dac"}, {file = "yappi-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:34aed429e1ef04d5b432bbbd719d7c7707b9fb310e30f78c61d0b31733626af8"}, {file = "yappi-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:01fc1f7f76a43a2d0ded34313c97395e3c3323e796945b183569a5a0365b14a3"}, {file = "yappi-1.4.0-cp311-cp311-win32.whl", hash = "sha256:987c8f658e1d2e4029612c33a4ff7b04f9a8fbd96e315eefb0384943830ae68b"}, {file = "yappi-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:4f42a9de88cfcbcd3f05834b4cc585e6e70ae0c4e03918b41865ccca02d2514b"}, {file = "yappi-1.4.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a3bb2d75620ac9ef69f11c62e737469ef155e566e51ed85a74126871e45d2051"}, {file = "yappi-1.4.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3033cdbd482a79ecafcafef8a1a0699ad333ee87bc7a28bd07c461ef196b2ea3"}, {file = "yappi-1.4.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42ddd97258604bb1bea1b7dce2790c24f9b9bca970d844cb7afe98a9fbbf1425"}, {file = "yappi-1.4.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:3a9652e7785f4b4c8bb3a8fa9ee33adf5e3f6dd893de4465008f75b1306f7895"}, {file = "yappi-1.4.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:3e5d5a95a8681dc91f5a22c81d458109dcbcd718a551b647d28075574dfb8cbb"}, {file = "yappi-1.4.0-cp36-cp36m-win32.whl", hash = "sha256:89d352ea770860617f55539e860440a166c5b9c1a67a7f351fed4030af9943b0"}, {file = "yappi-1.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:64529504c5522b1c8c79eeb27a68f84979ce9415150c32cd7e06618383443bcc"}, {file = "yappi-1.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d878f66d0b5d79396d6f034f8d89615517a4c4410e97b84d48402e940f9501d5"}, {file = "yappi-1.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:bad0766003eaa683e56f77166d4551c2f7530ec13aa602ada5cd8ddfe130d42b"}, {file = "yappi-1.4.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ab8c17980e6bdb522b03b118f6d62362c92f7be40a81a4e89746d0eeae1e3ab"}, {file = "yappi-1.4.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:194a565ab0145ff10e31389fb364a35a4f5160ad6af17362355592cfddf2ae6e"}, {file = "yappi-1.4.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:14068a34907f4e7404b6b87a7bda2d55be2bde4d4d7f9e254b2cd26187cc2ebc"}, {file = "yappi-1.4.0-cp37-cp37m-win32.whl", hash = "sha256:407b119f394ab60bb0a3d07efcb92d4846ef40ab40fff02c8902ca8d800f85d3"}, {file = "yappi-1.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:5e4de1137021f80a238217444c0ad5f0e393082f4744ecae3d92eb3a9b98ca3e"}, {file = "yappi-1.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7b286c4fcc72812adbd19280329a3c0144582abd1e5a3513d93a8bb2d3d1abaa"}, {file = "yappi-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aef7a5bd5cd7e36adb90419984f29809eee51c9a9b74849b9dfa6077075da21f"}, {file = "yappi-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5092940caea4cc150ba21d9afbafc8b00770f33ab5de55638c2bbd2c6f7f82cf"}, {file = "yappi-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ec8ada826232137560e6ac7644ace8305b4dacbca0f9fff246ffee52db0a3c3a"}, {file = "yappi-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1413d3fb200c0011b22764a227fb9e56e479acb1ec2b7c134c62d70a76a7e1f2"}, {file = "yappi-1.4.0-cp38-cp38-win32.whl", hash = "sha256:38b0235574b7c0c549d97baa63f5fa4660b6d34a0b00ee8cc48d04ef19cf71fb"}, {file = "yappi-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:7ab23d95fe8130445f1e089af7efec21f172611b306283496c99089839ef61c5"}, {file = "yappi-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:61a2c7dc15eeccd1909c6bc5783e63fb06ee7725e5aa006b83cd6afb49a343c7"}, {file = "yappi-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c81d957b10085ce32bb232896d258e9e87ae4ac4e044e755eb505f1c8eb148da"}, {file = "yappi-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e4a0af76310957d12ff2d661e2ec3509ee4b4661929fec04d0dc40a0c8866ae"}, {file = "yappi-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a279bb01f9c4b4c99cb959210e49151afd6c76693eca8a01311343efe8f31262"}, {file = "yappi-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:36eaa02d53842b22157f1b150db79d03cae1cc635f708fa82737bcdfd4aa2bd9"}, {file = "yappi-1.4.0-cp39-cp39-win32.whl", hash = "sha256:05b2c6c7f0667b46cd7cccbd36cff1b10f4b3f6625aacea5eb0ac99cd9ca7520"}, {file = "yappi-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:bbdd6043e24f5c84a042ea8af69a1f2720571426fd1985814cf41e6d7a17f5c9"}, {file = "yappi-1.4.0.tar.gz", hash = "sha256:504b5d8fc7433736cb5e257991d2e7f2946019174f1faec7b2fe947881a17fc0"}, ] [package.extras] test = ["gevent (>=20.6.2)"] [[package]] name = "zipp" version = "3.15.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.7" files = [ {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, ] [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [extras] aiopg = ["aiopg", 
"psycopg2-binary"] all = ["PyMySQL", "aiomysql", "aiopg", "aiosqlite", "asyncpg", "cryptography", "mysqlclient", "orjson", "psycopg2-binary"] crypto = ["cryptography"] mysql = ["PyMySQL", "aiomysql"] orjson = ["orjson"] postgres = ["asyncpg", "psycopg2-binary"] postgresql = ["asyncpg", "psycopg2-binary"] sqlite = ["aiosqlite"] [metadata] lock-version = "2.0" python-versions = "^3.7.0" content-hash = "d07be4d98176a677bf13fb3f563285795fc85356962f9a0f57aa87d600c447e3" ormar-0.12.2/pyproject.toml000066400000000000000000000105771444363446500156570ustar00rootroot00000000000000[project] name = "ormar" [tool.poetry] name = "ormar" version = "0.12.2" description = "An async ORM with fastapi in mind and pydantic validation." authors = ["Radosław Drążkiewicz "] license = "MIT" readme = "README.md" homepage = "https://github.com/collerek/ormar" repository = "https://github.com/collerek/ormar" documentation = "https://collerek.github.io/ormar/" packages = [ { include="ormar" } ] keywords = [ "orm", "sqlalchemy", "fastapi", "pydantic", "databases", "async", "alembic", ] classifiers = [ "Development Status :: 4 - Beta", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Topic :: Internet :: WWW/HTTP", "Framework :: AsyncIO", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3 :: Only", ] [tool.poetry.dependencies] python = "^3.7.0" databases = ">=0.3.2,!=0.5.0,!=0.5.1,!=0.5.2,!=0.5.3,<0.6.3" pydantic = ">=1.6.1,!=1.7,!=1.7.1,!=1.7.2,!=1.7.3,!=1.8,!=1.8.1,<1.10.9" SQLAlchemy = ">=1.3.18,<1.4.42" cryptography = { version = ">=35,<41", optional = true } # Async database drivers aiosqlite = { version = ">=0.17,<0.20", optional = true } aiomysql = { version = ">=0.1.0", optional = true } aiopg = { 
version = "^1.3.3", optional = true } asyncpg = { version = ">=0.24,<0.28", optional = true } # Sync database drivers for standard tooling around setup/teardown/migrations. psycopg2-binary = { version = "^2.9.1", optional = true } mysqlclient = { version = "^2.1.0", optional = true } PyMySQL = { version = ">=0.9", optional = true } [tool.poetry.dependencies.orjson] version = ">=3.6.4" optional = true [tool.poetry.dependencies.typing-extensions] version = ">=3.7,<=5.0" python = "<3.8" [tool.poetry.dependencies.importlib-metadata] version = ">=3.1" python = "<3.8" [tool.poetry.dev-dependencies] # Testing pytest = "^7.3.1" pytest-cov = "^4.0.0" codecov = "^2.1.13" pytest-asyncio = "^0.21.0" fastapi = ">=0.70.1,<=0.97" flake8 = "^3.9.2" flake8-black = "^0.3.6" flake8-bugbear = "^23.3.12" flake8-import-order = "^0.18.1" flake8-bandit = "^3.0.0" flake8-builtins = "^2.1.0" flake8-variables-names = "^0.0.5" flake8-cognitive-complexity = "^0.1.0" flake8-functions = "^0.0.8" flake8-expression-complexity = "^0.0.11" # types mypy = "^0.982" types-ujson = "^5.7.0" types-PyMySQL = "^1.0.19" types-ipaddress = "^1.0.1" types-enum34 = "^1.1.1" types-cryptography = "^3.3.23" types-orjson = "^3.6.1" types-aiofiles = "^23.1.0" types-pkg-resources = "^0.1.3" types-requests = "^2.31.0" types-toml = "^0.10.8" # Documantation mkdocs = "^1.4.3" mkdocs-material = ">=8.1.2,<9.2" mkdocs-material-extensions = "^1.1" mkdocstrings = {version = "==0.22.0", extras = ["python"]} mkdocs-gen-files = "^0.5.0" mkdocs-literate-nav = "^0.6.0" mkdocs-section-index = "^0.3.4" dataclasses = { version = ">=0.6.0,<0.8 || >0.8,<1.0.0" } # Performance testing yappi = "^1.4.0" pytest-benchmark = "^4.0.0" nest-asyncio = "^1.5.6" pre-commit = "^2.21.0" [tool.poetry.extras] postgresql = ["asyncpg", "psycopg2-binary"] postgres = ["asyncpg", "psycopg2-binary"] aiopg = ["aiopg", "psycopg2-binary"] mysql = ["aiomysql", "PyMySQL"] sqlite = ["aiosqlite"] orjson = ["orjson"] crypto = ["cryptography"] all = [ "aiosqlite", 
"asyncpg", "aiopg", "psycopg2-binary", "aiomysql", "mysqlclient", "PyMySQL", "orjson", "cryptography", ] [tool.poetry.group.dev.dependencies] httpx = "^0.24.1" asgi-lifespan = "^2.1.0" [build-system] requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" [tool.mypy] # TODO: Enable mypy plugin after pydantic release supporting toml file disallow_untyped_calls = true disallow_untyped_defs = true disallow_incomplete_defs = true [[tool.mypy.overrides]] module = ["tests.*", "benchmarks.*"] disallow_untyped_calls = false disallow_untyped_defs = false disallow_incomplete_defs = false [[tool.mypy.overrides]] module = "docs_src.*" ignore_errors = true [[tool.mypy.overrides]] module = ["sqlalchemy.*", "asyncpg", "nest_asyncio"] ignore_missing_imports = true [tool.yapf] based_on_style = "pep8" disable_ending_comma_heuristic = true split_arguments_when_comma_terminated = true ormar-0.12.2/scripts/000077500000000000000000000000001444363446500144205ustar00rootroot00000000000000ormar-0.12.2/scripts/docker-compose.yml000066400000000000000000000006231444363446500200560ustar00rootroot00000000000000version: '2.1' services: postgres: image: postgres:10.8 environment: POSTGRES_USER: username POSTGRES_PASSWORD: password POSTGRES_DB: testsuite ports: - 5432:5432 mysql: image: mysql:5.7 environment: MYSQL_USER: username MYSQL_PASSWORD: password MYSQL_ROOT_PASSWORD: password MYSQL_DATABASE: testsuite ports: - 3306:3306ormar-0.12.2/scripts/test.sh000077500000000000000000000003721444363446500157400ustar00rootroot00000000000000#!/bin/sh -e PACKAGE="ormar" PREFIX="" if [ -d 'venv' ] ; then PREFIX="venv/bin/" fi set -x PYTHONPATH=. 
${PREFIX}pytest --ignore venv --cov=${PACKAGE} --cov=tests --cov-report=xml --cov-fail-under=100 --cov-report=term-missing tests/ "${@}" ormar-0.12.2/tests/000077500000000000000000000000001444363446500140735ustar00rootroot00000000000000ormar-0.12.2/tests/__init__.py000066400000000000000000000000001444363446500161720ustar00rootroot00000000000000ormar-0.12.2/tests/settings.py000066400000000000000000000004511444363446500163050ustar00rootroot00000000000000import os import databases DATABASE_URL = os.getenv("DATABASE_URL", "sqlite:///test.db") database_url = databases.DatabaseURL(DATABASE_URL) if database_url.scheme == "postgresql+aiopg": # pragma no cover DATABASE_URL = str(database_url.replace(driver=None)) print("USED DB:", DATABASE_URL) ormar-0.12.2/tests/test_deferred/000077500000000000000000000000001444363446500167125ustar00rootroot00000000000000ormar-0.12.2/tests/test_deferred/__init__.py000066400000000000000000000000001444363446500210110ustar00rootroot00000000000000ormar-0.12.2/tests/test_deferred/test_forward_cross_refs.py000066400000000000000000000125731444363446500242270ustar00rootroot00000000000000# type: ignore from typing import List, Optional import databases import pytest import sqlalchemy as sa from pydantic.typing import ForwardRef from sqlalchemy import create_engine import ormar from ormar import ModelMeta from tests.settings import DATABASE_URL metadata = sa.MetaData() db = databases.Database(DATABASE_URL) engine = create_engine(DATABASE_URL) TeacherRef = ForwardRef("Teacher") class BaseMeta(ormar.ModelMeta): metadata = metadata database = db class Student(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) primary_teacher: TeacherRef = ormar.ForeignKey( TeacherRef, related_name="own_students" ) class StudentTeacher(ormar.Model): class Meta(BaseMeta): tablename = "students_x_teachers" class Teacher(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db id: int = 
ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) students = ormar.ManyToMany( Student, through=StudentTeacher, related_name="teachers" ) Student.update_forward_refs() CityRef = ForwardRef("City") CountryRef = ForwardRef("Country") class Country(ormar.Model): class Meta(BaseMeta): tablename = "countries" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=128) capital: Optional[CityRef] = ormar.ForeignKey( CityRef, related_name="capital_city", nullable=True ) borders: Optional[List[CountryRef]] = ormar.ManyToMany(CountryRef) class City(ormar.Model): class Meta(BaseMeta): tablename = "cities" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=128) country: Country = ormar.ForeignKey( Country, related_name="cities", skip_reverse=True ) Country.update_forward_refs() @pytest.fixture(autouse=True, scope="module") def create_test_database(): metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_double_relations(): async with db: async with db.transaction(force_rollback=True): t1 = await Teacher.objects.create(name="Mr. Jones") t2 = await Teacher.objects.create(name="Ms. Smith") t3 = await Teacher.objects.create(name="Mr. Quibble") s1 = await Student.objects.create(name="Joe", primary_teacher=t1) s2 = await Student.objects.create(name="Sam", primary_teacher=t1) s3 = await Student.objects.create(name="Kate", primary_teacher=t2) s4 = await Student.objects.create(name="Zoe", primary_teacher=t2) s5 = await Student.objects.create(name="John", primary_teacher=t3) s6 = await Student.objects.create(name="Anna", primary_teacher=t3) for t in [t1, t2, t3]: for s in [s1, s2, s3, s4, s5, s6]: await t.students.add(s) jones = ( await Teacher.objects.select_related(["students", "own_students"]) .order_by(["students__name", "own_students__name"]) .get(name="Mr. 
Jones") ) assert len(jones.students) == 6 assert jones.students[0].name == "Anna" assert jones.students[5].name == "Zoe" assert len(jones.own_students) == 2 assert jones.own_students[0].name == "Joe" assert jones.own_students[1].name == "Sam" smith = ( await Teacher.objects.select_related(["students", "own_students"]) .filter(students__name__contains="a") .order_by(["students__name", "own_students__name"]) .get(name="Ms. Smith") ) assert len(smith.students) == 3 assert smith.students[0].name == "Anna" assert smith.students[2].name == "Sam" assert len(smith.own_students) == 2 assert smith.own_students[0].name == "Kate" assert smith.own_students[1].name == "Zoe" quibble = ( await Teacher.objects.select_related(["students", "own_students"]) .filter(students__name__startswith="J") .order_by(["-students__name", "own_students__name"]) .get(name="Mr. Quibble") ) assert len(quibble.students) == 2 assert quibble.students[1].name == "Joe" assert quibble.students[0].name == "John" assert len(quibble.own_students) == 2 assert quibble.own_students[1].name == "John" assert quibble.own_students[0].name == "Anna" @pytest.mark.asyncio async def test_auto_through_model(): async with db: async with db.transaction(force_rollback=True): england = await Country(name="England").save() france = await Country(name="France").save() london = await City(name="London", country=england).save() england.capital = london await england.update() await england.borders.add(france) check = await Country.objects.select_related(["capital", "borders"]).get( name="England" ) assert check.name == "England" assert check.capital.name == "London" assert check.capital.country.pk == check.pk assert check.borders[0] == france ormar-0.12.2/tests/test_deferred/test_forward_refs.py000066400000000000000000000225271444363446500230160ustar00rootroot00000000000000# type: ignore from typing import List import databases import pytest import pytest_asyncio import sqlalchemy as sa from pydantic.typing import ForwardRef from 
sqlalchemy import create_engine import ormar from ormar import ModelMeta from ormar.exceptions import ModelError from tests.settings import DATABASE_URL metadata = sa.MetaData() db = databases.Database(DATABASE_URL) engine = create_engine(DATABASE_URL) PersonRef = ForwardRef("Person") class Person(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) supervisor: PersonRef = ormar.ForeignKey(PersonRef, related_name="employees") Person.update_forward_refs() GameRef = ForwardRef("Game") ChildRef = ForwardRef("Child") ChildFriendRef = ForwardRef("ChildFriend") class Child(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) favourite_game: GameRef = ormar.ForeignKey(GameRef, related_name="liked_by") least_favourite_game: GameRef = ormar.ForeignKey( GameRef, related_name="not_liked_by" ) friends = ormar.ManyToMany( ChildRef, through=ChildFriendRef, related_name="also_friends" ) class ChildFriend(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db class Game(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) Child.update_forward_refs() @pytest.fixture(autouse=True, scope="module") def create_test_database(): metadata.create_all(engine) yield metadata.drop_all(engine) @pytest_asyncio.fixture(scope="function") async def cleanup(): yield async with db: await ChildFriend.objects.delete(each=True) await Child.objects.delete(each=True) await Game.objects.delete(each=True) await Person.objects.delete(each=True) @pytest.mark.asyncio async def test_not_updated_model_raises_errors(): Person2Ref = ForwardRef("Person2") class Person2(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = 
ormar.String(max_length=100) supervisor: Person2Ref = ormar.ForeignKey(Person2Ref, related_name="employees") with pytest.raises(ModelError): await Person2.objects.create(name="Test") with pytest.raises(ModelError): Person2(name="Test") with pytest.raises(ModelError): await Person2.objects.get() @pytest.mark.asyncio async def test_not_updated_model_m2m_raises_errors(): Person3Ref = ForwardRef("Person3") class PersonFriend(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db class Person3(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) supervisors: Person3Ref = ormar.ManyToMany( Person3Ref, through=PersonFriend, related_name="employees" ) with pytest.raises(ModelError): await Person3.objects.create(name="Test") with pytest.raises(ModelError): Person3(name="Test") with pytest.raises(ModelError): await Person3.objects.get() @pytest.mark.asyncio async def test_not_updated_model_m2m_through_raises_errors(): PersonPetRef = ForwardRef("PersonPet") class Pet(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Person4(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) pets: List[Pet] = ormar.ManyToMany( Pet, through=PersonPetRef, related_name="owners" ) class PersonPet(ormar.Model): class Meta(ModelMeta): metadata = metadata database = db with pytest.raises(ModelError): await Person4.objects.create(name="Test") with pytest.raises(ModelError): Person4(name="Test") with pytest.raises(ModelError): await Person4.objects.get() def test_proper_field_init(): assert "supervisor" in Person.Meta.model_fields assert Person.Meta.model_fields["supervisor"].to == Person assert "supervisor" in Person.__fields__ assert Person.__fields__["supervisor"].type_ == 
Person assert "supervisor" in Person.Meta.table.columns assert isinstance( Person.Meta.table.columns["supervisor"].type, sa.sql.sqltypes.Integer ) assert len(Person.Meta.table.columns["supervisor"].foreign_keys) > 0 assert "person_supervisor" in Person.Meta.alias_manager._aliases_new @pytest.mark.asyncio async def test_self_relation(): async with db: async with db.transaction(force_rollback=True): sam = await Person.objects.create(name="Sam") joe = await Person(name="Joe", supervisor=sam).save() assert joe.supervisor.name == "Sam" joe_check = await Person.objects.select_related("supervisor").get( name="Joe" ) assert joe_check.supervisor.name == "Sam" sam_check = await Person.objects.select_related("employees").get(name="Sam") assert sam_check.name == "Sam" assert sam_check.employees[0].name == "Joe" @pytest.mark.asyncio async def test_other_forwardref_relation(cleanup): async with db: async with db.transaction(force_rollback=True): checkers = await Game.objects.create(name="checkers") uno = await Game(name="Uno").save() await Child( name="Billy", favourite_game=uno, least_favourite_game=checkers ).save() await Child( name="Kate", favourite_game=checkers, least_favourite_game=uno ).save() billy_check = await Child.objects.select_related( ["favourite_game", "least_favourite_game"] ).get(name="Billy") assert billy_check.favourite_game == uno assert billy_check.least_favourite_game == checkers uno_check = await Game.objects.select_related( ["liked_by", "not_liked_by"] ).get(name="Uno") assert uno_check.liked_by[0].name == "Billy" assert uno_check.not_liked_by[0].name == "Kate" @pytest.mark.asyncio async def test_m2m_self_forwardref_relation(cleanup): async with db: async with db.transaction(force_rollback=True): checkers = await Game.objects.create(name="Checkers") uno = await Game(name="Uno").save() jenga = await Game(name="Jenga").save() billy = await Child( name="Billy", favourite_game=uno, least_favourite_game=checkers ).save() kate = await Child( name="Kate", 
favourite_game=checkers, least_favourite_game=uno ).save() steve = await Child( name="Steve", favourite_game=jenga, least_favourite_game=uno ).save() await billy.friends.add(kate) await billy.friends.add(steve) billy_check = await Child.objects.select_related( [ "friends", "favourite_game", "least_favourite_game", "friends__favourite_game", "friends__least_favourite_game", ] ).get(name="Billy") assert len(billy_check.friends) == 2 assert billy_check.friends[0].name == "Kate" assert billy_check.friends[0].favourite_game.name == "Checkers" assert billy_check.friends[0].least_favourite_game.name == "Uno" assert billy_check.friends[1].name == "Steve" assert billy_check.friends[1].favourite_game.name == "Jenga" assert billy_check.friends[1].least_favourite_game.name == "Uno" assert billy_check.favourite_game.name == "Uno" kate_check = await Child.objects.select_related(["also_friends"]).get( name="Kate" ) assert len(kate_check.also_friends) == 1 assert kate_check.also_friends[0].name == "Billy" billy_check = ( await Child.objects.select_related( [ "friends", "favourite_game", "least_favourite_game", "friends__favourite_game", "friends__least_favourite_game", ] ) .filter(friends__favourite_game__name="Checkers") .get(name="Billy") ) assert len(billy_check.friends) == 1 assert billy_check.friends[0].name == "Kate" assert billy_check.friends[0].favourite_game.name == "Checkers" assert billy_check.friends[0].least_favourite_game.name == "Uno" ormar-0.12.2/tests/test_deferred/test_more_same_table_joins.py000066400000000000000000000130211444363446500246400ustar00rootroot00000000000000import asyncio from typing import Optional import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Department(ormar.Model): class Meta: tablename = "departments" metadata = metadata database = database id: int = ormar.Integer(primary_key=True, 
autoincrement=False) name: str = ormar.String(max_length=100) class SchoolClass(ormar.Model): class Meta: tablename = "schoolclasses" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Category(ormar.Model): class Meta: tablename = "categories" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) department: Optional[Department] = ormar.ForeignKey(Department, nullable=False) class Student(ormar.Model): class Meta: tablename = "students" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) schoolclass: Optional[SchoolClass] = ormar.ForeignKey(SchoolClass) category: Optional[Category] = ormar.ForeignKey(Category, nullable=True) class Teacher(ormar.Model): class Meta: tablename = "teachers" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) schoolclass: Optional[SchoolClass] = ormar.ForeignKey(SchoolClass) category: Optional[Category] = ormar.ForeignKey(Category, nullable=True) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) async def create_data(): department = await Department.objects.create(id=1, name="Math Department") department2 = await Department.objects.create(id=2, name="Law Department") class1 = await SchoolClass.objects.create(name="Math") class2 = await SchoolClass.objects.create(name="Logic") category = await Category.objects.create(name="Foreign", department=department) category2 = await Category.objects.create(name="Domestic", department=department2) await Student.objects.create(name="Jane", category=category, schoolclass=class1) await Student.objects.create(name="Judy", category=category2, schoolclass=class1) 
await Student.objects.create(name="Jack", category=category2, schoolclass=class2) await Teacher.objects.create(name="Joe", category=category2, schoolclass=class1) @pytest.mark.asyncio async def test_model_multiple_instances_of_same_table_in_schema(): async with database: await create_data() classes = await SchoolClass.objects.select_related( ["teachers__category__department", "students__category__department"] ).all() assert classes[0].name == "Math" assert classes[0].students[0].name == "Jane" assert len(classes[0].dict().get("students")) == 2 assert classes[0].teachers[0].category.department.name == "Law Department" assert classes[0].students[0].category.department.name == "Math Department" @pytest.mark.asyncio async def test_load_all_multiple_instances_of_same_table_in_schema(): async with database: await create_data() math_class = await SchoolClass.objects.get(name="Math") assert math_class.name == "Math" await math_class.load_all(follow=True) assert math_class.students[0].name == "Jane" assert len(math_class.dict().get("students")) == 2 assert math_class.teachers[0].category.department.name == "Law Department" assert math_class.students[0].category.department.name == "Math Department" @pytest.mark.asyncio async def test_filter_groups_with_instances_of_same_table_in_schema(): async with database: await create_data() math_class = ( await SchoolClass.objects.select_related( ["teachers__category__department", "students__category__department"] ) .filter( ormar.or_( students__name="Jane", teachers__category__name="Domestic", students__category__name="Foreign", ) ) .get(name="Math") ) assert math_class.name == "Math" assert math_class.students[0].name == "Jane" assert len(math_class.dict().get("students")) == 2 assert math_class.teachers[0].category.department.name == "Law Department" assert math_class.students[0].category.department.name == "Math Department" classes = ( await SchoolClass.objects.select_related( ["students__category__department", 
"teachers__category__department"] ) .filter( ormar.and_( ormar.or_( students__name="Jane", students__category__name="Foreign" ), teachers__category__department__name="Law Department", ) ) .all() ) assert len(classes) == 1 assert classes[0].teachers[0].category.department.name == "Law Department" assert classes[0].students[0].category.department.name == "Math Department" ormar-0.12.2/tests/test_deferred/test_same_table_joins.py000066400000000000000000000121661444363446500236270ustar00rootroot00000000000000import asyncio from typing import Optional import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Department(ormar.Model): class Meta: tablename = "departments" metadata = metadata database = database id: int = ormar.Integer(primary_key=True, autoincrement=False) name: str = ormar.String(max_length=100) class SchoolClass(ormar.Model): class Meta: tablename = "schoolclasses" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) department: Optional[Department] = ormar.ForeignKey(Department, nullable=False) class Category(ormar.Model): class Meta: tablename = "categories" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Student(ormar.Model): class Meta: tablename = "students" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) schoolclass: Optional[SchoolClass] = ormar.ForeignKey(SchoolClass) category: Optional[Category] = ormar.ForeignKey(Category, nullable=True) class Teacher(ormar.Model): class Meta: tablename = "teachers" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) schoolclass: Optional[SchoolClass] = 
ormar.ForeignKey(SchoolClass) category: Optional[Category] = ormar.ForeignKey(Category, nullable=True) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) async def create_data(): department = await Department.objects.create(id=1, name="Math Department") department2 = await Department.objects.create(id=2, name="Law Department") class1 = await SchoolClass.objects.create(name="Math", department=department) class2 = await SchoolClass.objects.create(name="Logic", department=department2) category = await Category.objects.create(name="Foreign") category2 = await Category.objects.create(name="Domestic") await Student.objects.create(name="Jane", category=category, schoolclass=class1) await Student.objects.create(name="Judy", category=category2, schoolclass=class1) await Student.objects.create(name="Jack", category=category2, schoolclass=class2) await Teacher.objects.create(name="Joe", category=category2, schoolclass=class1) @pytest.mark.asyncio async def test_model_multiple_instances_of_same_table_in_schema(): async with database: async with database.transaction(force_rollback=True): await create_data() classes = await SchoolClass.objects.select_related( ["teachers__category", "students__schoolclass"] ).all() assert classes[0].name == "Math" assert classes[0].students[0].name == "Jane" assert len(classes[0].dict().get("students")) == 2 # since it's going from schoolclass => teacher => schoolclass (same class) department is already populated assert classes[0].students[0].schoolclass.name == "Math" assert classes[0].students[0].schoolclass.department.name is None await classes[0].students[0].schoolclass.department.load() assert ( classes[0].students[0].schoolclass.department.name == "Math Department" ) await classes[1].students[0].schoolclass.department.load() assert ( classes[1].students[0].schoolclass.department.name == 
"Law Department" ) @pytest.mark.asyncio async def test_right_tables_join(): async with database: async with database.transaction(force_rollback=True): await create_data() classes = await SchoolClass.objects.select_related( ["teachers__category", "students"] ).all() assert classes[0].teachers[0].category.name == "Domestic" assert classes[0].students[0].category.name is None await classes[0].students[0].category.load() assert classes[0].students[0].category.name == "Foreign" @pytest.mark.asyncio async def test_multiple_reverse_related_objects(): async with database: async with database.transaction(force_rollback=True): await create_data() classes = await SchoolClass.objects.select_related( ["teachers__category", "students__category"] ).all() assert classes[0].name == "Math" assert classes[0].students[1].name == "Judy" assert classes[0].students[0].category.name == "Foreign" assert classes[0].students[1].category.name == "Domestic" assert classes[0].teachers[0].category.name == "Domestic" ormar-0.12.2/tests/test_encryption/000077500000000000000000000000001444363446500173245ustar00rootroot00000000000000ormar-0.12.2/tests/test_encryption/__init__.py000066400000000000000000000000001444363446500214230ustar00rootroot00000000000000ormar-0.12.2/tests/test_encryption/test_encrypted_columns.py000066400000000000000000000205171444363446500244770ustar00rootroot00000000000000# type: ignore import base64 import decimal import hashlib import uuid import datetime from typing import Any import databases import pytest import sqlalchemy import ormar from ormar import ModelDefinitionError, NoMatch from ormar.fields.sqlalchemy_encrypted import EncryptedString from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database default_fernet = dict( encrypt_secret="asd123", encrypt_backend=ormar.EncryptBackends.FERNET ) class DummyBackend(ormar.fields.EncryptBackend): 
def _initialize_backend(self, secret_key: bytes) -> None: pass def encrypt(self, value: Any) -> str: return value def decrypt(self, value: Any) -> str: return value class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, **default_fernet) uuid_test = ormar.UUID(default=uuid.uuid4, uuid_format="string") uuid_test2 = ormar.UUID(nullable=True, uuid_format="string") password: str = ormar.String( max_length=128, encrypt_secret="udxc32", encrypt_backend=ormar.EncryptBackends.HASH, ) birth_year: int = ormar.Integer( nullable=True, encrypt_secret="secure89key%^&psdijfipew", encrypt_backend=ormar.EncryptBackends.FERNET, ) test_text: str = ormar.Text(default="", **default_fernet) test_bool: bool = ormar.Boolean(nullable=False, **default_fernet) test_float: float = ormar.Float(**default_fernet) test_float2: float = ormar.Float(nullable=True, **default_fernet) test_datetime = ormar.DateTime(default=datetime.datetime.now, **default_fernet) test_date = ormar.Date(default=datetime.date.today, **default_fernet) test_time = ormar.Time(default=datetime.time, **default_fernet) test_json = ormar.JSON(default={}, **default_fernet) test_bigint: int = ormar.BigInteger(default=0, **default_fernet) test_smallint: int = ormar.SmallInteger(default=0, **default_fernet) test_decimal = ormar.Decimal(scale=2, precision=10, **default_fernet) test_decimal2 = ormar.Decimal(max_digits=10, decimal_places=2, **default_fernet) custom_backend: str = ormar.String( max_length=200, encrypt_secret="asda8", encrypt_backend=ormar.EncryptBackends.CUSTOM, encrypt_custom_backend=DummyBackend, ) class Hash(ormar.Model): class Meta(BaseMeta): tablename = "hashes" id: int = ormar.Integer(primary_key=True) name: str = ormar.String( max_length=128, encrypt_secret="udxc32", encrypt_backend=ormar.EncryptBackends.HASH, ) class Filter(ormar.Model): class Meta(BaseMeta): tablename = "filters" id: int = 
ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, **default_fernet) hash = ormar.ForeignKey(Hash) class Report(ormar.Model): class Meta(BaseMeta): tablename = "reports" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) filters = ormar.ManyToMany(Filter) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) def test_error_on_encrypted_pk(): with pytest.raises(ModelDefinitionError): class Wrong(ormar.Model): class Meta(BaseMeta): tablename = "wrongs" id: int = ormar.Integer( primary_key=True, encrypt_secret="asd123", encrypt_backend=ormar.EncryptBackends.FERNET, ) def test_error_on_encrypted_relation(): with pytest.raises(ModelDefinitionError): class Wrong2(ormar.Model): class Meta(BaseMeta): tablename = "wrongs2" id: int = ormar.Integer(primary_key=True) author = ormar.ForeignKey( Author, encrypt_secret="asd123", encrypt_backend=ormar.EncryptBackends.FERNET, ) def test_error_on_encrypted_m2m_relation(): with pytest.raises(ModelDefinitionError): class Wrong3(ormar.Model): class Meta(BaseMeta): tablename = "wrongs3" id: int = ormar.Integer(primary_key=True) author = ormar.ManyToMany( Author, encrypt_secret="asd123", encrypt_backend=ormar.EncryptBackends.FERNET, ) def test_wrong_backend(): with pytest.raises(ModelDefinitionError): class Wrong3(ormar.Model): class Meta(BaseMeta): tablename = "wrongs3" id: int = ormar.Integer(primary_key=True) author = ormar.Integer( encrypt_secret="asd123", encrypt_backend=ormar.EncryptBackends.CUSTOM, encrypt_custom_backend="aa", ) def test_db_structure(): assert Author.Meta.table.c.get("name").type.__class__ == EncryptedString @pytest.mark.asyncio async def test_save_and_retrieve(): async with database: test_uuid = uuid.uuid4() await Author( name="Test", birth_year=1988, password="test123", uuid_test=test_uuid, test_float=1.2, 
test_bool=True, test_decimal=decimal.Decimal(3.5), test_decimal2=decimal.Decimal(5.5), test_json=dict(aa=12), custom_backend="test12", ).save() author = await Author.objects.get() assert author.name == "Test" assert author.birth_year == 1988 password = ( "03e4a4d513e99cb3fe4ee3db282c053daa3f3572b849c3868939a306944ad5c08" "22b50d4886e10f4cd418c3f2df3ceb02e2e7ac6e920ae0c90f2dedfc8fa16e2" ) assert author.password == password assert author.uuid_test == test_uuid assert author.uuid_test2 is None assert author.test_datetime.date() == datetime.date.today() assert author.test_date == datetime.date.today() assert author.test_text == "" assert author.test_float == 1.2 assert author.test_float2 is None assert author.test_bigint == 0 assert author.test_json == {"aa": 12} assert author.test_decimal == 3.5 assert author.test_decimal2 == 5.5 assert author.custom_backend == "test12" @pytest.mark.asyncio async def test_fernet_filters_nomatch(): async with database: await Filter(name="test1").save() await Filter(name="test1").save() filters = await Filter.objects.all() assert filters[0].name == filters[1].name == "test1" with pytest.raises(NoMatch): await Filter.objects.get(name="test1") assert await Filter.objects.get_or_none(name="test1") is None @pytest.mark.asyncio async def test_hash_filters_works(): async with database: await Hash(name="test1").save() await Hash(name="test2").save() secret = hashlib.sha256("udxc32".encode()).digest() secret = base64.urlsafe_b64encode(secret) hashed_test1 = hashlib.sha512(secret + "test1".encode()).hexdigest() hash1 = await Hash.objects.get(name="test1") assert hash1.name == hashed_test1 with pytest.raises(NoMatch): await Filter.objects.get(name__icontains="test") @pytest.mark.asyncio async def test_related_model_fields_properly_decrypted(): async with database: hash1 = await Hash(name="test1").save() report = await Report.objects.create(name="Report1") await report.filters.create(name="test1", hash=hash1) await 
report.filters.create(name="test2") report2 = await Report.objects.select_related("filters").get() assert report2.filters[0].name == "test1" assert report2.filters[1].name == "test2" secret = hashlib.sha256("udxc32".encode()).digest() secret = base64.urlsafe_b64encode(secret) hashed_test1 = hashlib.sha512(secret + "test1".encode()).hexdigest() report2 = await Report.objects.select_related("filters__hash").get() assert report2.filters[0].name == "test1" assert report2.filters[0].hash.name == hashed_test1 ormar-0.12.2/tests/test_exclude_include_dict/000077500000000000000000000000001444363446500212715ustar00rootroot00000000000000ormar-0.12.2/tests/test_exclude_include_dict/__init__.py000066400000000000000000000000001444363446500233700ustar00rootroot00000000000000ormar-0.12.2/tests/test_exclude_include_dict/test_complex_relation_tree_performance.py000066400000000000000000000374011444363446500316530ustar00rootroot00000000000000from datetime import datetime from typing import List, Optional, Union import databases import pydantic import pytest import sqlalchemy import ormar as orm from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class MainMeta(orm.ModelMeta): database = database metadata = metadata class ChagenlogRelease(orm.Model): id: int = orm.Integer(name="id", primary_key=True) class Meta(MainMeta): tablename = "changelog_release" class CommitIssue(orm.Model): id: int = orm.Integer(name="id", primary_key=True) class Meta(MainMeta): tablename = "commit_issues" class CommitLabel(orm.Model): id: int = orm.Integer(name="id", primary_key=True) class Meta(MainMeta): tablename = "commit_label" class MergeRequestCommit(orm.Model): id: int = orm.Integer(name="id", primary_key=True) class Meta(MainMeta): tablename = "merge_request_commits" class MergeRequestIssue(orm.Model): id: int = orm.Integer(name="id", primary_key=True) class Meta(MainMeta): tablename = "merge_request_issues" class 
MergeRequestLabel(orm.Model): id: int = orm.Integer(name="id", primary_key=True) class Meta(MainMeta): tablename = "merge_request_labels" class ProjectLabel(orm.Model): id: int = orm.Integer(name="id", primary_key=True) class Meta(MainMeta): tablename = "project_label" class PushCommit(orm.Model): id: int = orm.Integer(name="id", primary_key=True) class Meta(MainMeta): tablename = "push_commit" class PushLabel(orm.Model): id: int = orm.Integer(name="id", primary_key=True) class Meta(MainMeta): tablename = "push_label" class TagCommit(orm.Model): id: int = orm.Integer(name="id", primary_key=True) class Meta(MainMeta): tablename = "tag_commits" class TagIssue(orm.Model): id: int = orm.Integer(name="id", primary_key=True) class Meta(MainMeta): tablename = "tag_issue" class TagLabel(orm.Model): id: int = orm.Integer(name="id", primary_key=True) class Meta(MainMeta): tablename = "tag_label" class UserProject(orm.Model): id: int = orm.Integer(name="id", primary_key=True) access_level: int = orm.Integer(default=0) class Meta(MainMeta): tablename = "user_project" class Label(orm.Model): id: int = orm.Integer(name="id", primary_key=True) title: str = orm.String(max_length=100) description: str = orm.Text(default="") type: str = orm.String(max_length=100, default="") class Meta(MainMeta): tablename = "labels" class Project(orm.Model): id: int = orm.Integer(name="id", primary_key=True) name: str = orm.String(max_length=100) description: str = orm.Text(default="") git_url: str = orm.String(max_length=500, default="") labels: Optional[Union[List[Label], Label]] = orm.ManyToMany( Label, through=ProjectLabel, ondelete="CASCADE", onupdate="CASCADE" ) changelog_jira_tag: str = orm.String(max_length=100, default="") change_type_jira_tag: str = orm.String(max_length=100, default="") jira_prefix: str = orm.String(max_length=10, default="SAN") type: str = orm.String(max_length=10, default="cs") target_branch_name: str = orm.String(max_length=100, default="master") header: str = 
orm.String(max_length=250, default="") jira_url: str = orm.String(max_length=500) changelog_file: str = orm.String(max_length=250, default="") version_file: str = orm.String(max_length=250, default="") class Meta(MainMeta): tablename = "projects" class Issue(orm.Model): id: int = orm.Integer(name="id", primary_key=True) summary: str = orm.Text(default="") description: str = orm.Text(default="") changelog: str = orm.Text(default="") link: str = orm.String(max_length=500) issue_type: str = orm.String(max_length=100) key: str = orm.String(max_length=100) change_type: str = orm.String(max_length=100, default="") data: pydantic.Json = orm.JSON(default={}) class Meta(MainMeta): tablename = "issues" class User(orm.Model): id: int = orm.Integer(name="id", primary_key=True) username: str = orm.String(max_length=100, unique=True) name: str = orm.String(max_length=200, default="") class Meta(MainMeta): tablename = "users" class Branch(orm.Model): id: int = orm.Integer(name="id", primary_key=True) name: str = orm.String(max_length=200) description: str = orm.Text(default="") automatic_tags: bool = orm.Boolean(default=False) is_it_locked: bool = orm.Boolean(default=True) prefix_tag: str = orm.String(max_length=50, default="") postfix_tag: str = orm.String(max_length=50, default="") project: Project = orm.ForeignKey(Project, ondelete="CASCADE", onupdate="CASCADE") class Meta(MainMeta): tablename = "branches" class Changelog(orm.Model): id: int = orm.Integer(name="id", primary_key=True) content: str = orm.Text(default="") version: str = orm.Text(default="") past_changelog: int = orm.Integer(default=0) label: Label = orm.ForeignKey( Label, nullable=True, ondelete="CASCADE", onupdate="CASCADE" ) project: Project = orm.ForeignKey(Project, ondelete="CASCADE", onupdate="CASCADE") created_date: datetime = orm.DateTime(default=datetime.utcnow()) class Meta(MainMeta): tablename = "changelogs" class Commit(orm.Model): id: str = orm.String(max_length=500, primary_key=True) short_id: str = 
orm.String(max_length=500) title: str = orm.String(max_length=500) message: str = orm.Text(default="") url = orm.String(max_length=500, default="") author_name = orm.String(max_length=500, default="") labels: Optional[Union[List[Label], Label]] = orm.ManyToMany( Label, through=CommitLabel, ondelete="CASCADE", onupdate="CASCADE" ) issues: Optional[Union[List[Issue], Issue]] = orm.ManyToMany( Issue, through=CommitIssue, ondelete="CASCADE", onupdate="CASCADE" ) class Meta(MainMeta): tablename = "commits" class MergeRequest(orm.Model): id: int = orm.Integer(name="id", primary_key=True) idd: int = orm.Integer(default=0) title: str = orm.String(max_length=500) state: str = orm.String(max_length=100) merge_status: str = orm.String(max_length=100) description: str = orm.Text(default="") source: Branch = orm.ForeignKey(Branch, related_name="source") target: Branch = orm.ForeignKey(Branch, related_name="target") labels: Optional[Union[List[Label], Label]] = orm.ManyToMany( Label, through=MergeRequestLabel, ondelete="CASCADE", onupdate="CASCADE" ) commits: Optional[Union[List[Commit], Commit]] = orm.ManyToMany( Commit, through=MergeRequestCommit, ondelete="CASCADE", onupdate="CASCADE" ) issues: Optional[Union[List[Issue], Issue]] = orm.ManyToMany( Issue, through=MergeRequestIssue, ondelete="CASCADE", onupdate="CASCADE" ) project: Project = orm.ForeignKey(Project, ondelete="CASCADE", onupdate="CASCADE") class Meta(MainMeta): tablename = "merge_requests" class Push(orm.Model): id: int = orm.Integer(name="id", primary_key=True) branch: Branch = orm.ForeignKey( Branch, nullable=True, ondelete="CASCADE", onupdate="CASCADE" ) has_locking_changes: bool = orm.Boolean(default=False) sha: str = orm.String(max_length=200) labels: Optional[Union[List[Label], Label]] = orm.ManyToMany( Label, through=PushLabel, ondelete="CASCADE", onupdate="CASCADE" ) commits: Optional[Union[List[Commit], Commit]] = orm.ManyToMany( Commit, through=PushCommit, through_relation_name="push", 
through_reverse_relation_name="commit_id", ondelete="CASCADE", onupdate="CASCADE", ) author: User = orm.ForeignKey(User, ondelete="CASCADE", onupdate="CASCADE") project: Project = orm.ForeignKey(Project, ondelete="CASCADE", onupdate="CASCADE") class Meta(MainMeta): tablename = "pushes" class Tag(orm.Model): id: int = orm.Integer(name="id", primary_key=True) name: str = orm.String(max_length=200) ref: str = orm.String(max_length=200) project: Project = orm.ForeignKey(Project, ondelete="CASCADE", onupdate="CASCADE") title: str = orm.String(max_length=200, default="") description: str = orm.Text(default="") commits: Optional[Union[List[Commit], Commit]] = orm.ManyToMany( Commit, through=TagCommit, through_relation_name="tag", through_reverse_relation_name="commit_id", ondelete="CASCADE", onupdate="CASCADE", ) issues: Optional[Union[List[Issue], Issue]] = orm.ManyToMany( Issue, through=TagIssue, ondelete="CASCADE", onupdate="CASCADE" ) labels: Optional[Union[List[Label], Label]] = orm.ManyToMany( Label, through=TagLabel, ondelete="CASCADE", onupdate="CASCADE" ) user: User = orm.ForeignKey( User, nullable=True, ondelete="CASCADE", onupdate="CASCADE" ) branch: Branch = orm.ForeignKey( Branch, nullable=True, ondelete="CASCADE", onupdate="CASCADE" ) class Meta(MainMeta): tablename = "tags" class Release(orm.Model): id: int = orm.Integer(name="id", primary_key=True) title: str = orm.String(max_length=200, default="") description: str = orm.Text(default="") tag: Tag = orm.ForeignKey(Tag, ondelete="CASCADE", onupdate="CASCADE") changelogs: List[Changelog] = orm.ManyToMany( Changelog, through=ChagenlogRelease, ondelete="CASCADE", onupdate="CASCADE" ) data: pydantic.Json = orm.JSON(default={}) class Meta(MainMeta): tablename = "releases" class Webhook(orm.Model): id: int = orm.Integer(name="id", primary_key=True) object_kind = orm.String(max_length=100) project: Project = orm.ForeignKey(Project, ondelete="CASCADE", onupdate="CASCADE") merge_request: MergeRequest = 
orm.ForeignKey( MergeRequest, nullable=True, ondelete="CASCADE", onupdate="CASCADE" ) tag: Tag = orm.ForeignKey( Tag, nullable=True, ondelete="CASCADE", onupdate="CASCADE" ) push: Push = orm.ForeignKey( Push, nullable=True, ondelete="CASCADE", onupdate="CASCADE" ) created_at: datetime = orm.DateTime(default=datetime.now()) data: pydantic.Json = orm.JSON(default={}) status: int = orm.Integer(default=200) error: str = orm.Text(default="") @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_very_complex_relation_map(): async with database: tags = [ {"id": 18, "name": "name-18", "ref": "ref-18"}, {"id": 17, "name": "name-17", "ref": "ref-17"}, {"id": 12, "name": "name-12", "ref": "ref-12"}, ] payload = [ { "id": 9, "title": "prueba-2321", "description": "\n\n### [v.1.3.0.0] - 2021-08-19\n#### Resolved Issues\n\n#### Task\n\n- Probar flujo de changelog Automatic Jira: [SAN-86](https://htech.atlassian.net/browse/SAN-86)\n\n Description: Se probara el flujo de changelog automatic. \n\n Changelog: Se agrega función para extraer texto del campo changelog del dashboard de Sanval y ponerlo directamente en el changelog.md del repositorio. \n\n\n \n\n", "data": {}, }, { "id": 8, "title": "prueba-123-prod", "description": "\n\n### [v.1.2.0.0] - 2021-08-19\n#### Resolved Issues\n\n#### Task\n\n- Probar flujo de changelog Automatic Jira: [SAN-86](https://htech.atlassian.net/browse/SAN-86)\n\n Description: Se probara el flujo de changelog automatic. \n\n Changelog: Se agrega función para extraer texto del campo changelog del dashboard de Sanval y ponerlo directamente en el changelog.md del repositorio. 
\n\n\n \n\n", "data": {}, }, { "id": 6, "title": "prueba-3-2", "description": "\n\n### [v.1.1.0.0] - 2021-07-29\n#### Resolved Issues\n\n#### Task\n\n- Probar flujo de changelog Automatic Jira: [SAN-86](https://htech.atlassian.net/browse/SAN-86)\n\n Description: Se probara el flujo de changelog automatic. \n\n Changelog: Se agrega función para extraer texto del campo changelog del dashboard de Sanval y ponerlo directamente en el changelog.md del repositorio. \n\n\n \n\n", "data": {}, }, ] saved_tags = [] for tag in tags: saved_tags.append(await Tag(**tag).save()) for ind, pay in enumerate(payload): await Release(**pay, tag=saved_tags[ind]).save() releases = await Release.objects.order_by(Release.id.desc()).all() dicts = [release.dict() for release in releases] result = [ { "id": 9, "title": "prueba-2321", "description": "\n\n### [v.1.3.0.0] - 2021-08-19\n#### Resolved Issues\n\n#### Task\n\n- Probar flujo de changelog Automatic Jira: [SAN-86](https://htech.atlassian.net/browse/SAN-86)\n\n Description: Se probara el flujo de changelog automatic. \n\n Changelog: Se agrega función para extraer texto del campo changelog del dashboard de Sanval y ponerlo directamente en el changelog.md del repositorio. \n\n\n \n\n", "data": {}, "tag": { "id": 18, "taglabel": None, "tagcommit": None, "tagissue": None, }, "changelogs": [], }, { "id": 8, "title": "prueba-123-prod", "description": "\n\n### [v.1.2.0.0] - 2021-08-19\n#### Resolved Issues\n\n#### Task\n\n- Probar flujo de changelog Automatic Jira: [SAN-86](https://htech.atlassian.net/browse/SAN-86)\n\n Description: Se probara el flujo de changelog automatic. \n\n Changelog: Se agrega función para extraer texto del campo changelog del dashboard de Sanval y ponerlo directamente en el changelog.md del repositorio. 
\n\n\n \n\n", "data": {}, "tag": { "id": 17, "taglabel": None, "tagcommit": None, "tagissue": None, }, "changelogs": [], }, { "id": 6, "title": "prueba-3-2", "description": "\n\n### [v.1.1.0.0] - 2021-07-29\n#### Resolved Issues\n\n#### Task\n\n- Probar flujo de changelog Automatic Jira: [SAN-86](https://htech.atlassian.net/browse/SAN-86)\n\n Description: Se probara el flujo de changelog automatic. \n\n Changelog: Se agrega función para extraer texto del campo changelog del dashboard de Sanval y ponerlo directamente en el changelog.md del repositorio. \n\n\n \n\n", "data": {}, "tag": { "id": 12, "taglabel": None, "tagcommit": None, "tagissue": None, }, "changelogs": [], }, ] assert dicts == result ormar-0.12.2/tests/test_exclude_include_dict/test_dumping_model_to_dict.py000066400000000000000000000133301444363446500272320ustar00rootroot00000000000000from typing import List, Optional import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL metadata = sqlalchemy.MetaData() database = databases.Database(DATABASE_URL, force_rollback=True) class MainMeta(ormar.ModelMeta): metadata = metadata database = database class Role(ormar.Model): class Meta(MainMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=255, nullable=False) class User(ormar.Model): class Meta(MainMeta): tablename: str = "users" id: int = ormar.Integer(primary_key=True) email: str = ormar.String(max_length=255, nullable=False) password: str = ormar.String(max_length=255, nullable=True) first_name: str = ormar.String(max_length=255, nullable=False) roles: List[Role] = ormar.ManyToMany(Role) class Tier(ormar.Model): class Meta: tablename = "tiers" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Category(ormar.Model): class Meta: tablename = "categories" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = 
ormar.String(max_length=100) tier: Optional[Tier] = ormar.ForeignKey(Tier) class Item(ormar.Model): class Meta: tablename = "items" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) category: Optional[Category] = ormar.ForeignKey(Category, nullable=True) created_by: Optional[User] = ormar.ForeignKey(User) @pytest.fixture(autouse=True, scope="module") def sample_data(): role = Role(name="User", id=1) role2 = Role(name="Admin", id=2) user = User( id=1, email="test@test.com", password="ijacids7^*&", first_name="Anna", roles=[role, role2], ) tier = Tier(id=1, name="Tier I") category1 = Category(id=1, name="Toys", tier=tier) category2 = Category(id=2, name="Weapons", tier=tier) item1 = Item(id=1, name="Teddy Bear", category=category1, created_by=user) item2 = Item(id=2, name="M16", category=category2, created_by=user) return item1, item2 def test_dumping_to_dict_no_exclusion(sample_data): item1, item2 = sample_data dict1 = item1.dict() assert dict1["name"] == "Teddy Bear" assert dict1["category"]["name"] == "Toys" assert dict1["category"]["tier"]["name"] == "Tier I" assert dict1["created_by"]["email"] == "test@test.com" dict2 = item2.dict() assert dict2["name"] == "M16" assert dict2["category"]["name"] == "Weapons" assert dict2["created_by"]["email"] == "test@test.com" def test_dumping_to_dict_exclude_set(sample_data): item1, item2 = sample_data dict3 = item2.dict(exclude={"name"}) assert "name" not in dict3 assert dict3["category"]["name"] == "Weapons" assert dict3["created_by"]["email"] == "test@test.com" dict4 = item2.dict(exclude={"category"}) assert dict4["name"] == "M16" assert "category" not in dict4 assert dict4["created_by"]["email"] == "test@test.com" dict5 = item2.dict(exclude={"category", "name"}) assert "name" not in dict5 assert "category" not in dict5 assert dict5["created_by"]["email"] == "test@test.com" def test_dumping_to_dict_exclude_dict(sample_data): item1, item2 = sample_data 
dict6 = item2.dict(exclude={"category": {"name"}, "name": ...}) assert "name" not in dict6 assert "category" in dict6 assert "name" not in dict6["category"] assert dict6["created_by"]["email"] == "test@test.com" def test_dumping_to_dict_exclude_nested_dict(sample_data): item1, item2 = sample_data dict1 = item2.dict(exclude={"category": {"tier": {"name"}}, "name": ...}) assert "name" not in dict1 assert "category" in dict1 assert dict1["category"]["name"] == "Weapons" assert dict1["created_by"]["email"] == "test@test.com" assert dict1["category"]["tier"].get("name") is None def test_dumping_to_dict_exclude_and_include_nested_dict(sample_data): item1, item2 = sample_data dict1 = item2.dict( exclude={"category": {"tier": {"name"}}}, include={"name", "category"} ) assert dict1.get("name") == "M16" assert "category" in dict1 assert dict1["category"]["name"] == "Weapons" assert "created_by" not in dict1 assert dict1["category"]["tier"].get("name") is None dict2 = item1.dict( exclude={"id": ...}, include={"name": ..., "category": {"name": ..., "tier": {"id"}}}, ) assert dict2.get("name") == "Teddy Bear" assert dict2.get("id") is None # models not saved assert dict2["category"]["name"] == "Toys" assert "created_by" not in dict1 assert dict1["category"]["tier"].get("name") is None assert dict1["category"]["tier"]["id"] == 1 def test_dumping_dict_without_primary_keys(sample_data): item1, item2 = sample_data dict1 = item2.dict(exclude_primary_keys=True) assert dict1 == { "category": {"name": "Weapons", "tier": {"name": "Tier I"}}, "created_by": { "email": "test@test.com", "first_name": "Anna", "password": "ijacids7^*&", "roles": [{"name": "User"}, {"name": "Admin"}], }, "name": "M16", } dict2 = item1.dict(exclude_primary_keys=True) assert dict2 == { "category": {"name": "Toys", "tier": {"name": "Tier I"}}, "created_by": { "email": "test@test.com", "first_name": "Anna", "password": "ijacids7^*&", "roles": [{"name": "User"}, {"name": "Admin"}], }, "name": "Teddy Bear", } 
ormar-0.12.2/tests/test_exclude_include_dict/test_excludable_items.py000066400000000000000000000150541444363446500262200ustar00rootroot00000000000000from typing import List, Optional import databases import sqlalchemy import ormar from ormar.models.excludable import ExcludableItems from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): database = database metadata = metadata class NickNames(ormar.Model): class Meta(BaseMeta): tablename = "nicks" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="hq_name") is_lame: bool = ormar.Boolean(nullable=True) class NicksHq(ormar.Model): class Meta(BaseMeta): tablename = "nicks_x_hq" class HQ(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="hq_name") nicks: List[NickNames] = ormar.ManyToMany(NickNames, through=NicksHq) class Company(ormar.Model): class Meta(BaseMeta): tablename = "companies" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="company_name") founded: int = ormar.Integer(nullable=True) hq: HQ = ormar.ForeignKey(HQ) class Car(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) manufacturer: Optional[Company] = ormar.ForeignKey(Company) name: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) gearbox_type: str = ormar.String(max_length=20, nullable=True) gears: int = ormar.Integer(nullable=True) aircon_type: str = ormar.String(max_length=20, nullable=True) def compare_results(excludable): car_excludable = excludable.get(Car) assert car_excludable.exclude == {"year", "gearbox_type", "gears", "aircon_type"} assert car_excludable.include == set() assert car_excludable.is_excluded("year") alias = 
Company.Meta.alias_manager.resolve_relation_alias(Car, "manufacturer") manu_excludable = excludable.get(Company, alias=alias) assert manu_excludable.exclude == {"founded"} assert manu_excludable.include == set() assert manu_excludable.is_excluded("founded") def compare_results_include(excludable): manager = Company.Meta.alias_manager car_excludable = excludable.get(Car) assert car_excludable.include == {"id", "name"} assert car_excludable.exclude == set() assert car_excludable.is_included("name") assert not car_excludable.is_included("gears") alias = manager.resolve_relation_alias(Car, "manufacturer") manu_excludable = excludable.get(Company, alias=alias) assert manu_excludable.include == {"name"} assert manu_excludable.exclude == set() assert manu_excludable.is_included("name") assert not manu_excludable.is_included("founded") alias = manager.resolve_relation_alias(Company, "hq") hq_excludable = excludable.get(HQ, alias=alias) assert hq_excludable.include == {"name"} assert hq_excludable.exclude == set() alias = manager.resolve_relation_alias(NicksHq, "nicknames") nick_excludable = excludable.get(NickNames, alias=alias) assert nick_excludable.include == {"name"} assert nick_excludable.exclude == set() def test_excluding_fields_from_list(): fields = ["gearbox_type", "gears", "aircon_type", "year", "manufacturer__founded"] excludable = ExcludableItems() excludable.build(items=fields, model_cls=Car, is_exclude=True) compare_results(excludable) def test_excluding_fields_from_dict(): fields = { "gearbox_type": ..., "gears": ..., "aircon_type": ..., "year": ..., "manufacturer": {"founded": ...}, } excludable = ExcludableItems() excludable.build(items=fields, model_cls=Car, is_exclude=True) compare_results(excludable) def test_excluding_fields_from_dict_with_set(): fields = { "gearbox_type": ..., "gears": ..., "aircon_type": ..., "year": ..., "manufacturer": {"founded"}, } excludable = ExcludableItems() excludable.build(items=fields, model_cls=Car, is_exclude=True) 
compare_results(excludable) def test_gradual_build_from_lists(): fields_col = [ "year", ["gearbox_type", "gears"], "aircon_type", ["manufacturer__founded"], ] excludable = ExcludableItems() for fields in fields_col: excludable.build(items=fields, model_cls=Car, is_exclude=True) compare_results(excludable) def test_nested_includes(): fields = [ "id", "name", "manufacturer__name", "manufacturer__hq__name", "manufacturer__hq__nicks__name", ] excludable = ExcludableItems() excludable.build(items=fields, model_cls=Car, is_exclude=False) compare_results_include(excludable) def test_nested_includes_from_dict(): fields = { "id": ..., "name": ..., "manufacturer": {"name": ..., "hq": {"name": ..., "nicks": {"name": ...}}}, } excludable = ExcludableItems() excludable.build(items=fields, model_cls=Car, is_exclude=False) compare_results_include(excludable) def test_nested_includes_from_dict_with_set(): fields = { "id": ..., "name": ..., "manufacturer": {"name": ..., "hq": {"name": ..., "nicks": {"name"}}}, } excludable = ExcludableItems() excludable.build(items=fields, model_cls=Car, is_exclude=False) compare_results_include(excludable) def test_includes_and_excludes_combo(): fields_inc1 = ["id", "name", "year", "gearbox_type", "gears"] fields_inc2 = {"manufacturer": {"name"}} fields_exc1 = {"manufacturer__founded"} fields_exc2 = "aircon_type" excludable = ExcludableItems() excludable.build(items=fields_inc1, model_cls=Car, is_exclude=False) excludable.build(items=fields_inc2, model_cls=Car, is_exclude=False) excludable.build(items=fields_exc1, model_cls=Car, is_exclude=True) excludable.build(items=fields_exc2, model_cls=Car, is_exclude=True) car_excludable = excludable.get(Car) assert car_excludable.include == {"id", "name", "year", "gearbox_type", "gears"} assert car_excludable.exclude == {"aircon_type"} assert car_excludable.is_excluded("aircon_type") assert car_excludable.is_included("name") alias = Company.Meta.alias_manager.resolve_relation_alias(Car, "manufacturer") 
manu_excludable = excludable.get(Company, alias=alias) assert manu_excludable.include == {"name"} assert manu_excludable.exclude == {"founded"} assert manu_excludable.is_excluded("founded") ormar-0.12.2/tests/test_exclude_include_dict/test_excluding_fields_in_fastapi.py000066400000000000000000000215341444363446500304140ustar00rootroot00000000000000import datetime import string import random import databases import pydantic import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from httpx import AsyncClient import ormar from ormar import post_save, property_field from tests.settings import DATABASE_URL app = FastAPI() metadata = sqlalchemy.MetaData() database = databases.Database(DATABASE_URL, force_rollback=True) app.state.database = database @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() # note that you can set orm_mode here # and in this case UserSchema become unnecessary class UserBase(pydantic.BaseModel): class Config: orm_mode = True email: str first_name: str last_name: str class UserCreateSchema(UserBase): password: str category: str class UserSchema(UserBase): class Config: orm_mode = True def gen_pass(): choices = string.ascii_letters + string.digits + "!@#$%^&*()" return "".join(random.choice(choices) for _ in range(20)) class RandomModel(ormar.Model): class Meta: tablename: str = "random_users" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) password: str = ormar.String(max_length=255, default=gen_pass) first_name: str = ormar.String(max_length=255, default="John") last_name: str = ormar.String(max_length=255) created_date: datetime.datetime = ormar.DateTime( server_default=sqlalchemy.func.now() ) @property_field def full_name(self) -> str: 
return " ".join([self.first_name, self.last_name]) class User(ormar.Model): class Meta: tablename: str = "users" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) email: str = ormar.String(max_length=255) password: str = ormar.String(max_length=255, nullable=True) first_name: str = ormar.String(max_length=255) last_name: str = ormar.String(max_length=255) category: str = ormar.String(max_length=255, nullable=True) class User2(ormar.Model): class Meta: tablename: str = "users2" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) email: str = ormar.String(max_length=255, nullable=False) password: str = ormar.String(max_length=255) first_name: str = ormar.String(max_length=255) last_name: str = ormar.String(max_length=255) category: str = ormar.String(max_length=255, nullable=True) timestamp: datetime.datetime = ormar.DateTime( pydantic_only=True, default=datetime.datetime.now ) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @app.post("/users/", response_model=User, response_model_exclude={"password"}) async def create_user(user: User): return await user.save() @app.post("/users2/", response_model=User) async def create_user2(user: User): user = await user.save() return user.dict(exclude={"password"}) @app.post("/users3/", response_model=UserBase) async def create_user3(user: User2): return await user.save() @app.post("/users4/") async def create_user4(user: User2): return (await user.save()).dict(exclude={"password"}) @app.post("/random/", response_model=RandomModel) async def create_user5(user: RandomModel): return await user.save() @app.post("/random2/", response_model=RandomModel) async def create_user6(user: RandomModel): return await user.save() @app.post("/random3/", response_model=RandomModel, response_model_exclude={"full_name"}) async def create_user7(user: 
RandomModel): return await user.save() @pytest.mark.asyncio async def test_excluding_fields_in_endpoints(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): user = { "email": "test@domain.com", "password": "^*^%A*DA*IAAA", "first_name": "John", "last_name": "Doe", } response = await client.post("/users/", json=user) created_user = User(**response.json()) assert created_user.pk is not None assert created_user.password is None user2 = {"email": "test@domain.com", "first_name": "John", "last_name": "Doe"} response = await client.post("/users/", json=user2) created_user = User(**response.json()) assert created_user.pk is not None assert created_user.password is None response = await client.post("/users2/", json=user) created_user2 = User(**response.json()) assert created_user2.pk is not None assert created_user2.password is None # response has only 3 fields from UserBase response = await client.post("/users3/", json=user) assert list(response.json().keys()) == ["email", "first_name", "last_name"] timestamp = datetime.datetime.now() user3 = { "email": "test@domain.com", "password": "^*^%A*DA*IAAA", "first_name": "John", "last_name": "Doe", "timestamp": str(timestamp), } response = await client.post("/users4/", json=user3) assert list(response.json().keys()) == [ "id", "email", "first_name", "last_name", "category", "timestamp", ] assert response.json().get("timestamp") == str(timestamp).replace(" ", "T") resp_dict = response.json() resp_dict.update({"password": "random"}) user_instance = User2(**resp_dict) assert user_instance.timestamp is not None assert isinstance(user_instance.timestamp, datetime.datetime) assert user_instance.timestamp == timestamp response = await client.post("/users4/", json=user3) assert list(response.json().keys()) == [ "id", "email", "first_name", "last_name", "category", "timestamp", ] assert ( datetime.datetime.strptime( response.json().get("timestamp"), "%Y-%m-%dT%H:%M:%S.%f" ) == 
timestamp ) @pytest.mark.asyncio async def test_adding_fields_in_endpoints(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): user3 = {"last_name": "Test", "full_name": "deleted"} response = await client.post("/random/", json=user3) assert list(response.json().keys()) == [ "id", "password", "first_name", "last_name", "created_date", "full_name", ] assert response.json().get("full_name") == "John Test" user3 = {"last_name": "Test"} response = await client.post("/random/", json=user3) assert list(response.json().keys()) == [ "id", "password", "first_name", "last_name", "created_date", "full_name", ] assert response.json().get("full_name") == "John Test" @pytest.mark.asyncio async def test_adding_fields_in_endpoints2(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): user3 = {"last_name": "Test"} response = await client.post("/random2/", json=user3) assert list(response.json().keys()) == [ "id", "password", "first_name", "last_name", "created_date", "full_name", ] assert response.json().get("full_name") == "John Test" @pytest.mark.asyncio async def test_excluding_property_field_in_endpoints2(): dummy_registry = {} @post_save(RandomModel) async def after_save(sender, instance, **kwargs): dummy_registry[instance.pk] = instance.dict() client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): user3 = {"last_name": "Test"} response = await client.post("/random3/", json=user3) assert list(response.json().keys()) == [ "id", "password", "first_name", "last_name", "created_date", ] assert response.json().get("full_name") is None assert len(dummy_registry) == 1 check_dict = dummy_registry.get(response.json().get("id")) check_dict.pop("full_name") assert response.json().get("password") == check_dict.get("password") 
ormar-0.12.2/tests/test_exclude_include_dict/test_excluding_fields_with_default.py000066400000000000000000000072511444363446500307560ustar00rootroot00000000000000import random from typing import Optional import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() def get_position() -> int: return random.randint(1, 10) class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) is_best_seller: bool = ormar.Boolean(default=False, nullable=True) class Track(ormar.Model): class Meta: tablename = "tracks" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) album: Optional[Album] = ormar.ForeignKey(Album) title: str = ormar.String(max_length=100) position: int = ormar.Integer(default=get_position) play_count: int = ormar.Integer(nullable=True, default=0) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_excluding_field_with_default(): async with database: async with database.transaction(force_rollback=True): album = await Album.objects.create(name="Miami") await Track.objects.create(title="Vice City", album=album, play_count=10) await Track.objects.create(title="Beach Sand", album=album, play_count=20) await Track.objects.create(title="Night Lights", album=album) album = await Album.objects.fields("name").get() assert album.is_best_seller is None album = await Album.objects.exclude_fields({"is_best_seller", "id"}).get() assert album.is_best_seller is None album = await Album.objects.exclude_fields({"is_best_seller": ...}).get() assert album.is_best_seller is None tracks = await 
Track.objects.all() for track in tracks: assert track.play_count is not None assert track.position is not None album = ( await Album.objects.select_related("tracks") .exclude_fields({"is_best_seller": ..., "tracks": {"play_count"}}) .get(name="Miami") ) assert album.is_best_seller is None assert len(album.tracks) == 3 for track in album.tracks: assert track.play_count is None assert track.position is not None album = ( await Album.objects.select_related("tracks") .exclude_fields( { "is_best_seller": ..., "tracks": {"play_count": ..., "position": ...}, } ) .get(name="Miami") ) assert album.is_best_seller is None assert len(album.tracks) == 3 for track in album.tracks: assert track.play_count is None assert track.position is None album = ( await Album.objects.select_related("tracks") .exclude_fields( {"is_best_seller": ..., "tracks": {"play_count", "position"}} ) .get(name="Miami") ) assert album.is_best_seller is None assert len(album.tracks) == 3 for track in album.tracks: assert track.play_count is None assert track.position is None ormar-0.12.2/tests/test_exclude_include_dict/test_excluding_subset_of_columns.py000066400000000000000000000137361444363446500305070ustar00rootroot00000000000000from typing import Optional import databases import pydantic import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Company(ormar.Model): class Meta: tablename = "companies" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False) founded: int = ormar.Integer(nullable=True) class Car(ormar.Model): class Meta: tablename = "cars" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) manufacturer: Optional[Company] = ormar.ForeignKey(Company) name: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) gearbox_type: str = 
ormar.String(max_length=20, nullable=True) gears: int = ormar.Integer(nullable=True, name="gears_number") aircon_type: str = ormar.String(max_length=20, nullable=True) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_selecting_subset(): async with database: async with database.transaction(force_rollback=True): toyota = await Company.objects.create(name="Toyota", founded=1937) await Car.objects.create( manufacturer=toyota, name="Corolla", year=2020, gearbox_type="Manual", gears=5, aircon_type="Manual", ) await Car.objects.create( manufacturer=toyota, name="Yaris", year=2019, gearbox_type="Manual", gears=5, aircon_type="Manual", ) await Car.objects.create( manufacturer=toyota, name="Supreme", year=2020, gearbox_type="Auto", gears=6, aircon_type="Auto", ) all_cars = ( await Car.objects.select_related("manufacturer") .exclude_fields( [ "gearbox_type", "gears", "aircon_type", "year", "manufacturer__founded", ] ) .all() ) for car in all_cars: assert all( getattr(car, x) is None for x in ["year", "gearbox_type", "gears", "aircon_type"] ) assert car.manufacturer.name == "Toyota" assert car.manufacturer.founded is None all_cars = ( await Car.objects.select_related("manufacturer") .exclude_fields( { "gearbox_type": ..., "gears": ..., "aircon_type": ..., "year": ..., "manufacturer": {"founded": ...}, } ) .all() ) all_cars2 = ( await Car.objects.select_related("manufacturer") .exclude_fields( { "gearbox_type": ..., "gears": ..., "aircon_type": ..., "year": ..., "manufacturer": {"founded"}, } ) .all() ) assert all_cars == all_cars2 for car in all_cars: assert all( getattr(car, x) is None for x in ["year", "gearbox_type", "gears", "aircon_type"] ) assert car.manufacturer.name == "Toyota" assert car.manufacturer.founded is None all_cars = ( await Car.objects.select_related("manufacturer") 
.exclude_fields("year") .exclude_fields(["gearbox_type", "gears"]) .exclude_fields("aircon_type") .all() ) for car in all_cars: assert all( getattr(car, x) is None for x in ["year", "gearbox_type", "gears", "aircon_type"] ) assert car.manufacturer.name == "Toyota" assert car.manufacturer.founded == 1937 all_cars_check = await Car.objects.select_related("manufacturer").all() for car in all_cars_check: assert all( getattr(car, x) is not None for x in ["year", "gearbox_type", "gears", "aircon_type"] ) assert car.manufacturer.name == "Toyota" assert car.manufacturer.founded == 1937 all_cars_check2 = ( await Car.objects.select_related("manufacturer") .fields(["id", "name", "manufacturer"]) .exclude_fields("manufacturer__founded") .all() ) for car in all_cars_check2: assert all( getattr(car, x) is None for x in ["year", "gearbox_type", "gears", "aircon_type"] ) assert car.manufacturer.name == "Toyota" assert car.manufacturer.founded is None with pytest.raises(pydantic.error_wrappers.ValidationError): # cannot exclude mandatory model columns - company__name in this example await Car.objects.select_related("manufacturer").exclude_fields( ["manufacturer__name"] ).all() ormar-0.12.2/tests/test_exclude_include_dict/test_pydantic_dict_params.py000066400000000000000000000071411444363446500270660ustar00rootroot00000000000000from typing import List import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL metadata = sqlalchemy.MetaData() database = databases.Database(DATABASE_URL, force_rollback=True) class Category(ormar.Model): class Meta: tablename = "categories" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, default="Test", nullable=True) visibility: bool = ormar.Boolean(default=True) class Item(ormar.Model): class Meta: tablename = "items" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = 
ormar.String(max_length=100) price: float = ormar.Float(default=9.99) categories: List[Category] = ormar.ManyToMany(Category) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_exclude_default(): async with database: category = Category() assert category.dict() == { "id": None, "items": [], "name": "Test", "visibility": True, } assert category.dict(exclude_defaults=True) == {"items": []} await category.save() category2 = await Category.objects.get() assert category2.dict() == { "id": 1, "items": [], "name": "Test", "visibility": True, } assert category2.dict(exclude_defaults=True) == {"id": 1, "items": []} assert category2.json(exclude_defaults=True) == '{"id": 1, "items": []}' @pytest.mark.asyncio async def test_exclude_none(): async with database: category = Category(id=2, name=None) assert category.dict() == { "id": 2, "items": [], "name": None, "visibility": True, } assert category.dict(exclude_none=True) == { "id": 2, "items": [], "visibility": True, } await category.save() category2 = await Category.objects.get() assert category2.dict() == { "id": 2, "items": [], "name": None, "visibility": True, } assert category2.dict(exclude_none=True) == { "id": 2, "items": [], "visibility": True, } assert ( category2.json(exclude_none=True) == '{"id": 2, "visibility": true, "items": []}' ) @pytest.mark.asyncio async def test_exclude_unset(): async with database: category = Category(id=3, name="Test 2") assert category.dict() == { "id": 3, "items": [], "name": "Test 2", "visibility": True, } assert category.dict(exclude_unset=True) == { "id": 3, "items": [], "name": "Test 2", } await category.save() category2 = await Category.objects.get() assert category2.dict() == { "id": 3, "items": [], "name": "Test 2", "visibility": True, } # NOTE how after loading from db all fields are set explicitly # as this is what 
happens when you populate a model from db assert category2.dict(exclude_unset=True) == { "id": 3, "items": [], "name": "Test 2", "visibility": True, } ormar-0.12.2/tests/test_fastapi/000077500000000000000000000000001444363446500165615ustar00rootroot00000000000000ormar-0.12.2/tests/test_fastapi/__init__.py000066400000000000000000000000001444363446500206600ustar00rootroot00000000000000ormar-0.12.2/tests/test_fastapi/test_binary_fields.py000066400000000000000000000053011444363446500230030ustar00rootroot00000000000000import base64 import json import uuid from typing import List import databases import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from httpx import AsyncClient import ormar from tests.settings import DATABASE_URL app = FastAPI() database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() app.state.database = database headers = {"content-type": "application/json"} @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() blob3 = b"\xc3\x28" blob4 = b"\xf0\x28\x8c\x28" blob5 = b"\xee" blob6 = b"\xff" class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class BinaryThing(ormar.Model): class Meta(BaseMeta): tablename = "things" id: uuid.UUID = ormar.UUID(primary_key=True, default=uuid.uuid4) name: str = ormar.Text(default="") bt: str = ormar.LargeBinary( max_length=1000, choices=[blob3, blob4, blob5, blob6], represent_as_base64_str=True, ) @app.get("/things", response_model=List[BinaryThing]) async def read_things(): return await BinaryThing.objects.order_by("name").all() @app.post("/things", response_model=BinaryThing) async def create_things(thing: BinaryThing): thing = await thing.save() return thing 
@pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_read_main(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): response = await client.post( "/things", json={"bt": base64.b64encode(blob3).decode()}, headers=headers, ) assert response.status_code == 200 response = await client.get("/things") assert response.json()[0]["bt"] == base64.b64encode(blob3).decode() thing = BinaryThing(**response.json()[0]) assert thing.__dict__["bt"] == blob3 def test_schema(): schema = BinaryThing.schema() assert schema["properties"]["bt"]["format"] == "base64" converted_choices = ["7g==", "/w==", "8CiMKA==", "wyg="] assert len(schema["properties"]["bt"]["enum"]) == 4 assert all( choice in schema["properties"]["bt"]["enum"] for choice in converted_choices ) assert schema["example"]["bt"] == "string" ormar-0.12.2/tests/test_fastapi/test_choices_schema.py000066400000000000000000000112471444363446500231340ustar00rootroot00000000000000import datetime import decimal import uuid from enum import Enum import databases import pydantic import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from httpx import AsyncClient import ormar from tests.settings import DATABASE_URL app = FastAPI() database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() app.state.database = database uuid1 = uuid.uuid4() uuid2 = uuid.uuid4() blob = b"test" blob2 = b"test2icac89uc98" class EnumTest(Enum): val1 = "Val1" val2 = "Val2" class Organisation(ormar.Model): class Meta: tablename = "org" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) ident: str = ormar.String(max_length=100, choices=["ACME Ltd", "Other ltd"]) priority: int = ormar.Integer(choices=[1, 2, 3, 4, 5]) 
priority2: int = ormar.BigInteger(choices=[1, 2, 3, 4, 5]) priority3: int = ormar.SmallInteger(choices=[1, 2, 3, 4, 5]) expire_date: datetime.date = ormar.Date( choices=[datetime.date(2021, 1, 1), datetime.date(2022, 5, 1)] ) expire_time: datetime.time = ormar.Time( choices=[datetime.time(10, 0, 0), datetime.time(12, 30)] ) expire_datetime: datetime.datetime = ormar.DateTime( choices=[ datetime.datetime(2021, 1, 1, 10, 0, 0), datetime.datetime(2022, 5, 1, 12, 30), ] ) random_val: float = ormar.Float(choices=[2.0, 3.5]) random_decimal: decimal.Decimal = ormar.Decimal( scale=2, precision=4, choices=[decimal.Decimal(12.4), decimal.Decimal(58.2)] ) random_json: pydantic.Json = ormar.JSON(choices=["aa", '{"aa": "bb"}']) random_uuid: uuid.UUID = ormar.UUID(choices=[uuid1, uuid2]) enum_string: str = ormar.String(max_length=100, choices=list(EnumTest)) blob_col: bytes = ormar.LargeBinary(max_length=100000, choices=[blob, blob2]) @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @app.post("/items/", response_model=Organisation) async def create_item(item: Organisation): await item.save() return item @pytest.mark.asyncio async def test_all_endpoints(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): response = await client.post( "/items/", json={"id": 1, "ident": "", "priority": 4, "expire_date": "2022-05-01"}, ) assert response.status_code == 422 response = await client.post( "/items/", json={ "id": 1, "ident": "ACME Ltd", "priority": 4, "priority2": 2, "priority3": 1, "expire_date": "2022-05-01", 
"expire_time": "10:00:00", "expire_datetime": "2022-05-01T12:30:00", "random_val": 3.5, "random_decimal": 12.4, "random_json": '{"aa": "bb"}', "random_uuid": str(uuid1), "enum_string": EnumTest.val1.value, "blob_col": blob.decode("utf-8"), }, ) assert response.status_code == 200 item = Organisation(**response.json()) assert item.pk is not None response = await client.get("/docs") assert response.status_code == 200 assert b"FastAPI - Swagger UI" in response.content def test_schema_modification(): schema = Organisation.schema() for field in ["ident", "priority", "expire_date"]: assert field in schema["properties"] assert schema["properties"].get(field).get("enum") == list( Organisation.Meta.model_fields.get(field).choices ) assert "An enumeration." in schema["properties"].get(field).get("description") def test_schema_gen(): schema = app.openapi() assert "Organisation" in schema["components"]["schemas"] props = schema["components"]["schemas"]["Organisation"]["properties"] for field in [k for k in Organisation.Meta.model_fields.keys() if k != "id"]: assert "enum" in props.get(field) assert "description" in props.get(field) assert "An enumeration." 
in props.get(field).get("description") ormar-0.12.2/tests/test_fastapi/test_docs_with_multiple_relations_to_one.py000066400000000000000000000036201444363446500275140ustar00rootroot00000000000000from typing import Optional from uuid import UUID, uuid4 import databases import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from httpx import AsyncClient import ormar app = FastAPI() DATABASE_URL = "sqlite:///db.sqlite" database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class CA(ormar.Model): class Meta(BaseMeta): tablename = "cas" id: UUID = ormar.UUID(primary_key=True, default=uuid4) ca_name: str = ormar.Text(default="") class CB1(ormar.Model): class Meta(BaseMeta): tablename = "cb1s" id: UUID = ormar.UUID(primary_key=True, default=uuid4) cb1_name: str = ormar.Text(default="") ca1: Optional[CA] = ormar.ForeignKey(CA, nullable=True) class CB2(ormar.Model): class Meta(BaseMeta): tablename = "cb2s" id: UUID = ormar.UUID(primary_key=True, default=uuid4) cb2_name: str = ormar.Text(default="") ca2: Optional[CA] = ormar.ForeignKey(CA, nullable=True) @app.get("/ca", response_model=CA) async def get_ca(): # pragma: no cover return None @app.get("/cb1", response_model=CB1) async def get_cb1(): # pragma: no cover return None @app.get("/cb2", response_model=CB2) async def get_cb2(): # pragma: no cover return None @pytest.mark.asyncio async def test_all_endpoints(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): response = await client.get("/openapi.json") assert response.status_code == 200, response.text schema = response.json() components = schema["components"]["schemas"] assert all(x in components for x in ["CA", "CB1", "CB2"]) pk_onlys = [x for x in list(components.keys()) if x.startswith("PkOnly")] assert len(pk_onlys) == 2 
ormar-0.12.2/tests/test_fastapi/test_enum_schema.py000066400000000000000000000013151444363446500224560ustar00rootroot00000000000000from enum import Enum import databases import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class MyEnum(Enum): SMALL = 1 BIG = 2 class EnumExample(ormar.Model): class Meta: tablename = "enum_example" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) size: MyEnum = ormar.Enum(enum_class=MyEnum, default=MyEnum.SMALL) def test_proper_schema(): schema = EnumExample.schema_json() assert ( '{"MyEnum": {"title": "MyEnum", "description": "An enumeration.", ' '"enum": [1, 2]}}' in schema ) ormar-0.12.2/tests/test_fastapi/test_excludes_with_get_pydantic.py000066400000000000000000000076151444363446500256040ustar00rootroot00000000000000import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from httpx import AsyncClient from tests.settings import DATABASE_URL from tests.test_inheritance_and_pydantic_generation.test_geting_pydantic_models import ( Category, SelfRef, database, metadata, ) # type: ignore app = FastAPI() app.state.database = database @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) async def create_category(category: Category): return await Category(**category.dict()).save() create_category.__annotations__["category"] = Category.get_pydantic(exclude={"id"}) app.post("/categories/", response_model=Category)(create_category) @app.post( 
"/selfrefs/", response_model=SelfRef.get_pydantic(exclude={"parent", "children__name"}), ) async def create_selfref( selfref: SelfRef.get_pydantic( # type: ignore exclude={"children__name"} # noqa: F821 ), ): selfr = SelfRef(**selfref.dict()) await selfr.save() if selfr.children: for child in selfr.children: await child.upsert() return selfr @app.get("/selfrefs/{ref_id}/") async def get_selfref(ref_id: int): selfr = await SelfRef.objects.select_related("children").get(id=ref_id) return selfr @pytest.mark.asyncio async def test_read_main(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): test_category = dict(name="Foo", id=12) response = await client.post("/categories/", json=test_category) assert response.status_code == 200 cat = Category(**response.json()) assert cat.name == "Foo" assert cat.id == 1 assert cat.items == [] test_selfref = dict(name="test") test_selfref2 = dict(name="test2", parent={"id": 1}) test_selfref3 = dict(name="test3", children=[{"name": "aaa"}]) response = await client.post("/selfrefs/", json=test_selfref) assert response.status_code == 200 self_ref = SelfRef(**response.json()) assert self_ref.id == 1 assert self_ref.name == "test" assert self_ref.parent is None assert self_ref.children == [] response = await client.post("/selfrefs/", json=test_selfref2) assert response.status_code == 200 self_ref = SelfRef(**response.json()) assert self_ref.id == 2 assert self_ref.name == "test2" assert self_ref.parent is None assert self_ref.children == [] response = await client.post("/selfrefs/", json=test_selfref3) assert response.status_code == 200 self_ref = SelfRef(**response.json()) assert self_ref.id == 3 assert self_ref.name == "test3" assert self_ref.parent is None assert self_ref.children[0].dict() == {"id": 4} response = await client.get("/selfrefs/3/") assert response.status_code == 200 check_children = SelfRef(**response.json()) assert check_children.children[0].dict() == { 
"children": [], "id": 4, "name": "selfref", "parent": {"id": 3, "name": "test3"}, } response = await client.get("/selfrefs/2/") assert response.status_code == 200 check_children = SelfRef(**response.json()) assert check_children.dict() == { "children": [], "id": 2, "name": "test2", "parent": {"id": 1}, } ormar-0.12.2/tests/test_fastapi/test_excluding_fields.py000066400000000000000000000104741444363446500235100ustar00rootroot00000000000000from typing import List import databases import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from httpx import AsyncClient import ormar from tests.settings import DATABASE_URL app = FastAPI() metadata = sqlalchemy.MetaData() database = databases.Database(DATABASE_URL, force_rollback=True) app.state.database = database @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() class Category(ormar.Model): class Meta: tablename = "categories" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Item(ormar.Model): class Meta: tablename = "items" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) categories: List[Category] = ormar.ManyToMany(Category) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @app.post("/items/", response_model=Item) async def create_item(item: Item): await item.save_related(follow=True, save_all=True) return item @app.get("/items/{item_id}") async def get_item(item_id: int): item = await Item.objects.select_related("categories").get(pk=item_id) return 
item.dict(exclude_primary_keys=True, exclude_through_models=True) @app.get("/categories/{category_id}") async def get_category(category_id: int): category = await Category.objects.select_related("items").get(pk=category_id) return category.dict(exclude_primary_keys=True) @app.get("/categories/nt/{category_id}") async def get_category_no_through(category_id: int): category = await Category.objects.select_related("items").get(pk=category_id) return category.dict(exclude_through_models=True) @app.get("/categories/ntp/{category_id}") async def get_category_no_pk_through(category_id: int): category = await Category.objects.select_related("items").get(pk=category_id) return category.dict(exclude_through_models=True, exclude_primary_keys=True) @app.get( "/items/fex/{item_id}", response_model=Item, response_model_exclude={ "id", "categories__id", "categories__itemcategory", "categories__items", }, ) async def get_item_excl(item_id: int): item = await Item.objects.select_all().get(pk=item_id) return item @pytest.mark.asyncio async def test_all_endpoints(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): item = { "name": "test", "categories": [{"name": "test cat"}, {"name": "test cat2"}], } response = await client.post("/items/", json=item) item_check = Item(**response.json()) assert item_check.id is not None assert item_check.categories[0].id is not None no_pk_item = (await client.get(f"/items/{item_check.id}")).json() assert no_pk_item == item no_pk_item2 = (await client.get(f"/items/fex/{item_check.id}")).json() assert no_pk_item2 == item no_pk_category = ( await client.get(f"/categories/{item_check.categories[0].id}") ).json() assert no_pk_category == { "items": [ { "itemcategory": {"category": None, "id": 1, "item": None}, "name": "test", } ], "name": "test cat", } no_through_category = ( await client.get(f"/categories/nt/{item_check.categories[0].id}") ).json() assert no_through_category == { "id": 1, 
"items": [{"id": 1, "name": "test"}], "name": "test cat", } no_through_category = ( await client.get(f"/categories/ntp/{item_check.categories[0].id}") ).json() assert no_through_category == {"items": [{"name": "test"}], "name": "test cat"} ormar-0.12.2/tests/test_fastapi/test_extra_ignore_parameter.py000066400000000000000000000031711444363446500247220ustar00rootroot00000000000000import json import databases import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from httpx import AsyncClient import ormar from ormar import Extra from tests.settings import DATABASE_URL app = FastAPI() metadata = sqlalchemy.MetaData() database = databases.Database(DATABASE_URL, force_rollback=True) app.state.database = database @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() class Item(ormar.Model): class Meta: database = database metadata = metadata extra = Extra.ignore id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @app.post("/item/", response_model=Item) async def create_item(item: Item): return await item.save() @pytest.mark.asyncio async def test_extra_parameters_in_request(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): data = {"name": "Name", "extraname": "to ignore"} resp = await client.post("item/", json=data) assert resp.status_code == 200 assert "name" in resp.json() assert resp.json().get("name") == "Name" 
ormar-0.12.2/tests/test_fastapi/test_fastapi_docs.py000066400000000000000000000110641444363446500226330ustar00rootroot00000000000000import datetime from typing import List, Optional import databases import pydantic import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from httpx import AsyncClient import ormar from tests.settings import DATABASE_URL app = FastAPI() metadata = sqlalchemy.MetaData() database = databases.Database(DATABASE_URL, force_rollback=True) app.state.database = database @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() class LocalMeta: metadata = metadata database = database class PTestA(pydantic.BaseModel): c: str d: bytes e: datetime.datetime class PTestP(pydantic.BaseModel): a: int b: Optional[PTestA] class Category(ormar.Model): class Meta(LocalMeta): tablename = "categories" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Item(ormar.Model): class Meta(LocalMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) pydantic_int: Optional[int] test_P: Optional[List[PTestP]] categories = ormar.ManyToMany(Category) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @app.get("/items/", response_model=List[Item]) async def get_items(): items = await Item.objects.select_related("categories").all() return items @app.post("/items/", response_model=Item) async def create_item(item: Item): await item.save() return item @app.post("/items/add_category/", response_model=Item) async def add_item_category(item: Item, category: Category): await item.categories.add(category) 
return item @app.post("/categories/", response_model=Category) async def create_category(category: Category): await category.save() return category @pytest.mark.asyncio async def test_all_endpoints(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): response = await client.post("/categories/", json={"name": "test cat"}) category = response.json() response = await client.post("/categories/", json={"name": "test cat2"}) category2 = response.json() response = await client.post("/items/", json={"name": "test", "id": 1}) item = Item(**response.json()) assert item.pk is not None response = await client.post( "/items/add_category/", json={"item": item.dict(), "category": category} ) item = Item(**response.json()) assert len(item.categories) == 1 assert item.categories[0].name == "test cat" await client.post( "/items/add_category/", json={"item": item.dict(), "category": category2} ) response = await client.get("/items/") items = [Item(**item) for item in response.json()] assert items[0] == item assert len(items[0].categories) == 2 assert items[0].categories[0].name == "test cat" assert items[0].categories[1].name == "test cat2" response = await client.get("/docs") assert response.status_code == 200 assert b"FastAPI - Swagger UI" in response.content def test_schema_modification(): schema = Item.schema() assert any( x.get("type") == "array" for x in schema["properties"]["categories"]["anyOf"] ) assert schema["properties"]["categories"]["title"] == "Categories" assert schema["example"] == { "categories": [{"id": 0, "name": "string"}], "id": 0, "name": "string", "pydantic_int": 0, "test_P": [{"a": 0, "b": {"c": "string", "d": "string", "e": "string"}}], } schema = Category.schema() assert schema["example"] == { "id": 0, "name": "string", "items": [ { "id": 0, "name": "string", "pydantic_int": 0, "test_P": [ {"a": 0, "b": {"c": "string", "d": "string", "e": "string"}} ], } ], } def test_schema_gen(): schema = 
app.openapi() assert "Category" in schema["components"]["schemas"] assert "Item" in schema["components"]["schemas"] ormar-0.12.2/tests/test_fastapi/test_fastapi_usage.py000066400000000000000000000031451444363446500230100ustar00rootroot00000000000000from typing import Optional import databases import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from httpx import AsyncClient import ormar from tests.settings import DATABASE_URL app = FastAPI() database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Category(ormar.Model): class Meta: tablename = "categories" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Item(ormar.Model): class Meta: tablename = "items" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) category: Optional[Category] = ormar.ForeignKey(Category, nullable=True) @app.post("/items/", response_model=Item) async def create_item(item: Item): return item @pytest.mark.asyncio async def test_read_main(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): response = await client.post( "/items/", json={"name": "test", "id": 1, "category": {"name": "test cat"}} ) assert response.status_code == 200 assert response.json() == { "category": { "id": None, "items": [{"id": 1, "name": "test"}], "name": "test cat", }, "id": 1, "name": "test", } item = Item(**response.json()) assert item.id == 1 ormar-0.12.2/tests/test_fastapi/test_inheritance_concrete_fastapi.py000066400000000000000000000174611444363446500260650ustar00rootroot00000000000000import datetime from typing import List import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from httpx import AsyncClient from tests.settings import DATABASE_URL from 
tests.test_inheritance_and_pydantic_generation.test_inheritance_concrete import ( # type: ignore Category, Subject, Person, Bus, Truck, Bus2, Truck2, db as database, metadata, ) app = FastAPI() app.state.database = database @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() @app.post("/subjects/", response_model=Subject) async def create_item(item: Subject): return item @app.post("/categories/", response_model=Category) async def create_category(category: Category): await category.save() return category @app.post("/buses/", response_model=Bus) async def create_bus(bus: Bus): await bus.save() return bus @app.get("/buses/{item_id}", response_model=Bus) async def get_bus(item_id: int): bus = await Bus.objects.select_related(["owner", "co_owner"]).get(pk=item_id) return bus @app.get("/buses/", response_model=List[Bus]) async def get_buses(): buses = await Bus.objects.select_related(["owner", "co_owner"]).all() return buses @app.post("/trucks/", response_model=Truck) async def create_truck(truck: Truck): await truck.save() return truck @app.post("/persons/", response_model=Person) async def create_person(person: Person): await person.save() return person @app.post("/buses2/", response_model=Bus2) async def create_bus2(bus: Bus2): await bus.save() return bus @app.post("/buses2/{item_id}/add_coowner/", response_model=Bus2) async def add_bus_coowner(item_id: int, person: Person): bus = await Bus2.objects.select_related(["owner", "co_owners"]).get(pk=item_id) await bus.co_owners.add(person) return bus @app.get("/buses2/", response_model=List[Bus2]) async def get_buses2(): buses = await Bus2.objects.select_related(["owner", "co_owners"]).all() return buses @app.post("/trucks2/", response_model=Truck2) async def create_truck2(truck: Truck2): 
await truck.save() return truck @app.post("/trucks2/{item_id}/add_coowner/", response_model=Truck2) async def add_truck_coowner(item_id: int, person: Person): truck = await Truck2.objects.select_related(["owner", "co_owners"]).get(pk=item_id) await truck.co_owners.add(person) return truck @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_read_main(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): test_category = dict(name="Foo", code=123, created_by="Sam", updated_by="Max") test_subject = dict(name="Bar") response = await client.post("/categories/", json=test_category) assert response.status_code == 200 cat = Category(**response.json()) assert cat.name == "Foo" assert cat.created_by == "Sam" assert cat.created_date is not None assert cat.id == 1 cat_dict = cat.dict() cat_dict["updated_date"] = cat_dict["updated_date"].strftime( "%Y-%m-%d %H:%M:%S.%f" ) cat_dict["created_date"] = cat_dict["created_date"].strftime( "%Y-%m-%d %H:%M:%S.%f" ) test_subject["category"] = cat_dict response = await client.post("/subjects/", json=test_subject) assert response.status_code == 200 sub = Subject(**response.json()) assert sub.name == "Bar" assert sub.category.pk == cat.pk assert isinstance(sub.updated_date, datetime.datetime) @pytest.mark.asyncio async def test_inheritance_with_relation(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): sam = Person(**(await client.post("/persons/", json={"name": "Sam"})).json()) joe = Person(**(await client.post("/persons/", json={"name": "Joe"})).json()) truck_dict = dict( name="Shelby wanna be", max_capacity=1400, owner=sam.dict(), co_owner=joe.dict(), ) bus_dict = dict( name="Unicorn", max_persons=50, owner=sam.dict(), co_owner=joe.dict() ) unicorn = 
Bus(**(await client.post("/buses/", json=bus_dict)).json()) shelby = Truck(**(await client.post("/trucks/", json=truck_dict)).json()) assert shelby.name == "Shelby wanna be" assert shelby.owner.name == "Sam" assert shelby.co_owner.name == "Joe" assert shelby.co_owner == joe assert shelby.max_capacity == 1400 assert unicorn.name == "Unicorn" assert unicorn.owner == sam assert unicorn.owner.name == "Sam" assert unicorn.co_owner.name == "Joe" assert unicorn.max_persons == 50 unicorn2 = Bus(**(await client.get(f"/buses/{unicorn.pk}")).json()) assert unicorn2.name == "Unicorn" assert unicorn2.owner == sam assert unicorn2.owner.name == "Sam" assert unicorn2.co_owner.name == "Joe" assert unicorn2.max_persons == 50 buses = [Bus(**x) for x in (await client.get("/buses/")).json()] assert len(buses) == 1 assert buses[0].name == "Unicorn" @pytest.mark.asyncio async def test_inheritance_with_m2m_relation(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): sam = Person(**(await client.post("/persons/", json={"name": "Sam"})).json()) joe = Person(**(await client.post("/persons/", json={"name": "Joe"})).json()) alex = Person(**(await client.post("/persons/", json={"name": "Alex"})).json()) truck_dict = dict(name="Shelby wanna be", max_capacity=2000, owner=sam.dict()) bus_dict = dict(name="Unicorn", max_persons=80, owner=sam.dict()) unicorn = Bus2(**(await client.post("/buses2/", json=bus_dict)).json()) shelby = Truck2(**(await client.post("/trucks2/", json=truck_dict)).json()) unicorn = Bus2( **( await client.post(f"/buses2/{unicorn.pk}/add_coowner/", json=joe.dict()) ).json() ) unicorn = Bus2( **( await client.post( f"/buses2/{unicorn.pk}/add_coowner/", json=alex.dict() ) ).json() ) assert shelby.name == "Shelby wanna be" assert shelby.owner.name == "Sam" assert len(shelby.co_owners) == 0 assert shelby.max_capacity == 2000 assert unicorn.name == "Unicorn" assert unicorn.owner == sam assert unicorn.owner.name == "Sam" 
assert unicorn.co_owners[0].name == "Joe" assert unicorn.co_owners[1] == alex assert unicorn.max_persons == 80 await client.post(f"/trucks2/{shelby.pk}/add_coowner/", json=alex.dict()) shelby = Truck2( **( await client.post(f"/trucks2/{shelby.pk}/add_coowner/", json=joe.dict()) ).json() ) assert shelby.name == "Shelby wanna be" assert shelby.owner.name == "Sam" assert len(shelby.co_owners) == 2 assert shelby.co_owners[0] == alex assert shelby.co_owners[1] == joe assert shelby.max_capacity == 2000 buses = [Bus2(**x) for x in (await client.get("/buses2/")).json()] assert len(buses) == 1 assert buses[0].name == "Unicorn" ormar-0.12.2/tests/test_fastapi/test_inheritance_mixins_fastapi.py000066400000000000000000000044521444363446500255660ustar00rootroot00000000000000import datetime import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from httpx import AsyncClient from tests.settings import DATABASE_URL from tests.test_inheritance_and_pydantic_generation.test_inheritance_mixins import Category, Subject, metadata, db as database # type: ignore app = FastAPI() app.state.database = database @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() @app.post("/subjects/", response_model=Subject) async def create_item(item: Subject): return item @app.post("/categories/", response_model=Category) async def create_category(category: Category): await category.save() return category @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_read_main(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, 
LifespanManager(app): test_category = dict(name="Foo", code=123, created_by="Sam", updated_by="Max") test_subject = dict(name="Bar") response = await client.post("/categories/", json=test_category) assert response.status_code == 200 cat = Category(**response.json()) assert cat.name == "Foo" assert cat.created_by == "Sam" assert cat.created_date is not None assert cat.id == 1 cat_dict = cat.dict() cat_dict["updated_date"] = cat_dict["updated_date"].strftime( "%Y-%m-%d %H:%M:%S.%f" ) cat_dict["created_date"] = cat_dict["created_date"].strftime( "%Y-%m-%d %H:%M:%S.%f" ) test_subject["category"] = cat_dict response = await client.post("/subjects/", json=test_subject) assert response.status_code == 200 sub = Subject(**response.json()) assert sub.name == "Bar" assert sub.category.pk == cat.pk assert isinstance(sub.updated_date, datetime.datetime) ormar-0.12.2/tests/test_fastapi/test_json_field_fastapi.py000066400000000000000000000132601444363446500240170ustar00rootroot00000000000000# type: ignore import uuid from typing import List import databases import pydantic import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from httpx import AsyncClient import ormar from tests.settings import DATABASE_URL app = FastAPI() database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() app.state.database = database @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Thing(ormar.Model): class Meta(BaseMeta): tablename = "things" id: uuid.UUID = ormar.UUID(primary_key=True, default=uuid.uuid4) name: str = ormar.Text(default="") js: pydantic.Json = ormar.JSON() @app.get("/things", 
response_model=List[Thing]) async def read_things(): return await Thing.objects.order_by("name").all() @app.get("/things_with_sample", response_model=List[Thing]) async def read_things_sample(): await Thing(name="b", js=["asdf", "asdf", "bobby", "nigel"]).save() await Thing(name="a", js='["lemon", "raspberry", "lime", "pumice"]').save() return await Thing.objects.order_by("name").all() @app.get("/things_with_sample_after_init", response_model=Thing) async def read_things_init(): thing1 = Thing(js="{}") thing1.name = "d" thing1.js = ["js", "set", "after", "constructor"] await thing1.save() return thing1 @app.put("/update_thing", response_model=Thing) async def update_things(thing: Thing): thing.js = ["js", "set", "after", "update"] # type: ignore await thing.update() return thing @app.post("/things", response_model=Thing) async def create_things(thing: Thing): thing = await thing.save() return thing @app.get("/things_untyped") async def read_things_untyped(): return await Thing.objects.order_by("name").all() @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_json_is_required_if_not_nullable(): with pytest.raises(pydantic.ValidationError): Thing() @pytest.mark.asyncio async def test_json_is_not_required_if_nullable(): class Thing2(ormar.Model): class Meta(BaseMeta): tablename = "things2" id: uuid.UUID = ormar.UUID(primary_key=True, default=uuid.uuid4) name: str = ormar.Text(default="") js: pydantic.Json = ormar.JSON(nullable=True) Thing2() @pytest.mark.asyncio async def test_setting_values_after_init(): async with database: t1 = Thing(id="67a82813-d90c-45ff-b546-b4e38d7030d7", name="t1", js=["thing1"]) assert '["thing1"]' in t1.json() await t1.save() t1.json() assert '["thing1"]' in t1.json() assert '["thing1"]' in (await Thing.objects.get(id=t1.id)).json() await t1.update() assert '["thing1"]' in 
(await Thing.objects.get(id=t1.id)).json() @pytest.mark.asyncio async def test_read_main(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): response = await client.get("/things_with_sample") assert response.status_code == 200 # check if raw response not double encoded assert '["lemon","raspberry","lime","pumice"]' in response.text # parse json and check that we get lists not strings resp = response.json() assert resp[0].get("js") == ["lemon", "raspberry", "lime", "pumice"] assert resp[1].get("js") == ["asdf", "asdf", "bobby", "nigel"] # create a new one response = await client.post( "/things", json={"js": ["test", "test2"], "name": "c"} ) assert response.json().get("js") == ["test", "test2"] # get all with new one response = await client.get("/things") assert response.status_code == 200 assert '["test","test2"]' in response.text resp = response.json() assert resp[0].get("js") == ["lemon", "raspberry", "lime", "pumice"] assert resp[1].get("js") == ["asdf", "asdf", "bobby", "nigel"] assert resp[2].get("js") == ["test", "test2"] response = await client.get("/things_with_sample_after_init") assert response.status_code == 200 resp = response.json() assert resp.get("js") == ["js", "set", "after", "constructor"] # test new with after constructor response = await client.get("/things") resp = response.json() assert resp[0].get("js") == ["lemon", "raspberry", "lime", "pumice"] assert resp[1].get("js") == ["asdf", "asdf", "bobby", "nigel"] assert resp[2].get("js") == ["test", "test2"] assert resp[3].get("js") == ["js", "set", "after", "constructor"] response = await client.put("/update_thing", json=resp[3]) assert response.status_code == 200 resp = response.json() assert resp.get("js") == ["js", "set", "after", "update"] # test new with after constructor response = await client.get("/things_untyped") resp = response.json() assert resp[0].get("js") == ["lemon", "raspberry", "lime", "pumice"] assert 
resp[1].get("js") == ["asdf", "asdf", "bobby", "nigel"] assert resp[2].get("js") == ["test", "test2"] assert resp[3].get("js") == ["js", "set", "after", "update"] ormar-0.12.2/tests/test_fastapi/test_m2m_forwardref.py000066400000000000000000000057721444363446500231210ustar00rootroot00000000000000from typing import List, Optional import databases import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from pydantic.schema import ForwardRef from starlette import status from httpx import AsyncClient import ormar app = FastAPI() from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() app.state.database = database @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() class BaseMeta(ormar.ModelMeta): database = database metadata = metadata CityRef = ForwardRef("City") CountryRef = ForwardRef("Country") # models.py class Country(ormar.Model): class Meta(BaseMeta): tablename = "countries" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=128, unique=True) iso2: str = ormar.String(max_length=3) iso3: str = ormar.String(max_length=4, unique=True) population: int = ormar.Integer(maximum=10000000000) demonym: str = ormar.String(max_length=128) native_name: str = ormar.String(max_length=128) capital: Optional[CityRef] = ormar.ForeignKey( # type: ignore CityRef, related_name="capital_city", nullable=True ) borders: List[Optional[CountryRef]] = ormar.ManyToMany( # type: ignore CountryRef, nullable=True, skip_reverse=True ) class City(ormar.Model): class Meta(BaseMeta): tablename = "cities" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=128) country: Country = 
ormar.ForeignKey( Country, related_name="cities", skip_reverse=True ) Country.update_forward_refs() @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @app.post("/", response_model=Country, status_code=status.HTTP_201_CREATED) async def create_country(country: Country): # if this is ormar result = await country.upsert() # it's already initialized as ormar model return result @pytest.mark.asyncio async def test_payload(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): payload = { "name": "Thailand", "iso2": "TH", "iso3": "THA", "population": 23123123, "demonym": "Thai", "native_name": "Thailand", } resp = await client.post( "/", json=payload, headers={"application-type": "json"} ) # print(resp.content) assert resp.status_code == 201 resp_country = Country(**resp.json()) assert resp_country.name == "Thailand" ormar-0.12.2/tests/test_fastapi/test_more_reallife_fastapi.py000066400000000000000000000105501444363446500245070ustar00rootroot00000000000000from typing import List, Optional import databases import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from httpx import AsyncClient import ormar from tests.settings import DATABASE_URL app = FastAPI() metadata = sqlalchemy.MetaData() database = databases.Database(DATABASE_URL, force_rollback=True) app.state.database = database @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() class Category(ormar.Model): class Meta: tablename = "categories" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = 
ormar.String(max_length=100) class Item(ormar.Model): class Meta: tablename = "items" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) category: Optional[Category] = ormar.ForeignKey(Category, nullable=True) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @app.get("/items", response_model=List[Item]) async def get_items(): items = await Item.objects.select_related("category").all() return items @app.get("/items/raw", response_model=List[Item]) async def get_raw_items(): items = await Item.objects.all() return items @app.post("/items", response_model=Item) async def create_item(item: Item): await item.save() return item @app.post("/categories", response_model=Category) async def create_category(category: Category): await category.save() return category @app.get("/items/{item_id}") async def get_item(item_id: int): item = await Item.objects.get(pk=item_id) return item @app.put("/items/{item_id}") async def update_item(item_id: int, item: Item): item_db = await Item.objects.get(pk=item_id) return await item_db.update(**item.dict()) @app.delete("/items/{item_id}") async def delete_item(item_id: int): item_db = await Item.objects.get(pk=item_id) return {"deleted_rows": await item_db.delete()} @pytest.mark.asyncio async def test_all_endpoints(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): response = await client.post("/categories", json={"name": "test cat"}) category = response.json() response = await client.post( "/items", json={"name": "test", "id": 1, "category": category} ) item = Item(**response.json()) assert item.pk is not None response = await client.get("/items") items = [Item(**item) for item in response.json()] assert items[0] == item item.name = "New name" response = await 
client.put(f"/items/{item.pk}", json=item.dict()) assert response.json() == item.dict() response = await client.get("/items") items = [Item(**item) for item in response.json()] assert items[0].name == "New name" response = await client.get("/items/raw") items = [Item(**item) for item in response.json()] assert items[0].name == "New name" assert items[0].category.name is None response = await client.get(f"/items/{item.pk}") new_item = Item(**response.json()) assert new_item == item response = await client.delete(f"/items/{item.pk}") assert response.json().get("deleted_rows", "__UNDEFINED__") != "__UNDEFINED__" response = await client.get("/items") items = response.json() assert len(items) == 0 await client.post( "/items", json={"name": "test_2", "id": 2, "category": category} ) response = await client.get("/items") items = response.json() assert len(items) == 1 item = Item(**items[0]) response = await client.delete(f"/items/{item.pk}") assert response.json().get("deleted_rows", "__UNDEFINED__") != "__UNDEFINED__" response = await client.get("/docs") assert response.status_code == 200 ormar-0.12.2/tests/test_fastapi/test_nested_saving.py000066400000000000000000000121111444363446500230170ustar00rootroot00000000000000import json from typing import Any, Dict, Optional, Set, Type, Union, cast import databases import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from httpx import AsyncClient import ormar from ormar.queryset.utils import translate_list_to_dict from tests.settings import DATABASE_URL app = FastAPI() metadata = sqlalchemy.MetaData() database = databases.Database(DATABASE_URL, force_rollback=True) app.state.database = database headers = {"content-type": "application/json"} @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if 
database_.is_connected: await database_.disconnect() class Department(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) department_name: str = ormar.String(max_length=100) class Course(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) course_name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean() department: Optional[Department] = ormar.ForeignKey(Department) class Student(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) courses = ormar.ManyToMany(Course) # create db and tables @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) to_exclude = { "id": ..., "courses": { "__all__": {"id": ..., "students": {"__all__": {"id", "studentcourse"}}} }, } exclude_all = {"id": ..., "courses": {"__all__"}} to_exclude_ormar = { "id": ..., "courses": {"id": ..., "students": {"id", "studentcourse"}}, } def auto_exclude_id_field(to_exclude: Any) -> Union[Dict, Set]: if isinstance(to_exclude, dict): for key in to_exclude.keys(): to_exclude[key] = auto_exclude_id_field(to_exclude[key]) to_exclude["id"] = Ellipsis return to_exclude else: return {"id"} def generate_exclude_for_ids(model: Type[ormar.Model]) -> Dict: to_exclude_base = translate_list_to_dict(model._iterate_related_models()) return cast(Dict, auto_exclude_id_field(to_exclude=to_exclude_base)) to_exclude_auto = generate_exclude_for_ids(model=Department) @app.post("/departments/", response_model=Department) async def create_department(department: Department): await department.save_related(follow=True, save_all=True) return department @app.get("/departments/{department_name}") async def get_department(department_name: str): department = await 
Department.objects.select_all(follow=True).get( department_name=department_name ) return department.dict(exclude=to_exclude) @app.get("/departments/{department_name}/second") async def get_department_exclude(department_name: str): department = await Department.objects.select_all(follow=True).get( department_name=department_name ) return department.dict(exclude=to_exclude_ormar) @app.get("/departments/{department_name}/exclude") async def get_department_exclude_all(department_name: str): department = await Department.objects.select_all(follow=True).get( department_name=department_name ) return department.dict(exclude=exclude_all) @pytest.mark.asyncio async def test_saving_related_in_fastapi(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): payload = { "department_name": "Ormar", "courses": [ { "course_name": "basic1", "completed": True, "students": [{"name": "Jack"}, {"name": "Abi"}], }, { "course_name": "basic2", "completed": True, "students": [{"name": "Kate"}, {"name": "Miranda"}], }, ], } response = await client.post("/departments/", json=payload, headers=headers) department = Department(**response.json()) assert department.id is not None assert len(department.courses) == 2 assert department.department_name == "Ormar" assert department.courses[0].course_name == "basic1" assert department.courses[0].completed assert department.courses[1].course_name == "basic2" assert department.courses[1].completed response = await client.get("/departments/Ormar") response2 = await client.get("/departments/Ormar/second") assert response.json() == response2.json() == payload response3 = await client.get("/departments/Ormar/exclude") assert response3.json() == {"department_name": "Ormar"} ormar-0.12.2/tests/test_fastapi/test_recursion_error.py000066400000000000000000000102561444363446500234200ustar00rootroot00000000000000import json import uuid from datetime import datetime from typing import List import databases 
import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import Depends, FastAPI from httpx import AsyncClient from pydantic import BaseModel, Json import ormar from tests.settings import DATABASE_URL router = FastAPI() metadata = sqlalchemy.MetaData() database = databases.Database(DATABASE_URL, force_rollback=True) router.state.database = database headers = {"content-type": "application/json"} @router.on_event("startup") async def startup() -> None: database_ = router.state.database if not database_.is_connected: await database_.connect() @router.on_event("shutdown") async def shutdown() -> None: database_ = router.state.database if database_.is_connected: await database_.disconnect() class User(ormar.Model): """ The user model """ id: uuid.UUID = ormar.UUID(primary_key=True, default=uuid.uuid4) email: str = ormar.String(unique=True, max_length=100) username: str = ormar.String(unique=True, max_length=100) password: str = ormar.String(unique=True, max_length=100) verified: bool = ormar.Boolean(default=False) verify_key: str = ormar.String(unique=True, max_length=100, nullable=True) created_at: datetime = ormar.DateTime(default=datetime.now()) class Meta: tablename = "users" metadata = metadata database = database class UserSession(ormar.Model): """ The user session model """ id: uuid.UUID = ormar.UUID(primary_key=True, default=uuid.uuid4) user: User = ormar.ForeignKey(User) session_key: str = ormar.String(unique=True, max_length=64) created_at: datetime = ormar.DateTime(default=datetime.now()) class Meta: tablename = "user_sessions" metadata = metadata database = database class QuizAnswer(BaseModel): right: bool answer: str class QuizQuestion(BaseModel): question: str answers: List[QuizAnswer] class QuizInput(BaseModel): title: str description: str questions: List[QuizQuestion] class Quiz(ormar.Model): id: uuid.UUID = ormar.UUID(primary_key=True, default=uuid.uuid4) title: str = ormar.String(max_length=100) description: str = 
ormar.String(max_length=300, nullable=True) created_at: datetime = ormar.DateTime(default=datetime.now()) updated_at: datetime = ormar.DateTime(default=datetime.now()) user_id: uuid.UUID = ormar.UUID(foreign_key=User.id) questions: Json = ormar.JSON(nullable=False) class Meta: tablename = "quiz" metadata = metadata database = database @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) async def get_current_user(): return await User(email="mail@example.com", username="aa", password="pass").save() @router.post("/create", response_model=Quiz) async def create_quiz_lol( quiz_input: QuizInput, user: User = Depends(get_current_user) ): quiz = Quiz(**quiz_input.dict(), user_id=user.id) return await quiz.save() @pytest.mark.asyncio async def test_quiz_creation(): client = AsyncClient(app=router, base_url="http://testserver") async with client as client, LifespanManager(router): payload = { "title": "Some test question", "description": "A description", "questions": [ { "question": "Is ClassQuiz cool?", "answers": [ {"right": True, "answer": "Yes"}, {"right": False, "answer": "No"}, ], }, { "question": "Do you like open source?", "answers": [ {"right": True, "answer": "Yes"}, {"right": False, "answer": "No"}, {"right": False, "answer": "Maybe"}, ], }, ], } response = await client.post("/create", json=payload) assert response.status_code == 200 ormar-0.12.2/tests/test_fastapi/test_relations_with_nested_defaults.py000066400000000000000000000064151444363446500264640ustar00rootroot00000000000000from typing import Optional import databases import pytest import pytest_asyncio import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from httpx import AsyncClient import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() app = FastAPI() 
app.state.database = database @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Country(ormar.Model): class Meta(BaseMeta): tablename = "countries" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, default="Poland") class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) rating: int = ormar.Integer(default=0) country: Optional[Country] = ormar.ForeignKey(Country) class Book(ormar.Model): class Meta(BaseMeta): tablename = "books" id: int = ormar.Integer(primary_key=True) author: Optional[Author] = ormar.ForeignKey(Author) title: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest_asyncio.fixture async def sample_data(): async with database: country = await Country(id=1, name="USA").save() author = await Author(id=1, name="bug", rating=5, country=country).save() await Book( id=1, author=author, title="Bug caused by default value", year=2021 ).save() @app.get("/books/{book_id}", response_model=Book) async def get_book_by_id(book_id: int): book = await Book.objects.get(id=book_id) return book @app.get("/books_with_author/{book_id}", response_model=Book) async def get_book_with_author_by_id(book_id: int): book = await Book.objects.select_related("author").get(id=book_id) return book @pytest.mark.asyncio async def test_related_with_defaults(sample_data): client = AsyncClient(app=app, base_url="http://testserver") async with 
client as client, LifespanManager(app): response = await client.get("/books/1") assert response.json() == { "author": {"id": 1}, "id": 1, "title": "Bug caused by default value", "year": 2021, } response = await client.get("/books_with_author/1") assert response.json() == { "author": { "books": [ {"id": 1, "title": "Bug caused by default value", "year": 2021} ], "country": {"id": 1}, "id": 1, "name": "bug", "rating": 5, }, "id": 1, "title": "Bug caused by default value", "year": 2021, } ormar-0.12.2/tests/test_fastapi/test_schema_not_allowed_params.py000066400000000000000000000012751444363446500253710ustar00rootroot00000000000000import databases import sqlalchemy import ormar DATABASE_URL = "sqlite:///db.sqlite" database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) contents: str = ormar.Text() def test_schema_not_allowed(): schema = Author.schema() for field_schema in schema.get("properties").values(): for key in field_schema.keys(): assert "_" not in key, f"Found illegal field in openapi schema: {key}" ormar-0.12.2/tests/test_fastapi/test_skip_reverse_models.py000066400000000000000000000112771444363446500242460ustar00rootroot00000000000000from typing import List, Optional import databases import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from httpx import AsyncClient import ormar from tests.settings import DATABASE_URL app = FastAPI() metadata = sqlalchemy.MetaData() database = databases.Database(DATABASE_URL, force_rollback=True) app.state.database = database headers = {"content-type": "application/json"} @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() 
@app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() class BaseMeta(ormar.ModelMeta): database = database metadata = metadata class Author(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) first_name: str = ormar.String(max_length=80) last_name: str = ormar.String(max_length=80) class Category(ormar.Model): class Meta(BaseMeta): tablename = "categories" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=40) class Post(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) categories = ormar.ManyToMany(Category, skip_reverse=True) author: Optional[Author] = ormar.ForeignKey(Author, skip_reverse=True) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @app.post("/categories/", response_model=Category) async def create_category(category: Category): await category.save() await category.save_related(follow=True, save_all=True) return category @app.post("/posts/", response_model=Post) async def create_post(post: Post): if post.author: await post.author.save() await post.save() await post.save_related(follow=True, save_all=True) for category in [cat for cat in post.categories]: await post.categories.add(category) return post @app.get("/categories/", response_model=List[Category]) async def get_categories(): return await Category.objects.select_related("posts").all() @app.get("/posts/", response_model=List[Post]) async def get_posts(): posts = await Post.objects.select_related(["categories", "author"]).all() return posts @pytest.mark.asyncio async def test_queries(): client = AsyncClient(app=app, base_url="http://testserver") async with client as client, LifespanManager(app): right_category = {"name": "Test category"} 
wrong_category = {"name": "Test category2", "posts": [{"title": "Test Post"}]} # cannot add posts if skipped, will be ignored (with extra=ignore by default) response = await client.post( "/categories/", json=wrong_category, headers=headers ) assert response.status_code == 200 response = await client.get("/categories/") assert response.status_code == 200 assert not "posts" in response.json() categories = [Category(**x) for x in response.json()] assert categories[0] is not None assert categories[0].name == "Test category2" response = await client.post( "/categories/", json=right_category, headers=headers ) assert response.status_code == 200 response = await client.get("/categories/") assert response.status_code == 200 categories = [Category(**x) for x in response.json()] assert categories[1] is not None assert categories[1].name == "Test category" right_post = { "title": "ok post", "author": {"first_name": "John", "last_name": "Smith"}, "categories": [{"name": "New cat"}], } response = await client.post("/posts/", json=right_post, headers=headers) assert response.status_code == 200 Category.__config__.extra = "allow" response = await client.get("/posts/") assert response.status_code == 200 posts = [Post(**x) for x in response.json()] assert posts[0].title == "ok post" assert posts[0].author.first_name == "John" assert posts[0].categories[0].name == "New cat" wrong_category = {"name": "Test category3", "posts": [{"title": "Test Post"}]} # cannot add posts if skipped, will be error with extra forbid Category.__config__.extra = "forbid" response = await client.post("/categories/", json=wrong_category) assert response.status_code == 422 ormar-0.12.2/tests/test_fastapi/test_wekref_exclusion.py000066400000000000000000000111451444363446500235500ustar00rootroot00000000000000from typing import List, Optional from uuid import UUID, uuid4 import databases import pydantic import pytest import sqlalchemy from asgi_lifespan import LifespanManager from fastapi import FastAPI from 
httpx import AsyncClient import ormar from tests.settings import DATABASE_URL app = FastAPI() database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() app.state.database = database @app.on_event("startup") async def startup() -> None: database_ = app.state.database if not database_.is_connected: await database_.connect() @app.on_event("shutdown") async def shutdown() -> None: database_ = app.state.database if database_.is_connected: await database_.disconnect() @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) class BaseMeta(ormar.ModelMeta): database = database metadata = metadata class OtherThing(ormar.Model): class Meta(BaseMeta): tablename = "other_things" id: UUID = ormar.UUID(primary_key=True, default=uuid4) name: str = ormar.Text(default="") ot_contents: str = ormar.Text(default="") class Thing(ormar.Model): class Meta(BaseMeta): tablename = "things" id: UUID = ormar.UUID(primary_key=True, default=uuid4) name: str = ormar.Text(default="") js: pydantic.Json = ormar.JSON(nullable=True) other_thing: Optional[OtherThing] = ormar.ForeignKey(OtherThing, nullable=True) @app.post("/test/1") async def post_test_1(): # don't split initialization and attribute assignment ot = await OtherThing(ot_contents="otc").save() await Thing(other_thing=ot, name="t1").save() await Thing(other_thing=ot, name="t2").save() await Thing(other_thing=ot, name="t3").save() # if you do not care about returned object you can even go with bulk_create # all of them are created in one transaction # things = [Thing(other_thing=ot, name='t1'), # Thing(other_thing=ot, name="t2"), # Thing(other_thing=ot, name="t3")] # await Thing.objects.bulk_create(things) @app.get("/test/2", response_model=List[Thing]) async def get_test_2(): # if you only query for one use get or first ot = await OtherThing.objects.get() ts = await 
ot.things.all() # specifically null out the relation on things before return for t in ts: t.remove(ot, name="other_thing") return ts @app.get("/test/3", response_model=List[Thing]) async def get_test_3(): ot = await OtherThing.objects.select_related("things").get() # exclude unwanted field while ot is still in scope # in order not to pass it to fastapi return [t.dict(exclude={"other_thing"}) for t in ot.things] @app.get("/test/4", response_model=List[Thing], response_model_exclude={"other_thing"}) async def get_test_4(): ot = await OtherThing.objects.get() # query from the active side return await Thing.objects.all(other_thing=ot) @app.get("/get_ot/", response_model=OtherThing) async def get_ot(): return await OtherThing.objects.get() # more real life (usually) is not getting some random OT and get it's Things # but query for a specific one by some kind of id @app.get( "/test/5/{thing_id}", response_model=List[Thing], response_model_exclude={"other_thing"}, ) async def get_test_5(thing_id: UUID): return await Thing.objects.all(other_thing__id=thing_id) @app.get( "/test/error", response_model=List[Thing], response_model_exclude={"other_thing"} ) async def get_weakref(): ots = await OtherThing.objects.all() ot = ots[0] ts = await ot.things.all() return ts @pytest.mark.asyncio async def test_endpoints(): client = AsyncClient(app=app, base_url="http://testserver") async with client, LifespanManager(app): resp = await client.post("/test/1") assert resp.status_code == 200 resp2 = await client.get("/test/2") assert resp2.status_code == 200 assert len(resp2.json()) == 3 resp3 = await client.get("/test/3") assert resp3.status_code == 200 assert len(resp3.json()) == 3 resp4 = await client.get("/test/4") assert resp4.status_code == 200 assert len(resp4.json()) == 3 ot = OtherThing(**(await client.get("/get_ot/")).json()) resp5 = await client.get(f"/test/5/{ot.id}") assert resp5.status_code == 200 assert len(resp5.json()) == 3 resp6 = await client.get("/test/error") assert 
resp6.status_code == 200 assert len(resp6.json()) == 3 ormar-0.12.2/tests/test_inheritance_and_pydantic_generation/000077500000000000000000000000001444363446500243535ustar00rootroot00000000000000ormar-0.12.2/tests/test_inheritance_and_pydantic_generation/__init__.py000066400000000000000000000000001444363446500264520ustar00rootroot00000000000000test_excluding_parent_fields_inheritance.py000066400000000000000000000105621444363446500351430ustar00rootroot00000000000000ormar-0.12.2/tests/test_inheritance_and_pydantic_generationimport datetime import databases import pytest import sqlalchemy as sa from sqlalchemy import create_engine import ormar from tests.settings import DATABASE_URL metadata = sa.MetaData() db = databases.Database(DATABASE_URL) engine = create_engine(DATABASE_URL) class User(ormar.Model): class Meta(ormar.ModelMeta): tablename = "users" metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=50, unique=True, index=True) class RelationalAuditModel(ormar.Model): class Meta: abstract = True created_by: User = ormar.ForeignKey(User, nullable=False) updated_by: User = ormar.ForeignKey(User, nullable=False) class AuditModel(ormar.Model): class Meta: abstract = True created_by: str = ormar.String(max_length=100) updated_by: str = ormar.String(max_length=100, default="Sam") class DateFieldsModel(ormar.Model): class Meta(ormar.ModelMeta): abstract = True metadata = metadata database = db created_date: datetime.datetime = ormar.DateTime( default=datetime.datetime.now, name="creation_date" ) updated_date: datetime.datetime = ormar.DateTime( default=datetime.datetime.now, name="modification_date" ) class Category(DateFieldsModel, AuditModel): class Meta(ormar.ModelMeta): tablename = "categories" exclude_parent_fields = ["updated_by", "updated_date"] id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=50, unique=True, index=True) code: int = ormar.Integer() class 
Item(DateFieldsModel, AuditModel): class Meta(ormar.ModelMeta): tablename = "items" exclude_parent_fields = ["updated_by", "updated_date"] id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=50, unique=True, index=True) code: int = ormar.Integer() updated_by: str = ormar.String(max_length=100, default="Bob") class Gun(RelationalAuditModel, DateFieldsModel): class Meta(ormar.ModelMeta): tablename = "guns" exclude_parent_fields = ["updated_by"] id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=50) @pytest.fixture(autouse=True, scope="module") def create_test_database(): metadata.create_all(engine) yield metadata.drop_all(engine) def test_model_definition(): model_fields = Category.Meta.model_fields sqlalchemy_columns = Category.Meta.table.c pydantic_columns = Category.__fields__ assert "updated_by" not in model_fields assert "updated_by" not in sqlalchemy_columns assert "updated_by" not in pydantic_columns assert "updated_date" not in model_fields assert "updated_date" not in sqlalchemy_columns assert "updated_date" not in pydantic_columns assert "updated_by" not in Gun.Meta.model_fields assert "updated_by" not in Gun.Meta.table.c assert "updated_by" not in Gun.__fields__ @pytest.mark.asyncio async def test_model_works_as_expected(): async with db: async with db.transaction(force_rollback=True): test = await Category(name="Cat", code=2, created_by="Joe").save() assert test.created_date is not None test2 = await Category.objects.get(pk=test.pk) assert test2.name == "Cat" assert test2.created_by == "Joe" @pytest.mark.asyncio async def test_exclude_with_redefinition(): async with db: async with db.transaction(force_rollback=True): test = await Item(name="Item", code=3, created_by="Anna").save() assert test.created_date is not None assert test.updated_by == "Bob" test2 = await Item.objects.get(pk=test.pk) assert test2.name == "Item" assert test2.code == 3 @pytest.mark.asyncio async def test_exclude_with_relation(): 
async with db: async with db.transaction(force_rollback=True): user = await User(name="Michail Kalasznikow").save() test = await Gun(name="AK47", created_by=user).save() assert test.created_date is not None with pytest.raises(AttributeError): assert test.updated_by test2 = await Gun.objects.select_related("created_by").get(pk=test.pk) assert test2.name == "AK47" assert test2.created_by.name == "Michail Kalasznikow" ormar-0.12.2/tests/test_inheritance_and_pydantic_generation/test_geting_pydantic_models.py000066400000000000000000000202331444363446500324770ustar00rootroot00000000000000from typing import List, Optional import databases import pydantic import sqlalchemy from pydantic import ConstrainedStr from pydantic.typing import ForwardRef import ormar from tests.settings import DATABASE_URL metadata = sqlalchemy.MetaData() database = databases.Database(DATABASE_URL, force_rollback=True) class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class SelfRef(ormar.Model): class Meta(BaseMeta): tablename = "self_refs" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, default="selfref") parent = ormar.ForeignKey(ForwardRef("SelfRef"), related_name="children") SelfRef.update_forward_refs() class Category(ormar.Model): class Meta(BaseMeta): tablename = "categories" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Item(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, default="test") category: Optional[Category] = ormar.ForeignKey(Category, nullable=True) class MutualA(ormar.Model): class Meta(BaseMeta): tablename = "mutual_a" id: int = ormar.Integer(primary_key=True) mutual_b = ormar.ForeignKey(ForwardRef("MutualB"), related_name="mutuals_a") class MutualB(ormar.Model): class Meta(BaseMeta): tablename = "mutual_b" id: int = ormar.Integer(primary_key=True) name = ormar.String(max_length=100, default="test") 
mutual_a = ormar.ForeignKey(MutualA, related_name="mutuals_b") MutualA.update_forward_refs() def test_getting_pydantic_model(): PydanticCategory = Category.get_pydantic() assert issubclass(PydanticCategory, pydantic.BaseModel) assert {*PydanticCategory.__fields__.keys()} == {"items", "id", "name"} assert not PydanticCategory.__fields__["id"].required assert PydanticCategory.__fields__["id"].outer_type_ == int assert PydanticCategory.__fields__["id"].default is None assert PydanticCategory.__fields__["name"].required assert issubclass(PydanticCategory.__fields__["name"].outer_type_, ConstrainedStr) assert PydanticCategory.__fields__["name"].default in [None, Ellipsis] PydanticItem = PydanticCategory.__fields__["items"].type_ assert PydanticCategory.__fields__["items"].outer_type_ == List[PydanticItem] assert issubclass(PydanticItem, pydantic.BaseModel) assert not PydanticItem.__fields__["name"].required assert PydanticItem.__fields__["name"].default == "test" assert issubclass(PydanticItem.__fields__["name"].outer_type_, ConstrainedStr) assert "category" not in PydanticItem.__fields__ def test_initializing_pydantic_model(): data = { "id": 1, "name": "test", "items": [{"id": 1, "name": "test_i1"}, {"id": 2, "name": "test_i2"}], } PydanticCategory = Category.get_pydantic() cat = PydanticCategory(**data) assert cat.dict() == data data = {"id": 1, "name": "test"} cat = PydanticCategory(**data) assert cat.dict() == {**data, "items": None} def test_getting_pydantic_model_include(): PydanticCategory = Category.get_pydantic(include={"id", "name"}) assert len(PydanticCategory.__fields__) == 2 assert "items" not in PydanticCategory.__fields__ def test_getting_pydantic_model_nested_include_set(): PydanticCategory = Category.get_pydantic(include={"id", "items__id"}) assert len(PydanticCategory.__fields__) == 2 assert "name" not in PydanticCategory.__fields__ PydanticItem = PydanticCategory.__fields__["items"].type_ assert len(PydanticItem.__fields__) == 1 assert "id" in 
PydanticItem.__fields__ def test_getting_pydantic_model_nested_include_dict(): PydanticCategory = Category.get_pydantic(include={"id": ..., "items": {"id"}}) assert len(PydanticCategory.__fields__) == 2 assert "name" not in PydanticCategory.__fields__ PydanticItem = PydanticCategory.__fields__["items"].type_ assert len(PydanticItem.__fields__) == 1 assert "id" in PydanticItem.__fields__ def test_getting_pydantic_model_nested_include_nested_dict(): PydanticCategory = Category.get_pydantic(include={"id": ..., "items": {"id": ...}}) assert len(PydanticCategory.__fields__) == 2 assert "name" not in PydanticCategory.__fields__ PydanticItem = PydanticCategory.__fields__["items"].type_ assert len(PydanticItem.__fields__) == 1 assert "id" in PydanticItem.__fields__ def test_getting_pydantic_model_include_exclude(): PydanticCategory = Category.get_pydantic( include={"id": ..., "items": {"id", "name"}}, exclude={"items__name"} ) assert len(PydanticCategory.__fields__) == 2 assert "name" not in PydanticCategory.__fields__ PydanticItem = PydanticCategory.__fields__["items"].type_ assert len(PydanticItem.__fields__) == 1 assert "id" in PydanticItem.__fields__ def test_getting_pydantic_model_exclude(): PydanticItem = Item.get_pydantic(exclude={"category__name"}) assert len(PydanticItem.__fields__) == 3 assert "category" in PydanticItem.__fields__ PydanticCategory = PydanticItem.__fields__["category"].type_ assert len(PydanticCategory.__fields__) == 1 assert "name" not in PydanticCategory.__fields__ def test_getting_pydantic_model_exclude_dict(): PydanticItem = Item.get_pydantic(exclude={"id": ..., "category": {"name"}}) assert len(PydanticItem.__fields__) == 2 assert "category" in PydanticItem.__fields__ assert "id" not in PydanticItem.__fields__ PydanticCategory = PydanticItem.__fields__["category"].type_ assert len(PydanticCategory.__fields__) == 1 assert "name" not in PydanticCategory.__fields__ def test_getting_pydantic_model_self_ref(): PydanticSelfRef = 
SelfRef.get_pydantic() assert len(PydanticSelfRef.__fields__) == 4 assert set(PydanticSelfRef.__fields__.keys()) == { "id", "name", "parent", "children", } InnerSelf = PydanticSelfRef.__fields__["parent"].type_ assert len(InnerSelf.__fields__) == 2 assert set(InnerSelf.__fields__.keys()) == {"id", "name"} InnerSelf2 = PydanticSelfRef.__fields__["children"].type_ assert len(InnerSelf2.__fields__) == 2 assert set(InnerSelf2.__fields__.keys()) == {"id", "name"} def test_getting_pydantic_model_self_ref_exclude(): PydanticSelfRef = SelfRef.get_pydantic(exclude={"children": {"name"}}) assert len(PydanticSelfRef.__fields__) == 4 assert set(PydanticSelfRef.__fields__.keys()) == { "id", "name", "parent", "children", } InnerSelf = PydanticSelfRef.__fields__["parent"].type_ assert len(InnerSelf.__fields__) == 2 assert set(InnerSelf.__fields__.keys()) == {"id", "name"} PydanticSelfRefChildren = PydanticSelfRef.__fields__["children"].type_ assert len(PydanticSelfRefChildren.__fields__) == 1 assert set(PydanticSelfRefChildren.__fields__.keys()) == {"id"} assert PydanticSelfRef != PydanticSelfRefChildren assert InnerSelf != PydanticSelfRefChildren def test_getting_pydantic_model_mutual_rels(): MutualAPydantic = MutualA.get_pydantic() assert len(MutualAPydantic.__fields__) == 3 assert set(MutualAPydantic.__fields__.keys()) == {"id", "mutual_b", "mutuals_b"} MutualB1 = MutualAPydantic.__fields__["mutual_b"].type_ MutualB2 = MutualAPydantic.__fields__["mutuals_b"].type_ assert len(MutualB1.__fields__) == 2 assert set(MutualB1.__fields__.keys()) == {"id", "name"} assert len(MutualB2.__fields__) == 2 assert set(MutualB2.__fields__.keys()) == {"id", "name"} assert MutualB1 == MutualB2 def test_getting_pydantic_model_mutual_rels_exclude(): MutualAPydantic = MutualA.get_pydantic(exclude={"mutual_b": {"name"}}) assert len(MutualAPydantic.__fields__) == 3 assert set(MutualAPydantic.__fields__.keys()) == {"id", "mutual_b", "mutuals_b"} MutualB1 = MutualAPydantic.__fields__["mutual_b"].type_ 
MutualB2 = MutualAPydantic.__fields__["mutuals_b"].type_ assert len(MutualB1.__fields__) == 1 assert set(MutualB1.__fields__.keys()) == {"id"} assert len(MutualB2.__fields__) == 2 assert set(MutualB2.__fields__.keys()) == {"id", "name"} assert MutualB1 != MutualB2 ormar-0.12.2/tests/test_inheritance_and_pydantic_generation/test_inheritance_concrete.py000066400000000000000000000447421444363446500321520ustar00rootroot00000000000000# type: ignore import datetime from typing import List, Optional from collections import Counter import databases import pytest import sqlalchemy as sa from sqlalchemy import create_engine import ormar import ormar.fields.constraints from ormar import ModelDefinitionError, property_field from ormar.exceptions import ModelError from ormar.models.metaclass import get_constraint_copy from tests.settings import DATABASE_URL metadata = sa.MetaData() db = databases.Database(DATABASE_URL) engine = create_engine(DATABASE_URL) class AuditModel(ormar.Model): class Meta: abstract = True created_by: str = ormar.String(max_length=100) updated_by: str = ormar.String(max_length=100, default="Sam") @property_field def audit(self): # pragma: no cover return f"{self.created_by} {self.updated_by}" class DateFieldsModelNoSubclass(ormar.Model): class Meta: tablename = "test_date_models" metadata = metadata database = db date_id: int = ormar.Integer(primary_key=True) created_date: datetime.datetime = ormar.DateTime(default=datetime.datetime.now) updated_date: datetime.datetime = ormar.DateTime(default=datetime.datetime.now) class DateFieldsModel(ormar.Model): class Meta: abstract = True metadata = metadata database = db constraints = [ ormar.fields.constraints.UniqueColumns( "creation_date", "modification_date", ), ormar.fields.constraints.CheckColumns( "creation_date <= modification_date", ), ] created_date: datetime.datetime = ormar.DateTime( default=datetime.datetime.now, name="creation_date" ) updated_date: datetime.datetime = ormar.DateTime( 
default=datetime.datetime.now, name="modification_date" ) class Category(DateFieldsModel, AuditModel): class Meta(ormar.ModelMeta): tablename = "categories" constraints = [ormar.fields.constraints.UniqueColumns("name", "code")] id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=50, unique=True, index=True) code: int = ormar.Integer() @property_field def code_name(self): return f"{self.code}:{self.name}" @property_field def audit(self): return f"{self.created_by} {self.updated_by}" class Subject(DateFieldsModel): class Meta(ormar.ModelMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=50, unique=True, index=True) category: Optional[Category] = ormar.ForeignKey(Category) class Person(ormar.Model): class Meta: metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Car(ormar.Model): class Meta: abstract = True metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=50) owner: Person = ormar.ForeignKey(Person) co_owner: Person = ormar.ForeignKey(Person, related_name="coowned") created_date: datetime.datetime = ormar.DateTime(default=datetime.datetime.now) class Truck(Car): class Meta: pass max_capacity: int = ormar.Integer() class Bus(Car): class Meta: tablename = "buses" metadata = metadata database = db owner: Person = ormar.ForeignKey(Person, related_name="buses") max_persons: int = ormar.Integer() class Car2(ormar.Model): class Meta: abstract = True metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=50) owner: Person = ormar.ForeignKey(Person, related_name="owned") co_owners: List[Person] = ormar.ManyToMany(Person, related_name="coowned") created_date: datetime.datetime = ormar.DateTime(default=datetime.datetime.now) class Truck2(Car2): class Meta: tablename = "trucks2" max_capacity: int = ormar.Integer() class 
Bus2(Car2): class Meta: tablename = "buses2" max_persons: int = ormar.Integer() class ImmutablePerson(Person): class Config: allow_mutation = False validate_assignment = False @pytest.fixture(autouse=True, scope="module") def create_test_database(): metadata.create_all(engine) yield metadata.drop_all(engine) def test_init_of_abstract_model(): with pytest.raises(ModelError): DateFieldsModel() def test_duplicated_related_name_on_different_model(): with pytest.raises(ModelDefinitionError): class Bus3(Car2): # pragma: no cover class Meta: tablename = "buses3" owner: Person = ormar.ForeignKey(Person, related_name="buses") max_persons: int = ormar.Integer() def test_config_is_not_a_class_raises_error(): with pytest.raises(ModelDefinitionError): class ImmutablePerson2(Person): Config = dict(allow_mutation=False, validate_assignment=False) def test_field_redefining_in_concrete_models(): class RedefinedField(DateFieldsModel): class Meta(ormar.ModelMeta): tablename = "redefines" metadata = metadata database = db id: int = ormar.Integer(primary_key=True) created_date: str = ormar.String(max_length=200, name="creation_date") changed_field = RedefinedField.Meta.model_fields["created_date"] assert changed_field.ormar_default is None assert changed_field.get_alias() == "creation_date" assert any(x.name == "creation_date" for x in RedefinedField.Meta.table.columns) assert isinstance( RedefinedField.Meta.table.columns["creation_date"].type, sa.sql.sqltypes.String ) def test_model_subclassing_that_redefines_constraints_column_names(): with pytest.raises(ModelDefinitionError): class WrongField2(DateFieldsModel): # pragma: no cover class Meta(ormar.ModelMeta): tablename = "wrongs" metadata = metadata database = db id: int = ormar.Integer(primary_key=True) created_date: str = ormar.String(max_length=200) def test_model_subclassing_non_abstract_raises_error(): with pytest.raises(ModelDefinitionError): class WrongField2(DateFieldsModelNoSubclass): # pragma: no cover class 
def round_date_to_seconds(
    date: datetime.datetime,
) -> datetime.datetime:  # pragma: no cover
    """Round *date* to the nearest whole second (half a second rounds up)."""
    rounded = (
        date if date.microsecond < 500000 else date + datetime.timedelta(seconds=1)
    )
    return rounded.replace(microsecond=0)
@pytest.mark.asyncio
async def test_inheritance_with_relation():
    """FK relations declared on an abstract parent work on every concrete child."""
    async with db:
        async with db.transaction(force_rollback=True):
            sam = await Person(name="Sam").save()
            joe = await Person(name="Joe").save()
            await Truck(
                name="Shelby wanna be", max_capacity=1400, owner=sam, co_owner=joe
            ).save()
            await Bus(name="Unicorn", max_persons=50, owner=sam, co_owner=joe).save()

            # Child-specific columns and inherited FKs load together.
            truck = await Truck.objects.select_related(["owner", "co_owner"]).get()
            assert truck.name == "Shelby wanna be"
            assert truck.owner.name == "Sam"
            assert truck.co_owner.name == "Joe"
            assert truck.max_capacity == 1400

            bus = await Bus.objects.select_related(["owner", "co_owner"]).get()
            assert bus.name == "Unicorn"
            assert bus.owner.name == "Sam"
            assert bus.co_owner.name == "Joe"
            assert bus.max_persons == 50

            # Reverse relations get per-child related names (coowned_*).
            fetched_joe = await Person.objects.select_related(
                ["coowned_trucks", "coowned_buses"]
            ).get(name="Joe")
            assert fetched_joe.pk == joe.pk
            assert fetched_joe.coowned_trucks[0] == truck
            assert fetched_joe.coowned_trucks[0].created_date is not None
            assert fetched_joe.coowned_buses[0] == bus
            assert fetched_joe.coowned_buses[0].created_date is not None

            # exclude_fields applies to inherited columns on prefetched relations.
            fetched_joe = (
                await Person.objects.exclude_fields(
                    {
                        "coowned_trucks": {"created_date"},
                        "coowned_buses": {"created_date"},
                    }
                )
                .prefetch_related(["coowned_trucks", "coowned_buses"])
                .get(name="Joe")
            )
            assert fetched_joe.pk == joe.pk
            assert fetched_joe.coowned_trucks[0] == truck
            assert fetched_joe.coowned_trucks[0].created_date is None
            assert fetched_joe.coowned_buses[0] == bus
            assert fetched_joe.coowned_buses[0].created_date is None
len(unicorn.co_owners) == 2 assert unicorn.co_owners[0].name == "Joe" unicorn = ( await Bus2.objects.select_related(["owner", "co_owners"]) .order_by("co_owners__name") .get() ) assert unicorn.name == "Unicorn 2" assert unicorn.owner.name == "Sam" assert len(unicorn.co_owners) == 2 assert unicorn.co_owners[0].name == "Alex" joe_check = await Person.objects.select_related( ["coowned_trucks2", "coowned_buses2"] ).get(name="Joe") assert joe_check.pk == joe.pk assert joe_check.coowned_trucks2[0] == shelby assert joe_check.coowned_trucks2[0].created_date is not None assert joe_check.coowned_buses2[0] == unicorn assert joe_check.coowned_buses2[0].created_date is not None joe_check = ( await Person.objects.exclude_fields( { "coowned_trucks2": {"created_date"}, "coowned_buses2": {"created_date"}, } ) .prefetch_related(["coowned_trucks2", "coowned_buses2"]) .get(name="Joe") ) assert joe_check.pk == joe.pk assert joe_check.coowned_trucks2[0] == shelby assert joe_check.coowned_trucks2[0].created_date is None assert joe_check.coowned_buses2[0] == unicorn assert joe_check.coowned_buses2[0].created_date is None await shelby.co_owners.remove(joe) await shelby.co_owners.remove(alex) await Truck2.objects.delete(name="Shelby wanna be 2") unicorn = ( await Bus2.objects.select_related(["owner", "co_owners"]) .filter(co_owners__name="Joe") .get() ) assert unicorn.name == "Unicorn 2" assert unicorn.owner.name == "Sam" assert unicorn.co_owners[0].name == "Joe" assert len(unicorn.co_owners) == 1 assert unicorn.max_persons == 50 unicorn = ( await Bus2.objects.select_related(["owner", "co_owners"]) .exclude(co_owners__name="Joe") .get() ) assert unicorn.name == "Unicorn 2" assert unicorn.owner.name == "Sam" assert unicorn.co_owners[0].name == "Alex" assert len(unicorn.co_owners) == 1 assert unicorn.max_persons == 50 unicorn = await Bus2.objects.get() assert unicorn.name == "Unicorn 2" assert unicorn.owner.name is None assert len(unicorn.co_owners) == 0 await unicorn.co_owners.all() assert 
def test_custom_config():
    """A custom pydantic Config inherits ormar defaults yet can override them."""
    config = ImmutablePerson.__config__
    # orm_mode comes from the inherited ormar defaults.
    assert getattr(config, "orm_mode") is True
    # validate_assignment was explicitly switched off in the subclass.
    assert getattr(config, "validate_assignment") is False
    person = ImmutablePerson(name="Sam")
    # allow_mutation=False makes instances read-only after construction.
    with pytest.raises(TypeError):
        person.name = "Not Sam"
def test_field_redefining():
    """A model may redefine a mixin-provided column with an explicit alias."""

    class RedefinedField(ormar.Model, DateFieldsMixins):
        class Meta(ormar.ModelMeta):
            tablename = "redefined"
            metadata = metadata
            database = db

        id: int = ormar.Integer(primary_key=True)
        created_date: datetime.datetime = ormar.DateTime(name="creation_date")

    redefined = RedefinedField.Meta.model_fields["created_date"]
    # Redefinition drops the mixin default and takes the new column alias.
    assert redefined.ormar_default is None
    assert redefined.get_alias() == "creation_date"
    column_names = [col.name for col in RedefinedField.Meta.table.columns]
    assert "creation_date" in column_names
def round_date_to_seconds(
    date: datetime.datetime,
) -> datetime.datetime:  # pragma: no cover
    """Drop sub-second precision, rounding half a second or more upwards."""
    if date.microsecond >= 500000:
        return (date + datetime.timedelta(seconds=1)).replace(microsecond=0)
    return date.replace(microsecond=0)
def test_property_fields_are_inherited():
    """@property_field decorated methods are inherited and may be overridden."""
    foo = Foo(name="foo")
    # Foo keeps the base property and adds its own one.
    assert foo.prefixed_name == "prefix_foo"
    assert foo.dict() == {
        "id": None,
        "name": "foo",
        "prefixed_name": "prefix_foo",
        "double_prefixed_name": "prefix2_foo",
    }
    bar = Bar(name="bar")
    # Bar overrides the inherited property with its own implementation.
    assert bar.prefixed_name == "baz_bar"
    assert bar.dict() == {"id": None, "name": "bar", "prefixed_name": "baz_bar"}
class BaseModel(ormar.Model):
    """Abstract base supplying a UUID primary key and audit timestamps.

    Concrete children (see ``Member``) inherit all three columns.
    """

    class Meta(ormar.ModelMeta):
        abstract = True

    id: uuid.UUID = ormar.UUID(
        primary_key=True, default=uuid.uuid4, uuid_format="string"
    )
    # Pass the callable itself, not its result: `datetime.datetime.utcnow()`
    # would be evaluated once at class-definition (import) time, freezing a
    # single timestamp as the default for every row ever inserted. ormar
    # invokes a callable default per instance, matching the uuid4 usage above.
    created_at: datetime.datetime = ormar.DateTime(default=datetime.datetime.utcnow)
    updated_at: datetime.datetime = ormar.DateTime(default=datetime.datetime.utcnow)
@pytest.mark.asyncio
async def test_model_is_not_abstract_by_default():
    """A child of two abstract parents is concrete unless it opts into abstract."""
    async with database:
        nation = await Nation(
            name="Sweden", alpha2_code="SE", region="Europe", subregion="Scandinavia"
        ).save()
        # A concrete model persists and receives a generated primary key.
        assert nation.id is not None
def test_casts_properly():
    """A generated pydantic model accepts data round-tripped through ormar."""
    payload = {
        "id": 0,
        "status": "string",
        "ticket": {"id": 0, "number": 0, "status": "string"},
        "package": {
            "version": "string",
            "id": 0,
            "library": {"id": 0, "name": "string"},
        },
    }
    ormar_instance = TicketPackage(**payload)
    TicketPackageOut = TicketPackage.get_pydantic(exclude={"ticket"})
    # Feeding the ormar dict back through the generated model drops "ticket"
    # but keeps the nested package -> library structure.
    serialized = TicketPackageOut(**ormar_instance.dict()).dict()
    assert "ticket" not in serialized
    assert "package" in serialized
    assert "library" in serialized.get("package")
def test_pydantic_validator():
    """Both the custom validator and choices survive get_pydantic generation."""
    # A valid payload passes cleanly.
    ModelExampleCreate(str_field="a aaaaaa", enum_field="A")
    # The inherited @pydantic.validator still fires on the generated model.
    with pytest.raises(ValidationError) as exc:
        ModelExampleCreate(str_field="aaaaaaa", enum_field="A")
    assert "must contain a space" in str(exc)
    # The ormar choices validation is carried over as well.
    with pytest.raises(ValidationError) as exc:
        ModelExampleCreate(str_field="a aaaaaaa", enum_field="Z")
    assert "not in allowed choices" in str(exc)
def test_ormar_validator():
    """The ormar model enforces both the custom validator and choices."""
    # A valid payload constructs without error.
    ModelExample(str_field="a aaaaaa", enum_field="A")
    # Custom @pydantic.validator rejects strings without a space.
    with pytest.raises(ValidationError) as exc:
        ModelExample(str_field="aaaaaaa", enum_field="A")
    assert "must contain a space" in str(exc)
    # choices restricts enum_field to the EnumExample members.
    with pytest.raises(ValidationError) as exc:
        ModelExample(str_field="a aaaaaaa", enum_field="Z")
    assert "not in allowed choices" in str(exc)
@pytest.mark.asyncio
async def test_check_columns_exclude_mysql():
    """CHECK constraint rejects rows violating inventory > buffer (skip MySQL)."""
    # MySQL historically ignores CHECK constraints, so only the other
    # backends exercise this path.
    if Product.Meta.database._backend._dialect.name == "mysql":
        return
    async with database:  # pragma: no cover
        async with database.transaction(force_rollback=True):
            # Satisfies inventory > buffer, so it inserts fine.
            await Product.objects.create(
                name="Mars", company="Nestle", inventory=100, buffer=10
            )
            # Violates the CHECK constraint -> backend-specific integrity error.
            with pytest.raises(
                (
                    sqlite3.IntegrityError,
                    asyncpg.exceptions.CheckViolationError,
                )
            ):
                await Product.objects.create(
                    name="Cookies", company="Nestle", inventory=1, buffer=10
                )
def test_table_structure():
    """IndexColumns constraints produce named and auto-named SQLAlchemy indexes."""
    assert len(Product.Meta.table.indexes) > 0
    # Sort descending by name so the explicitly named index comes first.
    indexes = sorted(
        Product.Meta.table.indexes, key=lambda idx: idx.name, reverse=True
    )
    named_index = indexes[0]
    assert named_index.name == "my_index"
    assert [col.name for col in named_index.columns] == ["company", "name"]
    # The second constraint got an auto-generated ix_<table>_<cols> name.
    auto_index = indexes[1]
    assert auto_index.name == "ix_products_location_company_type"
    assert [col.name for col in auto_index.columns] == ["location", "company_type"]
@pytest.mark.asyncio
async def test_unique_columns():
    """UniqueColumns("name", "company") rejects duplicate name+company pairs."""
    integrity_errors = (
        sqlite3.IntegrityError,
        pymysql.IntegrityError,
        asyncpg.exceptions.UniqueViolationError,
    )
    async with database:
        async with database.transaction(force_rollback=True):
            # Distinct combinations are all accepted, even with repeated values
            # in a single column.
            await Product.objects.create(name="Cookies", company="Nestle")
            await Product.objects.create(name="Mars", company="Mars")
            await Product.objects.create(name="Mars", company="Nestle")
            # An exact duplicate of the (name, company) pair must fail.
            with pytest.raises(integrity_errors):
                await Product.objects.create(name="Mars", company="Mars")
def key():
    """Generate a random 8-character primary key from a fixed alphabet."""
    alphabet = "abcdefgh123456"
    return "".join(random.choice(alphabet) for _ in range(8))
create_test_database(): engine = create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest_asyncio.fixture(scope="function") async def cleanup(): yield async with database: await PositionOrm.objects.delete(each=True) await PositionOrmDef.objects.delete(each=True) @pytest.mark.asyncio async def test_creating_a_position(cleanup): async with database: instance = PositionOrm(name="my_pos", x=1.0, y=2.0, degrees=3.0) await instance.save() assert instance.saved assert instance.name == "my_pos" instance2 = PositionOrmDef(x=1.0, y=2.0, degrees=3.0) await instance2.save() assert instance2.saved assert instance2.name is not None assert len(instance2.name) == 12 instance3 = PositionOrmDef(x=1.0, y=2.0, degrees=3.0) await instance3.save() assert instance3.saved assert instance3.name is not None assert len(instance3.name) == 12 assert instance2.name != instance3.name ormar-0.12.2/tests/test_model_definition/pks_and_fks/test_uuid_fks.py000066400000000000000000000034621444363446500261530ustar00rootroot00000000000000import uuid import databases import pytest import sqlalchemy from sqlalchemy import create_engine import ormar from tests.settings import DATABASE_URL metadata = sqlalchemy.MetaData() db = databases.Database(DATABASE_URL) class User(ormar.Model): class Meta: tablename = "user" metadata = metadata database = db id: uuid.UUID = ormar.UUID( primary_key=True, default=uuid.uuid4, uuid_format="string" ) username = ormar.String(index=True, unique=True, null=False, max_length=255) email = ormar.String(index=True, unique=True, nullable=False, max_length=255) hashed_password = ormar.String(null=False, max_length=255) is_active = ormar.Boolean(default=True, nullable=False) is_superuser = ormar.Boolean(default=False, nullable=False) class Token(ormar.Model): class Meta: tablename = "token" metadata = metadata database = db id = ormar.Integer(primary_key=True) text = ormar.String(max_length=4, unique=True) user = ormar.ForeignKey(User, 
related_name="tokens") created_at = ormar.DateTime(server_default=sqlalchemy.func.now()) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_uuid_fk(): async with db: async with db.transaction(force_rollback=True): user = await User.objects.create( username="User1", email="email@example.com", hashed_password="^$EDACVS(&A&Y@2131aa", is_active=True, is_superuser=False, ) await Token.objects.create(text="AAAA", user=user) await Token.objects.order_by("-created_at").all() ormar-0.12.2/tests/test_model_definition/test_aliases.py000066400000000000000000000137351444363446500235050ustar00rootroot00000000000000from typing import List, Optional import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Child(ormar.Model): class Meta: tablename = "children" metadata = metadata database = database id: int = ormar.Integer(name="child_id", primary_key=True) first_name: str = ormar.String(name="fname", max_length=100) last_name: str = ormar.String(name="lname", max_length=100) born_year: int = ormar.Integer(name="year_born", nullable=True) class Artist(ormar.Model): class Meta: tablename = "artists" metadata = metadata database = database id: int = ormar.Integer(name="artist_id", primary_key=True) first_name: str = ormar.String(name="fname", max_length=100) last_name: str = ormar.String(name="lname", max_length=100) born_year: int = ormar.Integer(name="year") children: Optional[List[Child]] = ormar.ManyToMany(Child) class Album(ormar.Model): class Meta: tablename = "music_albums" metadata = metadata database = database id: int = ormar.Integer(name="album_id", primary_key=True) name: str = ormar.String(name="album_name", max_length=100) artist: Optional[Artist] = 
ormar.ForeignKey(Artist, name="artist_id") @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) def test_table_structure(): assert "album_id" in [x.name for x in Album.Meta.table.columns] assert "album_name" in [x.name for x in Album.Meta.table.columns] assert "fname" in [x.name for x in Artist.Meta.table.columns] assert "lname" in [x.name for x in Artist.Meta.table.columns] assert "year" in [x.name for x in Artist.Meta.table.columns] @pytest.mark.asyncio async def test_working_with_aliases(): async with database: async with database.transaction(force_rollback=True): artist = await Artist.objects.create( first_name="Ted", last_name="Mosbey", born_year=1975 ) await Album.objects.create(name="Aunt Robin", artist=artist) await artist.children.create( first_name="Son", last_name="1", born_year=1990 ) await artist.children.create( first_name="Son", last_name="2", born_year=1995 ) await artist.children.create( first_name="Son", last_name="3", born_year=1998 ) album = await Album.objects.select_related("artist").first() assert album.artist.last_name == "Mosbey" assert album.artist.id is not None assert album.artist.first_name == "Ted" assert album.artist.born_year == 1975 assert album.name == "Aunt Robin" artist = await Artist.objects.select_related("children").get() assert len(artist.children) == 3 assert artist.children[0].first_name == "Son" assert artist.children[1].last_name == "2" assert artist.children[2].last_name == "3" await artist.update(last_name="Bundy") await Artist.objects.filter(pk=artist.pk).update(born_year=1974) artist = await Artist.objects.select_related("children").get() assert artist.last_name == "Bundy" assert artist.born_year == 1974 artist = ( await Artist.objects.select_related("children") .fields( [ "first_name", "last_name", "born_year", "children__first_name", "children__last_name", ] ) 
.get() ) assert artist.children[0].born_year is None @pytest.mark.asyncio async def test_bulk_operations_and_fields(): async with database: d1 = Child(first_name="Daughter", last_name="1", born_year=1990) d2 = Child(first_name="Daughter", last_name="2", born_year=1991) await Child.objects.bulk_create([d1, d2]) children = await Child.objects.filter(first_name="Daughter").all() assert len(children) == 2 assert children[0].last_name == "1" for child in children: child.born_year = child.born_year - 100 await Child.objects.bulk_update(children) children = await Child.objects.filter(first_name="Daughter").all() assert len(children) == 2 assert children[0].born_year == 1890 children = await Child.objects.fields(["first_name", "last_name"]).all() assert len(children) == 2 for child in children: assert child.born_year is None await children[0].load() await children[0].delete() children = await Child.objects.all() assert len(children) == 1 @pytest.mark.asyncio async def test_working_with_aliases_get_or_create(): async with database: async with database.transaction(force_rollback=True): artist, created = await Artist.objects.get_or_create( first_name="Teddy", last_name="Bear", born_year=2020 ) assert artist.pk is not None assert created is True artist2, created = await Artist.objects.get_or_create( first_name="Teddy", last_name="Bear", born_year=2020 ) assert artist == artist2 assert created is False art3 = artist2.dict() art3["born_year"] = 2019 await Artist.objects.update_or_create(**art3) artist3 = await Artist.objects.get(last_name="Bear") assert artist3.born_year == 2019 artists = await Artist.objects.all() assert len(artists) == 1 ormar-0.12.2/tests/test_model_definition/test_columns.py000066400000000000000000000077121444363446500235420ustar00rootroot00000000000000import datetime from enum import Enum import databases import pydantic import pytest import sqlalchemy import ormar from ormar import ModelDefinitionError from tests.settings import DATABASE_URL database = 
databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() def time(): return datetime.datetime.now().time() class MyEnum(Enum): SMALL = 1 BIG = 2 class Example(ormar.Model): class Meta: tablename = "example" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=200, default="aaa") created: datetime.datetime = ormar.DateTime(default=datetime.datetime.now) created_day: datetime.date = ormar.Date(default=datetime.date.today) created_time: datetime.time = ormar.Time(default=time) description: str = ormar.Text(nullable=True) value: float = ormar.Float(nullable=True) data: pydantic.Json = ormar.JSON(default={}) size: MyEnum = ormar.Enum(enum_class=MyEnum, default=MyEnum.SMALL) class EnumExample(ormar.Model): class Meta: tablename = "enum_example" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) size: MyEnum = ormar.Enum(enum_class=MyEnum, default=MyEnum.SMALL) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) def test_proper_enum_column_type(): assert Example.__fields__["size"].type_ == MyEnum def test_accepts_only_proper_enums(): class WrongEnum(Enum): A = 1 B = 2 with pytest.raises(pydantic.ValidationError): Example(size=WrongEnum.A) @pytest.mark.asyncio async def test_enum_bulk_operations(): async with database: examples = [EnumExample(), EnumExample()] await EnumExample.objects.bulk_create(examples) check = await EnumExample.objects.all() assert all(x.size == MyEnum.SMALL for x in check) for x in check: x.size = MyEnum.BIG await EnumExample.objects.bulk_update(check) check2 = await EnumExample.objects.all() assert all(x.size == MyEnum.BIG for x in check2) @pytest.mark.asyncio async def test_enum_filter(): async with database: examples = [EnumExample(), EnumExample(size=MyEnum.BIG)] await 
EnumExample.objects.bulk_create(examples) check = await EnumExample.objects.all(size=MyEnum.SMALL) assert len(check) == 1 check = await EnumExample.objects.all(size=MyEnum.BIG) assert len(check) == 1 @pytest.mark.asyncio async def test_model_crud(): async with database: example = Example() await example.save() await example.load() assert example.created.year == datetime.datetime.now().year assert example.created_day == datetime.date.today() assert example.description is None assert example.value is None assert example.data == {} assert example.size == MyEnum.SMALL await example.update(data={"foo": 123}, value=123.456, size=MyEnum.BIG) await example.load() assert example.value == 123.456 assert example.data == {"foo": 123} assert example.size == MyEnum.BIG await example.update(data={"foo": 123}, value=123.456) await example.load() assert example.value == 123.456 assert example.data == {"foo": 123} await example.delete() @pytest.mark.asyncio async def test_invalid_enum_field(): async with database: with pytest.raises(ModelDefinitionError): class Example2(ormar.Model): class Meta: tablename = "example" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) size: MyEnum = ormar.Enum(enum_class=[]) ormar-0.12.2/tests/test_model_definition/test_create_uses_init_for_consistency.py000066400000000000000000000032411444363446500306670ustar00rootroot00000000000000import uuid from typing import ClassVar import databases import pytest import sqlalchemy from pydantic import root_validator import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): database = database metadata = metadata class Mol(ormar.Model): # fixed namespace to generate always unique uuid from the smiles _UUID_NAMESPACE: ClassVar[uuid.UUID] = uuid.UUID( "12345678-abcd-1234-abcd-123456789abc" ) class Meta(BaseMeta): tablename = "mols" id: uuid.UUID = 
ormar.UUID(primary_key=True, index=True, uuid_format="hex") smiles: str = ormar.String(nullable=False, unique=True, max_length=256) def __init__(self, **kwargs): # this is required to generate id from smiles in init, if id is not given if "id" not in kwargs: kwargs["id"] = self._UUID_NAMESPACE super().__init__(**kwargs) @root_validator() def make_canonical_smiles_and_uuid(cls, values): values["id"], values["smiles"] = cls.uuid(values["smiles"]) return values @classmethod def uuid(cls, smiles): id_ = uuid.uuid5(cls._UUID_NAMESPACE, smiles) return id_, smiles @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_json_column(): async with database: await Mol.objects.create(smiles="Cc1ccccc1") count = await Mol.objects.count() assert count == 1 ormar-0.12.2/tests/test_model_definition/test_dates_with_timezone.py000066400000000000000000000100501444363446500261140ustar00rootroot00000000000000from datetime import timezone, timedelta, datetime, date, time import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class DateFieldsModel(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) created_date: datetime = ormar.DateTime( default=datetime.now(tz=timezone(timedelta(hours=3))), timezone=True ) updated_date: datetime = ormar.DateTime( default=datetime.now(tz=timezone(timedelta(hours=3))), name="modification_date", timezone=True, ) class SampleModel(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) updated_at: datetime = ormar.DateTime() class TimeModel(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) 
elapsed: time = ormar.Time() class DateModel(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) creation_date: date = ormar.Date() class MyModel(ormar.Model): id: int = ormar.Integer(primary_key=True) created_at: datetime = ormar.DateTime(timezone=True, nullable=False) class Meta: tablename = "mymodels" metadata = metadata database = database @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_model_crud_with_timezone(): async with database: datemodel = await DateFieldsModel().save() assert datemodel.created_date is not None assert datemodel.updated_date is not None @pytest.mark.asyncio async def test_query_with_datetime_in_filter(): async with database: creation_dt = datetime(2021, 5, 18, 0, 0, 0, 0) sample = await SampleModel.objects.create(updated_at=creation_dt) current_dt = datetime(2021, 5, 19, 0, 0, 0, 0) outdated_samples = await SampleModel.objects.filter( updated_at__lt=current_dt ).all() assert outdated_samples[0] == sample @pytest.mark.asyncio async def test_query_with_date_in_filter(): async with database: sample = await TimeModel.objects.create(elapsed=time(0, 20, 20)) await TimeModel.objects.create(elapsed=time(0, 12, 0)) await TimeModel.objects.create(elapsed=time(0, 19, 55)) sample4 = await TimeModel.objects.create(elapsed=time(0, 21, 15)) threshold = time(0, 20, 0) samples = await TimeModel.objects.filter(TimeModel.elapsed >= threshold).all() assert len(samples) == 2 assert samples[0] == sample assert samples[1] == sample4 @pytest.mark.asyncio async def test_query_with_time_in_filter(): async with database: await DateModel.objects.create(creation_date=date(2021, 5, 18)) sample2 = await DateModel.objects.create(creation_date=date(2021, 5, 19)) sample3 = await 
DateModel.objects.create(creation_date=date(2021, 5, 20)) outdated_samples = await DateModel.objects.filter( creation_date__in=[date(2021, 5, 19), date(2021, 5, 20)] ).all() assert len(outdated_samples) == 2 assert outdated_samples[0] == sample2 assert outdated_samples[1] == sample3 @pytest.mark.asyncio async def test_filtering_by_timezone_with_timedelta(): async with database: now_utc = datetime.now(timezone.utc) object = MyModel(created_at=now_utc) await object.save() one_hour_ago = datetime.now(timezone.utc) - timedelta(hours=1) created_since_one_hour_ago = await MyModel.objects.filter( created_at__gte=one_hour_ago ).all() assert len(created_since_one_hour_ago) == 1 ormar-0.12.2/tests/test_model_definition/test_equality_and_hash.py000066400000000000000000000032051444363446500255350ustar00rootroot00000000000000# type: ignore import databases import pytest import sqlalchemy import ormar from ormar import ModelDefinitionError, property_field from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Song(ormar.Model): class Meta: tablename = "songs" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_equality(): async with database: song1 = await Song.objects.create(name="Song") song2 = await Song.objects.create(name="Song") song3 = Song(name="Song") song4 = Song(name="Song") assert song1 == song1 assert song3 == song4 assert song1 != song2 assert song1 != song3 assert song3 != song1 assert song1 is not None @pytest.mark.asyncio async def test_hash_doesnt_change_with_fields_if_pk(): async with database: song1 = await Song.objects.create(name="Song") prev_hash = hash(song1) 
await song1.update(name="Song 2") assert hash(song1) == prev_hash @pytest.mark.asyncio async def test_hash_changes_with_fields_if_no_pk(): async with database: song1 = Song(name="Song") prev_hash = hash(song1) song1.name = "Song 2" assert hash(song1) != prev_hash ormar-0.12.2/tests/test_model_definition/test_extra_ignore_parameter.py000066400000000000000000000014631444363446500266050ustar00rootroot00000000000000import databases import sqlalchemy import ormar from ormar import Extra from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Child(ormar.Model): class Meta(ormar.ModelMeta): tablename = "children" metadata = metadata database = database extra = Extra.ignore id: int = ormar.Integer(name="child_id", primary_key=True) first_name: str = ormar.String(name="fname", max_length=100) last_name: str = ormar.String(name="lname", max_length=100) def test_allow_extra_parameter(): child = Child(first_name="Test", last_name="Name", extra_param="Unexpected") assert child.first_name == "Test" assert child.last_name == "Name" assert not hasattr(child, "extra_param") ormar-0.12.2/tests/test_model_definition/test_fields_access.py000066400000000000000000000157471444363446500246600ustar00rootroot00000000000000import databases import pytest import sqlalchemy import ormar from ormar import BaseField from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class PriceList(ormar.Model): class Meta(BaseMeta): tablename = "price_lists" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Category(ormar.Model): class Meta(BaseMeta): tablename = "categories" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) price_lists = ormar.ManyToMany(PriceList, related_name="categories") 
class Product(ormar.Model): class Meta(BaseMeta): tablename = "product" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) rating: float = ormar.Float(minimum=1, maximum=5) category = ormar.ForeignKey(Category) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) def test_fields_access(): # basic access assert Product.id._field == Product.Meta.model_fields["id"] assert Product.id.id == Product.Meta.model_fields["id"] assert Product.pk.id == Product.id.id assert isinstance(Product.id._field, BaseField) assert Product.id._access_chain == "id" assert Product.id._source_model == Product # nested models curr_field = Product.category.name assert curr_field._field == Category.Meta.model_fields["name"] assert curr_field._access_chain == "category__name" assert curr_field._source_model == Product # deeper nesting curr_field = Product.category.price_lists.name assert curr_field._field == PriceList.Meta.model_fields["name"] assert curr_field._access_chain == "category__price_lists__name" assert curr_field._source_model == Product # reverse nesting curr_field = PriceList.categories.products.rating assert curr_field._field == Product.Meta.model_fields["rating"] assert curr_field._access_chain == "categories__products__rating" assert curr_field._source_model == PriceList with pytest.raises(AttributeError): assert Product.category >= 3 @pytest.mark.parametrize( "method, expected, expected_value", [ ("__eq__", "exact", "Test"), ("__lt__", "lt", "Test"), ("__le__", "lte", "Test"), ("__ge__", "gte", "Test"), ("__gt__", "gt", "Test"), ("iexact", "iexact", "Test"), ("contains", "contains", "Test"), ("icontains", "icontains", "Test"), ("startswith", "startswith", "Test"), ("istartswith", "istartswith", "Test"), ("endswith", "endswith", "Test"), ("iendswith", "iendswith", "Test"), ("isnull", "isnull", 
"Test"), ("in_", "in", "Test"), ("__lshift__", "in", "Test"), ("__rshift__", "isnull", True), ("__mod__", "contains", "Test"), ], ) def test_operator_return_proper_filter_action(method, expected, expected_value): group_ = getattr(Product.name, method)("Test") assert group_._kwargs_dict == {f"name__{expected}": expected_value} group_ = getattr(Product.category.name, method)("Test") assert group_._kwargs_dict == {f"category__name__{expected}": expected_value} group_ = getattr(PriceList.categories.products.rating, method)("Test") assert group_._kwargs_dict == { f"categories__products__rating__{expected}": expected_value } @pytest.mark.parametrize("method, expected_direction", [("asc", ""), ("desc", "desc")]) def test_operator_return_proper_order_action(method, expected_direction): action = getattr(Product.name, method)() assert action.source_model == Product assert action.target_model == Product assert action.direction == expected_direction assert action.is_source_model_order action = getattr(Product.category.name, method)() assert action.source_model == Product assert action.target_model == Category assert action.direction == expected_direction assert not action.is_source_model_order action = getattr(PriceList.categories.products.rating, method)() assert action.source_model == PriceList assert action.target_model == Product assert action.direction == expected_direction assert not action.is_source_model_order def test_combining_groups_together(): group = (Product.name == "Test") & (Product.rating >= 3.0) group.resolve(model_cls=Product) assert len(group._nested_groups) == 2 assert str( group.get_text_clause().compile(compile_kwargs={"literal_binds": True}) ) == ("((product.name = 'Test') AND (product.rating >= 3.0))") group = ~((Product.name == "Test") & (Product.rating >= 3.0)) group.resolve(model_cls=Product) assert len(group._nested_groups) == 2 assert str( group.get_text_clause().compile(compile_kwargs={"literal_binds": True}) ) == ("NOT ((product.name = 'Test') 
AND" " (product.rating >= 3.0))") group = ((Product.name == "Test") & (Product.rating >= 3.0)) | ( Product.category.name << (["Toys", "Books"]) ) group.resolve(model_cls=Product) assert len(group._nested_groups) == 2 assert len(group._nested_groups[0]._nested_groups) == 2 group_str = str( group.get_text_clause().compile(compile_kwargs={"literal_binds": True}) ) category_prefix = group._nested_groups[1].actions[0].table_prefix assert group_str == ( "(((product.name = 'Test') AND (product.rating >= 3.0)) " f"OR ({category_prefix}_categories.name IN ('Toys', 'Books')))" ) group = (Product.name % "Test") | ( (Product.category.price_lists.name.startswith("Aa")) | (Product.category.name << (["Toys", "Books"])) ) group.resolve(model_cls=Product) assert len(group._nested_groups) == 2 assert len(group._nested_groups[1]._nested_groups) == 2 group_str = str( group.get_text_clause().compile(compile_kwargs={"literal_binds": True}) ) price_list_prefix = ( group._nested_groups[1]._nested_groups[0].actions[0].table_prefix ) category_prefix = group._nested_groups[1]._nested_groups[1].actions[0].table_prefix assert group_str == ( f"((product.name LIKE '%Test%') " f"OR (({price_list_prefix}_price_lists.name LIKE 'Aa%') " f"OR ({category_prefix}_categories.name IN ('Toys', 'Books'))))" ) @pytest.mark.asyncio async def test_filtering_by_field_access(): async with database: async with database.transaction(force_rollback=True): category = await Category(name="Toys").save() product2 = await Product( name="My Little Pony", rating=3.8, category=category ).save() check = await Product.objects.get(Product.name == "My Little Pony") assert check == product2 ormar-0.12.2/tests/test_model_definition/test_foreign_key_value_used_for_related_model.py000066400000000000000000000042421444363446500323200ustar00rootroot00000000000000import uuid from typing import List, Optional import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = 
databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class PageLink(ormar.Model): class Meta(BaseMeta): tablename = "pagelinks" id: int = ormar.Integer(primary_key=True) value: str = ormar.String(max_length=2048) country: str = ormar.String(max_length=1000) class Post(ormar.Model): class Meta(BaseMeta): tablename = "posts" id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=500) link: PageLink = ormar.ForeignKey( PageLink, related_name="posts", ondelete="CASCADE" ) class Department(ormar.Model): class Meta(BaseMeta): pass id: uuid.UUID = ormar.UUID(primary_key=True, default=uuid.uuid4()) name: str = ormar.String(max_length=100) class Course(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) department: Optional[Department] = ormar.ForeignKey(Department) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_pass_int_values_as_fk(): async with database: async with database.transaction(force_rollback=True): link = await PageLink(id=1, value="test", country="USA").save() await Post.objects.create(title="My post", link=link.id) post_check = await Post.objects.select_related("link").get() assert post_check.link == link @pytest.mark.asyncio async def test_pass_uuid_value_as_fk(): async with database: async with database.transaction(force_rollback=True): dept = await Department(name="Department test").save() await Course(name="Test course", department=dept.id).save() ormar-0.12.2/tests/test_model_definition/test_iterate.py000066400000000000000000000212741444363446500235160ustar00rootroot00000000000000import uuid import databases import pytest import sqlalchemy 
import ormar from ormar.exceptions import QueryDefinitionError from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class User(ormar.Model): class Meta: tablename = "users3" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, default="") class User2(ormar.Model): class Meta: tablename = "users4" metadata = metadata database = database id: uuid.UUID = ormar.UUID( uuid_format="string", primary_key=True, default=uuid.uuid4 ) name: str = ormar.String(max_length=100, default="") class Task(ormar.Model): class Meta: tablename = "tasks" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, default="") user: User = ormar.ForeignKey(to=User) class Task2(ormar.Model): class Meta: tablename = "tasks2" metadata = metadata database = database id: uuid.UUID = ormar.UUID( uuid_format="string", primary_key=True, default=uuid.uuid4 ) name: str = ormar.String(max_length=100, default="") user: User2 = ormar.ForeignKey(to=User2) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_empty_result(): async with database: async with database.transaction(force_rollback=True): async for user in User.objects.iterate(): pass # pragma: no cover @pytest.mark.asyncio async def test_model_iterator(): async with database: async with database.transaction(force_rollback=True): tom = await User.objects.create(name="Tom") jane = await User.objects.create(name="Jane") lucy = await User.objects.create(name="Lucy") async for user in User.objects.iterate(): assert user in (tom, jane, lucy) @pytest.mark.asyncio async def test_model_iterator_filter(): async with database: async with 
database.transaction(force_rollback=True): tom = await User.objects.create(name="Tom") await User.objects.create(name="Jane") await User.objects.create(name="Lucy") async for user in User.objects.iterate(name="Tom"): assert user.name == tom.name @pytest.mark.asyncio async def test_model_iterator_relations(): async with database: async with database.transaction(force_rollback=True): tom = await User.objects.create(name="Tom") jane = await User.objects.create(name="Jane") lucy = await User.objects.create(name="Lucy") for user in tom, jane, lucy: await Task.objects.create(name="task1", user=user) await Task.objects.create(name="task2", user=user) results = [] async for user in User.objects.select_related(User.tasks).iterate(): assert len(user.tasks) == 2 results.append(user) assert len(results) == 3 @pytest.mark.asyncio async def test_model_iterator_relations_queryset_proxy(): async with database: async with database.transaction(force_rollback=True): tom = await User.objects.create(name="Tom") jane = await User.objects.create(name="Jane") for user in tom, jane: await Task.objects.create(name="task1", user=user) await Task.objects.create(name="task2", user=user) tom_tasks = [] async for task in tom.tasks.iterate(): assert task.name in ("task1", "task2") tom_tasks.append(task) assert len(tom_tasks) == 2 jane_tasks = [] async for task in jane.tasks.iterate(): assert task.name in ("task1", "task2") jane_tasks.append(task) assert len(jane_tasks) == 2 @pytest.mark.asyncio async def test_model_iterator_uneven_number_of_relations(): async with database: async with database.transaction(force_rollback=True): tom = await User.objects.create(name="Tom") jane = await User.objects.create(name="Jane") lucy = await User.objects.create(name="Lucy") for user in tom, jane: await Task.objects.create(name="task1", user=user) await Task.objects.create(name="task2", user=user) await Task.objects.create(name="task3", user=lucy) expected_counts = {"Tom": 2, "Jane": 2, "Lucy": 1} results = [] 
async for user in User.objects.select_related(User.tasks).iterate(): assert len(user.tasks) == expected_counts[user.name] results.append(user) assert len(results) == 3 @pytest.mark.asyncio async def test_model_iterator_uuid_pk(): async with database: async with database.transaction(force_rollback=True): tom = await User2.objects.create(name="Tom") jane = await User2.objects.create(name="Jane") lucy = await User2.objects.create(name="Lucy") async for user in User2.objects.iterate(): assert user in (tom, jane, lucy) @pytest.mark.asyncio async def test_model_iterator_filter_uuid_pk(): async with database: async with database.transaction(force_rollback=True): tom = await User2.objects.create(name="Tom") await User2.objects.create(name="Jane") await User2.objects.create(name="Lucy") async for user in User2.objects.iterate(name="Tom"): assert user.name == tom.name @pytest.mark.asyncio async def test_model_iterator_relations_uuid_pk(): async with database: async with database.transaction(force_rollback=True): tom = await User2.objects.create(name="Tom") jane = await User2.objects.create(name="Jane") lucy = await User2.objects.create(name="Lucy") for user in tom, jane, lucy: await Task2.objects.create(name="task1", user=user) await Task2.objects.create(name="task2", user=user) results = [] async for user in User2.objects.select_related(User2.task2s).iterate(): assert len(user.task2s) == 2 results.append(user) assert len(results) == 3 @pytest.mark.asyncio async def test_model_iterator_relations_queryset_proxy_uuid_pk(): async with database: async with database.transaction(force_rollback=True): tom = await User2.objects.create(name="Tom") jane = await User2.objects.create(name="Jane") for user in tom, jane: await Task2.objects.create(name="task1", user=user) await Task2.objects.create(name="task2", user=user) tom_tasks = [] async for task in tom.task2s.iterate(): assert task.name in ("task1", "task2") tom_tasks.append(task) assert len(tom_tasks) == 2 jane_tasks = [] async 
for task in jane.task2s.iterate(): assert task.name in ("task1", "task2") jane_tasks.append(task) assert len(jane_tasks) == 2 @pytest.mark.asyncio async def test_model_iterator_uneven_number_of_relations_uuid_pk(): async with database: async with database.transaction(force_rollback=True): tom = await User2.objects.create(name="Tom") jane = await User2.objects.create(name="Jane") lucy = await User2.objects.create(name="Lucy") for user in tom, jane: await Task2.objects.create(name="task1", user=user) await Task2.objects.create(name="task2", user=user) await Task2.objects.create(name="task3", user=lucy) expected_counts = {"Tom": 2, "Jane": 2, "Lucy": 1} results = [] async for user in User2.objects.select_related(User2.task2s).iterate(): assert len(user.task2s) == expected_counts[user.name] results.append(user) assert len(results) == 3 @pytest.mark.asyncio async def test_model_iterator_with_prefetch_raises_error(): async with database: with pytest.raises(QueryDefinitionError): async for user in User.objects.prefetch_related(User.tasks).iterate(): pass # pragma: no cover ormar-0.12.2/tests/test_model_definition/test_model_construct.py000066400000000000000000000057311444363446500252650ustar00rootroot00000000000000from typing import List import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class NickNames(ormar.Model): class Meta: tablename = "nicks" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="hq_name") class NicksHq(ormar.Model): class Meta: tablename = "nicks_x_hq" metadata = metadata database = database class HQ(ormar.Model): class Meta: tablename = "hqs" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="hq_name") nicks: 
List[NickNames] = ormar.ManyToMany(NickNames, through=NicksHq) class Company(ormar.Model): class Meta: tablename = "companies" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="company_name") founded: int = ormar.Integer(nullable=True) hq: HQ = ormar.ForeignKey(HQ) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_construct_with_empty_relation(): async with database: async with database.transaction(force_rollback=True): hq = await HQ.objects.create(name="Main") comp = Company(name="Banzai", hq=None, founded=1988) comp2 = Company.construct(**dict(name="Banzai", hq=None, founded=1988)) assert comp.dict() == comp2.dict() @pytest.mark.asyncio async def test_init_and_construct_has_same_effect(): async with database: async with database.transaction(force_rollback=True): hq = await HQ.objects.create(name="Main") comp = Company(name="Banzai", hq=hq, founded=1988) comp2 = Company.construct(**dict(name="Banzai", hq=hq, founded=1988)) assert comp.dict() == comp2.dict() comp3 = Company.construct(**dict(name="Banzai", hq=hq.dict(), founded=1988)) assert comp.dict() == comp3.dict() @pytest.mark.asyncio async def test_init_and_construct_has_same_effect_with_m2m(): async with database: async with database.transaction(force_rollback=True): n1 = await NickNames(name="test").save() n2 = await NickNames(name="test2").save() hq = HQ(name="Main", nicks=[n1, n2]) hq2 = HQ.construct(**dict(name="Main", nicks=[n1, n2])) assert hq.dict() == hq2.dict() hq3 = HQ.construct(**dict(name="Main", nicks=[n1.dict(), n2.dict()])) assert hq.dict() == hq3.dict() ormar-0.12.2/tests/test_model_definition/test_model_definition.py000066400000000000000000000151561444363446500253730ustar00rootroot00000000000000# type: ignore 
import asyncio import datetime import decimal import databases import pydantic import pytest import pytest_asyncio import sqlalchemy import typing import ormar from ormar.exceptions import ModelDefinitionError from ormar.models import Model from tests.settings import DATABASE_URL metadata = sqlalchemy.MetaData() database = databases.Database(DATABASE_URL, force_rollback=True) class ExampleModel(Model): class Meta: tablename = "example" metadata = metadata database = database test: int = ormar.Integer(primary_key=True) test_string: str = ormar.String(max_length=250) test_text: str = ormar.Text(default="") test_bool: bool = ormar.Boolean(nullable=False) test_float = ormar.Float(nullable=True) test_datetime = ormar.DateTime(default=datetime.datetime.now) test_date = ormar.Date(default=datetime.date.today) test_time = ormar.Time(default=datetime.time) test_json = ormar.JSON(default={}) test_bigint: int = ormar.BigInteger(default=0) test_smallint: int = ormar.SmallInteger(default=0) test_decimal = ormar.Decimal(scale=2, precision=10) test_decimal2 = ormar.Decimal(max_digits=10, decimal_places=2) fields_to_check = [ "test", "test_text", "test_string", "test_datetime", "test_date", "test_text", "test_float", "test_bigint", "test_json", ] class ExampleModel2(Model): class Meta: tablename = "examples" metadata = metadata database = database test: int = ormar.Integer(primary_key=True) test_string: str = ormar.String(max_length=250) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.fixture() def example(): return ExampleModel( pk=1, test_string="test", test_bool=True, test_decimal=decimal.Decimal(3.5), test_decimal2=decimal.Decimal(5.5), ) def test_not_nullable_field_is_required(): with pytest.raises(pydantic.error_wrappers.ValidationError): ExampleModel(test=1, test_string="test") def test_model_attribute_access(example): assert 
example.test == 1 assert example.test_string == "test" assert example.test_datetime.year == datetime.datetime.now().year assert example.test_date == datetime.date.today() assert example.test_text == "" assert example.test_float is None assert example.test_bigint == 0 assert example.test_json == {} assert example.test_decimal == 3.5 assert example.test_decimal2 == 5.5 example.test = 12 assert example.test == 12 example._orm_saved = True assert example._orm_saved def test_model_attribute_json_access(example): example.test_json = dict(aa=12) assert example.test_json == dict(aa=12) def test_missing_metadata(): with pytest.raises(ModelDefinitionError): class JsonSample2(ormar.Model): class Meta: tablename = "jsons2" database = database id: int = ormar.Integer(primary_key=True) test_json = ormar.JSON(nullable=True) def test_missing_database(): with pytest.raises(ModelDefinitionError): class JsonSample3(ormar.Model): class Meta: tablename = "jsons3" id: int = ormar.Integer(primary_key=True) test_json = ormar.JSON(nullable=True) def test_non_existing_attr(example): with pytest.raises(ValueError): example.new_attr = 12 def test_primary_key_access_and_setting(example): assert example.pk == 1 example.pk = 2 assert example.pk == 2 assert example.test == 2 def test_pydantic_model_is_created(example): assert issubclass(example.__class__, pydantic.BaseModel) assert all([field in example.__fields__ for field in fields_to_check]) assert example.test == 1 def test_sqlalchemy_table_is_created(example): assert issubclass(example.Meta.table.__class__, sqlalchemy.Table) assert all([field in example.Meta.table.columns for field in fields_to_check]) @typing.no_type_check def test_no_pk_in_model_definition(): with pytest.raises(ModelDefinitionError): # type: ignore class ExampleModel2(Model): # type: ignore class Meta: tablename = "example2" database = database metadata = metadata test_string: str = ormar.String(max_length=250) # type: ignore @typing.no_type_check def 
test_two_pks_in_model_definition(): with pytest.raises(ModelDefinitionError): @typing.no_type_check class ExampleModel2(Model): class Meta: tablename = "example3" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) test_string: str = ormar.String(max_length=250, primary_key=True) @typing.no_type_check def test_setting_pk_column_as_pydantic_only_in_model_definition(): with pytest.raises(ModelDefinitionError): class ExampleModel2(Model): class Meta: tablename = "example4" database = database metadata = metadata test: int = ormar.Integer(primary_key=True, pydantic_only=True) @typing.no_type_check def test_decimal_error_in_model_definition(): with pytest.raises(ModelDefinitionError): class ExampleModel2(Model): class Meta: tablename = "example5" database = database metadata = metadata test: decimal.Decimal = ormar.Decimal(primary_key=True) @typing.no_type_check def test_binary_error_without_length_model_definition(): with pytest.raises(ModelDefinitionError): class ExampleModel2(Model): class Meta: tablename = "example6" database = database metadata = metadata test: bytes = ormar.LargeBinary(primary_key=True, max_length=-1) @typing.no_type_check def test_string_error_in_model_definition(): with pytest.raises(ModelDefinitionError): class ExampleModel2(Model): class Meta: tablename = "example6" database = database metadata = metadata test: str = ormar.String(primary_key=True, max_length=0) @typing.no_type_check def test_json_conversion_in_model(): with pytest.raises(pydantic.ValidationError): ExampleModel( test_json=datetime.datetime.now(), test=1, test_string="test", test_bool=True, ) ormar-0.12.2/tests/test_model_definition/test_models.py000066400000000000000000000553671444363446500233560ustar00rootroot00000000000000import asyncio import base64 import datetime import os import uuid from typing import List import databases import pydantic import pytest import sqlalchemy import ormar from ormar.exceptions import ModelError, NoMatch, 
QueryDefinitionError from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class JsonSample(ormar.Model): class Meta: tablename = "jsons" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) test_json = ormar.JSON(nullable=True) blob = b"test" blob2 = b"test2icac89uc98" class LargeBinarySample(ormar.Model): class Meta: tablename = "my_bolbs" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) test_binary: bytes = ormar.LargeBinary(max_length=100000, choices=[blob, blob2]) blob3 = os.urandom(64) blob4 = os.urandom(100) class LargeBinaryStr(ormar.Model): class Meta: tablename = "my_str_blobs" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) test_binary: str = ormar.LargeBinary( max_length=100000, choices=[blob3, blob4], represent_as_base64_str=True ) class LargeBinaryNullableStr(ormar.Model): class Meta: tablename = "my_str_blobs2" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) test_binary: str = ormar.LargeBinary( max_length=100000, choices=[blob3, blob4], represent_as_base64_str=True, nullable=True, ) class UUIDSample(ormar.Model): class Meta: tablename = "uuids" metadata = metadata database = database id: uuid.UUID = ormar.UUID(primary_key=True, default=uuid.uuid4) test_text: str = ormar.Text() class UUIDSample2(ormar.Model): class Meta: tablename = "uuids2" metadata = metadata database = database id: uuid.UUID = ormar.UUID( primary_key=True, default=uuid.uuid4, uuid_format="string" ) test_text: str = ormar.Text() class User(ormar.Model): class Meta: tablename = "users" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, default="") class User2(ormar.Model): class Meta: tablename = "users2" metadata = metadata database = database id: str = ormar.String(primary_key=True, max_length=100) 
name: str = ormar.String(max_length=100, default="") class Product(ormar.Model): class Meta: tablename = "product" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) rating: int = ormar.Integer(minimum=1, maximum=5) in_stock: bool = ormar.Boolean(default=False) last_delivery: datetime.date = ormar.Date(default=datetime.datetime.now) country_name_choices = ("Canada", "Algeria", "United States", "Belize") country_taxed_choices = (True,) country_country_code_choices = (-10, 1, 213, 1200) class Country(ormar.Model): class Meta: tablename = "country" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String( max_length=9, choices=country_name_choices, default="Canada" ) taxed: bool = ormar.Boolean(choices=country_taxed_choices, default=True) country_code: int = ormar.Integer( minimum=0, maximum=1000, choices=country_country_code_choices, default=1 ) class NullableCountry(ormar.Model): class Meta: tablename = "country2" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=9, choices=country_name_choices, nullable=True) class NotNullableCountry(ormar.Model): class Meta: tablename = "country3" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=9, choices=country_name_choices, nullable=False) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) def test_model_class(): assert list(User.Meta.model_fields.keys()) == ["id", "name"] assert issubclass(User.Meta.model_fields["id"].__class__, pydantic.fields.FieldInfo) assert User.Meta.model_fields["id"].primary_key is True assert isinstance(User.Meta.model_fields["name"], pydantic.fields.FieldInfo) assert 
User.Meta.model_fields["name"].max_length == 100 assert isinstance(User.Meta.table, sqlalchemy.Table) def test_wrong_field_name(): with pytest.raises(ModelError): User(non_existing_pk=1) def test_model_pk(): user = User(pk=1) assert user.pk == 1 assert user.id == 1 @pytest.mark.asyncio async def test_json_column(): async with database: async with database.transaction(force_rollback=True): await JsonSample.objects.create(test_json=dict(aa=12)) await JsonSample.objects.create(test_json='{"aa": 12}') items = await JsonSample.objects.all() assert len(items) == 2 assert items[0].test_json == dict(aa=12) assert items[1].test_json == dict(aa=12) items[0].test_json = "[1, 2, 3]" assert items[0].test_json == [1, 2, 3] @pytest.mark.asyncio async def test_binary_column(): async with database: async with database.transaction(force_rollback=True): await LargeBinarySample.objects.create(test_binary=blob) await LargeBinarySample.objects.create(test_binary=blob2) items = await LargeBinarySample.objects.all() assert len(items) == 2 assert items[0].test_binary == blob assert items[1].test_binary == blob2 items[0].test_binary = "test2icac89uc98" assert items[0].test_binary == b"test2icac89uc98" @pytest.mark.asyncio async def test_binary_str_column(): async with database: async with database.transaction(force_rollback=True): await LargeBinaryStr(test_binary=blob3).save() await LargeBinaryStr.objects.create(test_binary=blob4) items = await LargeBinaryStr.objects.all() assert len(items) == 2 assert items[0].test_binary == base64.b64encode(blob3).decode() items[0].test_binary = base64.b64encode(blob4).decode() assert items[0].test_binary == base64.b64encode(blob4).decode() assert items[1].test_binary == base64.b64encode(blob4).decode() assert items[1].__dict__["test_binary"] == blob4 @pytest.mark.asyncio async def test_binary_nullable_str_column(): async with database: async with database.transaction(force_rollback=True): await LargeBinaryNullableStr().save() await 
LargeBinaryNullableStr.objects.create() items = await LargeBinaryNullableStr.objects.all() assert len(items) == 2 items[0].test_binary = blob3 items[1].test_binary = blob4 await LargeBinaryNullableStr.objects.bulk_update(items) items = await LargeBinaryNullableStr.objects.all() assert len(items) == 2 assert items[0].test_binary == base64.b64encode(blob3).decode() items[0].test_binary = base64.b64encode(blob4).decode() assert items[0].test_binary == base64.b64encode(blob4).decode() assert items[1].test_binary == base64.b64encode(blob4).decode() assert items[1].__dict__["test_binary"] == blob4 await LargeBinaryNullableStr.objects.bulk_create( [LargeBinaryNullableStr(), LargeBinaryNullableStr(test_binary=blob3)] ) items = await LargeBinaryNullableStr.objects.all() assert len(items) == 4 await items[0].update(test_binary=blob4) check_item = await LargeBinaryNullableStr.objects.get(id=items[0].id) assert check_item.test_binary == base64.b64encode(blob4).decode() @pytest.mark.asyncio async def test_uuid_column(): async with database: async with database.transaction(force_rollback=True): u1 = await UUIDSample.objects.create(test_text="aa") u2 = await UUIDSample.objects.create(test_text="bb") items = await UUIDSample.objects.all() assert len(items) == 2 assert isinstance(items[0].id, uuid.UUID) assert isinstance(items[1].id, uuid.UUID) assert items[0].id in (u1.id, u2.id) assert items[1].id in (u1.id, u2.id) assert items[0].id != items[1].id item = await UUIDSample.objects.filter(id=u1.id).get() assert item.id == u1.id item2 = await UUIDSample.objects.first() item3 = await UUIDSample.objects.get(pk=item2.id) assert item2.id == item3.id assert isinstance(item3.id, uuid.UUID) u3 = await UUIDSample2(**u1.dict()).save() u1_2 = await UUIDSample.objects.get(pk=u3.id) assert u1_2 == u1 u4 = await UUIDSample2.objects.get(pk=u3.id) assert u3 == u4 @pytest.mark.asyncio async def test_model_crud(): async with database: async with database.transaction(force_rollback=True): users = 
await User.objects.all() assert users == [] user = await User.objects.create(name="Tom") users = await User.objects.all() assert user.name == "Tom" assert user.pk is not None assert users == [user] lookup = await User.objects.get() assert lookup == user await user.update(name="Jane") users = await User.objects.all() assert user.name == "Jane" assert user.pk is not None assert users == [user] await user.delete() users = await User.objects.all() assert users == [] @pytest.mark.asyncio async def test_model_get(): async with database: async with database.transaction(force_rollback=True): with pytest.raises(ormar.NoMatch): await User.objects.get() assert await User.objects.get_or_none() is None user = await User.objects.create(name="Tom") lookup = await User.objects.get() assert lookup == user user2 = await User.objects.create(name="Jane") await User.objects.create(name="Jane") with pytest.raises(ormar.MultipleMatches): await User.objects.get(name="Jane") same_user = await User.objects.get(pk=user2.id) assert same_user.id == user2.id assert same_user.pk == user2.pk assert await User.objects.order_by("-name").get() == user @pytest.mark.asyncio async def test_model_filter(): async with database: async with database.transaction(force_rollback=True): await User.objects.create(name="Tom") await User.objects.create(name="Jane") await User.objects.create(name="Lucy") user = await User.objects.get(name="Lucy") assert user.name == "Lucy" with pytest.raises(ormar.NoMatch): await User.objects.get(name="Jim") await Product.objects.create(name="T-Shirt", rating=5, in_stock=True) await Product.objects.create(name="Dress", rating=4) await Product.objects.create(name="Coat", rating=3, in_stock=True) product = await Product.objects.get(name__iexact="t-shirt", rating=5) assert product.pk is not None assert product.name == "T-Shirt" assert product.rating == 5 assert product.last_delivery == datetime.datetime.now().date() products = await Product.objects.all(rating__gte=2, in_stock=True) 
assert len(products) == 2 products = await Product.objects.all(name__icontains="T") assert len(products) == 2 products = await Product.objects.exclude(rating__gte=4).all() assert len(products) == 1 products = await Product.objects.exclude(rating__gte=4, in_stock=True).all() assert len(products) == 2 products = await Product.objects.exclude(in_stock=True).all() assert len(products) == 1 products = await Product.objects.exclude(name__icontains="T").all() assert len(products) == 1 # Test escaping % character from icontains, contains, and iexact await Product.objects.create(name="100%-Cotton", rating=3) await Product.objects.create(name="Cotton-100%-Egyptian", rating=3) await Product.objects.create(name="Cotton-100%", rating=3) products = Product.objects.filter(name__iexact="100%-cotton") assert await products.count() == 1 products = Product.objects.filter(name__contains="%") assert await products.count() == 3 products = Product.objects.filter(name__icontains="%") assert await products.count() == 3 @pytest.mark.asyncio async def test_wrong_query_contains_model(): async with database: with pytest.raises(QueryDefinitionError): product = Product(name="90%-Cotton", rating=2) await Product.objects.filter(name__contains=product).count() @pytest.mark.asyncio async def test_model_exists(): async with database: async with database.transaction(force_rollback=True): await User.objects.create(name="Tom") assert await User.objects.filter(name="Tom").exists() is True assert await User.objects.filter(name="Jane").exists() is False @pytest.mark.asyncio async def test_model_count(): async with database: async with database.transaction(force_rollback=True): await User.objects.create(name="Tom") await User.objects.create(name="Jane") await User.objects.create(name="Lucy") assert await User.objects.count() == 3 assert await User.objects.filter(name__icontains="T").count() == 1 @pytest.mark.asyncio async def test_model_limit(): async with database: async with 
database.transaction(force_rollback=True): await User.objects.create(name="Tom") await User.objects.create(name="Jane") await User.objects.create(name="Lucy") assert len(await User.objects.limit(2).all()) == 2 @pytest.mark.asyncio async def test_model_limit_with_filter(): async with database: async with database.transaction(force_rollback=True): await User.objects.create(name="Tom") await User.objects.create(name="Tom") await User.objects.create(name="Tom") assert ( len(await User.objects.limit(2).filter(name__iexact="Tom").all()) == 2 ) @pytest.mark.asyncio async def test_offset(): async with database: async with database.transaction(force_rollback=True): await User.objects.create(name="Tom") await User.objects.create(name="Jane") users = await User.objects.offset(1).limit(1).all() assert users[0].name == "Jane" @pytest.mark.asyncio async def test_model_first(): async with database: async with database.transaction(force_rollback=True): tom = await User.objects.create(name="Tom") jane = await User.objects.create(name="Jane") assert await User.objects.first() == tom assert await User.objects.first(name="Jane") == jane assert await User.objects.filter(name="Jane").first() == jane with pytest.raises(NoMatch): await User.objects.filter(name="Lucy").first() assert await User.objects.order_by("name").first() == jane def not_contains(a, b): return a not in b def contains(a, b): return a in b def check_choices(values: tuple, ops: List): ops_dict = {"in": contains, "out": not_contains} checks = (country_name_choices, country_taxed_choices, country_country_code_choices) assert all( [ops_dict[op](value, check) for value, op, check in zip(values, ops, checks)] ) @pytest.mark.asyncio async def test_model_choices(): """Test that choices work properly for various types of fields.""" async with database: # Test valid choices. 
await asyncio.gather( Country.objects.create(name="Canada", taxed=True, country_code=1), Country.objects.create(name="Algeria", taxed=True, country_code=213), Country.objects.create(name="Algeria"), ) with pytest.raises(ValueError): name, taxed, country_code = "Saudi Arabia", True, 1 check_choices((name, taxed, country_code), ["out", "in", "in"]) await Country.objects.create( name=name, taxed=taxed, country_code=country_code ) with pytest.raises(ValueError): name, taxed, country_code = "Algeria", False, 1 check_choices((name, taxed, country_code), ["in", "out", "in"]) await Country.objects.create( name=name, taxed=taxed, country_code=country_code ) with pytest.raises(ValueError): name, taxed, country_code = "Algeria", True, 967 check_choices((name, taxed, country_code), ["in", "in", "out"]) await Country.objects.create( name=name, taxed=taxed, country_code=country_code ) with pytest.raises(ValueError): name, taxed, country_code = ( "United States", True, 1, ) # name is too long but is a valid choice check_choices((name, taxed, country_code), ["in", "in", "in"]) await Country.objects.create( name=name, taxed=taxed, country_code=country_code ) with pytest.raises(ValueError): name, taxed, country_code = ( "Algeria", True, -10, ) # country code is too small but is a valid choice check_choices((name, taxed, country_code), ["in", "in", "in"]) await Country.objects.create( name=name, taxed=taxed, country_code=country_code ) with pytest.raises(ValueError): name, taxed, country_code = ( "Algeria", True, 1200, ) # country code is too large but is a valid choice check_choices((name, taxed, country_code), ["in", "in", "in"]) await Country.objects.create( name=name, taxed=taxed, country_code=country_code ) # test setting after init also triggers validation with pytest.raises(ValueError): name, taxed, country_code = "Algeria", True, 967 check_choices((name, taxed, country_code), ["in", "in", "out"]) country = Country() country.country_code = country_code with 
pytest.raises(ValueError): name, taxed, country_code = "Saudi Arabia", True, 1 check_choices((name, taxed, country_code), ["out", "in", "in"]) country = Country() country.name = name with pytest.raises(ValueError): name, taxed, country_code = "Algeria", False, 1 check_choices((name, taxed, country_code), ["in", "out", "in"]) country = Country() country.taxed = taxed # check also update from queryset with pytest.raises(ValueError): name, taxed, country_code = "Algeria", False, 1 check_choices((name, taxed, country_code), ["in", "out", "in"]) await Country(name="Belize").save() await Country.objects.filter(name="Belize").update(name="Vietnam") @pytest.mark.asyncio async def test_nullable_field_model_choices(): """Test that choices work properly for according to nullable setting""" async with database: c1 = await NullableCountry(name=None).save() assert c1.name is None with pytest.raises(ValueError): await NotNullableCountry(name=None).save() @pytest.mark.asyncio async def test_start_and_end_filters(): async with database: async with database.transaction(force_rollback=True): await User.objects.create(name="Markos Uj") await User.objects.create(name="Maqua Bigo") await User.objects.create(name="maqo quidid") await User.objects.create(name="Louis Figo") await User.objects.create(name="Loordi Kami") await User.objects.create(name="Yuuki Sami") users = await User.objects.filter(name__startswith="Mar").all() assert len(users) == 1 users = await User.objects.filter(name__istartswith="ma").all() assert len(users) == 3 users = await User.objects.filter(name__istartswith="Maq").all() assert len(users) == 2 users = await User.objects.filter(name__iendswith="AMI").all() assert len(users) == 2 users = await User.objects.filter(name__endswith="Uj").all() assert len(users) == 1 users = await User.objects.filter(name__endswith="igo").all() assert len(users) == 2 @pytest.mark.asyncio async def test_get_and_first(): async with database: async with 
database.transaction(force_rollback=True): await User.objects.create(name="Tom") await User.objects.create(name="Jane") await User.objects.create(name="Lucy") await User.objects.create(name="Zack") await User.objects.create(name="Ula") user = await User.objects.get() assert user.name == "Ula" user = await User.objects.first() assert user.name == "Tom" await User2.objects.create(id="Tom", name="Tom") await User2.objects.create(id="Jane", name="Jane") await User2.objects.create(id="Lucy", name="Lucy") await User2.objects.create(id="Zack", name="Zack") await User2.objects.create(id="Ula", name="Ula") user = await User2.objects.get() assert user.name == "Zack" user = await User2.objects.first() assert user.name == "Jane" def test_constraints(): with pytest.raises(pydantic.ValidationError) as e: Product(name="T-Shirt", rating=50, in_stock=True) assert "ensure this value is less than or equal to 5" in str(e.value) ormar-0.12.2/tests/test_model_definition/test_models_are_pickable.py000066400000000000000000000032551444363446500260240ustar00rootroot00000000000000import pickle from typing import Optional import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class User(ormar.Model): class Meta: tablename = "users" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) properties = ormar.JSON(nullable=True) class Post(ormar.Model): class Meta: tablename = "posts" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) created_by: Optional[User] = ormar.ForeignKey(User) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async 
def test_dumping_and_loading_model_works(): async with database: user = await User(name="Test", properties={"aa": "bb"}).save() post = Post(name="Test post") await user.posts.add(post) pickled_value = pickle.dumps(user) python_value = pickle.loads(pickled_value) assert isinstance(python_value, User) assert python_value.name == "Test" assert python_value.properties == {"aa": "bb"} assert python_value.posts[0].name == "Test post" await python_value.load() await python_value.update(name="Test2") check = await User.objects.get() assert check.name == "Test2" ormar-0.12.2/tests/test_model_definition/test_overwriting_pydantic_field_type.py000066400000000000000000000027641444363446500305420ustar00rootroot00000000000000from typing import Dict, Optional import databases import pytest import sqlalchemy from pydantic import Json, PositiveInt, ValidationError import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class OverwriteTest(ormar.Model): class Meta: tablename = "overwrites" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) my_int: int = ormar.Integer(overwrite_pydantic_type=PositiveInt) constraint_dict: Json = ormar.JSON( overwrite_pydantic_type=Optional[Json[Dict[str, int]]] # type: ignore ) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) def test_constraints(): with pytest.raises(ValidationError) as e: OverwriteTest(my_int=-10) assert "ensure this value is greater than 0" in str(e.value) with pytest.raises(ValidationError) as e: OverwriteTest(my_int=10, constraint_dict={"aa": "ab"}) assert "value is not a valid integer" in str(e.value) @pytest.mark.asyncio async def test_saving(): async with database: await OverwriteTest(my_int=5, constraint_dict={"aa": 123}).save() test = await 
OverwriteTest.objects.get() assert test.my_int == 5 assert test.constraint_dict == {"aa": 123} ormar-0.12.2/tests/test_model_definition/test_overwriting_sql_nullable.py000066400000000000000000000030661444363446500271740ustar00rootroot00000000000000import sqlite3 from typing import Optional import asyncpg import databases import pymysql import sqlalchemy from sqlalchemy import create_engine, text import ormar import pytest from tests.settings import DATABASE_URL db = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = db class PrimaryModel(ormar.Model): class Meta(BaseMeta): tablename = "primary_models" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=255, index=True) some_text: Optional[str] = ormar.Text(nullable=True, sql_nullable=False) some_other_text: Optional[str] = ormar.String( max_length=255, nullable=True, sql_nullable=False, server_default=text("''") ) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_create_models(): async with db: primary = await PrimaryModel( name="Foo", some_text="Bar", some_other_text="Baz" ).save() assert primary.id == 1 primary2 = await PrimaryModel(name="Foo2", some_text="Bar2").save() assert primary2.id == 2 with pytest.raises( ( sqlite3.IntegrityError, pymysql.IntegrityError, asyncpg.exceptions.NotNullViolationError, ) ): await PrimaryModel(name="Foo3").save() ormar-0.12.2/tests/test_model_definition/test_pk_field_is_always_not_null.py000066400000000000000000000015221444363446500276150ustar00rootroot00000000000000import databases import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database 
class AutoincrementModel(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) class NonAutoincrementModel(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True, autoincrement=False) class ExplicitNullableModel(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True, nullable=True) def test_pk_field_is_not_null(): for model in [AutoincrementModel, NonAutoincrementModel, ExplicitNullableModel]: assert not model.Meta.table.c.get("id").nullable ormar-0.12.2/tests/test_model_definition/test_properties.py000066400000000000000000000043311444363446500242500ustar00rootroot00000000000000# type: ignore import databases import pytest import sqlalchemy import ormar from ormar import ModelDefinitionError, property_field from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Song(ormar.Model): class Meta: tablename = "songs" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) sort_order: int = ormar.Integer() @property_field def sorted_name(self): return f"{self.sort_order}: {self.name}" @property_field def sample(self): return "sample" @property_field def sample2(self): return "sample2" @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_sort_order_on_main_model(): async with database: await Song.objects.create(name="Song 3", sort_order=3) await Song.objects.create(name="Song 1", sort_order=1) await Song.objects.create(name="Song 2", sort_order=2) songs = await Song.objects.all() song_dict = [song.dict() for song in songs] assert all("sorted_name" in x for x in song_dict) assert all( x["sorted_name"] == f"{x['sort_order']}: {x['name']}" for x in 
song_dict ) song_json = [song.json() for song in songs] assert all("sorted_name" in x for x in song_json) check_include = songs[0].dict(include={"sample"}) assert "sample" in check_include assert "sample2" not in check_include assert "sorted_name" not in check_include check_include = songs[0].dict(exclude={"sample"}) assert "sample" not in check_include assert "sample2" in check_include assert "sorted_name" in check_include def test_wrong_definition(): with pytest.raises(ModelDefinitionError): class WrongModel(ormar.Model): # pragma: no cover @property_field def test(self, aa=10, bb=30): pass ormar-0.12.2/tests/test_model_definition/test_pydantic_fields.py000066400000000000000000000073441444363446500252240ustar00rootroot00000000000000import random from typing import Optional import databases import pytest import sqlalchemy from pydantic import BaseModel, Field, HttpUrl, PaymentCardNumber import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class ModelTest(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=200) url: HttpUrl = "https://www.example.com" # type: ignore number: Optional[PaymentCardNumber] CARD_NUMBERS = [ "123456789007", "123456789015", "123456789023", "123456789031", "123456789049", ] def get_number(): return random.choice(CARD_NUMBERS) class ModelTest2(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=200) url: HttpUrl = "https://www.example2.com" # type: ignore number: PaymentCardNumber = Field(default_factory=get_number) class PydanticTest(BaseModel): aa: str bb: int class ModelTest3(ormar.Model): class Meta(BaseMeta): pass def __init__(self, **kwargs): kwargs["number"] = get_number() kwargs["pydantic_test"] = PydanticTest(aa="random", bb=42) 
super().__init__(**kwargs) id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=200) url: HttpUrl = "https://www.example3.com" # type: ignore number: PaymentCardNumber pydantic_test: PydanticTest @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_working_with_pydantic_fields(): async with database: test = ModelTest(name="Test") assert test.name == "Test" assert test.url == "https://www.example.com" assert test.number is None test.number = "123456789015" test.url = "https://www.sdta.ada.pt" assert test.url == "https://www.sdta.ada.pt" await test.save() test_check = await ModelTest.objects.get() assert test_check.name == "Test" assert test_check.url == "https://www.example.com" assert test_check.number is None @pytest.mark.asyncio async def test_default_factory_for_pydantic_fields(): async with database: test = ModelTest2(name="Test2", number="4000000000000002") assert test.name == "Test2" assert test.url == "https://www.example2.com" assert test.number == "4000000000000002" test.url = "http://www.sdta.ada.pt" assert test.url == "http://www.sdta.ada.pt" await test.save() test_check = await ModelTest2.objects.get() assert test_check.name == "Test2" assert test_check.url == "https://www.example2.com" assert test_check.number in CARD_NUMBERS assert test_check.number != test.number @pytest.mark.asyncio async def test_init_setting_for_pydantic_fields(): async with database: test = ModelTest3(name="Test3") assert test.name == "Test3" assert test.url == "https://www.example3.com" assert test.pydantic_test.bb == 42 test.url = "http://www.sdta.ada.pt" assert test.url == "http://www.sdta.ada.pt" await test.save() test_check = await ModelTest3.objects.get() assert test_check.name == "Test3" assert test_check.url == "https://www.example3.com" assert 
test_check.number in CARD_NUMBERS assert test_check.pydantic_test.aa == "random" ormar-0.12.2/tests/test_model_definition/test_pydantic_only_fields.py000066400000000000000000000043321444363446500262570ustar00rootroot00000000000000import datetime import databases import pytest import sqlalchemy import ormar from ormar import property_field from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) timestamp: datetime.datetime = ormar.DateTime(pydantic_only=True) @property_field def name10(self) -> str: return self.name + "_10" @property_field def name20(self) -> str: return self.name + "_20" @property def name30(self) -> str: return self.name + "_30" @property_field def name40(self) -> str: return self.name + "_40" @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_pydantic_only_fields(): async with database: async with database.transaction(force_rollback=True): album = await Album.objects.create(name="Hitchcock") assert album.pk is not None assert album.saved assert album.timestamp is None album = await Album.objects.exclude_fields("timestamp").get() assert album.timestamp is None album = await Album.objects.fields({"name", "timestamp"}).get() assert album.timestamp is None test_dict = album.dict() assert "timestamp" in test_dict assert test_dict["timestamp"] is None assert album.name30 == "Hitchcock_30" album.timestamp = datetime.datetime.now() test_dict = album.dict() assert "timestamp" in test_dict assert test_dict["timestamp"] is not None assert test_dict.get("name10") == "Hitchcock_10" assert 
test_dict.get("name20") == "Hitchcock_20" assert test_dict.get("name40") == "Hitchcock_40" assert "name30" not in test_dict ormar-0.12.2/tests/test_model_definition/test_pydantic_private_attributes.py000066400000000000000000000014721444363446500276720ustar00rootroot00000000000000from typing import List import databases import sqlalchemy from pydantic import PrivateAttr import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Subscription(ormar.Model): class Meta(BaseMeta): tablename = "subscriptions" id: int = ormar.Integer(primary_key=True) stripe_subscription_id: str = ormar.String(nullable=False, max_length=256) _add_payments: List[str] = PrivateAttr(default_factory=list) def add_payment(self, payment: str): self._add_payments.append(payment) def test_private_attribute(): sub = Subscription(stripe_subscription_id="2312312sad231") sub.add_payment("test") ormar-0.12.2/tests/test_model_definition/test_save_status.py000066400000000000000000000170321444363446500244170ustar00rootroot00000000000000from typing import List import databases import pytest import sqlalchemy import ormar from ormar.exceptions import ModelPersistenceError from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class NickNames(ormar.Model): class Meta: tablename = "nicks" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="hq_name") is_lame: bool = ormar.Boolean(nullable=True) class NicksHq(ormar.Model): class Meta: tablename = "nicks_x_hq" metadata = metadata database = database class HQ(ormar.Model): class Meta: tablename = "hqs" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, 
nullable=False, name="hq_name") nicks: List[NickNames] = ormar.ManyToMany(NickNames, through=NicksHq) class Company(ormar.Model): class Meta: tablename = "companies" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="company_name") founded: int = ormar.Integer(nullable=True) hq: HQ = ormar.ForeignKey(HQ) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_instantiation_false_save_true(): async with database: async with database.transaction(force_rollback=True): comp = Company(name="Banzai", founded=1988) assert not comp.saved await comp.save() assert comp.saved @pytest.mark.asyncio async def test_saved_edited_not_saved(): async with database: async with database.transaction(force_rollback=True): comp = await Company.objects.create(name="Banzai", founded=1988) assert comp.saved comp.name = "Banzai2" assert not comp.saved await comp.update() assert comp.saved await comp.update(name="Banzai3") assert comp.saved comp.pk = 999 assert not comp.saved await comp.update() assert comp.saved @pytest.mark.asyncio async def test_adding_related_gets_dirty(): async with database: async with database.transaction(force_rollback=True): hq = await HQ.objects.create(name="Main") comp = await Company.objects.create(name="Banzai", founded=1988) assert comp.saved comp.hq = hq assert not comp.saved await comp.update() assert comp.saved comp = await Company.objects.select_related("hq").get(name="Banzai") assert comp.saved assert comp.hq.pk == hq.pk assert comp.hq.saved comp.hq.name = "Suburbs" assert not comp.hq.saved assert comp.saved await comp.hq.update() assert comp.hq.saved @pytest.mark.asyncio async def test_adding_many_to_many_does_not_gets_dirty(): async with database: async with 
database.transaction(force_rollback=True): nick1 = await NickNames.objects.create(name="Bazinga", is_lame=False) nick2 = await NickNames.objects.create(name="Bazinga2", is_lame=True) hq = await HQ.objects.create(name="Main") assert hq.saved await hq.nicks.add(nick1) assert hq.saved await hq.nicks.add(nick2) assert hq.saved hq = await HQ.objects.select_related("nicks").get(name="Main") assert hq.saved assert hq.nicks[0].saved await hq.nicks.remove(nick1) assert hq.saved hq.nicks[0].name = "Kabucha" assert not hq.nicks[0].saved await hq.nicks[0].update() assert hq.nicks[0].saved @pytest.mark.asyncio async def test_delete(): async with database: async with database.transaction(force_rollback=True): comp = await Company.objects.create(name="Banzai", founded=1988) assert comp.saved await comp.delete() assert not comp.saved await comp.update() assert comp.saved @pytest.mark.asyncio async def test_load(): async with database: async with database.transaction(force_rollback=True): comp = await Company.objects.create(name="Banzai", founded=1988) assert comp.saved comp.name = "AA" assert not comp.saved await comp.load() assert comp.saved assert comp.name == "Banzai" @pytest.mark.asyncio async def test_queryset_methods(): async with database: async with database.transaction(force_rollback=True): await Company.objects.create(name="Banzai", founded=1988) await Company.objects.create(name="Yuhu", founded=1989) await Company.objects.create(name="Konono", founded=1990) await Company.objects.create(name="Sumaaa", founded=1991) comp = await Company.objects.get(name="Banzai") assert comp.saved comp = await Company.objects.first() assert comp.saved comps = await Company.objects.all() assert [comp.saved for comp in comps] comp2, created = await Company.objects.get_or_create( name="Banzai_new", founded=2001 ) assert comp2.saved assert created is True comp3, created = await Company.objects.get_or_create( name="Banzai", founded=1988 ) assert comp3.saved assert comp3.pk == comp.pk assert 
created is False update_dict = comp.dict() update_dict["founded"] = 2010 comp = await Company.objects.update_or_create(**update_dict) assert comp.saved assert comp.founded == 2010 create_dict = {"name": "Yoko", "founded": 2005} comp = await Company.objects.update_or_create(**create_dict) assert comp.saved assert comp.founded == 2005 @pytest.mark.asyncio async def test_bulk_methods(): async with database: async with database.transaction(force_rollback=True): c1 = Company(name="Banzai", founded=1988) c2 = Company(name="Yuhu", founded=1989) await Company.objects.bulk_create([c1, c2]) assert c1.saved assert c2.saved c1, c2 = await Company.objects.all() c1.name = "Banzai2" c2.name = "Yuhu2" assert not c1.saved assert not c2.saved await Company.objects.bulk_update([c1, c2]) assert c1.saved assert c2.saved c3 = Company(name="Cobra", founded=2088) assert not c3.saved with pytest.raises(ModelPersistenceError): await c3.update() await c3.upsert() assert c3.saved c3.name = "Python" assert not c3.saved await c3.upsert() assert c3.saved assert c3.name == "Python" await c3.upsert(founded=2077) assert c3.saved assert c3.founded == 2077 ormar-0.12.2/tests/test_model_definition/test_saving_nullable_fields.py000066400000000000000000000035111444363446500265460ustar00rootroot00000000000000from typing import Optional import databases import sqlalchemy from sqlalchemy import create_engine import ormar import pytest from tests.settings import DATABASE_URL db = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class PrimaryModel(ormar.Model): class Meta: metadata = metadata database = db tablename = "primary_models" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=255, index=True) some_text: str = ormar.Text() # NOTE: Removing nullable=True makes the test pass. 
some_other_text: Optional[str] = ormar.Text(nullable=True) class SecondaryModel(ormar.Model): class Meta: metadata = metadata database = db tablename = "secondary_models" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) primary_model: PrimaryModel = ormar.ForeignKey( PrimaryModel, related_name="secondary_models" ) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_create_models(): async with db: async with db.transaction(force_rollback=True): primary = await PrimaryModel( name="Foo", some_text="Bar", some_other_text="Baz" ).save() assert primary.id == 1 secondary = await SecondaryModel(name="Foo", primary_model=primary).save() assert secondary.id == 1 assert secondary.primary_model.id == 1 secondary = await SecondaryModel.objects.get() assert secondary.name == "Foo" await secondary.update(name="Updated") assert secondary.name == "Updated" ormar-0.12.2/tests/test_model_definition/test_server_default.py000066400000000000000000000047721444363446500250770ustar00rootroot00000000000000import asyncio import time from datetime import datetime import databases import pytest import sqlalchemy from sqlalchemy import func, text import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Product(ormar.Model): class Meta: tablename = "product" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) company: str = ormar.String(max_length=200, server_default="Acme") sort_order: int = ormar.Integer(server_default=text("10")) created: datetime = ormar.DateTime(server_default=func.now()) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) 
metadata.create_all(engine) yield metadata.drop_all(engine) def test_table_defined_properly(): assert Product.Meta.model_fields["created"].nullable assert not Product.__fields__["created"].required assert Product.Meta.table.columns["created"].server_default.arg.name == "now" @pytest.mark.asyncio async def test_model_creation(): async with database: async with database.transaction(force_rollback=True): p1 = Product(name="Test") assert p1.created is None await p1.save() await p1.load() assert p1.created is not None assert p1.company == "Acme" assert p1.sort_order == 10 date = datetime.strptime("2020-10-27 11:30", "%Y-%m-%d %H:%M") p3 = await Product.objects.create( name="Test2", created=date, company="Roadrunner", sort_order=1 ) assert p3.created is not None assert p3.created == date assert p1.created != p3.created assert p3.company == "Roadrunner" assert p3.sort_order == 1 p3 = await Product.objects.get(name="Test2") assert p3.company == "Roadrunner" assert p3.sort_order == 1 time.sleep(1) p2 = await Product.objects.create(name="Test3") assert p2.created is not None assert p2.company == "Acme" assert p2.sort_order == 10 if Product.db_backend_name() != "postgresql": # postgres use transaction timestamp so it will remain the same assert p1.created != p2.created # pragma nocover ormar-0.12.2/tests/test_model_definition/test_setting_comments_in_db.py000066400000000000000000000016471444363446500266000ustar00rootroot00000000000000import databases import pytest import sqlalchemy import ormar from ormar.models import Model from tests.settings import DATABASE_URL metadata = sqlalchemy.MetaData() database = databases.Database(DATABASE_URL, force_rollback=True) class Comment(Model): class Meta(ormar.ModelMeta): tablename = "comments" metadata = metadata database = database test: int = ormar.Integer(primary_key=True, comment="primary key of comments") test_string: str = ormar.String(max_length=250, comment="test that it works") @pytest.fixture(autouse=True, scope="module") def 
create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_comments_are_set_in_db(): columns = Comment.Meta.table.c for c in columns: assert c.comment == Comment.Meta.model_fields[c.name].comment ormar-0.12.2/tests/test_model_methods/000077500000000000000000000000001444363446500177555ustar00rootroot00000000000000ormar-0.12.2/tests/test_model_methods/__init__.py000066400000000000000000000000001444363446500220540ustar00rootroot00000000000000ormar-0.12.2/tests/test_model_methods/test_excludes_in_load_all.py000066400000000000000000000042271444363446500255240ustar00rootroot00000000000000import asyncio import uuid import pytest import ormar import sqlalchemy import databases from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class BaseMeta: metadata = metadata database = database class JimmyUser(ormar.Model): class Meta(BaseMeta): tablename = "jimmy_users" id: uuid.UUID = ormar.UUID( primary_key=True, default=uuid.uuid4(), uuid_format="string" ) class JimmyProfile(ormar.Model): class Meta(BaseMeta): tablename = "jimmy_profiles" id: uuid.UUID = ormar.UUID( primary_key=True, default=uuid.uuid4(), uuid_format="string" ) name = ormar.String(max_length=42, default="JimmyProfile") user: JimmyUser = ormar.ForeignKey(to=JimmyUser) class JimmyAccount(ormar.Model): class Meta(BaseMeta): tablename = "jimmy_accounts" id: uuid.UUID = ormar.UUID( primary_key=True, default=uuid.uuid4(), uuid_format="string" ) name = ormar.String(max_length=42, default="JimmyAccount") user: JimmyUser = ormar.ForeignKey(to=JimmyUser) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_excluding_one_relation(): async with 
database: user = JimmyUser() await user.save() await JimmyAccount(user=user).save() await JimmyProfile(user=user).save() await user.load_all(exclude={"jimmyprofiles"}) assert hasattr(user.jimmyaccounts[0], "name") assert len(user.jimmyprofiles) == 0 @pytest.mark.asyncio async def test_excluding_other_relation(): async with database: user = JimmyUser() await user.save() await JimmyAccount(user=user).save() await JimmyProfile(user=user).save() await user.load_all(exclude={"jimmyaccounts"}) assert await JimmyProfile.objects.get() assert hasattr(user.jimmyprofiles[0], "name") assert len(user.jimmyaccounts) == 0 ormar-0.12.2/tests/test_model_methods/test_load_all.py000066400000000000000000000212651444363446500231430ustar00rootroot00000000000000from typing import List import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): database = database metadata = metadata class Language(ormar.Model): class Meta(BaseMeta): tablename = "languages" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) level: str = ormar.String(max_length=150, default="Beginner") class CringeLevel(ormar.Model): class Meta(BaseMeta): tablename = "levels" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) language = ormar.ForeignKey(Language) class NickName(ormar.Model): class Meta(BaseMeta): tablename = "nicks" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="hq_name") is_lame: bool = ormar.Boolean(nullable=True) level: CringeLevel = ormar.ForeignKey(CringeLevel) class HQ(ormar.Model): class Meta(BaseMeta): tablename = "hqs" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="hq_name") nicks: List[NickName] = ormar.ManyToMany(NickName) class 
Company(ormar.Model): class Meta(BaseMeta): tablename = "companies" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="company_name") founded: int = ormar.Integer(nullable=True) hq: HQ = ormar.ForeignKey(HQ, related_name="companies") @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_load_all_fk_rel(): async with database: async with database.transaction(force_rollback=True): hq = await HQ.objects.create(name="Main") company = await Company.objects.create(name="Banzai", founded=1988, hq=hq) hq = await HQ.objects.get(name="Main") await hq.load_all() assert hq.companies[0] == company assert hq.companies[0].name == "Banzai" assert hq.companies[0].founded == 1988 hq2 = await HQ.objects.select_all().get(name="Main") assert hq2.companies[0] == company assert hq2.companies[0].name == "Banzai" assert hq2.companies[0].founded == 1988 @pytest.mark.asyncio async def test_load_all_many_to_many(): async with database: async with database.transaction(force_rollback=True): nick1 = await NickName.objects.create(name="BazingaO", is_lame=False) nick2 = await NickName.objects.create(name="Bazinga20", is_lame=True) hq = await HQ.objects.create(name="Main") await hq.nicks.add(nick1) await hq.nicks.add(nick2) hq = await HQ.objects.get(name="Main") await hq.load_all() assert hq.nicks[0] == nick1 assert hq.nicks[0].name == "BazingaO" assert hq.nicks[1] == nick2 assert hq.nicks[1].name == "Bazinga20" hq2 = await HQ.objects.select_all().get(name="Main") assert hq2.nicks[0] == nick1 assert hq2.nicks[0].name == "BazingaO" assert hq2.nicks[1] == nick2 assert hq2.nicks[1].name == "Bazinga20" @pytest.mark.asyncio async def test_load_all_with_order(): async with database: async with database.transaction(force_rollback=True): nick1 = await 
NickName.objects.create(name="Barry", is_lame=False) nick2 = await NickName.objects.create(name="Joe", is_lame=True) hq = await HQ.objects.create(name="Main") await hq.nicks.add(nick1) await hq.nicks.add(nick2) hq = await HQ.objects.get(name="Main") await hq.load_all(order_by="-nicks__name") assert hq.nicks[0] == nick2 assert hq.nicks[0].name == "Joe" assert hq.nicks[1] == nick1 assert hq.nicks[1].name == "Barry" await hq.load_all() assert hq.nicks[0] == nick1 assert hq.nicks[1] == nick2 hq2 = ( await HQ.objects.select_all().order_by("-nicks__name").get(name="Main") ) assert hq2.nicks[0] == nick2 assert hq2.nicks[1] == nick1 hq3 = await HQ.objects.select_all().get(name="Main") assert hq3.nicks[0] == nick1 assert hq3.nicks[1] == nick2 @pytest.mark.asyncio async def test_loading_reversed_relation(): async with database: async with database.transaction(force_rollback=True): hq = await HQ.objects.create(name="Main") await Company.objects.create(name="Banzai", founded=1988, hq=hq) company = await Company.objects.get(name="Banzai") await company.load_all() assert company.hq == hq company2 = await Company.objects.select_all().get(name="Banzai") assert company2.hq == hq @pytest.mark.asyncio async def test_loading_nested(): async with database: async with database.transaction(force_rollback=True): language = await Language.objects.create(name="English") level = await CringeLevel.objects.create(name="High", language=language) level2 = await CringeLevel.objects.create(name="Low", language=language) nick1 = await NickName.objects.create( name="BazingaO", is_lame=False, level=level ) nick2 = await NickName.objects.create( name="Bazinga20", is_lame=True, level=level2 ) hq = await HQ.objects.create(name="Main") await hq.nicks.add(nick1) await hq.nicks.add(nick2) hq = await HQ.objects.get(name="Main") await hq.load_all(follow=True) assert hq.nicks[0] == nick1 assert hq.nicks[0].name == "BazingaO" assert hq.nicks[0].level.name == "High" assert hq.nicks[0].level.language.name == 
"English" assert hq.nicks[1] == nick2 assert hq.nicks[1].name == "Bazinga20" assert hq.nicks[1].level.name == "Low" assert hq.nicks[1].level.language.name == "English" hq2 = await HQ.objects.select_all(follow=True).get(name="Main") assert hq2.nicks[0] == nick1 assert hq2.nicks[0].name == "BazingaO" assert hq2.nicks[0].level.name == "High" assert hq2.nicks[0].level.language.name == "English" assert hq2.nicks[1] == nick2 assert hq2.nicks[1].name == "Bazinga20" assert hq2.nicks[1].level.name == "Low" assert hq2.nicks[1].level.language.name == "English" hq5 = await HQ.objects.select_all().get(name="Main") assert len(hq5.nicks) == 2 await hq5.nicks.select_all(follow=True).all() assert hq5.nicks[0] == nick1 assert hq5.nicks[0].name == "BazingaO" assert hq5.nicks[0].level.name == "High" assert hq5.nicks[0].level.language.name == "English" assert hq5.nicks[1] == nick2 assert hq5.nicks[1].name == "Bazinga20" assert hq5.nicks[1].level.name == "Low" assert hq5.nicks[1].level.language.name == "English" await hq.load_all(follow=True, exclude="nicks__level__language") assert len(hq.nicks) == 2 assert hq.nicks[0].level.language is None assert hq.nicks[1].level.language is None hq3 = ( await HQ.objects.select_all(follow=True) .exclude_fields("nicks__level__language") .get(name="Main") ) assert len(hq3.nicks) == 2 assert hq3.nicks[0].level.language is None assert hq3.nicks[1].level.language is None await hq.load_all(follow=True, exclude="nicks__level__language__level") assert len(hq.nicks) == 2 assert hq.nicks[0].level.language is not None assert hq.nicks[0].level.language.level is None assert hq.nicks[1].level.language is not None assert hq.nicks[1].level.language.level is None await hq.load_all(follow=True, exclude="nicks__level") assert len(hq.nicks) == 2 assert hq.nicks[0].level is None assert hq.nicks[1].level is None await hq.load_all(follow=True, exclude="nicks") assert len(hq.nicks) == 0 
ormar-0.12.2/tests/test_model_methods/test_populate_default_values.py000066400000000000000000000016661444363446500263130ustar00rootroot00000000000000import databases import pytest import sqlalchemy from sqlalchemy import text import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): database = database metadata = metadata class Task(ormar.Model): class Meta(BaseMeta): tablename = "tasks" id: int = ormar.Integer(primary_key=True) name: str = ormar.String( max_length=255, minimum=0, server_default=text("Default Name"), nullable=False ) points: int = ormar.Integer( default=0, minimum=0, server_default=text("0"), nullable=False ) def test_populate_default_values(): new_kwargs = { "id": None, "name": "", "points": 0, } result = Task.populate_default_values(new_kwargs) assert result["id"] is None assert result["name"] == "" assert result["points"] == 0 ormar-0.12.2/tests/test_model_methods/test_save_related.py000066400000000000000000000171551444363446500240350ustar00rootroot00000000000000from typing import List import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class CringeLevel(ormar.Model): class Meta: tablename = "levels" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class NickName(ormar.Model): class Meta: tablename = "nicks" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="hq_name") is_lame: bool = ormar.Boolean(nullable=True) level: CringeLevel = ormar.ForeignKey(CringeLevel) class NicksHq(ormar.Model): class Meta: tablename = "nicks_x_hq" metadata = metadata database = database class HQ(ormar.Model): class Meta: tablename = 
"hqs" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="hq_name") nicks: List[NickName] = ormar.ManyToMany(NickName, through=NicksHq) class Company(ormar.Model): class Meta: tablename = "companies" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="company_name") founded: int = ormar.Integer(nullable=True) hq: HQ = ormar.ForeignKey(HQ, related_name="companies") @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_saving_related_fk_rel(): async with database: async with database.transaction(force_rollback=True): hq = await HQ.objects.create(name="Main") comp = await Company.objects.create(name="Banzai", founded=1988, hq=hq) assert comp.saved count = await comp.save_related() assert count == 0 comp.hq.name = "Suburbs" assert not comp.hq.saved assert comp.saved count = await comp.save_related() assert count == 1 assert comp.hq.saved comp.hq.name = "Suburbs 2" assert not comp.hq.saved assert comp.saved count = await comp.save_related(exclude={"hq"}) assert count == 0 assert not comp.hq.saved @pytest.mark.asyncio async def test_saving_many_to_many(): async with database: async with database.transaction(force_rollback=True): nick1 = await NickName.objects.create(name="BazingaO", is_lame=False) nick2 = await NickName.objects.create(name="Bazinga20", is_lame=True) hq = await HQ.objects.create(name="Main") assert hq.saved await hq.nicks.add(nick1) assert hq.saved await hq.nicks.add(nick2) assert hq.saved count = await hq.save_related() assert count == 0 count = await hq.save_related(save_all=True) assert count == 3 hq.nicks[0].name = "Kabucha" hq.nicks[1].name = "Kabucha2" assert not hq.nicks[0].saved 
assert not hq.nicks[1].saved count = await hq.save_related() assert count == 2 assert hq.nicks[0].saved assert hq.nicks[1].saved hq.nicks[0].name = "Kabucha a" hq.nicks[1].name = "Kabucha2 a" assert not hq.nicks[0].saved assert not hq.nicks[1].saved count = await hq.save_related(exclude={"nicks": ...}) assert count == 0 assert not hq.nicks[0].saved assert not hq.nicks[1].saved @pytest.mark.asyncio async def test_saving_reversed_relation(): async with database: async with database.transaction(force_rollback=True): hq = await HQ.objects.create(name="Main") await Company.objects.create(name="Banzai", founded=1988, hq=hq) hq = await HQ.objects.select_related("companies").get(name="Main") assert hq.saved assert hq.companies[0].saved hq.companies[0].name = "Konichiwa" assert not hq.companies[0].saved count = await hq.save_related() assert count == 1 assert hq.companies[0].saved await Company.objects.create(name="Joshua", founded=1888, hq=hq) hq = await HQ.objects.select_related("companies").get(name="Main") assert hq.saved assert hq.companies[0].saved assert hq.companies[1].saved hq.companies[0].name = hq.companies[0].name + "20" assert not hq.companies[0].saved # save only if not saved so now only one count = await hq.save_related() assert count == 1 assert hq.companies[0].saved hq.companies[0].name = hq.companies[0].name + "20" hq.companies[1].name = hq.companies[1].name + "30" assert not hq.companies[0].saved assert not hq.companies[1].saved count = await hq.save_related() assert count == 2 assert hq.companies[0].saved assert hq.companies[1].saved @pytest.mark.asyncio async def test_saving_nested(): async with database: async with database.transaction(force_rollback=True): level = await CringeLevel.objects.create(name="High") level2 = await CringeLevel.objects.create(name="Low") nick1 = await NickName.objects.create( name="BazingaO", is_lame=False, level=level ) nick2 = await NickName.objects.create( name="Bazinga20", is_lame=True, level=level2 ) hq = await 
HQ.objects.create(name="Main") assert hq.saved await hq.nicks.add(nick1) assert hq.saved await hq.nicks.add(nick2) assert hq.saved count = await hq.save_related() assert count == 0 hq.nicks[0].level.name = "Medium" assert not hq.nicks[0].level.saved assert hq.nicks[0].saved count = await hq.save_related(follow=True) assert count == 1 assert hq.nicks[0].saved assert hq.nicks[0].level.saved hq.nicks[0].level.name = "Low" hq.nicks[1].level.name = "Medium" assert not hq.nicks[0].level.saved assert not hq.nicks[1].level.saved assert hq.nicks[0].saved assert hq.nicks[1].saved count = await hq.save_related(follow=True) assert count == 2 assert hq.nicks[0].saved assert hq.nicks[0].level.saved assert hq.nicks[1].saved assert hq.nicks[1].level.saved hq.nicks[0].level.name = "Low 2" hq.nicks[1].level.name = "Medium 2" assert not hq.nicks[0].level.saved assert not hq.nicks[1].level.saved assert hq.nicks[0].saved assert hq.nicks[1].saved count = await hq.save_related(follow=True, exclude={"nicks": {"level"}}) assert count == 0 assert hq.nicks[0].saved assert not hq.nicks[0].level.saved assert hq.nicks[1].saved assert not hq.nicks[1].level.saved ormar-0.12.2/tests/test_model_methods/test_save_related_from_dict.py000066400000000000000000000214561444363446500260620ustar00rootroot00000000000000from typing import List import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class CringeLevel(ormar.Model): class Meta: tablename = "levels" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class NickName(ormar.Model): class Meta: tablename = "nicks" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="hq_name") is_lame: bool = ormar.Boolean(nullable=True) level: CringeLevel = 
ormar.ForeignKey(CringeLevel) class NicksHq(ormar.Model): class Meta: tablename = "nicks_x_hq" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) new_field: str = ormar.String(max_length=200, nullable=True) class HQ(ormar.Model): class Meta: tablename = "hqs" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="hq_name") nicks: List[NickName] = ormar.ManyToMany(NickName, through=NicksHq) class Company(ormar.Model): class Meta: tablename = "companies" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="company_name") founded: int = ormar.Integer(nullable=True) hq: HQ = ormar.ForeignKey(HQ, related_name="companies") @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_saving_related_reverse_fk(): async with database: async with database.transaction(force_rollback=True): payload = {"companies": [{"name": "Banzai"}], "name": "Main"} hq = HQ(**payload) count = await hq.save_related(follow=True, save_all=True) assert count == 2 hq_check = await HQ.objects.select_related("companies").get() assert hq_check.pk is not None assert hq_check.name == "Main" assert len(hq_check.companies) == 1 assert hq_check.companies[0].name == "Banzai" assert hq_check.companies[0].pk is not None @pytest.mark.asyncio async def test_saving_related_reverse_fk_multiple(): async with database: async with database.transaction(force_rollback=True): payload = { "companies": [{"name": "Banzai"}, {"name": "Yamate"}], "name": "Main", } hq = HQ(**payload) count = await hq.save_related(follow=True, save_all=True) assert count == 3 hq_check = await HQ.objects.select_related("companies").get() assert 
hq_check.pk is not None assert hq_check.name == "Main" assert len(hq_check.companies) == 2 assert hq_check.companies[0].name == "Banzai" assert hq_check.companies[0].pk is not None assert hq_check.companies[1].name == "Yamate" assert hq_check.companies[1].pk is not None @pytest.mark.asyncio async def test_saving_related_fk(): async with database: async with database.transaction(force_rollback=True): payload = {"hq": {"name": "Main"}, "name": "Banzai"} comp = Company(**payload) count = await comp.save_related(follow=True, save_all=True) assert count == 2 comp_check = await Company.objects.select_related("hq").get() assert comp_check.pk is not None assert comp_check.name == "Banzai" assert comp_check.hq.name == "Main" assert comp_check.hq.pk is not None @pytest.mark.asyncio async def test_saving_many_to_many_wo_through(): async with database: async with database.transaction(force_rollback=True): payload = { "name": "Main", "nicks": [ {"name": "Bazinga0", "is_lame": False}, {"name": "Bazinga20", "is_lame": True}, ], } hq = HQ(**payload) count = await hq.save_related() assert count == 3 hq_check = await HQ.objects.select_related("nicks").get() assert hq_check.pk is not None assert len(hq_check.nicks) == 2 assert hq_check.nicks[0].name == "Bazinga0" assert hq_check.nicks[1].name == "Bazinga20" @pytest.mark.asyncio async def test_saving_many_to_many_with_through(): async with database: async with database.transaction(force_rollback=True): async with database.transaction(force_rollback=True): payload = { "name": "Main", "nicks": [ { "name": "Bazinga0", "is_lame": False, "nickshq": {"new_field": "test"}, }, { "name": "Bazinga20", "is_lame": True, "nickshq": {"new_field": "test2"}, }, ], } hq = HQ(**payload) count = await hq.save_related() assert count == 3 hq_check = await HQ.objects.select_related("nicks").get() assert hq_check.pk is not None assert len(hq_check.nicks) == 2 assert hq_check.nicks[0].name == "Bazinga0" assert hq_check.nicks[0].nickshq.new_field == "test" 
assert hq_check.nicks[1].name == "Bazinga20" assert hq_check.nicks[1].nickshq.new_field == "test2" @pytest.mark.asyncio async def test_saving_nested_with_m2m_and_rev_fk(): async with database: async with database.transaction(force_rollback=True): payload = { "name": "Main", "nicks": [ {"name": "Bazinga0", "is_lame": False, "level": {"name": "High"}}, {"name": "Bazinga20", "is_lame": True, "level": {"name": "Low"}}, ], } hq = HQ(**payload) count = await hq.save_related(follow=True, save_all=True) assert count == 5 hq_check = await HQ.objects.select_related("nicks__level").get() assert hq_check.pk is not None assert len(hq_check.nicks) == 2 assert hq_check.nicks[0].name == "Bazinga0" assert hq_check.nicks[0].level.name == "High" assert hq_check.nicks[1].name == "Bazinga20" assert hq_check.nicks[1].level.name == "Low" @pytest.mark.asyncio async def test_saving_nested_with_m2m_and_rev_fk_and_through(): async with database: async with database.transaction(force_rollback=True): payload = { "hq": { "name": "Yoko", "nicks": [ { "name": "Bazinga0", "is_lame": False, "nickshq": {"new_field": "test"}, "level": {"name": "High"}, }, { "name": "Bazinga20", "is_lame": True, "nickshq": {"new_field": "test2"}, "level": {"name": "Low"}, }, ], }, "name": "Main", } company = Company(**payload) count = await company.save_related(follow=True, save_all=True) assert count == 6 company_check = await Company.objects.select_related( "hq__nicks__level" ).get() assert company_check.pk is not None assert company_check.name == "Main" assert company_check.hq.name == "Yoko" assert len(company_check.hq.nicks) == 2 assert company_check.hq.nicks[0].name == "Bazinga0" assert company_check.hq.nicks[0].nickshq.new_field == "test" assert company_check.hq.nicks[0].level.name == "High" assert company_check.hq.nicks[1].name == "Bazinga20" assert company_check.hq.nicks[1].level.name == "Low" assert company_check.hq.nicks[1].nickshq.new_field == "test2" 
ormar-0.12.2/tests/test_model_methods/test_save_related_uuid.py000066400000000000000000000044701444363446500250570ustar00rootroot00000000000000import uuid from typing import Optional import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Department(ormar.Model): class Meta: database = database metadata = metadata id: uuid.UUID = ormar.UUID(primary_key=True, default=uuid.uuid4) department_name: str = ormar.String(max_length=100) class Course(ormar.Model): class Meta: database = database metadata = metadata id: uuid.UUID = ormar.UUID(primary_key=True, default=uuid.uuid4) course_name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean() department: Optional[Department] = ormar.ForeignKey(Department) class Student(ormar.Model): class Meta: database = database metadata = metadata id: uuid.UUID = ormar.UUID(primary_key=True, default=uuid.uuid4) name: str = ormar.String(max_length=100) courses = ormar.ManyToMany(Course) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_uuid_pk_in_save_related(): async with database: to_save = { "department_name": "Ormar", "courses": [ { "course_name": "basic1", "completed": True, "students": [{"name": "Abi"}, {"name": "Jack"}], }, { "course_name": "basic2", "completed": True, "students": [{"name": "Kate"}, {"name": "Miranda"}], }, ], } department = Department(**to_save) await department.save_related(follow=True, save_all=True) department_check = ( await Department.objects.select_all(follow=True) .order_by(Department.courses.students.name.asc()) .get() ) to_exclude = { "id": ..., "courses": {"id": ..., "students": {"id", "studentcourse"}}, } assert 
department_check.dict(exclude=to_exclude) == to_save ormar-0.12.2/tests/test_model_methods/test_update.py000066400000000000000000000063441444363446500226570ustar00rootroot00000000000000from typing import Optional import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Director(ormar.Model): class Meta: tablename = "directors" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="first_name") last_name: str = ormar.String(max_length=100, nullable=False, name="last_name") class Movie(ormar.Model): class Meta: tablename = "movies" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="title") year: int = ormar.Integer() profit: float = ormar.Float() director: Optional[Director] = ormar.ForeignKey(Director) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_updating_selected_columns(): async with database: director1 = await Director(name="Peter", last_name="Jackson").save() director2 = await Director(name="James", last_name="Cameron").save() lotr = await Movie( name="LOTR", year=2001, director=director1, profit=1.140 ).save() lotr.name = "Lord of The Rings" lotr.year = 2003 lotr.profit = 1.212 await lotr.update(_columns=["name"]) # before reload the field has current value even if not saved assert lotr.year == 2003 lotr = await Movie.objects.get() assert lotr.name == "Lord of The Rings" assert lotr.year == 2001 assert round(lotr.profit, 3) == 1.140 assert lotr.director.pk == director1.pk lotr.year = 2003 lotr.profit = 1.212 lotr.director = director2 await 
lotr.update(_columns=["year", "profit"]) lotr = await Movie.objects.get() assert lotr.year == 2003 assert round(lotr.profit, 3) == 1.212 assert lotr.director.pk == director1.pk @pytest.mark.asyncio async def test_not_passing_columns_or_empty_list_saves_all(): async with database: director = await Director(name="James", last_name="Cameron").save() terminator = await Movie( name="Terminator", year=1984, director=director, profit=0.078 ).save() terminator.name = "Terminator 2" terminator.year = 1991 terminator.profit = 0.520 await terminator.update(_columns=[]) terminator = await Movie.objects.get() assert terminator.name == "Terminator 2" assert terminator.year == 1991 assert round(terminator.profit, 3) == 0.520 terminator.name = "Terminator 3" terminator.year = 2003 terminator.profit = 0.433 await terminator.update() terminator = await terminator.load() assert terminator.name == "Terminator 3" assert terminator.year == 2003 assert round(terminator.profit, 3) == 0.433 ormar-0.12.2/tests/test_model_methods/test_upsert.py000066400000000000000000000034541444363446500227160ustar00rootroot00000000000000from typing import Optional import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Director(ormar.Model): class Meta: tablename = "directors" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="first_name") last_name: str = ormar.String(max_length=100, nullable=False, name="last_name") class Movie(ormar.Model): class Meta: tablename = "movies" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="title") year: int = ormar.Integer() profit: float = ormar.Float() director: Optional[Director] = ormar.ForeignKey(Director) 
@pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_updating_selected_columns(): async with database: director1 = await Director(name="Peter", last_name="Jackson").save() await Movie( id=1, name="Lord of The Rings", year=2003, director=director1, profit=1.212 ).upsert() with pytest.raises(ormar.NoMatch): await Movie.objects.get() await Movie( id=1, name="Lord of The Rings", year=2003, director=director1, profit=1.212 ).upsert(__force_save__=True) lotr = await Movie.objects.get() assert lotr.year == 2003 assert lotr.name == "Lord of The Rings" ormar-0.12.2/tests/test_ordering/000077500000000000000000000000001444363446500167435ustar00rootroot00000000000000ormar-0.12.2/tests/test_ordering/__init__.py000066400000000000000000000000001444363446500210420ustar00rootroot00000000000000ormar-0.12.2/tests/test_ordering/test_default_model_order.py000066400000000000000000000065711444363446500243640ustar00rootroot00000000000000from typing import Optional import databases import pytest import pytest_asyncio import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" orders_by = ["-name"] id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Book(ormar.Model): class Meta(BaseMeta): tablename = "books" orders_by = ["year", "-ranking"] id: int = ormar.Integer(primary_key=True) author: Optional[Author] = ormar.ForeignKey(Author) title: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) ranking: int = ormar.Integer(nullable=True) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = 
sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest_asyncio.fixture(autouse=True, scope="function") async def cleanup(): yield async with database: await Book.objects.delete(each=True) await Author.objects.delete(each=True) @pytest.mark.asyncio async def test_default_orders_is_applied(): async with database: tolkien = await Author(name="J.R.R. Tolkien").save() sapkowski = await Author(name="Andrzej Sapkowski").save() king = await Author(name="Stephen King").save() lewis = await Author(name="C.S Lewis").save() authors = await Author.objects.all() assert authors[0] == king assert authors[1] == tolkien assert authors[2] == lewis assert authors[3] == sapkowski authors = await Author.objects.order_by("name").all() assert authors[3] == king assert authors[2] == tolkien assert authors[1] == lewis assert authors[0] == sapkowski @pytest.mark.asyncio async def test_default_orders_is_applied_on_related(): async with database: tolkien = await Author(name="J.R.R. Tolkien").save() silmarillion = await Book( author=tolkien, title="The Silmarillion", year=1977 ).save() lotr = await Book( author=tolkien, title="The Lord of the Rings", year=1955 ).save() hobbit = await Book(author=tolkien, title="The Hobbit", year=1933).save() await tolkien.books.all() assert tolkien.books[0] == hobbit assert tolkien.books[1] == lotr assert tolkien.books[2] == silmarillion await tolkien.books.order_by("-title").all() assert tolkien.books[2] == hobbit assert tolkien.books[1] == lotr assert tolkien.books[0] == silmarillion @pytest.mark.asyncio async def test_default_orders_is_applied_on_related_two_fields(): async with database: sanders = await Author(name="Brandon Sanderson").save() twok = await Book( author=sanders, title="The Way of Kings", year=2010, ranking=10 ).save() bret = await Author(name="Peter V. 
Bret").save() tds = await Book( author=bret, title="The Desert Spear", year=2010, ranking=9 ).save() books = await Book.objects.all() assert books[0] == twok assert books[1] == tds ormar-0.12.2/tests/test_ordering/test_default_relation_order.py000066400000000000000000000105521444363446500250730ustar00rootroot00000000000000from typing import List, Optional from uuid import UUID, uuid4 import databases import pytest import pytest_asyncio import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Book(ormar.Model): class Meta(BaseMeta): tablename = "books" id: int = ormar.Integer(primary_key=True) author: Optional[Author] = ormar.ForeignKey( Author, orders_by=["name"], related_orders_by=["-year"] ) title: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) ranking: int = ormar.Integer(nullable=True) class Animal(ormar.Model): class Meta(BaseMeta): tablename = "animals" id: UUID = ormar.UUID(primary_key=True, default=uuid4) name: str = ormar.String(max_length=200) specie: str = ormar.String(max_length=200) class Human(ormar.Model): class Meta(BaseMeta): tablename = "humans" id: UUID = ormar.UUID(primary_key=True, default=uuid4) name: str = ormar.Text(default="") pets: List[Animal] = ormar.ManyToMany( Animal, related_name="care_takers", orders_by=["specie", "-name"], related_orders_by=["name"], ) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest_asyncio.fixture(autouse=True, scope="function") async def cleanup(): yield async with database: await 
Book.objects.delete(each=True) await Author.objects.delete(each=True) @pytest.mark.asyncio async def test_default_orders_is_applied_from_reverse_relation(): async with database: tolkien = await Author(name="J.R.R. Tolkien").save() hobbit = await Book(author=tolkien, title="The Hobbit", year=1933).save() silmarillion = await Book( author=tolkien, title="The Silmarillion", year=1977 ).save() lotr = await Book( author=tolkien, title="The Lord of the Rings", year=1955 ).save() tolkien = await Author.objects.select_related("books").get() assert tolkien.books[2] == hobbit assert tolkien.books[1] == lotr assert tolkien.books[0] == silmarillion @pytest.mark.asyncio async def test_default_orders_is_applied_from_relation(): async with database: bret = await Author(name="Peter V. Bret").save() tds = await Book( author=bret, title="The Desert Spear", year=2010, ranking=9 ).save() sanders = await Author(name="Brandon Sanderson").save() twok = await Book( author=sanders, title="The Way of Kings", year=2010, ranking=10 ).save() books = await Book.objects.order_by("year").select_related("author").all() assert books[0] == twok assert books[1] == tds @pytest.mark.asyncio async def test_default_orders_is_applied_from_relation_on_m2m(): async with database: alice = await Human(name="Alice").save() spot = await Animal(name="Spot", specie="Cat").save() zkitty = await Animal(name="ZKitty", specie="Cat").save() noodle = await Animal(name="Noodle", specie="Anaconda").save() await alice.pets.add(noodle) await alice.pets.add(spot) await alice.pets.add(zkitty) await alice.load_all() assert alice.pets[0] == noodle assert alice.pets[1] == zkitty assert alice.pets[2] == spot @pytest.mark.asyncio async def test_default_orders_is_applied_from_reverse_relation_on_m2m(): async with database: max = await Animal(name="Max", specie="Dog").save() joe = await Human(name="Joe").save() zack = await Human(name="Zack").save() julia = await Human(name="Julia").save() await max.care_takers.add(joe) await 
max.care_takers.add(zack) await max.care_takers.add(julia) await max.load_all() assert max.care_takers[0] == joe assert max.care_takers[1] == julia assert max.care_takers[2] == zack ormar-0.12.2/tests/test_ordering/test_default_through_relation_order.py000066400000000000000000000257631444363446500266450ustar00rootroot00000000000000from typing import Any, Dict, List, Tuple, Type, cast from uuid import UUID, uuid4 import databases import pytest import sqlalchemy import ormar from ormar import ModelDefinitionError, Model, QuerySet, pre_relation_remove, pre_update from ormar import pre_save from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Animal(ormar.Model): class Meta(BaseMeta): tablename = "animals" id: UUID = ormar.UUID(primary_key=True, default=uuid4) name: str = ormar.Text(default="") # favoriteHumans class Link(ormar.Model): class Meta(BaseMeta): tablename = "link_table" id: UUID = ormar.UUID(primary_key=True, default=uuid4) animal_order: int = ormar.Integer(nullable=True) human_order: int = ormar.Integer(nullable=True) class Human(ormar.Model): class Meta(BaseMeta): tablename = "humans" id: UUID = ormar.UUID(primary_key=True, default=uuid4) name: str = ormar.Text(default="") favoriteAnimals: List[Animal] = ormar.ManyToMany( Animal, through=Link, related_name="favoriteHumans", orders_by=["link__animal_order"], related_orders_by=["link__human_order"], ) class Human2(ormar.Model): class Meta(BaseMeta): tablename = "humans2" id: UUID = ormar.UUID(primary_key=True, default=uuid4) name: str = ormar.Text(default="") favoriteAnimals: List[Animal] = ormar.ManyToMany( Animal, related_name="favoriteHumans2", orders_by=["link__animal_order__fail"] ) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield 
metadata.drop_all(engine) @pytest.mark.asyncio async def test_ordering_by_through_fail(): async with database: alice = await Human2(name="Alice").save() spot = await Animal(name="Spot").save() await alice.favoriteAnimals.add(spot) with pytest.raises(ModelDefinitionError): await alice.load_all() def _get_filtered_query( sender: Type[Model], instance: Model, to_class: Type[Model] ) -> QuerySet: """ Helper function. Gets the query filtered by the appropriate class name. """ pk = getattr(instance, f"{to_class.get_name()}").pk filter_kwargs = {f"{to_class.get_name()}": pk} query = sender.objects.filter(**filter_kwargs) return query def _get_through_model_relations( sender: Type[Model], instance: Model ) -> Tuple[Type[Model], Type[Model]]: relations = list(instance.extract_related_names()) rel_one = sender.Meta.model_fields[relations[0]].to rel_two = sender.Meta.model_fields[relations[1]].to return rel_one, rel_two async def _populate_order_on_insert( sender: Type[Model], instance: Model, from_class: Type[Model], to_class: Type[Model] ): """ Helper function. Get max values from database for both orders and adds 1 (0 if max is None) if the order is not provided. If the order is provided it reorders the existing links to match the newly defined order. Assumes names f"{model.get_name()}_order" like for Animal: animal_order. """ order_column = f"{from_class.get_name()}_order" if getattr(instance, order_column) is None: query = _get_filtered_query(sender, instance, to_class) max_order = await query.max(order_column) max_order = max_order + 1 if max_order is not None else 0 setattr(instance, order_column, max_order) else: await _reorder_on_update( sender=sender, instance=instance, from_class=from_class, to_class=to_class, passed_args={order_column: getattr(instance, order_column)}, ) async def _reorder_on_update( sender: Type[Model], instance: Model, from_class: Type[Model], to_class: Type[Model], passed_args: Dict, ): """ Helper function. 
Actually reorders links by given order passed in add/update query to the link model. Assumes names f"{model.get_name()}_order" like for Animal: animal_order. """ order = f"{from_class.get_name()}_order" if order in passed_args: query = _get_filtered_query(sender, instance, to_class) to_reorder = await query.exclude(pk=instance.pk).order_by(order).all() new_order = passed_args.get(order) if to_reorder and new_order is not None: # can be more efficient - here we renumber all even if not needed. for ind, link in enumerate(to_reorder): if ind < new_order: setattr(link, order, ind) else: setattr(link, order, ind + 1) await sender.objects.bulk_update( cast(List[Model], to_reorder), columns=[order] ) @pre_save(Link) async def order_link_on_insert(sender: Type[Model], instance: Model, **kwargs: Any): """ Signal receiver registered on Link model, triggered every time before one is created by calling save() on a model. Note that signal functions for pre_save signal accepts sender class, instance and have to accept **kwargs even if it's empty as of now. """ rel_one, rel_two = _get_through_model_relations(sender, instance) await _populate_order_on_insert( sender=sender, instance=instance, from_class=rel_one, to_class=rel_two ) await _populate_order_on_insert( sender=sender, instance=instance, from_class=rel_two, to_class=rel_one ) @pre_update(Link) async def reorder_links_on_update( sender: Type[ormar.Model], instance: ormar.Model, passed_args: Dict, **kwargs: Any ): """ Signal receiver registered on Link model, triggered every time before one is updated by calling update() on a model. Note that signal functions for pre_update signal accepts sender class, instance, passed_args which is a dict of kwargs passed to update and have to accept **kwargs even if it's empty as of now. 
""" rel_one, rel_two = _get_through_model_relations(sender, instance) await _reorder_on_update( sender=sender, instance=instance, from_class=rel_one, to_class=rel_two, passed_args=passed_args, ) await _reorder_on_update( sender=sender, instance=instance, from_class=rel_two, to_class=rel_one, passed_args=passed_args, ) @pre_relation_remove([Animal, Human]) async def reorder_links_on_remove( sender: Type[ormar.Model], instance: ormar.Model, child: ormar.Model, relation_name: str, **kwargs: Any, ): """ Signal receiver registered on Anima and Human models, triggered every time before relation on a model is removed. Note that signal functions for pre_relation_remove signal accepts sender class, instance, child, relation_name and have to accept **kwargs even if it's empty as of now. Note that if classes have many relations you need to check if current one is ordered """ through_class = sender.Meta.model_fields[relation_name].through through_instance = getattr(instance, through_class.get_name()) if not through_instance: parent_pk = instance.pk child_pk = child.pk filter_kwargs = {f"{sender.get_name()}": parent_pk, child.get_name(): child_pk} through_instance = await through_class.objects.get(**filter_kwargs) rel_one, rel_two = _get_through_model_relations(through_class, through_instance) await _reorder_on_update( sender=through_class, instance=through_instance, from_class=rel_one, to_class=rel_two, passed_args={f"{rel_one.get_name()}_order": 999999}, ) await _reorder_on_update( sender=through_class, instance=through_instance, from_class=rel_two, to_class=rel_one, passed_args={f"{rel_two.get_name()}_order": 999999}, ) @pytest.mark.asyncio async def test_ordering_by_through_on_m2m_field(): async with database: def verify_order(instance, expected): field_name = ( "favoriteAnimals" if isinstance(instance, Human) else "favoriteHumans" ) order_field_name = ( "animal_order" if isinstance(instance, Human) else "human_order" ) assert [x.name for x in getattr(instance, field_name)] 
== expected assert [ getattr(x.link, order_field_name) for x in getattr(instance, field_name) ] == [i for i in range(len(expected))] alice = await Human(name="Alice").save() bob = await Human(name="Bob").save() charlie = await Human(name="Charlie").save() spot = await Animal(name="Spot").save() kitty = await Animal(name="Kitty").save() noodle = await Animal(name="Noodle").save() await alice.favoriteAnimals.add(noodle) await alice.favoriteAnimals.add(spot) await alice.favoriteAnimals.add(kitty) await alice.load_all() verify_order(alice, ["Noodle", "Spot", "Kitty"]) await bob.favoriteAnimals.add(noodle) await bob.favoriteAnimals.add(kitty) await bob.favoriteAnimals.add(spot) await bob.load_all() verify_order(bob, ["Noodle", "Kitty", "Spot"]) await charlie.favoriteAnimals.add(kitty) await charlie.favoriteAnimals.add(noodle) await charlie.favoriteAnimals.add(spot) await charlie.load_all() verify_order(charlie, ["Kitty", "Noodle", "Spot"]) animals = [noodle, kitty, spot] for animal in animals: await animal.load_all() verify_order(animal, ["Alice", "Bob", "Charlie"]) zack = await Human(name="Zack").save() await noodle.favoriteHumans.add(zack, human_order=0) await noodle.load_all() verify_order(noodle, ["Zack", "Alice", "Bob", "Charlie"]) await zack.load_all() verify_order(zack, ["Noodle"]) await noodle.favoriteHumans.filter(name="Zack").update(link=dict(human_order=1)) await noodle.load_all() verify_order(noodle, ["Alice", "Zack", "Bob", "Charlie"]) await noodle.favoriteHumans.filter(name="Zack").update(link=dict(human_order=2)) await noodle.load_all() verify_order(noodle, ["Alice", "Bob", "Zack", "Charlie"]) await noodle.favoriteHumans.filter(name="Zack").update(link=dict(human_order=3)) await noodle.load_all() verify_order(noodle, ["Alice", "Bob", "Charlie", "Zack"]) await kitty.favoriteHumans.remove(bob) await kitty.load_all() assert [x.name for x in kitty.favoriteHumans] == ["Alice", "Charlie"] bob = await noodle.favoriteHumans.get(pk=bob.pk) assert 
bob.link.human_order == 1 await noodle.favoriteHumans.remove( await noodle.favoriteHumans.filter(link__human_order=2).get() ) await noodle.load_all() verify_order(noodle, ["Alice", "Bob", "Zack"]) ormar-0.12.2/tests/test_ordering/test_proper_order_of_sorting_apply.py000066400000000000000000000043521444363446500265200ustar00rootroot00000000000000from typing import Optional import databases import pytest import pytest_asyncio import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Book(ormar.Model): class Meta(BaseMeta): tablename = "books" orders_by = ["-ranking"] id: int = ormar.Integer(primary_key=True) author: Optional[Author] = ormar.ForeignKey( Author, orders_by=["name"], related_orders_by=["-year"] ) title: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) ranking: int = ormar.Integer(nullable=True) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest_asyncio.fixture(autouse=True, scope="function") async def cleanup(): yield async with database: await Book.objects.delete(each=True) await Author.objects.delete(each=True) @pytest.mark.asyncio async def test_default_orders_is_applied_from_reverse_relation(): async with database: tolkien = await Author(name="J.R.R. 
Tolkien").save() hobbit = await Book(author=tolkien, title="The Hobbit", year=1933).save() silmarillion = await Book( author=tolkien, title="The Silmarillion", year=1977 ).save() lotr = await Book( author=tolkien, title="The Lord of the Rings", year=1955 ).save() tolkien = await Author.objects.select_related("books").get() assert tolkien.books[2] == hobbit assert tolkien.books[1] == lotr assert tolkien.books[0] == silmarillion tolkien = ( await Author.objects.select_related("books").order_by("books__title").get() ) assert tolkien.books[0] == hobbit assert tolkien.books[1] == lotr assert tolkien.books[2] == silmarillion ormar-0.12.2/tests/test_queries/000077500000000000000000000000001444363446500166075ustar00rootroot00000000000000ormar-0.12.2/tests/test_queries/__init__.py000066400000000000000000000000001444363446500207060ustar00rootroot00000000000000ormar-0.12.2/tests/test_queries/test_adding_related.py000066400000000000000000000025261444363446500231530ustar00rootroot00000000000000from typing import Optional import databases import pytest import sqlalchemy import asyncio import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class Department(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Course(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) completed: bool = ormar.Boolean(default=False) department: Optional[Department] = ormar.ForeignKey(Department) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_adding_relation_to_reverse_saves_the_child(): async with database: department = await 
Department(name="Science").save() course = Course(name="Math", completed=False) await department.courses.add(course) assert course.pk is not None assert course.department == department assert department.courses[0] == course ormar-0.12.2/tests/test_queries/test_aggr_functions.py000066400000000000000000000134461444363446500232400ustar00rootroot00000000000000from typing import Optional import databases import pytest import pytest_asyncio import sqlalchemy import ormar from ormar.exceptions import QueryDefinitionError from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" order_by = ["-name"] id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Book(ormar.Model): class Meta(BaseMeta): tablename = "books" order_by = ["year", "-ranking"] id: int = ormar.Integer(primary_key=True) author: Optional[Author] = ormar.ForeignKey(Author) title: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) ranking: int = ormar.Integer(nullable=True) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest_asyncio.fixture(autouse=True, scope="function") async def cleanup(): yield async with database: await Book.objects.delete(each=True) await Author.objects.delete(each=True) async def sample_data(): author = await Author(name="Author 1").save() await Book(title="Book 1", year=1920, ranking=3, author=author).save() await Book(title="Book 2", year=1930, ranking=1, author=author).save() await Book(title="Book 3", year=1923, ranking=5, author=author).save() @pytest.mark.asyncio async def test_min_method(): async with database: await sample_data() assert await 
Book.objects.min("year") == 1920 result = await Book.objects.min(["year", "ranking"]) assert result == dict(year=1920, ranking=1) assert await Book.objects.min("title") == "Book 1" assert await Author.objects.select_related("books").min("books__year") == 1920 result = await Author.objects.select_related("books").min( ["books__year", "books__ranking"] ) assert result == dict(books__year=1920, books__ranking=1) assert ( await Author.objects.select_related("books") .filter(books__year__gt=1925) .min("books__year") == 1930 ) @pytest.mark.asyncio async def test_max_method(): async with database: await sample_data() assert await Book.objects.max("year") == 1930 result = await Book.objects.max(["year", "ranking"]) assert result == dict(year=1930, ranking=5) assert await Book.objects.max("title") == "Book 3" assert await Author.objects.select_related("books").max("books__year") == 1930 result = await Author.objects.select_related("books").max( ["books__year", "books__ranking"] ) assert result == dict(books__year=1930, books__ranking=5) assert ( await Author.objects.select_related("books") .filter(books__year__lt=1925) .max("books__year") == 1923 ) @pytest.mark.asyncio async def test_sum_method(): async with database: await sample_data() assert await Book.objects.sum("year") == 5773 result = await Book.objects.sum(["year", "ranking"]) assert result == dict(year=5773, ranking=9) with pytest.raises(QueryDefinitionError): await Book.objects.sum("title") assert await Author.objects.select_related("books").sum("books__year") == 5773 result = await Author.objects.select_related("books").sum( ["books__year", "books__ranking"] ) assert result == dict(books__year=5773, books__ranking=9) assert ( await Author.objects.select_related("books") .filter(books__year__lt=1925) .sum("books__year") == 3843 ) @pytest.mark.asyncio async def test_avg_method(): async with database: await sample_data() assert round(float(await Book.objects.avg("year")), 2) == 1924.33 result = await 
Book.objects.avg(["year", "ranking"]) assert round(float(result.get("year")), 2) == 1924.33 assert result.get("ranking") == 3.0 with pytest.raises(QueryDefinitionError): await Book.objects.avg("title") result = await Author.objects.select_related("books").avg("books__year") assert round(float(result), 2) == 1924.33 result = await Author.objects.select_related("books").avg( ["books__year", "books__ranking"] ) assert round(float(result.get("books__year")), 2) == 1924.33 assert result.get("books__ranking") == 3.0 assert ( await Author.objects.select_related("books") .filter(books__year__lt=1925) .avg("books__year") == 1921.5 ) @pytest.mark.asyncio async def test_queryset_method(): async with database: await sample_data() author = await Author.objects.select_related("books").get() assert await author.books.min("year") == 1920 assert await author.books.max("year") == 1930 assert await author.books.sum("ranking") == 9 assert await author.books.avg("ranking") == 3.0 assert await author.books.max(["year", "title"]) == dict( year=1930, title="Book 3" ) @pytest.mark.asyncio async def test_count_method(): async with database: await sample_data() count = await Author.objects.select_related("books").count() assert count == 1 # The legacy functionality count = await Author.objects.select_related("books").count(distinct=False) assert count == 3 ormar-0.12.2/tests/test_queries/test_deep_relations_select_all.py000066400000000000000000000117651444363446500254160ustar00rootroot00000000000000import databases import pytest from sqlalchemy import func import ormar import sqlalchemy from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Chart(ormar.Model): class Meta(ormar.ModelMeta): tablename = "charts" database = database metadata = metadata chart_id = ormar.Integer(primary_key=True, autoincrement=True) name = ormar.String(max_length=200, unique=True, index=True) query_text = ormar.Text() datasets 
= ormar.JSON() layout = ormar.JSON() data_config = ormar.JSON() created_date = ormar.DateTime(server_default=func.now()) library = ormar.String(max_length=200, default="plotly") used_filters = ormar.JSON() class Report(ormar.Model): class Meta(ormar.ModelMeta): tablename = "reports" database = database metadata = metadata report_id = ormar.Integer(primary_key=True, autoincrement=True) name = ormar.String(max_length=200, unique=True, index=True) filters_position = ormar.String(max_length=200) created_date = ormar.DateTime(server_default=func.now()) class Language(ormar.Model): class Meta(ormar.ModelMeta): tablename = "languages" database = database metadata = metadata language_id = ormar.Integer(primary_key=True, autoincrement=True) code = ormar.String(max_length=5) name = ormar.String(max_length=200) class TranslationNode(ormar.Model): class Meta(ormar.ModelMeta): tablename = "translation_nodes" database = database metadata = metadata node_id = ormar.Integer(primary_key=True, autoincrement=True) node_type = ormar.String(max_length=200) class Translation(ormar.Model): class Meta(ormar.ModelMeta): tablename = "translations" database = database metadata = metadata translation_id = ormar.Integer(primary_key=True, autoincrement=True) node_id = ormar.ForeignKey(TranslationNode, related_name="translations") language = ormar.ForeignKey(Language, name="language_id") value = ormar.String(max_length=500) class Filter(ormar.Model): class Meta(ormar.ModelMeta): tablename = "filters" database = database metadata = metadata filter_id = ormar.Integer(primary_key=True, autoincrement=True) name = ormar.String(max_length=200, unique=True, index=True) label = ormar.String(max_length=200) query_text = ormar.Text() allow_multiselect = ormar.Boolean(default=True) created_date = ormar.DateTime(server_default=func.now()) is_dynamic = ormar.Boolean(default=True) is_date = ormar.Boolean(default=False) translation = ormar.ForeignKey(TranslationNode, name="translation_node_id") class 
FilterValue(ormar.Model): class Meta(ormar.ModelMeta): tablename = "filter_values" database = database metadata = metadata value_id = ormar.Integer(primary_key=True, autoincrement=True) value = ormar.String(max_length=300) label = ormar.String(max_length=300) filter = ormar.ForeignKey(Filter, name="filter_id", related_name="values") translation = ormar.ForeignKey(TranslationNode, name="translation_node_id") class FilterXReport(ormar.Model): class Meta(ormar.ModelMeta): tablename = "filters_x_reports" database = database metadata = metadata filter_x_report_id = ormar.Integer(primary_key=True) filter = ormar.ForeignKey(Filter, name="filter_id", related_name="reports") report = ormar.ForeignKey(Report, name="report_id", related_name="filters") sort_order = ormar.Integer() default_value = ormar.Text() is_visible = ormar.Boolean() class ChartXReport(ormar.Model): class Meta(ormar.ModelMeta): tablename = "charts_x_reports" database = database metadata = metadata chart_x_report_id = ormar.Integer(primary_key=True) chart = ormar.ForeignKey(Chart, name="chart_id", related_name="reports") report = ormar.ForeignKey(Report, name="report_id", related_name="charts") sort_order = ormar.Integer() width = ormar.Integer() class ChartColumn(ormar.Model): class Meta(ormar.ModelMeta): tablename = "charts_columns" database = database metadata = metadata column_id = ormar.Integer(primary_key=True, autoincrement=True) chart = ormar.ForeignKey(Chart, name="chart_id", related_name="columns") column_name = ormar.String(max_length=200) column_type = ormar.String(max_length=200) translation = ormar.ForeignKey(TranslationNode, name="translation_node_id") @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_saving_related_fk_rel(): async with database: async with database.transaction(force_rollback=True): 
await Report.objects.select_all(follow=True).all() ormar-0.12.2/tests/test_queries/test_filter_groups.py000066400000000000000000000111421444363446500231030ustar00rootroot00000000000000from typing import Optional import databases import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Book(ormar.Model): class Meta(BaseMeta): tablename = "books" id: int = ormar.Integer(primary_key=True) author: Optional[Author] = ormar.ForeignKey(Author) title: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) def test_or_group(): result = ormar.or_(name="aa", books__title="bb") result.resolve(model_cls=Author) assert len(result.actions) == 2 assert result.actions[0].target_model == Author assert result.actions[1].target_model == Book assert ( str(result.get_text_clause().compile(compile_kwargs={"literal_binds": True})) == f"(authors.name = 'aa' OR " f"{result.actions[1].table_prefix}" f"_books.title = 'bb')" ) def test_and_group(): result = ormar.and_(name="aa", books__title="bb") result.resolve(model_cls=Author) assert len(result.actions) == 2 assert result.actions[0].target_model == Author assert result.actions[1].target_model == Book assert ( str(result.get_text_clause().compile(compile_kwargs={"literal_binds": True})) == f"(authors.name = 'aa' AND " f"{result.actions[1].table_prefix}" f"_books.title = 'bb')" ) def test_nested_and(): result = ormar.and_( ormar.or_(name="aa", books__title="bb"), ormar.or_(name="cc", books__title="dd") ) result.resolve(model_cls=Author) assert len(result.actions) == 0 assert len(result._nested_groups) == 2 book_prefix = result._nested_groups[0].actions[1].table_prefix assert ( 
str(result.get_text_clause().compile(compile_kwargs={"literal_binds": True})) == f"((authors.name = 'aa' OR " f"{book_prefix}" f"_books.title = 'bb') AND " f"(authors.name = 'cc' OR " f"{book_prefix}" f"_books.title = 'dd'))" ) def test_nested_group_and_action(): result = ormar.and_(ormar.or_(name="aa", books__title="bb"), books__title="dd") result.resolve(model_cls=Author) assert len(result.actions) == 1 assert len(result._nested_groups) == 1 book_prefix = result._nested_groups[0].actions[1].table_prefix assert ( str(result.get_text_clause().compile(compile_kwargs={"literal_binds": True})) == f"((authors.name = 'aa' OR " f"{book_prefix}" f"_books.title = 'bb') AND " f"{book_prefix}" f"_books.title = 'dd')" ) def test_deeply_nested_or(): result = ormar.or_( ormar.and_( ormar.or_(name="aa", books__title="bb"), ormar.or_(name="cc", books__title="dd"), ), ormar.and_( ormar.or_(books__year__lt=1900, books__title="11"), ormar.or_(books__year__gt="xx", books__title="22"), ), ) result.resolve(model_cls=Author) assert len(result.actions) == 0 assert len(result._nested_groups) == 2 assert len(result._nested_groups[0]._nested_groups) == 2 book_prefix = result._nested_groups[0]._nested_groups[0].actions[1].table_prefix result_qry = str( result.get_text_clause().compile(compile_kwargs={"literal_binds": True}) ) expected_qry = ( f"(((authors.name = 'aa' OR {book_prefix}_books.title = 'bb') AND " f"(authors.name = 'cc' OR {book_prefix}_books.title = 'dd')) " f"OR (({book_prefix}_books.year < 1900 OR {book_prefix}_books.title = '11') AND" f" ({book_prefix}_books.year > 'xx' OR {book_prefix}_books.title = '22')))" ) assert result_qry.replace("\n", "") == expected_qry.replace("\n", "") def test_one_model_group(): result = ormar.and_(year__gt=1900, title="bb") result.resolve(model_cls=Book) assert len(result.actions) == 2 assert len(result._nested_groups) == 0 def test_one_model_nested_group(): result = ormar.and_( ormar.or_(year__gt=1900, title="bb"), ormar.or_(year__lt=1800, 
title="aa") ) result.resolve(model_cls=Book) assert len(result.actions) == 0 assert len(result._nested_groups) == 2 def test_one_model_with_group(): result = ormar.or_(ormar.and_(year__gt=1900, title="bb"), title="uu") result.resolve(model_cls=Book) assert len(result.actions) == 1 assert len(result._nested_groups) == 1 ormar-0.12.2/tests/test_queries/test_indirect_relations_to_self.py000066400000000000000000000040031444363446500256110ustar00rootroot00000000000000from datetime import datetime import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class Node(ormar.Model): class Meta(ormar.ModelMeta): tablename = "node" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=120) type: str = ormar.String(max_length=12, default="FLOW") created_at: datetime = ormar.DateTime(timezone=True, default=datetime.now) class Edge(ormar.Model): class Meta(ormar.ModelMeta): tablename = "edge" database = database metadata = metadata id: str = ormar.String(primary_key=True, max_length=12) src_node: Node = ormar.ForeignKey(Node, related_name="next_edges") dst_node: Node = ormar.ForeignKey(Node, related_name="previous_edges") order: int = ormar.Integer(default=1) created_at: datetime = ormar.DateTime(timezone=True, default=datetime.now) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_sort_order_on_main_model(): async with database: node1 = await Node(name="Node 1").save() node2 = await Node(name="Node 2").save() node3 = await Node(name="Node 3").save() await Edge(id="Side 1", src_node=node1, dst_node=node2).save() await Edge(id="Side 2", src_node=node2, dst_node=node3, order=2).save() await Edge(id="Side 3", 
src_node=node3, dst_node=node1, order=3).save() active_nodes = await Node.objects.select_related( ["next_edges", "next_edges__dst_node"] ).all() assert len(active_nodes) == 3 assert active_nodes[0].next_edges[0].id == "Side 1" assert active_nodes[0].next_edges[0].dst_node.type == "FLOW" ormar-0.12.2/tests/test_queries/test_isnull_filter.py000066400000000000000000000064441444363446500231030ustar00rootroot00000000000000from typing import Optional import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Book(ormar.Model): class Meta(BaseMeta): tablename = "books" id: int = ormar.Integer(primary_key=True) author: Optional[Author] = ormar.ForeignKey(Author) title: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) class JsonModel(ormar.Model): class Meta(ormar.ModelMeta): metadata = metadata database = database tablename = "jsons" id = ormar.Integer(primary_key=True) text_field = ormar.Text(nullable=True) json_field = ormar.JSON(nullable=True) json_not_null = ormar.JSON() @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_is_null(): async with database: tolkien = await Author.objects.create(name="J.R.R. 
Tolkien") await Book.objects.create(author=tolkien, title="The Hobbit") await Book.objects.create( author=tolkien, title="The Lord of the Rings", year=1955 ) await Book.objects.create(author=tolkien, title="The Silmarillion", year=1977) books = await Book.objects.all(year__isnull=True) assert len(books) == 1 assert books[0].year is None assert books[0].title == "The Hobbit" books = await Book.objects.all(year__isnull=False) assert len(books) == 2 tolkien = await Author.objects.select_related("books").get( books__year__isnull=True ) assert len(tolkien.books) == 1 assert tolkien.books[0].year is None assert tolkien.books[0].title == "The Hobbit" tolkien = ( await Author.objects.select_related("books") .paginate(1, 10) .get(books__year__isnull=True) ) assert len(tolkien.books) == 1 assert tolkien.books[0].year is None assert tolkien.books[0].title == "The Hobbit" tolkien = await Author.objects.select_related("books").get( books__year__isnull=False ) assert len(tolkien.books) == 2 assert tolkien.books[0].year == 1955 assert tolkien.books[0].title == "The Lord of the Rings" @pytest.mark.asyncio async def test_isnull_json(): async with database: author = await JsonModel.objects.create(json_not_null=None) assert author.json_field is None non_null_text_fields = await JsonModel.objects.all(text_field__isnull=False) assert len(non_null_text_fields) == 0 non_null_json_fields = await JsonModel.objects.all(json_field__isnull=False) assert len(non_null_json_fields) == 0 non_null_json_fields = await JsonModel.objects.all(json_not_null__isnull=False) assert len(non_null_json_fields) == 1 ormar-0.12.2/tests/test_queries/test_nested_reverse_relations.py000066400000000000000000000061341444363446500253210ustar00rootroot00000000000000from typing import Optional import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = 
metadata database = database class DataSource(ormar.Model): class Meta(BaseMeta): tablename = "datasources" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=200, unique=True, index=True) class DataSourceTable(ormar.Model): class Meta(BaseMeta): tablename = "source_tables" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=200, index=True) source: Optional[DataSource] = ormar.ForeignKey( DataSource, name="source_id", related_name="tables", ondelete="CASCADE" ) class DataSourceTableColumn(ormar.Model): class Meta(BaseMeta): tablename = "source_columns" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=200, index=True) data_type: str = ormar.String(max_length=200) table: Optional[DataSourceTable] = ormar.ForeignKey( DataSourceTable, name="table_id", related_name="columns", ondelete="CASCADE" ) @pytest.fixture(autouse=True, scope="module") def create_test_database(): # pragma: no cover engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_double_nested_reverse_relation(): async with database: data_source = await DataSource(name="local").save() test_tables = [ { "name": "test1", "columns": [ {"name": "col1", "data_type": "test"}, {"name": "col2", "data_type": "test2"}, {"name": "col3", "data_type": "test3"}, ], }, { "name": "test2", "columns": [ {"name": "col4", "data_type": "test"}, {"name": "col5", "data_type": "test2"}, {"name": "col6", "data_type": "test3"}, ], }, ] data_source.tables = test_tables await data_source.save_related(save_all=True, follow=True) tables = await DataSourceTable.objects.all() assert len(tables) == 2 columns = await DataSourceTableColumn.objects.all() assert len(columns) == 6 data_source = ( await DataSource.objects.select_related("tables__columns") .filter(tables__name__in=["test1", "test2"], name="local") .get() ) assert len(data_source.tables) 
== 2 assert len(data_source.tables[0].columns) == 3 assert data_source.tables[0].columns[0].name == "col1" assert data_source.tables[0].columns[2].name == "col3" assert len(data_source.tables[1].columns) == 3 assert data_source.tables[1].columns[0].name == "col4" assert data_source.tables[1].columns[2].name == "col6" ormar-0.12.2/tests/test_queries/test_non_relation_fields_not_merged.py000066400000000000000000000024471444363446500264470ustar00rootroot00000000000000from typing import Dict, List, Optional import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Chart(ormar.Model): class Meta(BaseMeta): tablename = "authors" id: int = ormar.Integer(primary_key=True) datasets = ormar.JSON() class Config(ormar.Model): class Meta(BaseMeta): tablename = "books" id: int = ormar.Integer(primary_key=True) chart: Optional[Chart] = ormar.ForeignKey(Chart) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_list_field_that_is_not_relation_is_not_merged(): async with database: chart = await Chart.objects.create(datasets=[{"test": "ok"}]) await Config.objects.create(chart=chart) await Config.objects.create(chart=chart) chart2 = await Chart.objects.select_related("configs").get() assert len(chart2.datasets) == 1 assert chart2.datasets == [{"test": "ok"}] ormar-0.12.2/tests/test_queries/test_or_filters.py000066400000000000000000000205321444363446500223720ustar00rootroot00000000000000from typing import Optional import databases import pytest import sqlalchemy import ormar from ormar.exceptions import QueryDefinitionError from tests.settings import DATABASE_URL database = 
databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Book(ormar.Model): class Meta(BaseMeta): tablename = "books" id: int = ormar.Integer(primary_key=True) author: Optional[Author] = ormar.ForeignKey(Author) title: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_or_filters(): async with database: tolkien = await Author(name="J.R.R. Tolkien").save() await Book(author=tolkien, title="The Hobbit", year=1933).save() await Book(author=tolkien, title="The Lord of the Rings", year=1955).save() await Book(author=tolkien, title="The Silmarillion", year=1977).save() sapkowski = await Author(name="Andrzej Sapkowski").save() await Book(author=sapkowski, title="The Witcher", year=1990).save() await Book(author=sapkowski, title="The Tower of Fools", year=2002).save() books = ( await Book.objects.select_related("author") .filter(ormar.or_(author__name="J.R.R. Tolkien", year__gt=1970)) .all() ) assert len(books) == 5 books = ( await Book.objects.select_related("author") .filter(ormar.or_(author__name="J.R.R. Tolkien", year__lt=1995)) .all() ) assert len(books) == 4 assert not any([x.title == "The Tower of Fools" for x in books]) books = ( await Book.objects.select_related("author") .filter((Book.author.name == "J.R.R. 
Tolkien") | (Book.year < 1995)) .all() ) assert len(books) == 4 assert not any([x.title == "The Tower of Fools" for x in books]) books = ( await Book.objects.select_related("author") .filter(ormar.or_(year__gt=1960, year__lt=1940)) .filter(author__name="J.R.R. Tolkien") .all() ) assert len(books) == 2 assert books[0].title == "The Hobbit" assert books[1].title == "The Silmarillion" books = ( await Book.objects.select_related("author") .filter( ormar.and_( ormar.or_(year__gt=1960, year__lt=1940), author__name="J.R.R. Tolkien", ) ) .all() ) assert len(books) == 2 assert books[0].title == "The Hobbit" assert books[1].title == "The Silmarillion" books = ( await Book.objects.select_related("author") .filter( ormar.or_( ormar.and_(year__gt=1960, author__name="J.R.R. Tolkien"), ormar.and_(year__lt=2000, author__name="Andrzej Sapkowski"), ) ) .filter(title__startswith="The") .all() ) assert len(books) == 2 assert books[0].title == "The Silmarillion" assert books[1].title == "The Witcher" books = ( await Book.objects.select_related("author") .filter( ( ( (Book.year > 1960) & (Book.author.name == "J.R.R. Tolkien") | ( (Book.year < 2000) & (Book.author.name == "Andrzej Sapkowski") ) ) & (Book.title.startswith("The")) ) ) .all() ) assert len(books) == 2 assert books[0].title == "The Silmarillion" assert books[1].title == "The Witcher" books = ( await Book.objects.select_related("author") .filter( ormar.or_( ormar.and_( ormar.or_(year__gt=1960, year__lt=1940), author__name="J.R.R. Tolkien", ), ormar.and_(year__lt=2000, author__name="Andrzej Sapkowski"), ) ) .all() ) assert len(books) == 3 assert books[0].title == "The Hobbit" assert books[1].title == "The Silmarillion" assert books[2].title == "The Witcher" books = ( await Book.objects.select_related("author") .exclude( ormar.or_( ormar.and_(year__gt=1960, author__name="J.R.R. 
Tolkien"), ormar.and_(year__lt=2000, author__name="Andrzej Sapkowski"), ) ) .filter(title__startswith="The") .all() ) assert len(books) == 3 assert not any([x.title in ["The Silmarillion", "The Witcher"] for x in books]) books = ( await Book.objects.select_related("author") .filter( ormar.or_( ormar.and_(year__gt=1960, author__name="J.R.R. Tolkien"), ormar.and_(year__lt=2000, author__name="Andrzej Sapkowski"), title__icontains="hobbit", ) ) .filter(title__startswith="The") .all() ) assert len(books) == 3 assert not any( [x.title in ["The Tower of Fools", "The Lord of the Rings"] for x in books] ) books = ( await Book.objects.select_related("author") .filter(ormar.or_(year__gt=1980, year__lt=1910)) .filter(title__startswith="The") .limit(1) .all() ) assert len(books) == 1 assert books[0].title == "The Witcher" books = ( await Book.objects.select_related("author") .filter(ormar.or_(year__gt=1980, author__name="Andrzej Sapkowski")) .filter(title__startswith="The") .limit(1) .all() ) assert len(books) == 1 assert books[0].title == "The Witcher" books = ( await Book.objects.select_related("author") .filter(ormar.or_(year__gt=1980, author__name="Andrzej Sapkowski")) .filter(title__startswith="The") .limit(1) .offset(1) .all() ) assert len(books) == 1 assert books[0].title == "The Tower of Fools" books = ( await Book.objects.select_related("author") .filter(ormar.or_(year__gt=1980, author__name="Andrzej Sapkowski")) .filter(title__startswith="The") .limit(1) .offset(1) .order_by("-id") .all() ) assert len(books) == 1 assert books[0].title == "The Witcher" with pytest.raises(QueryDefinitionError): await Book.objects.select_related("author").filter("wrong").all() books = await tolkien.books.filter( ormar.or_(year__lt=1940, year__gt=1960) ).all() assert len(books) == 2 books = await tolkien.books.filter( ormar.and_( ormar.or_(year__lt=1940, year__gt=1960), title__icontains="hobbit" ) ).all() assert len(books) == 1 assert tolkien.books[0].title == "The Hobbit" books = ( await 
Book.objects.select_related("author") .filter(ormar.or_(author__name="J.R.R. Tolkien")) .all() ) assert len(books) == 3 books = ( await Book.objects.select_related("author") .filter( ormar.or_( ormar.and_(author__name__icontains="tolkien"), ormar.and_(author__name__icontains="sapkowski"), ) ) .all() ) assert len(books) == 5 ormar-0.12.2/tests/test_queries/test_order_by.py000066400000000000000000000270221444363446500220300ustar00rootroot00000000000000from typing import List, Optional import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Song(ormar.Model): class Meta: tablename = "songs" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) sort_order: int = ormar.Integer() class Owner(ormar.Model): class Meta: tablename = "owners" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class AliasNested(ormar.Model): class Meta: tablename = "aliases_nested" metadata = metadata database = database id: int = ormar.Integer(name="alias_id", primary_key=True) name: str = ormar.String(name="alias_name", max_length=100) class AliasTest(ormar.Model): class Meta: tablename = "aliases" metadata = metadata database = database id: int = ormar.Integer(name="alias_id", primary_key=True) name: str = ormar.String(name="alias_name", max_length=100) nested = ormar.ForeignKey(AliasNested, name="nested_alias") class Toy(ormar.Model): class Meta: tablename = "toys" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) owner: Owner = ormar.ForeignKey(Owner) class Factory(ormar.Model): class Meta: tablename = "factories" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = 
ormar.String(max_length=100) class Car(ormar.Model): class Meta: tablename = "cars" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) factory: Optional[Factory] = ormar.ForeignKey(Factory) class User(ormar.Model): class Meta: tablename = "users" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) cars: List[Car] = ormar.ManyToMany(Car) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_sort_order_on_main_model(): async with database: await Song.objects.create(name="Song 3", sort_order=3) await Song.objects.create(name="Song 1", sort_order=1) await Song.objects.create(name="Song 2", sort_order=2) songs = await Song.objects.all() assert songs[0].name == "Song 3" assert songs[1].name == "Song 1" assert songs[2].name == "Song 2" songs = await Song.objects.order_by("-sort_order").all() assert songs[0].name == "Song 3" assert songs[1].name == "Song 2" assert songs[2].name == "Song 1" songs = await Song.objects.order_by(Song.sort_order.desc()).all() assert songs[0].name == "Song 3" assert songs[1].name == "Song 2" assert songs[2].name == "Song 1" songs = await Song.objects.order_by("sort_order").all() assert songs[0].name == "Song 1" assert songs[1].name == "Song 2" assert songs[2].name == "Song 3" songs = await Song.objects.order_by(Song.sort_order.asc()).all() assert songs[0].name == "Song 1" assert songs[1].name == "Song 2" assert songs[2].name == "Song 3" songs = await Song.objects.order_by("name").all() assert songs[0].name == "Song 1" assert songs[1].name == "Song 2" assert songs[2].name == "Song 3" songs = await Song.objects.order_by("name").limit(2).all() assert len(songs) == 2 assert songs[0].name == "Song 1" assert songs[1].name == 
"Song 2" await Song.objects.create(name="Song 4", sort_order=1) songs = await Song.objects.order_by(["sort_order", "name"]).all() assert songs[0].name == "Song 1" assert songs[1].name == "Song 4" assert songs[2].name == "Song 2" assert songs[3].name == "Song 3" songs = await Song.objects.order_by( [Song.sort_order.asc(), Song.name.asc()] ).all() assert songs[0].name == "Song 1" assert songs[1].name == "Song 4" assert songs[2].name == "Song 2" assert songs[3].name == "Song 3" @pytest.mark.asyncio async def test_sort_order_on_related_model(): async with database: aphrodite = await Owner.objects.create(name="Aphrodite") hermes = await Owner.objects.create(name="Hermes") zeus = await Owner.objects.create(name="Zeus") await Toy.objects.create(name="Toy 4", owner=zeus) await Toy.objects.create(name="Toy 5", owner=hermes) await Toy.objects.create(name="Toy 2", owner=aphrodite) await Toy.objects.create(name="Toy 1", owner=zeus) await Toy.objects.create(name="Toy 3", owner=aphrodite) await Toy.objects.create(name="Toy 6", owner=hermes) toys = await Toy.objects.select_related("owner").order_by("name").all() assert [x.name.replace("Toy ", "") for x in toys] == [ str(x + 1) for x in range(6) ] assert toys[0].owner == zeus assert toys[1].owner == aphrodite toys = await Toy.objects.select_related("owner").order_by("owner__name").all() assert toys[0].owner.name == toys[1].owner.name == "Aphrodite" assert toys[2].owner.name == toys[3].owner.name == "Hermes" assert toys[4].owner.name == toys[5].owner.name == "Zeus" owner = ( await Owner.objects.select_related("toys") .order_by("toys__name") .filter(name="Zeus") .get() ) assert owner.toys[0].name == "Toy 1" assert owner.toys[1].name == "Toy 4" owner = ( await Owner.objects.select_related("toys") .order_by("-toys__name") .filter(name="Zeus") .get() ) assert owner.toys[0].name == "Toy 4" assert owner.toys[1].name == "Toy 1" owners = ( await Owner.objects.select_related("toys") .order_by("-toys__name") .filter(name__in=["Zeus", 
"Hermes"]) .all() ) assert owners[0].toys[0].name == "Toy 6" assert owners[0].toys[1].name == "Toy 5" assert owners[0].name == "Hermes" assert owners[1].toys[0].name == "Toy 4" assert owners[1].toys[1].name == "Toy 1" assert owners[1].name == "Zeus" await Toy.objects.create(name="Toy 7", owner=zeus) owners = ( await Owner.objects.select_related("toys") .order_by("-toys__name") .filter(name__in=["Zeus", "Hermes"]) .all() ) assert owners[0].toys[0].name == "Toy 7" assert owners[0].toys[1].name == "Toy 4" assert owners[0].toys[2].name == "Toy 1" assert owners[0].name == "Zeus" assert owners[1].toys[0].name == "Toy 6" assert owners[1].toys[1].name == "Toy 5" assert owners[1].name == "Hermes" toys = ( await Toy.objects.select_related("owner") .order_by(["owner__name", "name"]) .limit(2) .all() ) assert len(toys) == 2 assert toys[0].name == "Toy 2" assert toys[1].name == "Toy 3" @pytest.mark.asyncio async def test_sort_order_on_many_to_many(): async with database: factory1 = await Factory.objects.create(name="Factory 1") factory2 = await Factory.objects.create(name="Factory 2") car1 = await Car.objects.create(name="Buggy", factory=factory1) car2 = await Car.objects.create(name="Volkswagen", factory=factory2) car3 = await Car.objects.create(name="Ferrari", factory=factory1) car4 = await Car.objects.create(name="Volvo", factory=factory2) car5 = await Car.objects.create(name="Skoda", factory=factory1) car6 = await Car.objects.create(name="Seat", factory=factory2) user1 = await User.objects.create(name="Mark") user2 = await User.objects.create(name="Julie") await user1.cars.add(car1) await user1.cars.add(car3) await user1.cars.add(car4) await user1.cars.add(car5) await user2.cars.add(car1) await user2.cars.add(car2) await user2.cars.add(car5) await user2.cars.add(car6) user = ( await User.objects.select_related("cars") .filter(name="Mark") .order_by("cars__name") .get() ) assert user.cars[0].name == "Buggy" assert user.cars[1].name == "Ferrari" assert user.cars[2].name == 
"Skoda" assert user.cars[3].name == "Volvo" user = ( await User.objects.select_related("cars") .filter(name="Mark") .order_by("-cars__name") .get() ) assert user.cars[3].name == "Buggy" assert user.cars[2].name == "Ferrari" assert user.cars[1].name == "Skoda" assert user.cars[0].name == "Volvo" users = await User.objects.select_related("cars").order_by("-cars__name").all() assert users[0].name == "Mark" assert users[1].cars[0].name == "Volkswagen" assert users[1].cars[1].name == "Skoda" assert users[1].cars[2].name == "Seat" assert users[1].cars[3].name == "Buggy" users = ( await User.objects.select_related(["cars__factory"]) .order_by(["-cars__factory__name", "cars__name"]) .all() ) assert users[0].name == "Julie" assert users[0].cars[0].name == "Seat" assert users[0].cars[1].name == "Volkswagen" assert users[0].cars[2].name == "Buggy" assert users[0].cars[3].name == "Skoda" assert users[1].name == "Mark" assert users[1].cars[0].name == "Volvo" assert users[1].cars[1].name == "Buggy" assert users[1].cars[2].name == "Ferrari" assert users[1].cars[3].name == "Skoda" @pytest.mark.asyncio async def test_sort_order_with_aliases(): async with database: al1 = await AliasTest.objects.create(name="Test4") al2 = await AliasTest.objects.create(name="Test2") al3 = await AliasTest.objects.create(name="Test1") al4 = await AliasTest.objects.create(name="Test3") aliases = await AliasTest.objects.order_by("-name").all() assert [alias.name[-1] for alias in aliases] == ["4", "3", "2", "1"] nest1 = await AliasNested.objects.create(name="Try1") nest2 = await AliasNested.objects.create(name="Try2") nest3 = await AliasNested.objects.create(name="Try3") nest4 = await AliasNested.objects.create(name="Try4") al1.nested = nest1 await al1.update() al2.nested = nest2 await al2.update() al3.nested = nest3 await al3.update() al4.nested = nest4 await al4.update() aliases = ( await AliasTest.objects.select_related("nested") .order_by("-nested__name") .all() ) assert aliases[0].nested.name == 
"Try4" assert aliases[1].nested.name == "Try3" assert aliases[2].nested.name == "Try2" assert aliases[3].nested.name == "Try1" ormar-0.12.2/tests/test_queries/test_pagination.py000066400000000000000000000067661444363446500223700ustar00rootroot00000000000000import databases import pytest import sqlalchemy import ormar from ormar import ModelMeta from ormar.exceptions import QueryDefinitionError from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class BaseMeta(ModelMeta): metadata = metadata database = database class Car(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class UsersCar(ormar.Model): class Meta(BaseMeta): tablename = "cars_x_users" class User(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) cars = ormar.ManyToMany(Car, through=UsersCar) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_limit_zero(): async with database: async with database.transaction(force_rollback=True): for i in range(5): await Car(name=f"{i}").save() cars = await Car.objects.limit(0).all() assert cars == [] assert len(cars) == 0 @pytest.mark.asyncio async def test_pagination_errors(): async with database: async with database.transaction(force_rollback=True): with pytest.raises(QueryDefinitionError): await Car.objects.paginate(0).all() with pytest.raises(QueryDefinitionError): await Car.objects.paginate(1, page_size=0).all() @pytest.mark.asyncio async def test_pagination_on_single_model(): async with database: async with database.transaction(force_rollback=True): for i in range(20): await Car(name=f"{i}").save() cars_page1 = await Car.objects.paginate(1, 
page_size=5).all() assert len(cars_page1) == 5 assert cars_page1[0].name == "0" assert cars_page1[4].name == "4" cars_page2 = await Car.objects.paginate(2, page_size=5).all() assert len(cars_page2) == 5 assert cars_page2[0].name == "5" assert cars_page2[4].name == "9" all_cars = await Car.objects.paginate(1).all() assert len(all_cars) == 20 half_cars = await Car.objects.paginate(2, page_size=10).all() assert len(half_cars) == 10 assert half_cars[0].name == "10" @pytest.mark.asyncio async def test_proxy_pagination(): async with database: async with database.transaction(force_rollback=True): user = await User(name="Jon").save() for i in range(20): c = await Car(name=f"{i}").save() await user.cars.add(c) await user.cars.paginate(1, page_size=5).all() assert len(user.cars) == 5 assert user.cars[0].name == "0" assert user.cars[4].name == "4" await user.cars.paginate(2, page_size=5).all() assert len(user.cars) == 5 assert user.cars[0].name == "5" assert user.cars[4].name == "9" await user.cars.paginate(1).all() assert len(user.cars) == 20 await user.cars.paginate(2, page_size=10).all() assert len(user.cars) == 10 assert user.cars[0].name == "10" ormar-0.12.2/tests/test_queries/test_queryproxy_on_m2m_models.py000066400000000000000000000173401444363446500253060ustar00rootroot00000000000000import asyncio from typing import List, Optional, Union import databases import pytest import sqlalchemy import ormar from ormar.exceptions import QueryDefinitionError from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Subject(ormar.Model): class Meta: tablename = "subjects" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=80) class Author(ormar.Model): class Meta: tablename = "authors" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) first_name: str = ormar.String(max_length=80) last_name: str = 
ormar.String(max_length=80) class Category(ormar.Model): class Meta: tablename = "categories" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=40) sort_order: int = ormar.Integer(nullable=True) subject: Optional[Subject] = ormar.ForeignKey(Subject) class PostCategory(ormar.Model): class Meta: tablename = "posts_categories" database = database metadata = metadata class Post(ormar.Model): class Meta: tablename = "posts" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) categories: Optional[Union[Category, List[Category]]] = ormar.ManyToMany( Category, through=PostCategory ) author: Optional[Author] = ormar.ForeignKey(Author) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_queryset_methods(): async with database: async with database.transaction(force_rollback=True): guido = await Author.objects.create( first_name="Guido", last_name="Van Rossum" ) subject = await Subject(name="Random").save() post = await Post.objects.create(title="Hello, M2M", author=guido) news = await Category.objects.create( name="News", sort_order=1, subject=subject ) breaking = await Category.objects.create( name="Breaking", sort_order=3, subject=subject ) # Add a category to a post. 
await post.categories.add(news) await post.categories.add(breaking) category, created = await post.categories.get_or_create(name="News") assert category == news assert len(post.categories) == 1 assert created is False category, created = await post.categories.get_or_create( name="Breaking News" ) assert category != breaking assert category.pk is not None assert len(post.categories) == 2 assert created is True await post.categories.update_or_create(pk=category.pk, name="Urgent News") assert len(post.categories) == 2 cat, created = await post.categories.get_or_create(name="Urgent News") assert cat.pk == category.pk assert len(post.categories) == 1 assert created is False await post.categories.remove(cat) await cat.delete() assert len(post.categories) == 0 category = await post.categories.update_or_create( name="Weather News", sort_order=2, subject=subject ) assert category.pk is not None assert category.posts[0] == post assert len(post.categories) == 1 categories = await post.categories.all() assert len(categories) == 3 == len(post.categories) assert await post.categories.exists() assert 3 == await post.categories.count() categories = await post.categories.limit(2).all() assert len(categories) == 2 == len(post.categories) categories2 = await post.categories.limit(2).offset(1).all() assert len(categories2) == 2 == len(post.categories) assert categories != categories2 categories = await post.categories.order_by("-sort_order").all() assert len(categories) == 3 == len(post.categories) assert post.categories[2].name == "News" assert post.categories[0].name == "Breaking" categories = await post.categories.exclude(name__icontains="news").all() assert len(categories) == 1 == len(post.categories) assert post.categories[0].name == "Breaking" categories = ( await post.categories.filter(name__icontains="news") .order_by("-name") .all() ) assert len(categories) == 2 == len(post.categories) assert post.categories[0].name == "Weather News" assert post.categories[1].name == "News" 
categories = await post.categories.fields("name").all() assert len(categories) == 3 == len(post.categories) for cat in post.categories: assert cat.sort_order is None categories = await post.categories.exclude_fields("sort_order").all() assert len(categories) == 3 == len(post.categories) for cat in post.categories: assert cat.sort_order is None assert cat.subject.name is None categories = await post.categories.select_related("subject").all() assert len(categories) == 3 == len(post.categories) for cat in post.categories: assert cat.subject.name is not None categories = await post.categories.prefetch_related("subject").all() assert len(categories) == 3 == len(post.categories) for cat in post.categories: assert cat.subject.name is not None @pytest.mark.asyncio async def test_queryset_update(): async with database: async with database.transaction(force_rollback=True): guido = await Author.objects.create( first_name="Guido", last_name="Van Rossum" ) subject = await Subject(name="Random").save() post = await Post.objects.create(title="Hello, M2M", author=guido) await post.categories.create(name="News", sort_order=1, subject=subject) await post.categories.create(name="Breaking", sort_order=3, subject=subject) await post.categories.order_by("sort_order").all() assert len(post.categories) == 2 assert post.categories[0].sort_order == 1 assert post.categories[0].name == "News" assert post.categories[1].sort_order == 3 assert post.categories[1].name == "Breaking" updated = await post.categories.update(each=True, name="Test") assert updated == 2 await post.categories.order_by("sort_order").all() assert len(post.categories) == 2 assert post.categories[0].name == "Test" assert post.categories[1].name == "Test" updated = await post.categories.filter(sort_order=3).update(name="Test 2") assert updated == 1 await post.categories.order_by("sort_order").all() assert len(post.categories) == 2 assert post.categories[0].name == "Test" assert post.categories[1].name == "Test 2" with 
pytest.raises(QueryDefinitionError): await post.categories.update(name="Test WRONG") ormar-0.12.2/tests/test_queries/test_queryset_level_methods.py000066400000000000000000000360351444363446500250220ustar00rootroot00000000000000from enum import Enum from typing import Optional import databases import pydantic import pytest import sqlalchemy from pydantic import Json import ormar from ormar import QuerySet from ormar.exceptions import ( ModelPersistenceError, QueryDefinitionError, ModelListEmptyError, ) from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class MySize(Enum): SMALL = 0 BIG = 1 class Book(ormar.Model): class Meta: tablename = "books" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) author: str = ormar.String(max_length=100) genre: str = ormar.String( max_length=100, default="Fiction", choices=["Fiction", "Adventure", "Historic", "Fantasy"], ) class ToDo(ormar.Model): class Meta: tablename = "todos" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) text: str = ormar.String(max_length=500) completed: bool = ormar.Boolean(default=False) pairs: pydantic.Json = ormar.JSON(default=[]) size = ormar.Enum(enum_class=MySize, default=MySize.SMALL) class Category(ormar.Model): class Meta: tablename = "categories" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=500) class Note(ormar.Model): class Meta: tablename = "notes" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) text: str = ormar.String(max_length=500) category: Optional[Category] = ormar.ForeignKey(Category) class ItemConfig(ormar.Model): class Meta(ormar.ModelMeta): metadata = metadata database = database tablename = "item_config" id: Optional[int] = ormar.Integer(primary_key=True) item_id: str = 
ormar.String(max_length=32, index=True) pairs: pydantic.Json = ormar.JSON(default=["2", "3"]) size = ormar.Enum(enum_class=MySize, default=MySize.SMALL) class QuerySetCls(QuerySet): async def first_or_404(self, *args, **kwargs): entity = await self.get_or_none(*args, **kwargs) if not entity: # maybe HTTPException in fastapi raise ValueError("customer not found") return entity class Customer(ormar.Model): class Meta: metadata = metadata database = database tablename = "customer" queryset_class = QuerySetCls id: Optional[int] = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=32) class JsonTestModel(ormar.Model): class Meta(ormar.ModelMeta): metadata = metadata database = database tablename = "test_model" id: int = ormar.Integer(primary_key=True) json_field: Json = ormar.JSON() @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_delete_and_update(): async with database: async with database.transaction(force_rollback=True): await Book.objects.create( title="Tom Sawyer", author="Twain, Mark", genre="Adventure" ) await Book.objects.create( title="War and Peace", author="Tolstoy, Leo", genre="Fiction" ) await Book.objects.create( title="Anna Karenina", author="Tolstoy, Leo", genre="Fiction" ) await Book.objects.create( title="Harry Potter", author="Rowling, J.K.", genre="Fantasy" ) await Book.objects.create( title="Lord of the Rings", author="Tolkien, J.R.", genre="Fantasy" ) all_books = await Book.objects.all() assert len(all_books) == 5 await Book.objects.filter(author="Tolstoy, Leo").update( author="Lenin, Vladimir" ) all_books = await Book.objects.filter(author="Lenin, Vladimir").all() assert len(all_books) == 2 historic_books = await Book.objects.filter(genre="Historic").all() assert len(historic_books) == 0 with pytest.raises(QueryDefinitionError): await 
Book.objects.update(genre="Historic") await Book.objects.filter(author="Lenin, Vladimir").update(genre="Historic") historic_books = await Book.objects.filter(genre="Historic").all() assert len(historic_books) == 2 await Book.objects.delete(genre="Fantasy") all_books = await Book.objects.all() assert len(all_books) == 3 await Book.objects.update(each=True, genre="Fiction") all_books = await Book.objects.filter(genre="Fiction").all() assert len(all_books) == 3 with pytest.raises(QueryDefinitionError): await Book.objects.delete() await Book.objects.delete(each=True) all_books = await Book.objects.all() assert len(all_books) == 0 @pytest.mark.asyncio async def test_get_or_create(): async with database: tom, created = await Book.objects.get_or_create( title="Volume I", author="Anonymous", genre="Fiction" ) assert await Book.objects.count() == 1 assert created is True second_tom, created = await Book.objects.get_or_create( title="Volume I", author="Anonymous", genre="Fiction" ) assert second_tom.pk == tom.pk assert created is False assert await Book.objects.count() == 1 assert await Book.objects.create( title="Volume I", author="Anonymous", genre="Fiction" ) with pytest.raises(ormar.exceptions.MultipleMatches): await Book.objects.get_or_create( title="Volume I", author="Anonymous", genre="Fiction" ) @pytest.mark.asyncio async def test_get_or_create_with_defaults(): async with database: book, created = await Book.objects.get_or_create( title="Nice book", _defaults={"author": "Mojix", "genre": "Historic"} ) assert created is True assert book.author == "Mojix" assert book.title == "Nice book" assert book.genre == "Historic" book2, created = await Book.objects.get_or_create( author="Mojix", _defaults={"title": "Book2"} ) assert created is False assert book2 == book assert book2.title == "Nice book" assert book2.author == "Mojix" assert book2.genre == "Historic" assert await Book.objects.count() == 1 book, created = await Book.objects.get_or_create( title="doesn't exist", 
_defaults={"title": "overwritten", "author": "Mojix", "genre": "Historic"}, ) assert created is True assert book.title == "overwritten" book2, created = await Book.objects.get_or_create( title="overwritten", _defaults={"title": "doesn't work"} ) assert created is False assert book2.title == "overwritten" assert book2 == book @pytest.mark.asyncio async def test_update_or_create(): async with database: tom = await Book.objects.update_or_create( title="Volume I", author="Anonymous", genre="Fiction" ) assert await Book.objects.count() == 1 assert await Book.objects.update_or_create(id=tom.id, genre="Historic") assert await Book.objects.count() == 1 assert await Book.objects.update_or_create(pk=tom.id, genre="Fantasy") assert await Book.objects.count() == 1 assert await Book.objects.create( title="Volume I", author="Anonymous", genre="Fantasy" ) with pytest.raises(ormar.exceptions.MultipleMatches): await Book.objects.get( title="Volume I", author="Anonymous", genre="Fantasy" ) @pytest.mark.asyncio async def test_bulk_create(): async with database: await ToDo.objects.bulk_create( [ ToDo(text="Buy the groceries."), ToDo(text="Call Mum.", completed=True), ToDo(text="Send invoices.", completed=True), ] ) todoes = await ToDo.objects.all() assert len(todoes) == 3 for todo in todoes: assert todo.pk is not None completed = await ToDo.objects.filter(completed=True).all() assert len(completed) == 2 with pytest.raises(ormar.exceptions.ModelListEmptyError): await ToDo.objects.bulk_create([]) @pytest.mark.asyncio async def test_bulk_create_json_field(): async with database: json_value = {"a": 1} test_model_1 = JsonTestModel(id=1, json_field=json_value) test_model_2 = JsonTestModel(id=2, json_field=json_value) # store one with .save() and the other with .bulk_create() await test_model_1.save() await JsonTestModel.objects.bulk_create([test_model_2]) # refresh from the database await test_model_1.load() await test_model_2.load() assert test_model_1.json_field == test_model_2.json_field 
# True # try to query the json field table = JsonTestModel.Meta.table query = table.select().where(table.c.json_field["a"].as_integer() == 1) res = [ JsonTestModel.from_row(record, source_model=JsonTestModel) for record in await database.fetch_all(query) ] assert test_model_1 in res assert test_model_2 in res assert len(res) == 2 @pytest.mark.asyncio async def test_bulk_create_with_relation(): async with database: category = await Category.objects.create(name="Sample Category") await Note.objects.bulk_create( [ Note(text="Buy the groceries.", category=category), Note(text="Call Mum.", category=category), ] ) todoes = await Note.objects.all() assert len(todoes) == 2 for todo in todoes: assert todo.category.pk == category.pk @pytest.mark.asyncio async def test_bulk_update(): async with database: await ToDo.objects.bulk_create( [ ToDo(text="Buy the groceries."), ToDo(text="Call Mum.", completed=True), ToDo(text="Send invoices.", completed=True), ] ) todoes = await ToDo.objects.all() assert len(todoes) == 3 for todo in todoes: todo.text = todo.text + "_1" todo.completed = False todo.size = MySize.BIG await ToDo.objects.bulk_update(todoes) completed = await ToDo.objects.filter(completed=False).all() assert len(completed) == 3 todoes = await ToDo.objects.all() assert len(todoes) == 3 for todo in todoes: assert todo.text[-2:] == "_1" assert todo.size == MySize.BIG @pytest.mark.asyncio async def test_bulk_update_with_only_selected_columns(): async with database: await ToDo.objects.bulk_create( [ ToDo(text="Reset the world simulation.", completed=False), ToDo(text="Watch kittens.", completed=True), ] ) todoes = await ToDo.objects.all() assert len(todoes) == 2 for todo in todoes: todo.text = todo.text + "_1" todo.completed = False await ToDo.objects.bulk_update(todoes, columns=["completed"]) completed = await ToDo.objects.filter(completed=False).all() assert len(completed) == 2 todoes = await ToDo.objects.all() assert len(todoes) == 2 for todo in todoes: assert 
todo.text[-2:] != "_1" @pytest.mark.asyncio async def test_bulk_update_with_relation(): async with database: category = await Category.objects.create(name="Sample Category") category2 = await Category.objects.create(name="Sample II Category") await Note.objects.bulk_create( [ Note(text="Buy the groceries.", category=category), Note(text="Call Mum.", category=category), Note(text="Text skynet.", category=category), ] ) notes = await Note.objects.all() assert len(notes) == 3 for note in notes: note.category = category2 await Note.objects.bulk_update(notes) notes_upd = await Note.objects.all() assert len(notes_upd) == 3 for note in notes_upd: assert note.category.pk == category2.pk @pytest.mark.asyncio async def test_bulk_update_not_saved_objts(): async with database: category = await Category.objects.create(name="Sample Category") with pytest.raises(ModelPersistenceError): await Note.objects.bulk_update( [ Note(text="Buy the groceries.", category=category), Note(text="Call Mum.", category=category), ] ) with pytest.raises(ModelListEmptyError): await Note.objects.bulk_update([]) @pytest.mark.asyncio async def test_bulk_operations_with_json(): async with database: items = [ ItemConfig(item_id="test1"), ItemConfig(item_id="test2"), ItemConfig(item_id="test3"), ] await ItemConfig.objects.bulk_create(items) items = await ItemConfig.objects.all() assert all(x.pairs == ["2", "3"] for x in items) for item in items: item.pairs = ["1"] await ItemConfig.objects.bulk_update(items) items = await ItemConfig.objects.all() assert all(x.pairs == ["1"] for x in items) items = await ItemConfig.objects.filter(ItemConfig.id > 1).all() for item in items: item.pairs = {"b": 2} await ItemConfig.objects.bulk_update(items) items = await ItemConfig.objects.filter(ItemConfig.id > 1).all() assert all(x.pairs == {"b": 2} for x in items) table = ItemConfig.Meta.table query = table.select().where(table.c.pairs["b"].as_integer() == 2) res = [ ItemConfig.from_row(record, source_model=ItemConfig) for 
record in await database.fetch_all(query) ] assert len(res) == 2 @pytest.mark.asyncio async def test_custom_queryset_cls(): async with database: with pytest.raises(ValueError): await Customer.objects.first_or_404(id=1) await Customer(name="test").save() c = await Customer.objects.first_or_404(name="test") assert c.name == "test" @pytest.mark.asyncio async def test_filter_enum(): async with database: it = ItemConfig(item_id="test_1") await it.save() it = await ItemConfig.objects.filter(size=MySize.SMALL).first() assert it ormar-0.12.2/tests/test_queries/test_quoting_table_names_in_on_join_clause.py000066400000000000000000000032231444363446500277750ustar00rootroot00000000000000import datetime import uuid from typing import Dict, Optional, Union import databases import pytest import sqlalchemy from sqlalchemy import create_engine import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() engine = create_engine(DATABASE_URL) class Team(ormar.Model): class Meta: tablename: str = "team" database = database metadata = metadata id: uuid.UUID = ormar.UUID(default=uuid.uuid4, primary_key=True, index=True) name = ormar.Text(nullable=True) client_id = ormar.Text(nullable=True) client_secret = ormar.Text(nullable=True) created_on = ormar.DateTime(timezone=True, default=datetime.datetime.utcnow()) class User(ormar.Model): class Meta: tablename: str = "user" database = database metadata = metadata id: uuid.UUID = ormar.UUID(default=uuid.uuid4, primary_key=True, index=True) client_user_id = ormar.Text() token = ormar.Text(nullable=True) team: Optional[Team] = ormar.ForeignKey(to=Team, name="team_id") class Order(ormar.Model): class Meta: tablename: str = "order" database = database metadata = metadata id: uuid.UUID = ormar.UUID(default=uuid.uuid4, primary_key=True, index=True) user: Optional[Union[User, Dict]] = ormar.ForeignKey(User) @pytest.fixture(autouse=True, scope="module") def create_test_database(): 
metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_quoting_on_clause_without_prefix(): async with database: await User.objects.select_related("orders").all() ormar-0.12.2/tests/test_queries/test_reserved_sql_keywords_escaped.py000066400000000000000000000055021444363446500263330ustar00rootroot00000000000000import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class User(ormar.Model): class Meta(BaseMeta): tablename = "user" id: int = ormar.Integer(primary_key=True, autoincrement=True, nullable=False) user: str = ormar.String( unique=True, index=True, nullable=False, max_length=255 ) # ID of the user on auth0 first: str = ormar.String(nullable=False, max_length=255) last: str = ormar.String(nullable=False, max_length=255) email: str = ormar.String(unique=True, index=True, nullable=False, max_length=255) display_name: str = ormar.String( unique=True, index=True, nullable=False, max_length=255 ) pic_url: str = ormar.Text(nullable=True) class Task(ormar.Model): class Meta(BaseMeta): tablename = "task" id: int = ormar.Integer(primary_key=True, autoincrement=True, nullable=False) from_: str = ormar.String(name="from", nullable=True, max_length=200) user = ormar.ForeignKey(User) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_single_model_quotes(): async with database: await User.objects.create( user="test", first="first", last="last", email="email@com.com", display_name="first last", ) user = await User.objects.order_by("user").get(first="first") assert user.last == "last" assert user.email == "email@com.com" 
@pytest.mark.asyncio async def test_two_model_quotes(): async with database: user = await User.objects.create( user="test", first="first", last="last", email="email@com.com", display_name="first last", ) await Task(user=user, from_="aa").save() await Task(user=user, from_="bb").save() task = ( await Task.objects.select_related("user") .order_by("user__user") .get(from_="aa") ) assert task.user.last == "last" assert task.user.email == "email@com.com" tasks = await Task.objects.select_related("user").order_by("-from").all() assert len(tasks) == 2 assert tasks[0].user.last == "last" assert tasks[0].user.email == "email@com.com" assert tasks[0].from_ == "bb" assert tasks[1].user.last == "last" assert tasks[1].user.email == "email@com.com" assert tasks[1].from_ == "aa" ormar-0.12.2/tests/test_queries/test_reverse_fk_queryset.py000066400000000000000000000206221444363446500243160ustar00rootroot00000000000000from typing import Optional import databases import pytest import sqlalchemy import ormar from ormar import NoMatch from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True, name="album_id") name: str = ormar.String(max_length=100) is_best_seller: bool = ormar.Boolean(default=False) class Writer(ormar.Model): class Meta: tablename = "writers" metadata = metadata database = database id: int = ormar.Integer(primary_key=True, name="writer_id") name: str = ormar.String(max_length=100) class Track(ormar.Model): class Meta: tablename = "tracks" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) album: Optional[Album] = ormar.ForeignKey(Album, name="album_id") title: str = ormar.String(max_length=100) position: int = ormar.Integer() play_count: int = ormar.Integer(nullable=True) written_by: Optional[Writer] = 
ormar.ForeignKey(Writer, name="writer_id") async def get_sample_data(): album = await Album(name="Malibu").save() writer1 = await Writer.objects.create(name="John") writer2 = await Writer.objects.create(name="Sue") track1 = await Track( album=album, title="The Bird", position=1, play_count=30, written_by=writer1 ).save() track2 = await Track( album=album, title="Heart don't stand a chance", position=2, play_count=20, written_by=writer2, ).save() tracks3 = await Track( album=album, title="The Waters", position=3, play_count=10, written_by=writer1 ).save() return album, [track1, track2, tracks3] @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_quering_by_reverse_fk(): async with database: async with database.transaction(force_rollback=True): sample_data = await get_sample_data() track1 = sample_data[1][0] album = await Album.objects.first() assert await album.tracks.exists() assert await album.tracks.count() == 3 track, created = await album.tracks.get_or_create( title="The Bird", position=1, play_count=30 ) assert track == track1 assert created is False assert len(album.tracks) == 1 track, created = await album.tracks.get_or_create( title="The Bird2", _defaults={"position": 4, "play_count": 5} ) assert track != track1 assert created is True assert track.pk is not None assert track.position == 4 and track.play_count == 5 assert len(album.tracks) == 2 await album.tracks.update_or_create(pk=track.pk, play_count=50) assert len(album.tracks) == 2 track, created = await album.tracks.get_or_create(title="The Bird2") assert created is False assert track.play_count == 50 assert len(album.tracks) == 1 await album.tracks.remove(track) assert track.album is None await track.delete() assert len(album.tracks) == 0 track6 = await album.tracks.update_or_create( title="The Bird3", 
position=4, play_count=5 ) assert track6.pk is not None assert track6.play_count == 5 assert len(album.tracks) == 1 await album.tracks.remove(track6) assert track6.album is None await track6.delete() assert len(album.tracks) == 0 @pytest.mark.asyncio async def test_getting(): async with database: async with database.transaction(force_rollback=True): sample_data = await get_sample_data() album = sample_data[0] track1 = await album.tracks.fields(["album", "title", "position"]).get( title="The Bird" ) track2 = await album.tracks.exclude_fields("play_count").get( title="The Bird" ) for track in [track1, track2]: assert track.title == "The Bird" assert track.album == album assert track.play_count is None assert len(album.tracks) == 1 tracks = await album.tracks.all() assert len(tracks) == 3 assert len(album.tracks) == 3 tracks = await album.tracks.order_by("play_count").all() assert len(tracks) == 3 assert tracks[0].title == "The Waters" assert tracks[2].title == "The Bird" assert len(album.tracks) == 3 track = await album.tracks.create( title="The Bird Fly Away", position=4, play_count=10 ) assert track.title == "The Bird Fly Away" assert track.position == 4 assert track.album == album assert len(album.tracks) == 4 tracks = await album.tracks.all() assert len(tracks) == 4 tracks = await album.tracks.limit(2).all() assert len(tracks) == 2 tracks2 = await album.tracks.limit(2).offset(2).all() assert len(tracks2) == 2 assert tracks != tracks2 tracks3 = await album.tracks.filter(play_count__lt=15).all() assert len(tracks3) == 2 tracks4 = await album.tracks.exclude(play_count__lt=15).all() assert len(tracks4) == 2 assert tracks3 != tracks4 assert len(album.tracks) == 2 await album.tracks.clear() tracks = await album.tracks.all() assert len(tracks) == 0 assert len(album.tracks) == 0 still_tracks = await Track.objects.all() assert len(still_tracks) == 4 for track in still_tracks: assert track.album is None @pytest.mark.asyncio async def test_cleaning_related(): async with 
database: async with database.transaction(force_rollback=True): sample_data = await get_sample_data() album = sample_data[0] await album.tracks.clear(keep_reversed=False) tracks = await album.tracks.all() assert len(tracks) == 0 assert len(album.tracks) == 0 no_tracks = await Track.objects.all() assert len(no_tracks) == 0 @pytest.mark.asyncio async def test_loading_related(): async with database: async with database.transaction(force_rollback=True): sample_data = await get_sample_data() album = sample_data[0] tracks = await album.tracks.select_related("written_by").all() assert len(tracks) == 3 assert len(album.tracks) == 3 for track in tracks: assert track.written_by is not None tracks = await album.tracks.prefetch_related("written_by").all() assert len(tracks) == 3 assert len(album.tracks) == 3 for track in tracks: assert track.written_by is not None @pytest.mark.asyncio async def test_adding_removing(): async with database: async with database.transaction(force_rollback=True): sample_data = await get_sample_data() album = sample_data[0] track_new = await Track(title="Rainbow", position=5, play_count=300).save() await album.tracks.add(track_new) assert track_new.album == album assert len(album.tracks) == 4 track_check = await Track.objects.get(title="Rainbow") assert track_check.album == album await album.tracks.remove(track_new) assert track_new.album is None assert len(album.tracks) == 3 track1 = album.tracks[0] await album.tracks.remove(track1, keep_reversed=False) with pytest.raises(NoMatch): await track1.load() track_test = await Track.objects.get(title="Rainbow") assert track_test.album is None ormar-0.12.2/tests/test_queries/test_selecting_subset_of_columns.py000066400000000000000000000222671444363446500260170ustar00rootroot00000000000000import asyncio import itertools from typing import Optional, List import databases import pydantic import pytest import pytest_asyncio import sqlalchemy import ormar from tests.settings import DATABASE_URL database = 
databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class NickNames(ormar.Model): class Meta: tablename = "nicks" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="hq_name") is_lame: bool = ormar.Boolean(nullable=True) class NicksHq(ormar.Model): class Meta: tablename = "nicks_x_hq" metadata = metadata database = database class HQ(ormar.Model): class Meta: tablename = "hqs" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="hq_name") nicks: List[NickNames] = ormar.ManyToMany(NickNames, through=NicksHq) class Company(ormar.Model): class Meta: tablename = "companies" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=False, name="company_name") founded: int = ormar.Integer(nullable=True) hq: HQ = ormar.ForeignKey(HQ) class Car(ormar.Model): class Meta: tablename = "cars" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) manufacturer: Optional[Company] = ormar.ForeignKey(Company) name: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) gearbox_type: str = ormar.String(max_length=20, nullable=True) gears: int = ormar.Integer(nullable=True) aircon_type: str = ormar.String(max_length=20, nullable=True) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.fixture(scope="module") def event_loop(): loop = asyncio.get_event_loop_policy().new_event_loop() yield loop loop.close() @pytest_asyncio.fixture(autouse=True, scope="module") async def sample_data(event_loop, create_test_database): async with database: nick1 = await NickNames.objects.create(name="Nippon", is_lame=False) nick2 
= await NickNames.objects.create(name="EroCherry", is_lame=True) hq = await HQ.objects.create(name="Japan") await hq.nicks.add(nick1) await hq.nicks.add(nick2) toyota = await Company.objects.create(name="Toyota", founded=1937, hq=hq) await Car.objects.create( manufacturer=toyota, name="Corolla", year=2020, gearbox_type="Manual", gears=5, aircon_type="Manual", ) await Car.objects.create( manufacturer=toyota, name="Yaris", year=2019, gearbox_type="Manual", gears=5, aircon_type="Manual", ) await Car.objects.create( manufacturer=toyota, name="Supreme", year=2020, gearbox_type="Auto", gears=6, aircon_type="Auto", ) @pytest.mark.asyncio async def test_selecting_subset(): async with database: async with database.transaction(force_rollback=True): all_cars = ( await Car.objects.select_related(["manufacturer__hq__nicks"]) .fields( [ "id", "name", "manufacturer__name", "manufacturer__hq__name", "manufacturer__hq__nicks__name", ] ) .all() ) all_cars2 = ( await Car.objects.select_related(["manufacturer__hq__nicks"]) .fields( { "id": ..., "name": ..., "manufacturer": { "name": ..., "hq": {"name": ..., "nicks": {"name": ...}}, }, } ) .all() ) all_cars3 = ( await Car.objects.select_related(["manufacturer__hq__nicks"]) .fields( { "id": ..., "name": ..., "manufacturer": { "name": ..., "hq": {"name": ..., "nicks": {"name"}}, }, } ) .all() ) assert all_cars3 == all_cars for car in itertools.chain(all_cars, all_cars2): assert all( getattr(car, x) is None for x in ["year", "gearbox_type", "gears", "aircon_type"] ) assert car.manufacturer.name == "Toyota" assert car.manufacturer.founded is None assert car.manufacturer.hq.name == "Japan" assert len(car.manufacturer.hq.nicks) == 2 assert car.manufacturer.hq.nicks[0].is_lame is None all_cars = ( await Car.objects.select_related("manufacturer") .fields("id") .fields(["name"]) .all() ) for car in all_cars: assert all( getattr(car, x) is None for x in ["year", "gearbox_type", "gears", "aircon_type"] ) assert car.manufacturer.name == "Toyota" 
assert car.manufacturer.founded == 1937 assert car.manufacturer.hq.name is None all_cars_check = await Car.objects.select_related("manufacturer").all() all_cars_with_whole_nested = ( await Car.objects.select_related("manufacturer") .fields(["id", "name", "year", "gearbox_type", "gears", "aircon_type"]) .fields({"manufacturer": ...}) .all() ) for car in itertools.chain(all_cars_check, all_cars_with_whole_nested): assert all( getattr(car, x) is not None for x in ["year", "gearbox_type", "gears", "aircon_type"] ) assert car.manufacturer.name == "Toyota" assert car.manufacturer.founded == 1937 all_cars_dummy = ( await Car.objects.select_related("manufacturer") .fields(["id", "name", "year", "gearbox_type", "gears", "aircon_type"]) # .fields({"manufacturer": ...}) # .exclude_fields({"manufacturer": ...}) .fields({"manufacturer": {"name"}}) .exclude_fields({"manufacturer__founded"}) .all() ) assert all_cars_dummy[0].manufacturer.founded is None with pytest.raises(pydantic.error_wrappers.ValidationError): # cannot exclude mandatory model columns - company__name in this example await Car.objects.select_related("manufacturer").fields( ["id", "name", "manufacturer__founded"] ).all() @pytest.mark.asyncio async def test_selecting_subset_of_through_model(): async with database: car = ( await Car.objects.select_related(["manufacturer__hq__nicks"]) .fields( { "id": ..., "name": ..., "manufacturer": { "name": ..., "hq": {"name": ..., "nicks": {"name": ...}}, }, } ) .exclude_fields("manufacturer__hq__nickshq") .get() ) assert car.manufacturer.hq.nicks[0].nickshq is None car = ( await Car.objects.select_related(["manufacturer__hq__nicks"]) .fields( { "id": ..., "name": ..., "manufacturer": { "name": ..., "hq": {"name": ..., "nicks": {"name": ...}}, }, } ) .exclude_fields({"manufacturer": {"hq": {"nickshq": ...}}}) .get() ) assert car.manufacturer.hq.nicks[0].nickshq is None car = ( await Car.objects.select_related(["manufacturer__hq__nicks"]) .fields( { "id": ..., "name": ..., 
"manufacturer": { "name": ..., "hq": {"name": ..., "nicks": {"name": ...}}, }, } ) .exclude_fields("manufacturer__hq__nickshq__nick") .get() ) assert car.manufacturer.hq.nicks[0].nickshq is not None ormar-0.12.2/tests/test_queries/test_values_and_values_list.py000066400000000000000000000332321444363446500247560ustar00rootroot00000000000000import asyncio from typing import List, Optional import databases import pytest import pytest_asyncio import sqlalchemy import ormar from ormar.exceptions import QueryDefinitionError from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): database = database metadata = metadata class User(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Role(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) users: List[User] = ormar.ManyToMany(User) class Category(ormar.Model): class Meta(BaseMeta): tablename = "categories" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=40) sort_order: int = ormar.Integer(nullable=True) created_by: Optional[User] = ormar.ForeignKey(User, related_name="categories") class Post(ormar.Model): class Meta(BaseMeta): tablename = "posts" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=200) category: Optional[Category] = ormar.ForeignKey(Category) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.fixture(scope="module") def event_loop(): loop = asyncio.get_event_loop_policy().new_event_loop() yield loop loop.close() @pytest_asyncio.fixture(autouse=True, scope="module") async def sample_data(event_loop, create_test_database): async with database: creator = await 
User(name="Anonymous").save() admin = await Role(name="admin").save() editor = await Role(name="editor").save() await creator.roles.add(admin) await creator.roles.add(editor) news = await Category(name="News", sort_order=0, created_by=creator).save() await Post(name="Ormar strikes again!", category=news).save() await Post(name="Why don't you use ormar yet?", category=news).save() await Post(name="Check this out, ormar now for free", category=news).save() @pytest.mark.asyncio async def test_simple_queryset_values(): async with database: posts = await Post.objects.values() assert posts == [ {"id": 1, "name": "Ormar strikes again!", "category": 1}, {"id": 2, "name": "Why don't you use ormar yet?", "category": 1}, {"id": 3, "name": "Check this out, ormar now for free", "category": 1}, ] @pytest.mark.asyncio async def test_queryset_values_nested_relation(): async with database: posts = await Post.objects.select_related("category__created_by").values() assert posts == [ { "id": 1, "name": "Ormar strikes again!", "category": 1, "category__id": 1, "category__name": "News", "category__sort_order": 0, "category__created_by": 1, "category__created_by__id": 1, "category__created_by__name": "Anonymous", }, { "category": 1, "id": 2, "name": "Why don't you use ormar yet?", "category__id": 1, "category__name": "News", "category__sort_order": 0, "category__created_by": 1, "category__created_by__id": 1, "category__created_by__name": "Anonymous", }, { "id": 3, "name": "Check this out, ormar now for free", "category": 1, "category__id": 1, "category__name": "News", "category__sort_order": 0, "category__created_by": 1, "category__created_by__id": 1, "category__created_by__name": "Anonymous", }, ] @pytest.mark.asyncio async def test_queryset_values_nested_relation_subset_of_fields(): async with database: posts = await Post.objects.select_related("category__created_by").values( ["name", "category__name", "category__created_by__name"] ) assert posts == [ { "name": "Ormar strikes again!", 
"category__name": "News", "category__created_by__name": "Anonymous", }, { "name": "Why don't you use ormar yet?", "category__name": "News", "category__created_by__name": "Anonymous", }, { "name": "Check this out, ormar now for free", "category__name": "News", "category__created_by__name": "Anonymous", }, ] @pytest.mark.asyncio async def test_queryset_simple_values_list(): async with database: posts = await Post.objects.values_list() assert posts == [ (1, "Ormar strikes again!", 1), (2, "Why don't you use ormar yet?", 1), (3, "Check this out, ormar now for free", 1), ] @pytest.mark.asyncio async def test_queryset_nested_relation_values_list(): async with database: posts = await Post.objects.select_related("category__created_by").values_list() assert posts == [ (1, "Ormar strikes again!", 1, 1, "News", 0, 1, 1, "Anonymous"), (2, "Why don't you use ormar yet?", 1, 1, "News", 0, 1, 1, "Anonymous"), ( 3, "Check this out, ormar now for free", 1, 1, "News", 0, 1, 1, "Anonymous", ), ] @pytest.mark.asyncio async def test_queryset_nested_relation_subset_of_fields_values_list(): async with database: posts = await Post.objects.select_related("category__created_by").values_list( ["name", "category__name", "category__created_by__name"] ) assert posts == [ ("Ormar strikes again!", "News", "Anonymous"), ("Why don't you use ormar yet?", "News", "Anonymous"), ("Check this out, ormar now for free", "News", "Anonymous"), ] @pytest.mark.asyncio async def test_m2m_values(): async with database: user = await User.objects.select_related("roles").values() assert user == [ { "id": 1, "name": "Anonymous", "roleuser__id": 1, "roleuser__role": 1, "roleuser__user": 1, "roles__id": 1, "roles__name": "admin", }, { "id": 1, "name": "Anonymous", "roleuser__id": 2, "roleuser__role": 2, "roleuser__user": 1, "roles__id": 2, "roles__name": "editor", }, ] @pytest.mark.asyncio async def test_nested_m2m_values(): async with database: user = ( await Role.objects.select_related("users__categories") 
.filter(name="admin") .values() ) assert user == [ { "id": 1, "name": "admin", "roleuser__id": 1, "roleuser__role": 1, "roleuser__user": 1, "users__id": 1, "users__name": "Anonymous", "users__categories__id": 1, "users__categories__name": "News", "users__categories__sort_order": 0, "users__categories__created_by": 1, } ] @pytest.mark.asyncio async def test_nested_m2m_values_without_through_explicit(): async with database: user = ( await Role.objects.select_related("users__categories") .filter(name="admin") .fields({"name": ..., "users": {"name": ..., "categories": {"name"}}}) .exclude_fields("roleuser") .values() ) assert user == [ { "name": "admin", "users__name": "Anonymous", "users__categories__name": "News", } ] @pytest.mark.asyncio async def test_nested_m2m_values_without_through_param(): async with database: user = ( await Role.objects.select_related("users__categories") .filter(name="admin") .fields({"name": ..., "users": {"name": ..., "categories": {"name"}}}) .values(exclude_through=True) ) assert user == [ { "name": "admin", "users__name": "Anonymous", "users__categories__name": "News", } ] @pytest.mark.asyncio async def test_nested_m2m_values_no_through_and_m2m_models_but_keep_end_model(): async with database: user = ( await Role.objects.select_related("users__categories") .filter(name="admin") .fields({"name": ..., "users": {"name": ..., "categories": {"name"}}}) .exclude_fields(["roleuser", "users"]) .values() ) assert user == [{"name": "admin", "users__categories__name": "News"}] @pytest.mark.asyncio async def test_nested_flatten_and_exception(): async with database: with pytest.raises(QueryDefinitionError): (await Role.objects.fields({"name", "id"}).values_list(flatten=True)) roles = await Role.objects.fields("name").values_list(flatten=True) assert roles == ["admin", "editor"] @pytest.mark.asyncio async def test_empty_result(): async with database: roles = await Role.objects.filter(Role.name == "test").values_list() roles2 = await 
Role.objects.filter(Role.name == "test").values() assert roles == roles2 == [] @pytest.mark.asyncio async def test_queryset_values_multiple_select_related(): async with database: posts = ( await Category.objects.select_related(["created_by__roles", "posts"]) .filter(Category.created_by.roles.name == "editor") .values( ["name", "posts__name", "created_by__name", "created_by__roles__name"], exclude_through=True, ) ) assert posts == [ { "name": "News", "created_by__name": "Anonymous", "created_by__roles__name": "editor", "posts__name": "Ormar strikes again!", }, { "name": "News", "created_by__name": "Anonymous", "created_by__roles__name": "editor", "posts__name": "Why don't you use ormar yet?", }, { "name": "News", "created_by__name": "Anonymous", "created_by__roles__name": "editor", "posts__name": "Check this out, ormar now for free", }, ] @pytest.mark.asyncio async def test_querysetproxy_values(): async with database: role = ( await Role.objects.select_related("users__categories") .filter(name="admin") .get() ) user = await role.users.values() assert user == [ { "id": 1, "name": "Anonymous", "roles__id": 1, "roles__name": "admin", "roleuser__id": 1, "roleuser__role": 1, "roleuser__user": 1, } ] user = ( await role.users.filter(name="Anonymous") .select_related("categories") .fields({"name": ..., "categories": {"name"}}) .values(exclude_through=True) ) assert user == [ { "name": "Anonymous", "roles__id": 1, "roles__name": "admin", "categories__name": "News", } ] user = ( await role.users.filter(name="Anonymous") .select_related("categories") .fields({"name": ..., "categories": {"name"}}) .exclude_fields("roles") .values(exclude_through=True) ) assert user == [{"name": "Anonymous", "categories__name": "News"}] @pytest.mark.asyncio async def test_querysetproxy_values_list(): async with database: role = ( await Role.objects.select_related("users__categories") .filter(name="admin") .get() ) user = await role.users.values_list() assert user == [(1, "Anonymous", 1, 1, 1, 
1, "admin")] user = ( await role.users.filter(name="Anonymous") .select_related("categories") .fields({"name": ..., "categories": {"name"}}) .values_list(exclude_through=True) ) assert user == [("Anonymous", "News", 1, "admin")] user = ( await role.users.filter(name="Anonymous") .select_related("categories") .fields({"name": ..., "categories": {"name"}}) .exclude_fields("roles") .values_list(exclude_through=True) ) assert user == [("Anonymous", "News")] user = ( await role.users.filter(name="Anonymous") .select_related("categories") .fields({"name"}) .exclude_fields("roles") .values_list(exclude_through=True, flatten=True) ) assert user == ["Anonymous"] ormar-0.12.2/tests/test_relations/000077500000000000000000000000001444363446500171325ustar00rootroot00000000000000ormar-0.12.2/tests/test_relations/__init__.py000066400000000000000000000000001444363446500212310ustar00rootroot00000000000000ormar-0.12.2/tests/test_relations/test_cascades.py000066400000000000000000000101351444363446500223110ustar00rootroot00000000000000from typing import Optional import databases import pytest import pytest_asyncio import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class Band(ormar.Model): class Meta: tablename = "bands" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class ArtistsBands(ormar.Model): class Meta: tablename = "artists_x_bands" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) class Artist(ormar.Model): class Meta: tablename = "artists" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) bands = ormar.ManyToMany(Band, through=ArtistsBands) class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = 
ormar.String(max_length=100) artist: Optional[Artist] = ormar.ForeignKey(Artist, ondelete="CASCADE") class Track(ormar.Model): class Meta: tablename = "tracks" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) album: Optional[Album] = ormar.ForeignKey(Album, ondelete="CASCADE") title: str = ormar.String(max_length=100) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest_asyncio.fixture(scope="function") async def cleanup(): yield async with database: await Band.objects.delete(each=True) await Artist.objects.delete(each=True) @pytest.mark.asyncio async def test_simple_cascade(cleanup): async with database: artist = await Artist(name="Dr Alban").save() await Album(name="Jamaica", artist=artist).save() await Artist.objects.delete(id=artist.id) artists = await Artist.objects.all() assert len(artists) == 0 albums = await Album.objects.all() assert len(albums) == 0 @pytest.mark.asyncio async def test_nested_cascade(cleanup): async with database: artist = await Artist(name="Dr Alban").save() album = await Album(name="Jamaica", artist=artist).save() await Track(title="Yuhu", album=album).save() await Artist.objects.delete(id=artist.id) artists = await Artist.objects.all() assert len(artists) == 0 albums = await Album.objects.all() assert len(albums) == 0 tracks = await Track.objects.all() assert len(tracks) == 0 @pytest.mark.asyncio async def test_many_to_many_cascade(cleanup): async with database: artist = await Artist(name="Dr Alban").save() band = await Band(name="Scorpions").save() await artist.bands.add(band) check = await Artist.objects.select_related("bands").get() assert check.bands[0].name == "Scorpions" await Artist.objects.delete(id=artist.id) artists = await Artist.objects.all() assert len(artists) == 0 bands = await Band.objects.all() assert len(bands) == 1 
connections = await ArtistsBands.objects.all() assert len(connections) == 0 @pytest.mark.asyncio async def test_reverse_many_to_many_cascade(cleanup): async with database: artist = await Artist(name="Dr Alban").save() band = await Band(name="Scorpions").save() await artist.bands.add(band) check = await Artist.objects.select_related("bands").get() assert check.bands[0].name == "Scorpions" await Band.objects.delete(id=band.id) artists = await Artist.objects.all() assert len(artists) == 1 connections = await ArtistsBands.objects.all() assert len(connections) == 0 bands = await Band.objects.all() assert len(bands) == 0 ormar-0.12.2/tests/test_relations/test_customizing_through_model_relation_names.py000066400000000000000000000050221444363446500311150ustar00rootroot00000000000000import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL metadata = sqlalchemy.MetaData() database = databases.Database(DATABASE_URL, force_rollback=True) class Course(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) course_name: str = ormar.String(max_length=100) class Student(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) courses = ormar.ManyToMany( Course, through_relation_name="student_id", through_reverse_relation_name="course_id", ) # create db and tables @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) def test_tables_columns(): through_meta = Student.Meta.model_fields["courses"].through.Meta assert "course_id" in through_meta.table.c assert "student_id" in through_meta.table.c assert "course_id" in through_meta.model_fields assert "student_id" in through_meta.model_fields @pytest.mark.asyncio async def test_working_with_changed_through_names(): async with 
database: async with database.transaction(force_rollback=True): to_save = { "course_name": "basic1", "students": [{"name": "Jack"}, {"name": "Abi"}], } await Course(**to_save).save_related(follow=True, save_all=True) course_check = await Course.objects.select_related("students").get() assert course_check.course_name == "basic1" assert course_check.students[0].name == "Jack" assert course_check.students[1].name == "Abi" students = await course_check.students.all() assert len(students) == 2 student = await course_check.students.get(name="Jack") assert student.name == "Jack" students = await Student.objects.select_related("courses").all( courses__course_name="basic1" ) assert len(students) == 2 course_check = ( await Course.objects.select_related("students") .order_by("students__name") .get() ) assert course_check.students[0].name == "Abi" assert course_check.students[1].name == "Jack" ormar-0.12.2/tests/test_relations/test_database_fk_creation.py000066400000000000000000000061611444363446500246570ustar00rootroot00000000000000from typing import Optional import databases import pytest import sqlalchemy import ormar from ormar.fields.foreign_key import validate_referential_action from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() engine = sqlalchemy.create_engine(DATABASE_URL) class Artist(ormar.Model): class Meta: tablename = "artists" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) artist: Optional[Artist] = ormar.ForeignKey(Artist, ondelete="CASCADE") class A(ormar.Model): class Meta: metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=64, nullalbe=False) class B(ormar.Model): class 
Meta: metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=64, nullalbe=False) a: A = ormar.ForeignKey(to=A, ondelete=ormar.ReferentialAction.CASCADE) class C(ormar.Model): class Meta: metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=64, nullalbe=False) b: B = ormar.ForeignKey(to=B, ondelete=ormar.ReferentialAction.CASCADE) @pytest.fixture(autouse=True, scope="module") def create_test_database(): metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) def test_simple_cascade(): inspector = sqlalchemy.inspect(engine) columns = inspector.get_columns("albums") assert len(columns) == 3 col_names = [col.get("name") for col in columns] assert sorted(["id", "name", "artist"]) == sorted(col_names) fks = inspector.get_foreign_keys("albums") assert len(fks) == 1 assert fks[0]["name"] == "fk_albums_artists_id_artist" assert fks[0]["constrained_columns"][0] == "artist" assert fks[0]["referred_columns"][0] == "id" assert fks[0]["options"].get("ondelete") == "CASCADE" def test_validations_referential_action(): CASCADE = ormar.ReferentialAction.CASCADE.value assert validate_referential_action(None) == None assert validate_referential_action("cascade") == CASCADE assert validate_referential_action(ormar.ReferentialAction.CASCADE) == CASCADE with pytest.raises(ormar.ModelDefinitionError): validate_referential_action("NOT VALID") @pytest.mark.asyncio async def test_cascade_clear(): async with database: async with database.transaction(force_rollback=True): a = await A.objects.create(name="a") b = await B.objects.create(name="b", a=a) c = await C.objects.create(name="c", b=b) await a.bs.clear(keep_reversed=False) assert await B.objects.count() == 0 assert await C.objects.count() == 0 ormar-0.12.2/tests/test_relations/test_foreign_keys.py000066400000000000000000000372761444363446500232460ustar00rootroot00000000000000from 
typing import Optional import databases import pytest import sqlalchemy import ormar from ormar.exceptions import NoMatch, MultipleMatches, RelationshipInstanceError from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) is_best_seller: bool = ormar.Boolean(default=False) class Track(ormar.Model): class Meta: tablename = "tracks" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) album: Optional[Album] = ormar.ForeignKey(Album) title: str = ormar.String(max_length=100) position: int = ormar.Integer() play_count: int = ormar.Integer(nullable=True, default=0) is_disabled: bool = ormar.Boolean(default=False) class Cover(ormar.Model): class Meta: tablename = "covers" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) album: Optional[Album] = ormar.ForeignKey(Album, related_name="cover_pictures") title: str = ormar.String(max_length=100) class Organisation(ormar.Model): class Meta: tablename = "org" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) ident: str = ormar.String(max_length=100, choices=["ACME Ltd", "Other ltd"]) class Team(ormar.Model): class Meta: tablename = "teams" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) org: Optional[Organisation] = ormar.ForeignKey(Organisation) name: str = ormar.String(max_length=100) class Member(ormar.Model): class Meta: tablename = "members" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) team: Optional[Team] = ormar.ForeignKey(Team) email: str = ormar.String(max_length=100) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) 
metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_wrong_query_foreign_key_type(): async with database: with pytest.raises(RelationshipInstanceError): Track(title="The Error", album="wrong_pk_type") @pytest.mark.asyncio async def test_setting_explicitly_empty_relation(): async with database: track = Track(album=None, title="The Bird", position=1) assert track.album is None @pytest.mark.asyncio async def test_related_name(): async with database: async with database.transaction(force_rollback=True): album = await Album.objects.create(name="Vanilla") await Cover.objects.create(album=album, title="The cover file") assert len(album.cover_pictures) == 1 @pytest.mark.asyncio async def test_model_crud(): async with database: async with database.transaction(force_rollback=True): album = Album(name="Jamaica") await album.save() track1 = Track(album=album, title="The Bird", position=1) track2 = Track(album=album, title="Heart don't stand a chance", position=2) track3 = Track(album=album, title="The Waters", position=3) await track1.save() await track2.save() await track3.save() track = await Track.objects.get(title="The Bird") assert track.album.pk == album.pk assert isinstance(track.album, ormar.Model) assert track.album.name is None await track.album.load() assert track.album.name == "Jamaica" assert len(album.tracks) == 3 assert album.tracks[1].title == "Heart don't stand a chance" album1 = await Album.objects.get(name="Jamaica") assert album1.pk == album.pk assert album1.tracks == [] await Track.objects.create( album={"id": track.album.pk}, title="The Bird2", position=4 ) @pytest.mark.asyncio async def test_select_related(): async with database: async with database.transaction(force_rollback=True): album = Album(name="Malibu") await album.save() track1 = Track(album=album, title="The Bird", position=1) track2 = Track(album=album, title="Heart don't stand a chance", position=2) track3 = 
Track(album=album, title="The Waters", position=3) await track1.save() await track2.save() await track3.save() fantasies = Album(name="Fantasies") await fantasies.save() track4 = Track(album=fantasies, title="Help I'm Alive", position=1) track5 = Track(album=fantasies, title="Sick Muse", position=2) track6 = Track(album=fantasies, title="Satellite Mind", position=3) await track4.save() await track5.save() await track6.save() track = await Track.objects.select_related("album").get(title="The Bird") assert track.album.name == "Malibu" tracks = await Track.objects.select_related("album").all() assert len(tracks) == 6 @pytest.mark.asyncio async def test_model_removal_from_relations(): async with database: async with database.transaction(force_rollback=True): album = Album(name="Chichi") await album.save() track1 = Track(album=album, title="The Birdman", position=1) track2 = Track(album=album, title="Superman", position=2) track3 = Track(album=album, title="Wonder Woman", position=3) await track1.save() await track2.save() await track3.save() assert len(album.tracks) == 3 await album.tracks.remove(track1) assert len(album.tracks) == 2 assert track1.album is None await track1.update() track1 = await Track.objects.get(title="The Birdman") assert track1.album is None await album.tracks.add(track1) assert len(album.tracks) == 3 assert track1.album == album await track1.update() track1 = await Track.objects.select_related("album__tracks").get( title="The Birdman" ) album = await Album.objects.select_related("tracks").get(name="Chichi") assert track1.album == album track1.remove(album, name="album") assert track1.album is None assert len(album.tracks) == 2 track2.remove(album, name="album") assert track2.album is None assert len(album.tracks) == 1 @pytest.mark.asyncio async def test_fk_filter(): async with database: async with database.transaction(force_rollback=True): malibu = Album(name="Malibu%") await malibu.save() await Track.objects.create(album=malibu, title="The 
Bird", position=1) await Track.objects.create( album=malibu, title="Heart don't stand a chance", position=2 ) await Track.objects.create(album=malibu, title="The Waters", position=3) fantasies = await Album.objects.create(name="Fantasies") await Track.objects.create( album=fantasies, title="Help I'm Alive", position=1 ) await Track.objects.create(album=fantasies, title="Sick Muse", position=2) await Track.objects.create( album=fantasies, title="Satellite Mind", position=3 ) tracks = ( await Track.objects.select_related("album") .filter(album__name="Fantasies") .all() ) assert len(tracks) == 3 for track in tracks: assert track.album.name == "Fantasies" tracks = ( await Track.objects.select_related("album") .filter(album__name__icontains="fan") .all() ) assert len(tracks) == 3 for track in tracks: assert track.album.name == "Fantasies" tracks = await Track.objects.filter(album__name__contains="Fan").all() assert len(tracks) == 3 for track in tracks: assert track.album.name == "Fantasies" tracks = await Track.objects.filter(album__name__contains="Malibu%").all() assert len(tracks) == 3 tracks = ( await Track.objects.filter(album=malibu).select_related("album").all() ) assert len(tracks) == 3 for track in tracks: assert track.album.name == "Malibu%" tracks = await Track.objects.select_related("album").all(album=malibu) assert len(tracks) == 3 for track in tracks: assert track.album.name == "Malibu%" @pytest.mark.asyncio async def test_multiple_fk(): async with database: async with database.transaction(force_rollback=True): acme = await Organisation.objects.create(ident="ACME Ltd") red_team = await Team.objects.create(org=acme, name="Red Team") blue_team = await Team.objects.create(org=acme, name="Blue Team") await Member.objects.create(team=red_team, email="a@example.org") await Member.objects.create(team=red_team, email="b@example.org") await Member.objects.create(team=blue_team, email="c@example.org") await Member.objects.create(team=blue_team, email="d@example.org") 
other = await Organisation.objects.create(ident="Other ltd") team = await Team.objects.create(org=other, name="Green Team") await Member.objects.create(team=team, email="e@example.org") members = ( await Member.objects.select_related("team__org") .filter(team__org__ident="ACME Ltd") .all() ) assert len(members) == 4 for member in members: assert member.team.org.ident == "ACME Ltd" @pytest.mark.asyncio async def test_wrong_choices(): async with database: async with database.transaction(force_rollback=True): with pytest.raises(ValueError): await Organisation.objects.create(ident="Test 1") @pytest.mark.asyncio async def test_pk_filter(): async with database: async with database.transaction(force_rollback=True): fantasies = await Album.objects.create(name="Test") track = await Track.objects.create( album=fantasies, title="Test1", position=1 ) await Track.objects.create(album=fantasies, title="Test2", position=2) await Track.objects.create(album=fantasies, title="Test3", position=3) tracks = ( await Track.objects.select_related("album").filter(pk=track.pk).all() ) assert len(tracks) == 1 tracks = ( await Track.objects.select_related("album") .filter(position=2, album__name="Test") .all() ) assert len(tracks) == 1 @pytest.mark.asyncio async def test_limit_and_offset(): async with database: async with database.transaction(force_rollback=True): fantasies = await Album.objects.create(name="Limitless") await Track.objects.create( id=None, album=fantasies, title="Sample", position=1 ) await Track.objects.create(album=fantasies, title="Sample2", position=2) await Track.objects.create(album=fantasies, title="Sample3", position=3) tracks = await Track.objects.limit(1).all() assert len(tracks) == 1 assert tracks[0].title == "Sample" tracks = await Track.objects.limit(1).offset(1).all() assert len(tracks) == 1 assert tracks[0].title == "Sample2" album = await Album.objects.select_related("tracks").limit(1).get() assert len(album.tracks) == 3 assert album.tracks[0].title == 
"Sample" album = ( await Album.objects.select_related("tracks") .limit(1, limit_raw_sql=True) .get() ) assert len(album.tracks) == 1 assert album.tracks[0].title == "Sample" @pytest.mark.asyncio async def test_get_exceptions(): async with database: async with database.transaction(force_rollback=True): fantasies = await Album.objects.create(name="Test") with pytest.raises(NoMatch): await Album.objects.get(name="Test2") await Track.objects.create(album=fantasies, title="Test1", position=1) await Track.objects.create(album=fantasies, title="Test2", position=2) await Track.objects.create(album=fantasies, title="Test3", position=3) with pytest.raises(MultipleMatches): await Track.objects.select_related("album").get(album=fantasies) @pytest.mark.asyncio async def test_wrong_model_passed_as_fk(): async with database: async with database.transaction(force_rollback=True): with pytest.raises(RelationshipInstanceError): org = await Organisation.objects.create(ident="ACME Ltd") await Track.objects.create(album=org, title="Test1", position=1) @pytest.mark.asyncio async def test_bulk_update_model_with_no_children(): async with database: async with database.transaction(force_rollback=True): album = await Album.objects.create(name="Test") album.name = "Test2" await Album.objects.bulk_update([album], columns=["name"]) updated_album = await Album.objects.get(id=album.id) assert updated_album.name == "Test2" @pytest.mark.asyncio async def test_bulk_update_model_with_children(): async with database: async with database.transaction(force_rollback=True): best_seller = await Album.objects.create(name="to_be_best_seller") best_seller2 = await Album.objects.create(name="to_be_best_seller2") not_best_seller = await Album.objects.create(name="unpopular") await Track.objects.create( album=best_seller, title="t1", position=1, play_count=100 ) await Track.objects.create( album=best_seller2, title="t2", position=1, play_count=100 ) await Track.objects.create( album=not_best_seller, title="t3", 
position=1, play_count=3 ) await Track.objects.create( album=best_seller, title="t4", position=1, play_count=500 ) tracks = ( await Track.objects.select_related("album") .filter(play_count__gt=10) .all() ) best_seller_albums = {} for track in tracks: album = track.album if album.id in best_seller_albums: continue album.is_best_seller = True best_seller_albums[album.id] = album await Album.objects.bulk_update( best_seller_albums.values(), columns=["is_best_seller"] ) best_seller_albums_db = await Album.objects.filter( is_best_seller=True ).all() assert len(best_seller_albums_db) == 2 ormar-0.12.2/tests/test_relations/test_m2m_through_fields.py000066400000000000000000000305561444363446500243350ustar00rootroot00000000000000from typing import Any, Sequence, cast import databases import pytest import sqlalchemy from pydantic.typing import ForwardRef import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): database = database metadata = metadata class Category(ormar.Model): class Meta(BaseMeta): tablename = "categories" id = ormar.Integer(primary_key=True) name = ormar.String(max_length=40) class PostCategory(ormar.Model): class Meta(BaseMeta): tablename = "posts_x_categories" id: int = ormar.Integer(primary_key=True) sort_order: int = ormar.Integer(nullable=True) param_name: str = ormar.String(default="Name", max_length=200) class Blog(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) class Post(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) categories = ormar.ManyToMany(Category, through=PostCategory) blog = ormar.ForeignKey(Blog) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) 
metadata.create_all(engine) yield metadata.drop_all(engine) class PostCategory2(ormar.Model): class Meta(BaseMeta): tablename = "posts_x_categories2" id: int = ormar.Integer(primary_key=True) sort_order: int = ormar.Integer(nullable=True) class Post2(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) categories = ormar.ManyToMany(Category, through=ForwardRef("PostCategory2")) @pytest.mark.asyncio async def test_forward_ref_is_updated(): async with database: assert Post2.Meta.requires_ref_update Post2.update_forward_refs() assert Post2.Meta.model_fields["postcategory2"].to == PostCategory2 @pytest.mark.asyncio async def test_setting_fields_on_through_model(): async with database: post = await Post(title="Test post").save() category = await Category(name="Test category").save() await post.categories.add(category) assert hasattr(post.categories[0], "postcategory") assert post.categories[0].postcategory is None @pytest.mark.asyncio async def test_setting_additional_fields_on_through_model_in_add(): async with database: post = await Post(title="Test post").save() category = await Category(name="Test category").save() await post.categories.add(category, sort_order=1) postcat = await PostCategory.objects.get() assert postcat.sort_order == 1 @pytest.mark.asyncio async def test_setting_additional_fields_on_through_model_in_create(): async with database: post = await Post(title="Test post").save() await post.categories.create( name="Test category2", postcategory={"sort_order": 2} ) postcat = await PostCategory.objects.get() assert postcat.sort_order == 2 @pytest.mark.asyncio async def test_getting_additional_fields_from_queryset() -> Any: async with database: post = await Post(title="Test post").save() await post.categories.create( name="Test category1", postcategory={"sort_order": 1} ) await post.categories.create( name="Test category2", postcategory={"sort_order": 2} ) await post.categories.all() 
assert post.postcategory is None assert post.categories[0].postcategory.sort_order == 1 assert post.categories[1].postcategory.sort_order == 2 post2 = await Post.objects.select_related("categories").get( categories__name="Test category2" ) assert post2.categories[0].postcategory.sort_order == 2 @pytest.mark.asyncio async def test_only_one_side_has_through() -> Any: async with database: post = await Post(title="Test post").save() await post.categories.create( name="Test category1", postcategory={"sort_order": 1} ) await post.categories.create( name="Test category2", postcategory={"sort_order": 2} ) post2 = await Post.objects.select_related("categories").get() assert post2.postcategory is None assert post2.categories[0].postcategory is not None await post2.categories.all() assert post2.postcategory is None assert post2.categories[0].postcategory is not None categories = await Category.objects.select_related("posts").all() assert isinstance(categories[0], Category) assert categories[0].postcategory is None assert categories[0].posts[0].postcategory is not None @pytest.mark.asyncio async def test_filtering_by_through_model() -> Any: async with database: post = await Post(title="Test post").save() await post.categories.create( name="Test category1", postcategory={"sort_order": 1, "param_name": "volume"}, ) await post.categories.create( name="Test category2", postcategory={"sort_order": 2, "param_name": "area"} ) post2 = ( await Post.objects.select_related("categories") .filter(postcategory__sort_order__gt=1) .get() ) assert len(post2.categories) == 1 assert post2.categories[0].postcategory.sort_order == 2 post3 = await Post.objects.filter( categories__postcategory__param_name="volume" ).get() assert len(post3.categories) == 1 assert post3.categories[0].postcategory.param_name == "volume" @pytest.mark.asyncio async def test_deep_filtering_by_through_model() -> Any: async with database: blog = await Blog(title="My Blog").save() post = await Post(title="Test post", 
blog=blog).save() await post.categories.create( name="Test category1", postcategory={"sort_order": 1, "param_name": "volume"}, ) await post.categories.create( name="Test category2", postcategory={"sort_order": 2, "param_name": "area"} ) blog2 = ( await Blog.objects.select_related("posts__categories") .filter(posts__postcategory__sort_order__gt=1) .get() ) assert len(blog2.posts) == 1 assert len(blog2.posts[0].categories) == 1 assert blog2.posts[0].categories[0].postcategory.sort_order == 2 blog3 = await Blog.objects.filter( posts__categories__postcategory__param_name="volume" ).get() assert len(blog3.posts) == 1 assert len(blog3.posts[0].categories) == 1 assert blog3.posts[0].categories[0].postcategory.param_name == "volume" @pytest.mark.asyncio async def test_ordering_by_through_model() -> Any: async with database: post = await Post(title="Test post").save() await post.categories.create( name="Test category1", postcategory={"sort_order": 2, "param_name": "volume"}, ) await post.categories.create( name="Test category2", postcategory={"sort_order": 1, "param_name": "area"} ) await post.categories.create( name="Test category3", postcategory={"sort_order": 3, "param_name": "velocity"}, ) post2 = ( await Post.objects.select_related("categories") .order_by("-postcategory__sort_order") .get() ) assert len(post2.categories) == 3 assert post2.categories[0].name == "Test category3" assert post2.categories[2].name == "Test category2" post3 = ( await Post.objects.select_related("categories") .order_by("categories__postcategory__param_name") .get() ) assert len(post3.categories) == 3 assert post3.categories[0].postcategory.param_name == "area" assert post3.categories[2].postcategory.param_name == "volume" @pytest.mark.asyncio async def test_update_through_models_from_queryset_on_through() -> Any: async with database: post = await Post(title="Test post").save() await post.categories.create( name="Test category1", postcategory={"sort_order": 2, "param_name": "volume"}, ) await 
post.categories.create( name="Test category2", postcategory={"sort_order": 1, "param_name": "area"} ) await post.categories.create( name="Test category3", postcategory={"sort_order": 3, "param_name": "velocity"}, ) await PostCategory.objects.filter(param_name="volume", post=post.id).update( sort_order=4 ) post2 = ( await Post.objects.select_related("categories") .order_by("-postcategory__sort_order") .get() ) assert len(post2.categories) == 3 assert post2.categories[0].postcategory.param_name == "volume" assert post2.categories[2].postcategory.param_name == "area" @pytest.mark.asyncio async def test_update_through_model_after_load() -> Any: async with database: post = await Post(title="Test post").save() await post.categories.create( name="Test category1", postcategory={"sort_order": 2, "param_name": "volume"}, ) post2 = await Post.objects.select_related("categories").get() assert len(post2.categories) == 1 await post2.categories[0].postcategory.load() await post2.categories[0].postcategory.update(sort_order=3) post3 = await Post.objects.select_related("categories").get() assert len(post3.categories) == 1 assert post3.categories[0].postcategory.sort_order == 3 @pytest.mark.asyncio async def test_update_through_from_related() -> Any: async with database: post = await Post(title="Test post").save() await post.categories.create( name="Test category1", postcategory={"sort_order": 2, "param_name": "volume"}, ) await post.categories.create( name="Test category2", postcategory={"sort_order": 1, "param_name": "area"} ) await post.categories.create( name="Test category3", postcategory={"sort_order": 3, "param_name": "velocity"}, ) await post.categories.filter(name="Test category3").update( postcategory={"sort_order": 4} ) post2 = ( await Post.objects.select_related("categories") .order_by("postcategory__sort_order") .get() ) assert len(post2.categories) == 3 assert post2.categories[2].postcategory.sort_order == 4 @pytest.mark.asyncio async def 
test_excluding_fields_on_through_model() -> Any: async with database: post = await Post(title="Test post").save() await post.categories.create( name="Test category1", postcategory={"sort_order": 2, "param_name": "volume"}, ) await post.categories.create( name="Test category2", postcategory={"sort_order": 1, "param_name": "area"} ) await post.categories.create( name="Test category3", postcategory={"sort_order": 3, "param_name": "velocity"}, ) post2 = ( await Post.objects.select_related("categories") .exclude_fields("postcategory__param_name") .order_by("postcategory__sort_order") .get() ) assert len(post2.categories) == 3 assert post2.categories[0].postcategory.param_name is None assert post2.categories[0].postcategory.sort_order == 1 assert post2.categories[2].postcategory.param_name is None assert post2.categories[2].postcategory.sort_order == 3 post3 = ( await Post.objects.select_related("categories") .fields({"postcategory": ..., "title": ...}) .exclude_fields({"postcategory": {"param_name", "sort_order"}}) .get() ) assert len(post3.categories) == 3 for category in post3.categories: assert category.postcategory.param_name is None assert category.postcategory.sort_order is None ormar-0.12.2/tests/test_relations/test_many_to_many.py000066400000000000000000000202001444363446500232270ustar00rootroot00000000000000import asyncio from typing import List, Optional import databases import pytest import pytest_asyncio import sqlalchemy import ormar from ormar.exceptions import ModelPersistenceError, NoMatch, RelationshipInstanceError from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Author(ormar.Model): class Meta: tablename = "authors" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) first_name: str = ormar.String(max_length=80) last_name: str = ormar.String(max_length=80) class Category(ormar.Model): class Meta: tablename = "categories" database 
= database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=40) class Post(ormar.Model): class Meta: tablename = "posts" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) categories: Optional[List[Category]] = ormar.ManyToMany(Category) author: Optional[Author] = ormar.ForeignKey(Author) @pytest.fixture(scope="module") def event_loop(): loop = asyncio.get_event_loop() yield loop loop.close() @pytest_asyncio.fixture(autouse=True, scope="module") async def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest_asyncio.fixture(scope="function") async def cleanup(): yield async with database: PostCategory = Post.Meta.model_fields["categories"].through await PostCategory.objects.delete(each=True) await Post.objects.delete(each=True) await Category.objects.delete(each=True) await Author.objects.delete(each=True) @pytest.mark.asyncio async def test_not_saved_raises_error(cleanup): async with database: guido = await Author(first_name="Guido", last_name="Van Rossum").save() post = await Post.objects.create(title="Hello, M2M", author=guido) news = Category(name="News") with pytest.raises(ModelPersistenceError): await post.categories.add(news) @pytest.mark.asyncio async def test_not_existing_raises_error(cleanup): async with database: guido = await Author(first_name="Guido", last_name="Van Rossum").save() post = await Post.objects.create(title="Hello, M2M", author=guido) with pytest.raises(NoMatch): await post.categories.get() assert await post.categories.get_or_none() is None @pytest.mark.asyncio async def test_assigning_related_objects(cleanup): async with database: guido = await Author.objects.create(first_name="Guido", last_name="Van Rossum") post = await Post.objects.create(title="Hello, M2M", author=guido) news = await Category.objects.create(name="News") # Add a 
category to a post. await post.categories.add(news) # or from the other end: await news.posts.add(post) assert await post.categories.get_or_none(name="no exist") is None assert await post.categories.get_or_none(name="News") == news # Creating columns object from instance: await post.categories.create(name="Tips") assert len(post.categories) == 2 post_categories = await post.categories.all() assert len(post_categories) == 2 @pytest.mark.asyncio async def test_quering_of_the_m2m_models(cleanup): async with database: # orm can do this already. guido = await Author.objects.create(first_name="Guido", last_name="Van Rossum") post = await Post.objects.create(title="Hello, M2M", author=guido) news = await Category.objects.create(name="News") # tl;dr: `post.categories` exposes the QuerySet API. await post.categories.add(news) post_categories = await post.categories.all() assert len(post_categories) == 1 assert news == await post.categories.get(name="News") num_posts = await news.posts.count() assert num_posts == 1 posts_about_m2m = await news.posts.filter(title__contains="M2M").all() assert len(posts_about_m2m) == 1 assert posts_about_m2m[0] == post posts_about_python = await Post.objects.filter(categories__name="python").all() assert len(posts_about_python) == 0 # Traversal of relationships: which categories has Guido contributed to? 
category = await Category.objects.filter(posts__author=guido).get() assert category == news # or: category2 = await Category.objects.filter( posts__author__first_name="Guido" ).get() assert category2 == news @pytest.mark.asyncio async def test_removal_of_the_relations(cleanup): async with database: guido = await Author.objects.create(first_name="Guido", last_name="Van Rossum") post = await Post.objects.create(title="Hello, M2M", author=guido) news = await Category.objects.create(name="News") await post.categories.add(news) assert len(await post.categories.all()) == 1 await post.categories.remove(news) assert len(await post.categories.all()) == 0 # or: await news.posts.add(post) assert len(await news.posts.all()) == 1 await news.posts.remove(post) assert len(await news.posts.all()) == 0 # Remove all columns objects: await post.categories.add(news) await post.categories.clear() assert len(await post.categories.all()) == 0 # post would also lose 'news' category when running: await post.categories.add(news) await news.delete() assert len(await post.categories.all()) == 0 @pytest.mark.asyncio async def test_selecting_related(cleanup): async with database: guido = await Author.objects.create(first_name="Guido", last_name="Van Rossum") post = await Post.objects.create(title="Hello, M2M", author=guido) news = await Category.objects.create(name="News") recent = await Category.objects.create(name="Recent") await post.categories.add(news) await post.categories.add(recent) assert len(await post.categories.all()) == 2 # Loads categories and posts (2 queries) and perform the join in Python. categories = await Category.objects.select_related("posts").all() # No extra queries needed => no more `await`s required. 
for category in categories: assert category.posts[0] == post news_posts = await news.posts.select_related("author").all() assert news_posts[0].author == guido assert (await post.categories.limit(1).all())[0] == news assert (await post.categories.offset(1).limit(1).all())[0] == recent assert await post.categories.first() == news assert await post.categories.exists() @pytest.mark.asyncio async def test_selecting_related_fail_without_saving(cleanup): async with database: guido = await Author.objects.create(first_name="Guido", last_name="Van Rossum") post = Post(title="Hello, M2M", author=guido) with pytest.raises(RelationshipInstanceError): await post.categories.all() @pytest.mark.asyncio async def test_adding_unsaved_related(cleanup): async with database: guido = await Author.objects.create(first_name="Guido", last_name="Van Rossum") post = await Post.objects.create(title="Hello, M2M", author=guido) news = Category(name="News") with pytest.raises(ModelPersistenceError): await post.categories.add(news) await news.save() await post.categories.add(news) assert len(await post.categories.all()) == 1 @pytest.mark.asyncio async def test_removing_unsaved_related(cleanup): async with database: guido = await Author.objects.create(first_name="Guido", last_name="Van Rossum") post = await Post.objects.create(title="Hello, M2M", author=guido) news = Category(name="News") with pytest.raises(NoMatch): await post.categories.remove(news) ormar-0.12.2/tests/test_relations/test_postgress_select_related_with_limit.py000066400000000000000000000064501444363446500300710ustar00rootroot00000000000000# Models import uuid from datetime import date from enum import Enum from typing import Optional from pydantic import EmailStr import databases import sqlalchemy from sqlalchemy import create_engine import ormar import pytest from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class PrimaryKeyMixin: id: uuid.UUID 
= ormar.UUID(primary_key=True, default=uuid.uuid4) class Level(Enum): ADMIN = "0" STAFF = "1" class MainMeta(ormar.ModelMeta): database = database metadata = metadata class User(PrimaryKeyMixin, ormar.Model): """User Model Class to Implement Method for Operations of User Entity""" mobile: str = ormar.String(unique=True, index=True, max_length=10) password: str = ormar.String(max_length=128) level: str = ormar.String( max_length=1, choices=list(Level), default=Level.STAFF.value ) email: Optional[str] = ormar.String(max_length=255, nullable=True, default=None) avatar: Optional[str] = ormar.String(max_length=255, nullable=True, default=None) fullname: Optional[str] = ormar.String(max_length=64, nullable=True, default=None) is_active: bool = ormar.Boolean(index=True, nullable=False, default=True) class Meta(MainMeta): orders_by = ["-is_active", "-level"] class Task(PrimaryKeyMixin, ormar.Model): """Task Model Class to Implement Method for Operations of Task Entity""" name: str = ormar.String(max_length=64, nullalbe=False) description: Optional[str] = ormar.Text(nullable=True, default=None) start_date: Optional[date] = ormar.Date(nullable=True, default=None) end_date: Optional[date] = ormar.Date(nullable=True, default=None) is_halted: bool = ormar.Boolean(index=True, nullable=False, default=True) user: User = ormar.ForeignKey(to=User) class Meta(MainMeta): orders_by = ["-end_date", "-start_date"] constraints = [ ormar.UniqueColumns("user", "name"), ] @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_selecting_related_with_limit(): async with database: user1 = await User(mobile="9928917653", password="pass1").save() user2 = await User(mobile="9928917654", password="pass2").save() await Task(name="one", user=user1).save() await Task(name="two", user=user1).save() await Task(name="three", user=user2).save() await 
Task(name="four", user=user2).save() users = ( await User.objects.limit(2, limit_raw_sql=True) .select_related(User.tasks) .all() ) users2 = ( await User.objects.select_related(User.tasks) .limit(2, limit_raw_sql=True) .all() ) assert users == users2 assert len(users) == 1 assert len(users[0].tasks) == 2 users3 = await User.objects.limit(2).select_related(User.tasks).all() users4 = await User.objects.select_related(User.tasks).limit(2).all() assert users3 == users4 assert len(users3) == 2 assert len(users3[0].tasks) == 2 assert len(users3[1].tasks) == 2 ormar-0.12.2/tests/test_relations/test_prefetch_related.py000066400000000000000000000350261444363446500240510ustar00rootroot00000000000000from typing import List, Optional import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class RandomSet(ormar.Model): class Meta: tablename = "randoms" metadata = metadata database = database id: int = ormar.Integer(name="random_id", primary_key=True) name: str = ormar.String(max_length=100) class Tonation(ormar.Model): class Meta: tablename = "tonations" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(name="tonation_name", max_length=100) rand_set: Optional[RandomSet] = ormar.ForeignKey(RandomSet) class Division(ormar.Model): class Meta: tablename = "divisions" metadata = metadata database = database id: int = ormar.Integer(name="division_id", primary_key=True) name: str = ormar.String(max_length=100, nullable=True) class Shop(ormar.Model): class Meta: tablename = "shops" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=True) division: Optional[Division] = ormar.ForeignKey(Division) class AlbumShops(ormar.Model): class Meta: tablename = "albums_x_shops" metadata = metadata database = database 
class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100, nullable=True) shops: List[Shop] = ormar.ManyToMany(to=Shop, through=AlbumShops) class Track(ormar.Model): class Meta: tablename = "tracks" metadata = metadata database = database id: int = ormar.Integer(name="track_id", primary_key=True) album: Optional[Album] = ormar.ForeignKey(Album) title: str = ormar.String(max_length=100) position: int = ormar.Integer() tonation: Optional[Tonation] = ormar.ForeignKey(Tonation, name="tonation_id") class Cover(ormar.Model): class Meta: tablename = "covers" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) album: Optional[Album] = ormar.ForeignKey( Album, related_name="cover_pictures", name="album_id" ) title: str = ormar.String(max_length=100) artist: str = ormar.String(max_length=200, nullable=True) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_prefetch_related(): async with database: async with database.transaction(force_rollback=True): album = Album(name="Malibu") await album.save() ton1 = await Tonation.objects.create(name="B-mol") await Track.objects.create( album=album, title="The Bird", position=1, tonation=ton1 ) await Track.objects.create( album=album, title="Heart don't stand a chance", position=2, tonation=ton1, ) await Track.objects.create( album=album, title="The Waters", position=3, tonation=ton1 ) await Cover.objects.create(title="Cover1", album=album, artist="Artist 1") await Cover.objects.create(title="Cover2", album=album, artist="Artist 2") fantasies = Album(name="Fantasies") await fantasies.save() await Track.objects.create( album=fantasies, title="Help I'm Alive", position=1 ) await 
Track.objects.create(album=fantasies, title="Sick Muse", position=2) await Track.objects.create( album=fantasies, title="Satellite Mind", position=3 ) await Cover.objects.create( title="Cover3", album=fantasies, artist="Artist 3" ) await Cover.objects.create( title="Cover4", album=fantasies, artist="Artist 4" ) album = ( await Album.objects.filter(name="Malibu") .prefetch_related(["tracks__tonation", "cover_pictures"]) .get() ) assert len(album.tracks) == 3 assert album.tracks[0].title == "The Bird" assert len(album.cover_pictures) == 2 assert album.cover_pictures[0].title == "Cover1" assert ( album.tracks[0].tonation.name == album.tracks[2].tonation.name == "B-mol" ) albums = await Album.objects.prefetch_related("tracks").all() assert len(albums[0].tracks) == 3 assert len(albums[1].tracks) == 3 assert albums[0].tracks[0].title == "The Bird" assert albums[1].tracks[0].title == "Help I'm Alive" track = await Track.objects.prefetch_related(["album__cover_pictures"]).get( title="The Bird" ) assert track.album.name == "Malibu" assert len(track.album.cover_pictures) == 2 assert track.album.cover_pictures[0].artist == "Artist 1" track = ( await Track.objects.prefetch_related(["album__cover_pictures"]) .exclude_fields("album__cover_pictures__artist") .get(title="The Bird") ) assert track.album.name == "Malibu" assert len(track.album.cover_pictures) == 2 assert track.album.cover_pictures[0].artist is None tracks = await Track.objects.prefetch_related("album").all() assert len(tracks) == 6 @pytest.mark.asyncio async def test_prefetch_related_with_many_to_many(): async with database: async with database.transaction(force_rollback=True): div = await Division.objects.create(name="Div 1") shop1 = await Shop.objects.create(name="Shop 1", division=div) shop2 = await Shop.objects.create(name="Shop 2", division=div) album = Album(name="Malibu") await album.save() await album.shops.add(shop1) await album.shops.add(shop2) await Track.objects.create(album=album, title="The Bird", 
position=1) await Track.objects.create( album=album, title="Heart don't stand a chance", position=2 ) await Track.objects.create(album=album, title="The Waters", position=3) await Cover.objects.create(title="Cover1", album=album, artist="Artist 1") await Cover.objects.create(title="Cover2", album=album, artist="Artist 2") track = await Track.objects.prefetch_related( ["album__cover_pictures", "album__shops__division"] ).get(title="The Bird") assert track.album.name == "Malibu" assert len(track.album.cover_pictures) == 2 assert track.album.cover_pictures[0].artist == "Artist 1" assert len(track.album.shops) == 2 assert track.album.shops[0].name == "Shop 1" assert track.album.shops[0].division.name == "Div 1" album2 = Album(name="Malibu 2") await album2.save() await album2.shops.add(shop1) await album2.shops.add(shop2) await Track.objects.create(album=album2, title="The Bird 2", position=1) tracks = await Track.objects.prefetch_related(["album__shops"]).all() assert tracks[0].album.name == "Malibu" assert tracks[0].album.shops[0].name == "Shop 1" assert tracks[3].album.name == "Malibu 2" assert tracks[3].album.shops[0].name == "Shop 1" assert tracks[0].album.shops[0] == tracks[3].album.shops[0] assert id(tracks[0].album.shops[0]) == id(tracks[3].album.shops[0]) tracks[0].album.shops[0].name = "Dummy" assert tracks[0].album.shops[0].name == tracks[3].album.shops[0].name @pytest.mark.asyncio async def test_prefetch_related_empty(): async with database: async with database.transaction(force_rollback=True): await Track.objects.create(title="The Bird", position=1) track = await Track.objects.prefetch_related(["album__cover_pictures"]).get( title="The Bird" ) assert track.title == "The Bird" assert track.album is None @pytest.mark.asyncio async def test_prefetch_related_with_select_related(): async with database: async with database.transaction(force_rollback=True): div = await Division.objects.create(name="Div 1") shop1 = await Shop.objects.create(name="Shop 1", 
division=div) shop2 = await Shop.objects.create(name="Shop 2", division=div) album = Album(name="Malibu") await album.save() await album.shops.add(shop1) await album.shops.add(shop2) await Cover.objects.create(title="Cover1", album=album, artist="Artist 1") await Cover.objects.create(title="Cover2", album=album, artist="Artist 2") album = ( await Album.objects.select_related(["tracks", "shops"]) .filter(name="Malibu") .prefetch_related(["cover_pictures", "shops__division"]) .first() ) assert len(album.tracks) == 0 assert len(album.cover_pictures) == 2 assert album.shops[0].division.name == "Div 1" rand_set = await RandomSet.objects.create(name="Rand 1") ton1 = await Tonation.objects.create(name="B-mol", rand_set=rand_set) await Track.objects.create( album=album, title="The Bird", position=1, tonation=ton1 ) await Track.objects.create( album=album, title="Heart don't stand a chance", position=2, tonation=ton1, ) await Track.objects.create( album=album, title="The Waters", position=3, tonation=ton1 ) album = ( await Album.objects.select_related("tracks__tonation__rand_set") .filter(name="Malibu") .prefetch_related(["cover_pictures", "shops__division"]) .order_by( ["-shops__name", "-cover_pictures__artist", "shops__division__name"] ) .get() ) assert len(album.tracks) == 3 assert album.tracks[0].tonation == album.tracks[2].tonation == ton1 assert len(album.cover_pictures) == 2 assert album.cover_pictures[0].artist == "Artist 2" assert len(album.shops) == 2 assert album.shops[0].name == "Shop 2" assert album.shops[0].division.name == "Div 1" track = ( await Track.objects.select_related("album") .prefetch_related(["album__cover_pictures", "album__shops__division"]) .get(title="The Bird") ) assert track.album.name == "Malibu" assert len(track.album.cover_pictures) == 2 assert track.album.cover_pictures[0].artist == "Artist 1" assert len(track.album.shops) == 2 assert track.album.shops[0].name == "Shop 1" assert track.album.shops[0].division.name == "Div 1" 
@pytest.mark.asyncio async def test_prefetch_related_with_select_related_and_fields(): async with database: async with database.transaction(force_rollback=True): div = await Division.objects.create(name="Div 1") shop1 = await Shop.objects.create(name="Shop 1", division=div) shop2 = await Shop.objects.create(name="Shop 2", division=div) album = Album(name="Malibu") await album.save() await album.shops.add(shop1) await album.shops.add(shop2) await Cover.objects.create(title="Cover1", album=album, artist="Artist 1") await Cover.objects.create(title="Cover2", album=album, artist="Artist 2") rand_set = await RandomSet.objects.create(name="Rand 1") ton1 = await Tonation.objects.create(name="B-mol", rand_set=rand_set) await Track.objects.create( album=album, title="The Bird", position=1, tonation=ton1 ) await Track.objects.create( album=album, title="Heart don't stand a chance", position=2, tonation=ton1, ) await Track.objects.create( album=album, title="The Waters", position=3, tonation=ton1 ) album = ( await Album.objects.select_related("tracks__tonation__rand_set") .filter(name="Malibu") .prefetch_related(["cover_pictures", "shops__division"]) .exclude_fields({"shops": {"division": {"name"}}}) .get() ) assert len(album.tracks) == 3 assert album.tracks[0].tonation == album.tracks[2].tonation == ton1 assert len(album.cover_pictures) == 2 assert album.cover_pictures[0].artist == "Artist 1" assert len(album.shops) == 2 assert album.shops[0].name == "Shop 1" assert album.shops[0].division.name is None album = ( await Album.objects.select_related("tracks") .filter(name="Malibu") .prefetch_related(["cover_pictures", "shops__division"]) .fields( { "name": ..., "shops": {"division"}, "cover_pictures": {"id": ..., "title": ...}, } ) .exclude_fields({"shops": {"division": {"name"}}}) .get() ) assert len(album.tracks) == 3 assert len(album.cover_pictures) == 2 assert album.cover_pictures[0].artist is None assert album.cover_pictures[0].title is not None assert len(album.shops) == 
2 assert album.shops[0].name is None assert album.shops[0].division is not None assert album.shops[0].division.name is None ormar-0.12.2/tests/test_relations/test_prefetch_related_multiple_models_relation.py000066400000000000000000000053601444363446500312220ustar00rootroot00000000000000from typing import List, Optional import databases import sqlalchemy from sqlalchemy import create_engine import ormar import pytest from tests.settings import DATABASE_URL db = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class User(ormar.Model): class Meta: metadata = metadata database = db tablename = "test_users" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=50) class Signup(ormar.Model): class Meta: metadata = metadata database = db tablename = "test_signup" id: int = ormar.Integer(primary_key=True) class Session(ormar.Model): class Meta: metadata = metadata database = db tablename = "test_sessions" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=255, index=True) some_text: str = ormar.Text() some_other_text: Optional[str] = ormar.Text(nullable=True) teacher: Optional[User] = ormar.ForeignKey( User, nullable=True, related_name="teaching" ) students: Optional[List[User]] = ormar.ManyToMany( User, through=Signup, related_name="attending" ) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_add_students(): async with db: for user_id in [1, 2, 3, 4, 5]: await User.objects.create(name=f"User {user_id}") for name, some_text, some_other_text in [ ("Session 1", "Some text 1", "Some other text 1"), ("Session 2", "Some text 2", "Some other text 2"), ("Session 3", "Some text 3", "Some other text 3"), ("Session 4", "Some text 4", "Some other text 4"), ("Session 5", "Some text 5", "Some other text 5"), ]: await Session( name=name, some_text=some_text, 
some_other_text=some_other_text ).save() s1 = await Session.objects.get(pk=1) s2 = await Session.objects.get(pk=2) users = {} for i in range(1, 6): user = await User.objects.get(pk=i) users[f"user_{i}"] = user if i % 2 == 0: await s1.students.add(user) else: await s2.students.add(user) assert len(s1.students) > 0 assert len(s2.students) > 0 user = await User.objects.select_related("attending").get(pk=1) assert user.attending is not None assert len(user.attending) > 0 query = Session.objects.prefetch_related(["students", "teacher"]) sessions = await query.all() assert len(sessions) == 5 ormar-0.12.2/tests/test_relations/test_python_style_relations.py000066400000000000000000000065651444363446500254000ustar00rootroot00000000000000from typing import List, Optional import databases import pytest import pytest_asyncio import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Author(ormar.Model): class Meta: tablename = "authors" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) first_name: str = ormar.String(max_length=80) last_name: str = ormar.String(max_length=80) class Category(ormar.Model): class Meta: tablename = "categories" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=40) class Post(ormar.Model): class Meta: tablename = "posts" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) categories: Optional[List[Category]] = ormar.ManyToMany(Category) author: Optional[Author] = ormar.ForeignKey(Author, related_name="author_posts") @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest_asyncio.fixture(scope="function") async def cleanup(): yield async 
with database: PostCategory = Post.Meta.model_fields["categories"].through await PostCategory.objects.delete(each=True) await Post.objects.delete(each=True) await Category.objects.delete(each=True) await Author.objects.delete(each=True) @pytest.mark.asyncio async def test_selecting_related(cleanup): async with database: guido = await Author.objects.create(first_name="Guido", last_name="Van Rossum") post = await Post.objects.create(title="Hello, M2M", author=guido) news = await Category.objects.create(name="News") recent = await Category.objects.create(name="Recent") await post.categories.add(news) await post.categories.add(recent) assert len(await post.categories.all()) == 2 # Loads categories and posts (2 queries) and perform the join in Python. categories = await Category.objects.select_related(Category.posts).all() assert len(categories) == 2 assert categories[0].name == "News" news_posts = await news.posts.select_related(Post.author).all() assert news_posts[0].author == guido assert (await post.categories.limit(1).all())[0] == news assert (await post.categories.offset(1).limit(1).all())[0] == recent assert await post.categories.first() == news assert await post.categories.exists() author = await Author.objects.prefetch_related( Author.author_posts.categories ).get() assert len(author.author_posts) == 1 assert author.author_posts[0].title == "Hello, M2M" assert author.author_posts[0].categories[0].name == "News" assert author.author_posts[0].categories[1].name == "Recent" post = await Post.objects.select_related([Post.author, Post.categories]).get() assert len(post.categories) == 2 assert post.categories[0].name == "News" assert post.categories[1].name == "Recent" assert post.author.first_name == "Guido" ormar-0.12.2/tests/test_relations/test_relations_default_exception.py000066400000000000000000000033551444363446500263330ustar00rootroot00000000000000# type: ignore from typing import List, Optional import databases import pytest import sqlalchemy import ormar 
from ormar.exceptions import ModelDefinitionError from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Author(ormar.Model): class Meta: tablename = "authors" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) first_name: str = ormar.String(max_length=80) last_name: str = ormar.String(max_length=80) class Category(ormar.Model): class Meta: tablename = "categories" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=40) def test_fk_error(): with pytest.raises(ModelDefinitionError): class Post(ormar.Model): class Meta: tablename = "posts" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) categories: Optional[List[Category]] = ormar.ManyToMany(Category) author: Optional[Author] = ormar.ForeignKey(Author, default="aa") def test_m2m_error(): with pytest.raises(ModelDefinitionError): class Post(ormar.Model): class Meta: tablename = "posts" database = database metadata = metadata id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) categories: Optional[List[Category]] = ormar.ManyToMany( Category, default="aa" ) ormar-0.12.2/tests/test_relations/test_saving_related.py000066400000000000000000000041311444363446500235310ustar00rootroot00000000000000from typing import Union import databases import pytest import sqlalchemy as sa from sqlalchemy import create_engine import ormar from ormar.exceptions import ModelPersistenceError from tests.settings import DATABASE_URL metadata = sa.MetaData() db = databases.Database(DATABASE_URL) class Category(ormar.Model): class Meta: tablename = "categories" metadata = metadata database = db id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=50, unique=True, index=True) code: int = ormar.Integer() class Workshop(ormar.Model): 
class Meta: tablename = "workshops" metadata = metadata database = db id: int = ormar.Integer(primary_key=True) topic: str = ormar.String(max_length=255, index=True) category: Union[ormar.Model, Category] = ormar.ForeignKey( Category, related_name="workshops", nullable=False ) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_model_relationship(): async with db: async with db.transaction(force_rollback=True): cat = await Category(name="Foo", code=123).save() ws = await Workshop(topic="Topic 1", category=cat).save() assert ws.id == 1 assert ws.topic == "Topic 1" assert ws.category.name == "Foo" ws.topic = "Topic 2" await ws.update() assert ws.id == 1 assert ws.topic == "Topic 2" assert ws.category.name == "Foo" @pytest.mark.asyncio async def test_model_relationship_with_not_saved(): async with db: async with db.transaction(force_rollback=True): cat = Category(name="Foo", code=123) with pytest.raises(ModelPersistenceError): await Workshop(topic="Topic 1", category=cat).save() with pytest.raises(ModelPersistenceError): await Workshop.objects.create(topic="Topic 1", category=cat) ormar-0.12.2/tests/test_relations/test_select_related_with_limit.py000066400000000000000000000113641444363446500257600ustar00rootroot00000000000000from typing import List, Optional import databases import sqlalchemy from sqlalchemy import create_engine import ormar import pytest from tests.settings import DATABASE_URL db = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class Keyword(ormar.Model): class Meta: metadata = metadata database = db tablename = "keywords" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=50) class KeywordPrimaryModel(ormar.Model): class Meta: metadata = metadata database = db tablename = "primary_models_keywords" id: int = 
ormar.Integer(primary_key=True) class PrimaryModel(ormar.Model): class Meta: metadata = metadata database = db tablename = "primary_models" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=255, index=True) some_text: str = ormar.Text() some_other_text: Optional[str] = ormar.Text(nullable=True) keywords: Optional[List[Keyword]] = ormar.ManyToMany( Keyword, through=KeywordPrimaryModel ) class SecondaryModel(ormar.Model): class Meta: metadata = metadata database = db tablename = "secondary_models" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) primary_model: PrimaryModel = ormar.ForeignKey( PrimaryModel, related_name="secondary_models" ) @pytest.mark.asyncio async def test_create_primary_models(): async with db: for name, some_text, some_other_text in [ ("Primary 1", "Some text 1", "Some other text 1"), ("Primary 2", "Some text 2", "Some other text 2"), ("Primary 3", "Some text 3", "Some other text 3"), ("Primary 4", "Some text 4", "Some other text 4"), ("Primary 5", "Some text 5", "Some other text 5"), ("Primary 6", "Some text 6", "Some other text 6"), ("Primary 7", "Some text 7", "Some other text 7"), ("Primary 8", "Some text 8", "Some other text 8"), ("Primary 9", "Some text 9", "Some other text 9"), ("Primary 10", "Some text 10", "Some other text 10"), ]: await PrimaryModel( name=name, some_text=some_text, some_other_text=some_other_text ).save() for tag_id in [1, 2, 3, 4, 5]: await Keyword.objects.create(name=f"Tag {tag_id}") p1 = await PrimaryModel.objects.get(pk=1) p2 = await PrimaryModel.objects.get(pk=2) for i in range(1, 6): keyword = await Keyword.objects.get(pk=i) if i % 2 == 0: await p1.keywords.add(keyword) else: await p2.keywords.add(keyword) models = await PrimaryModel.objects.select_related("keywords").limit(5).all() assert len(models) == 5 assert len(models[0].keywords) == 2 assert len(models[1].keywords) == 3 assert len(models[2].keywords) == 0 models2 = ( await 
PrimaryModel.objects.select_related("keywords") .limit(5) .offset(3) .all() ) assert len(models2) == 5 assert [x.name for x in models2] != [x.name for x in models] assert [x.name for x in models2] == [ "Primary 4", "Primary 5", "Primary 6", "Primary 7", "Primary 8", ] models3 = ( await PrimaryModel.objects.select_related("keywords") .limit(5, limit_raw_sql=True) .all() ) assert len(models3) == 2 assert len(models3[0].keywords) == 2 assert len(models3[1].keywords) == 3 models4 = ( await PrimaryModel.objects.offset(1) .select_related("keywords") .limit(5, limit_raw_sql=True) .all() ) assert len(models4) == 3 assert [x.name for x in models4] == ["Primary 1", "Primary 2", "Primary 3"] assert len(models4[0].keywords) == 1 assert len(models4[1].keywords) == 3 assert len(models4[2].keywords) == 0 models5 = ( await PrimaryModel.objects.select_related("keywords") .offset(2, limit_raw_sql=True) .limit(5) .all() ) assert len(models5) == 3 assert [x.name for x in models5] == ["Primary 2", "Primary 3", "Primary 4"] assert len(models5[0].keywords) == 3 assert len(models5[1].keywords) == 0 assert len(models5[2].keywords) == 0 @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) ormar-0.12.2/tests/test_relations/test_select_related_with_m2m_and_pk_name_set.py000066400000000000000000000077471444363446500305360ustar00rootroot00000000000000# type: ignore from datetime import date from typing import List, Optional, Union import databases import pytest import sqlalchemy from sqlalchemy import create_engine import ormar from ormar import ModelDefinitionError from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class MainMeta(ormar.ModelMeta): metadata = metadata database = database class Role(ormar.Model): class Meta(MainMeta): pass name: str = ormar.String(primary_key=True, max_length=1000) order: int = 
ormar.Integer(default=0, name="sort_order") description: str = ormar.Text() class Company(ormar.Model): class Meta(MainMeta): pass name: str = ormar.String(primary_key=True, max_length=1000) class UserRoleCompany(ormar.Model): class Meta(MainMeta): pass class User(ormar.Model): class Meta(MainMeta): pass registrationnumber: str = ormar.String(primary_key=True, max_length=1000) company: Company = ormar.ForeignKey(Company) company2: Company = ormar.ForeignKey(Company, related_name="secondary_users") name: str = ormar.Text() role: Optional[Role] = ormar.ForeignKey(Role) roleforcompanies: Optional[Union[Company, List[Company]]] = ormar.ManyToMany( Company, through=UserRoleCompany, related_name="role_users" ) lastupdate: date = ormar.DateTime(server_default=sqlalchemy.func.now()) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) def test_wrong_model(): with pytest.raises(ModelDefinitionError): class User(ormar.Model): class Meta(MainMeta): pass registrationnumber: str = ormar.Text(primary_key=True) company: Company = ormar.ForeignKey(Company) company2: Company = ormar.ForeignKey(Company) @pytest.mark.asyncio async def test_create_primary_models(): async with database: await Role.objects.create( name="user", order=0, description="no administration right" ) role_1 = await Role.objects.create( name="admin", order=1, description="standard administration right" ) await Role.objects.create( name="super_admin", order=2, description="super administration right" ) assert await Role.objects.count() == 3 company_0 = await Company.objects.create(name="Company") company_1 = await Company.objects.create(name="Subsidiary Company 1") company_2 = await Company.objects.create(name="Subsidiary Company 2") company_3 = await Company.objects.create(name="Subsidiary Company 3") assert await Company.objects.count() == 4 user = await User.objects.create( 
registrationnumber="00-00000", company=company_0, name="admin", role=role_1 ) assert await User.objects.count() == 1 await user.delete() assert await User.objects.count() == 0 user = await User.objects.create( registrationnumber="00-00000", company=company_0, company2=company_3, name="admin", role=role_1, ) await user.roleforcompanies.add(company_1) await user.roleforcompanies.add(company_2) users = await User.objects.select_related( ["company", "company2", "roleforcompanies"] ).all() assert len(users) == 1 assert len(users[0].roleforcompanies) == 2 assert len(users[0].roleforcompanies[0].role_users) == 1 assert users[0].company.name == "Company" assert len(users[0].company.users) == 1 assert users[0].company2.name == "Subsidiary Company 3" assert len(users[0].company2.secondary_users) == 1 users = await User.objects.select_related("roleforcompanies").all() assert len(users) == 1 assert len(users[0].roleforcompanies) == 2 ormar-0.12.2/tests/test_relations/test_selecting_proper_table_prefix.py000066400000000000000000000051371444363446500266410ustar00rootroot00000000000000from typing import List, Optional import databases import pytest import sqlalchemy from sqlalchemy import create_engine import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class User(ormar.Model): class Meta: metadata = metadata database = database tablename = "test_users" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=50) class Signup(ormar.Model): class Meta: metadata = metadata database = database tablename = "test_signup" id: int = ormar.Integer(primary_key=True) class Session(ormar.Model): class Meta: metadata = metadata database = database tablename = "test_sessions" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=255, index=True) some_text: str = ormar.Text() some_other_text: Optional[str] = ormar.Text(nullable=True) students: Optional[List[User]] = 
ormar.ManyToMany(User, through=Signup) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_list_sessions_for_user(): async with database: for user_id in [1, 2, 3, 4, 5]: await User.objects.create(name=f"User {user_id}") for name, some_text, some_other_text in [ ("Session 1", "Some text 1", "Some other text 1"), ("Session 2", "Some text 2", "Some other text 2"), ("Session 3", "Some text 3", "Some other text 3"), ("Session 4", "Some text 4", "Some other text 4"), ("Session 5", "Some text 5", "Some other text 5"), ]: await Session( name=name, some_text=some_text, some_other_text=some_other_text ).save() s1 = await Session.objects.get(pk=1) s2 = await Session.objects.get(pk=2) users = {} for i in range(1, 6): user = await User.objects.get(pk=i) users[f"user_{i}"] = user if i % 2 == 0: await s1.students.add(user) else: await s2.students.add(user) assert len(s1.students) == 2 assert len(s2.students) == 3 assert [x.pk for x in s1.students] == [2, 4] assert [x.pk for x in s2.students] == [1, 3, 5] user = await User.objects.select_related("sessions").get(pk=1) assert user.sessions is not None assert len(user.sessions) > 0 ormar-0.12.2/tests/test_relations/test_skipping_reverse.py000066400000000000000000000161161444363446500241270ustar00rootroot00000000000000from typing import List, Optional import databases import pytest import pytest_asyncio import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): database = database metadata = metadata class Author(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) first_name: str = ormar.String(max_length=80) last_name: str = ormar.String(max_length=80) class Category(ormar.Model): class Meta(BaseMeta): tablename = "categories" id: 
int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=40) class Post(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) categories: Optional[List[Category]] = ormar.ManyToMany(Category, skip_reverse=True) author: Optional[Author] = ormar.ForeignKey(Author, skip_reverse=True) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest_asyncio.fixture(scope="function") async def cleanup(): yield async with database: PostCategory = Post.Meta.model_fields["categories"].through await PostCategory.objects.delete(each=True) await Post.objects.delete(each=True) await Category.objects.delete(each=True) await Author.objects.delete(each=True) def test_model_definition(): category = Category(name="Test") author = Author(first_name="Test", last_name="Author") post = Post(title="Test Post", author=author) post.categories = category assert post.categories[0] == category assert post.author == author with pytest.raises(AttributeError): assert author.posts with pytest.raises(AttributeError): assert category.posts assert "posts" not in category._orm @pytest.mark.asyncio async def test_assigning_related_objects(cleanup): async with database: guido = await Author.objects.create(first_name="Guido", last_name="Van Rossum") post = await Post.objects.create(title="Hello, M2M", author=guido) news = await Category.objects.create(name="News") # Add a category to a post. 
await post.categories.add(news) # other way is disabled with pytest.raises(AttributeError): await news.posts.add(post) assert await post.categories.get_or_none(name="no exist") is None assert await post.categories.get_or_none(name="News") == news # Creating columns object from instance: await post.categories.create(name="Tips") assert len(post.categories) == 2 post_categories = await post.categories.all() assert len(post_categories) == 2 category = await Category.objects.select_related("posts").get(name="News") with pytest.raises(AttributeError): assert category.posts @pytest.mark.asyncio async def test_quering_of_related_model_works_but_no_result(cleanup): async with database: guido = await Author.objects.create(first_name="Guido", last_name="Van Rossum") post = await Post.objects.create(title="Hello, M2M", author=guido) news = await Category.objects.create(name="News") await post.categories.add(news) post_categories = await post.categories.all() assert len(post_categories) == 1 assert "posts" not in post.dict().get("categories", [])[0] assert news == await post.categories.get(name="News") posts_about_python = await Post.objects.filter(categories__name="python").all() assert len(posts_about_python) == 0 # relation not in dict category = ( await Category.objects.select_related("posts") .filter(posts__author=guido) .get() ) assert category == news assert "posts" not in category.dict() # relation not in json category2 = ( await Category.objects.select_related("posts") .filter(posts__author__first_name="Guido") .get() ) assert category2 == news assert "posts" not in category2.json() assert "posts" not in Category.schema().get("properties") @pytest.mark.asyncio async def test_removal_of_the_relations(cleanup): async with database: guido = await Author.objects.create(first_name="Guido", last_name="Van Rossum") post = await Post.objects.create(title="Hello, M2M", author=guido) news = await Category.objects.create(name="News") await post.categories.add(news) assert 
len(await post.categories.all()) == 1 await post.categories.remove(news) assert len(await post.categories.all()) == 0 with pytest.raises(AttributeError): await news.posts.add(post) with pytest.raises(AttributeError): await news.posts.remove(post) await post.categories.add(news) await post.categories.clear() assert len(await post.categories.all()) == 0 await post.categories.add(news) await news.delete() assert len(await post.categories.all()) == 0 @pytest.mark.asyncio async def test_selecting_related(cleanup): async with database: guido = await Author.objects.create(first_name="Guido", last_name="Van Rossum") guido2 = await Author.objects.create( first_name="Guido2", last_name="Van Rossum" ) post = await Post.objects.create(title="Hello, M2M", author=guido) post2 = await Post.objects.create(title="Bye, M2M", author=guido2) news = await Category.objects.create(name="News") recent = await Category.objects.create(name="Recent") await post.categories.add(news) await post.categories.add(recent) await post2.categories.add(recent) assert len(await post.categories.all()) == 2 assert (await post.categories.limit(1).all())[0] == news assert (await post.categories.offset(1).limit(1).all())[0] == recent assert await post.categories.first() == news assert await post.categories.exists() # still can order categories = ( await Category.objects.select_related("posts") .order_by("posts__title") .all() ) assert categories[0].name == "Recent" assert categories[1].name == "News" # still can filter categories = await Category.objects.filter(posts__title="Bye, M2M").all() assert categories[0].name == "Recent" assert len(categories) == 1 # same for reverse fk authors = ( await Author.objects.select_related("posts").order_by("posts__title").all() ) assert authors[0].first_name == "Guido2" assert authors[1].first_name == "Guido" authors = await Author.objects.filter(posts__title="Bye, M2M").all() assert authors[0].first_name == "Guido2" assert len(authors) == 1 
ormar-0.12.2/tests/test_relations/test_through_relations_fail.py000066400000000000000000000025771444363446500253110ustar00rootroot00000000000000# type: ignore import databases import pytest import sqlalchemy import ormar from ormar import ModelDefinitionError from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() def test_through_with_relation_fails(): class BaseMeta(ormar.ModelMeta): database = database metadata = metadata class Category(ormar.Model): class Meta(BaseMeta): tablename = "categories" id = ormar.Integer(primary_key=True) name = ormar.String(max_length=40) class Blog(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) class PostCategory(ormar.Model): class Meta(BaseMeta): tablename = "posts_x_categories" id: int = ormar.Integer(primary_key=True) sort_order: int = ormar.Integer(nullable=True) param_name: str = ormar.String(default="Name", max_length=200) blog = ormar.ForeignKey(Blog) with pytest.raises(ModelDefinitionError): class Post(ormar.Model): class Meta(BaseMeta): pass id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=200) categories = ormar.ManyToMany(Category, through=PostCategory) ormar-0.12.2/tests/test_relations/test_weakref_checking.py000066400000000000000000000022351444363446500240240ustar00rootroot00000000000000from typing import Optional, Type import databases import pytest import pytest_asyncio import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class Band(ormar.Model): class Meta: tablename = "bands" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Artist(ormar.Model): class Meta: tablename = "artists" metadata = metadata database = database id: int = 
ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) band: Band = ormar.ForeignKey(Band) def test_weakref_init(): band = Band(name="Band") artist1 = Artist(name="Artist 1", band=band) artist2 = Artist(name="Artist 2", band=band) artist3 = Artist(name="Artist 3", band=band) del artist1 Artist( name="Artist 2", band=band ) # Force it to check for weakly-referenced objects del artist3 band.artists # Force it to clean assert len(band.artists) == 1 assert band.artists[0].name == "Artist 2" ormar-0.12.2/tests/test_signals/000077500000000000000000000000001444363446500165725ustar00rootroot00000000000000ormar-0.12.2/tests/test_signals/__init__.py000066400000000000000000000000001444363446500206710ustar00rootroot00000000000000ormar-0.12.2/tests/test_signals/test_signals.py000066400000000000000000000323571444363446500216550ustar00rootroot00000000000000from typing import Optional import databases import pydantic import pytest import pytest_asyncio import sqlalchemy import ormar from ormar import ( post_bulk_update, post_delete, post_save, post_update, pre_delete, pre_save, pre_update, ) from ormar.signals import SignalEmitter from ormar.exceptions import SignalDefinitionError from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class AuditLog(ormar.Model): class Meta: tablename = "audits" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) event_type: str = ormar.String(max_length=100) event_log: pydantic.Json = ormar.JSON() class Cover(ormar.Model): class Meta: tablename = "covers" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=100) class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) is_best_seller: bool = ormar.Boolean(default=False) play_count: 
int = ormar.Integer(default=0) cover: Optional[Cover] = ormar.ForeignKey(Cover) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest_asyncio.fixture(scope="function") async def cleanup(): yield async with database: await AuditLog.objects.delete(each=True) def test_passing_not_callable(): with pytest.raises(SignalDefinitionError): pre_save(Album)("wrong") def test_passing_callable_without_kwargs(): with pytest.raises(SignalDefinitionError): @pre_save(Album) def trigger(sender, instance): # pragma: no cover pass def test_invalid_signal(): emitter = SignalEmitter() with pytest.raises(SignalDefinitionError): emitter.save = 1 @pytest.mark.asyncio async def test_signal_functions(cleanup): async with database: async with database.transaction(force_rollback=True): @pre_save(Album) async def before_save(sender, instance, **kwargs): await AuditLog( event_type=f"PRE_SAVE_{sender.get_name()}", event_log=instance.json(), ).save() @post_save(Album) async def after_save(sender, instance, **kwargs): await AuditLog( event_type=f"POST_SAVE_{sender.get_name()}", event_log=instance.json(), ).save() @pre_update(Album) async def before_update(sender, instance, **kwargs): await AuditLog( event_type=f"PRE_UPDATE_{sender.get_name()}", event_log=instance.json(), ).save() @post_update(Album) async def after_update(sender, instance, **kwargs): await AuditLog( event_type=f"POST_UPDATE_{sender.get_name()}", event_log=instance.json(), ).save() @pre_delete(Album) async def before_delete(sender, instance, **kwargs): await AuditLog( event_type=f"PRE_DELETE_{sender.get_name()}", event_log=instance.json(), ).save() @post_delete(Album) async def after_delete(sender, instance, **kwargs): await AuditLog( event_type=f"POST_DELETE_{sender.get_name()}", event_log=instance.json(), ).save() @post_bulk_update(Album) async def 
after_bulk_update(sender, instances, **kwargs): for it in instances: await AuditLog( event_type=f"BULK_POST_UPDATE_{sender.get_name()}", event_log=it.json(), ).save() album = await Album.objects.create(name="Venice") audits = await AuditLog.objects.all() assert len(audits) == 2 assert audits[0].event_type == "PRE_SAVE_album" assert audits[0].event_log.get("name") == album.name assert audits[1].event_type == "POST_SAVE_album" assert audits[1].event_log.get("id") == album.pk album = await Album(name="Rome").save() audits = await AuditLog.objects.all() assert len(audits) == 4 assert audits[2].event_type == "PRE_SAVE_album" assert audits[2].event_log.get("name") == album.name assert audits[3].event_type == "POST_SAVE_album" assert audits[3].event_log.get("id") == album.pk album.is_best_seller = True await album.update() audits = await AuditLog.objects.filter(event_type__contains="UPDATE").all() assert len(audits) == 2 assert audits[0].event_type == "PRE_UPDATE_album" assert audits[0].event_log.get("name") == album.name assert audits[1].event_type == "POST_UPDATE_album" assert audits[1].event_log.get("is_best_seller") == album.is_best_seller album.signals.pre_update.disconnect(before_update) album.signals.post_update.disconnect(after_update) album.is_best_seller = False await album.update() audits = await AuditLog.objects.filter(event_type__contains="UPDATE").all() assert len(audits) == 2 await album.delete() audits = await AuditLog.objects.filter(event_type__contains="DELETE").all() assert len(audits) == 2 assert audits[0].event_type == "PRE_DELETE_album" assert ( audits[0].event_log.get("id") == audits[1].event_log.get("id") == album.id ) assert audits[1].event_type == "POST_DELETE_album" album.signals.pre_delete.disconnect(before_delete) album.signals.post_delete.disconnect(after_delete) album.signals.pre_save.disconnect(before_save) album.signals.post_save.disconnect(after_save) albums = await Album.objects.all() assert len(albums) for album in albums: 
album.play_count = 1 await Album.objects.bulk_update(albums) cnt = await AuditLog.objects.filter( event_type__contains="BULK_POST" ).count() assert cnt == len(albums) album.signals.bulk_post_update.disconnect(after_bulk_update) @pytest.mark.asyncio async def test_multiple_signals(cleanup): async with database: async with database.transaction(force_rollback=True): @pre_save(Album) async def before_save(sender, instance, **kwargs): await AuditLog( event_type=f"PRE_SAVE_{sender.get_name()}", event_log=instance.json(), ).save() @pre_save(Album) async def before_save2(sender, instance, **kwargs): await AuditLog( event_type=f"PRE_SAVE_{sender.get_name()}", event_log=instance.json(), ).save() album = await Album.objects.create(name="Miami") audits = await AuditLog.objects.all() assert len(audits) == 2 assert audits[0].event_type == "PRE_SAVE_album" assert audits[0].event_log.get("name") == album.name assert audits[1].event_type == "PRE_SAVE_album" assert audits[1].event_log.get("name") == album.name album.signals.pre_save.disconnect(before_save) album.signals.pre_save.disconnect(before_save2) @pytest.mark.asyncio async def test_static_methods_as_signals(cleanup): async with database: async with database.transaction(force_rollback=True): class AlbumAuditor: event_type = "ALBUM_INSTANCE" @staticmethod @pre_save(Album) async def before_save(sender, instance, **kwargs): await AuditLog( event_type=f"{AlbumAuditor.event_type}_SAVE", event_log=instance.json(), ).save() album = await Album.objects.create(name="Colorado") audits = await AuditLog.objects.all() assert len(audits) == 1 assert audits[0].event_type == "ALBUM_INSTANCE_SAVE" assert audits[0].event_log.get("name") == album.name album.signals.pre_save.disconnect(AlbumAuditor.before_save) @pytest.mark.asyncio async def test_methods_as_signals(cleanup): async with database: async with database.transaction(force_rollback=True): class AlbumAuditor: def __init__(self): self.event_type = "ALBUM_INSTANCE" async def 
before_save(self, sender, instance, **kwargs): await AuditLog( event_type=f"{self.event_type}_SAVE", event_log=instance.json() ).save() auditor = AlbumAuditor() pre_save(Album)(auditor.before_save) album = await Album.objects.create(name="San Francisco") audits = await AuditLog.objects.all() assert len(audits) == 1 assert audits[0].event_type == "ALBUM_INSTANCE_SAVE" assert audits[0].event_log.get("name") == album.name album.signals.pre_save.disconnect(auditor.before_save) @pytest.mark.asyncio async def test_multiple_senders_signal(cleanup): async with database: async with database.transaction(force_rollback=True): @pre_save([Album, Cover]) async def before_save(sender, instance, **kwargs): await AuditLog( event_type=f"PRE_SAVE_{sender.get_name()}", event_log=instance.json(), ).save() cover = await Cover(title="Blue").save() album = await Album.objects.create(name="San Francisco", cover=cover) audits = await AuditLog.objects.all() assert len(audits) == 2 assert audits[0].event_type == "PRE_SAVE_cover" assert audits[0].event_log.get("title") == cover.title assert audits[1].event_type == "PRE_SAVE_album" assert audits[1].event_log.get("cover") == album.cover.dict( exclude={"albums"} ) album.signals.pre_save.disconnect(before_save) cover.signals.pre_save.disconnect(before_save) @pytest.mark.asyncio async def test_modifing_the_instance(cleanup): async with database: async with database.transaction(force_rollback=True): @pre_update(Album) async def before_update(sender, instance, **kwargs): if instance.play_count > 50 and not instance.is_best_seller: instance.is_best_seller = True # here album.play_count ans is_best_seller get default values album = await Album.objects.create(name="Venice") assert not album.is_best_seller assert album.play_count == 0 album.play_count = 30 # here a trigger is called but play_count is too low await album.update() assert not album.is_best_seller album.play_count = 60 await album.update() assert album.is_best_seller 
album.signals.pre_update.disconnect(before_update) @pytest.mark.asyncio async def test_custom_signal(cleanup): async with database: async with database.transaction(force_rollback=True): async def after_update(sender, instance, **kwargs): if instance.play_count > 50 and not instance.is_best_seller: instance.is_best_seller = True elif instance.play_count < 50 and instance.is_best_seller: instance.is_best_seller = False await instance.update() Album.Meta.signals.custom.connect(after_update) # here album.play_count ans is_best_seller get default values album = await Album.objects.create(name="Venice") assert not album.is_best_seller assert album.play_count == 0 album.play_count = 30 # here a trigger is called but play_count is too low await album.update() assert not album.is_best_seller album.play_count = 60 await album.update() assert not album.is_best_seller await Album.Meta.signals.custom.send(sender=Album, instance=album) assert album.is_best_seller album.play_count = 30 await album.update() assert album.is_best_seller await Album.Meta.signals.custom.send(sender=Album, instance=album) assert not album.is_best_seller Album.Meta.signals.custom.disconnect(after_update) ormar-0.12.2/tests/test_signals/test_signals_for_relations.py000066400000000000000000000173621444363446500246020ustar00rootroot00000000000000from typing import Optional import databases import pytest import pytest_asyncio import sqlalchemy import ormar from ormar import ( post_relation_add, post_relation_remove, pre_relation_add, pre_relation_remove, ) import pydantic from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class AuditLog(ormar.Model): class Meta: tablename = "audits" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) event_type: str = ormar.String(max_length=100) event_log: pydantic.Json = ormar.JSON() class Cover(ormar.Model): class Meta: tablename = "covers" metadata = 
metadata database = database id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=100) class Artist(ormar.Model): class Meta: tablename = "artists" metadata = metadata database = database id: int = ormar.Integer(name="artist_id", primary_key=True) name: str = ormar.String(name="fname", max_length=100) class Album(ormar.Model): class Meta: tablename = "albums" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) title: str = ormar.String(max_length=100) cover: Optional[Cover] = ormar.ForeignKey(Cover) artists = ormar.ManyToMany(Artist) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest_asyncio.fixture(autouse=True, scope="function") async def cleanup(): yield async with database: await AuditLog.objects.delete(each=True) @pytest.mark.asyncio async def test_relation_signal_functions(): async with database: async with database.transaction(force_rollback=True): @pre_relation_add([Album, Cover, Artist]) async def before_relation_add( sender, instance, child, relation_name, passed_kwargs, **kwargs ): await AuditLog.objects.create( event_type="RELATION_PRE_ADD", event_log=dict( class_affected=sender.get_name(), parent_id=instance.pk, child_id=child.pk, relation_name=relation_name, kwargs=passed_kwargs, ), ) passed_kwargs.pop("dummy", None) @post_relation_add([Album, Cover, Artist]) async def after_relation_add( sender, instance, child, relation_name, passed_kwargs, **kwargs ): await AuditLog.objects.create( event_type="RELATION_POST_ADD", event_log=dict( class_affected=sender.get_name(), parent_id=instance.pk, child_id=child.pk, relation_name=relation_name, kwargs=passed_kwargs, ), ) @pre_relation_remove([Album, Cover, Artist]) async def before_relation_remove( sender, instance, child, relation_name, **kwargs ): await AuditLog.objects.create( 
event_type="RELATION_PRE_REMOVE", event_log=dict( class_affected=sender.get_name(), parent_id=instance.pk, child_id=child.pk, relation_name=relation_name, kwargs=kwargs, ), ) @post_relation_remove([Album, Cover, Artist]) async def after_relation_remove( sender, instance, child, relation_name, **kwargs ): await AuditLog.objects.create( event_type="RELATION_POST_REMOVE", event_log=dict( class_affected=sender.get_name(), parent_id=instance.pk, child_id=child.pk, relation_name=relation_name, kwargs=kwargs, ), ) cover = await Cover(title="New").save() artist = await Artist(name="Artist").save() album = await Album(title="New Album").save() await cover.albums.add(album, index=0) log = await AuditLog.objects.get(event_type="RELATION_PRE_ADD") assert log.event_log.get("parent_id") == cover.pk assert log.event_log.get("child_id") == album.pk assert log.event_log.get("relation_name") == "albums" assert log.event_log.get("kwargs") == dict(index=0) log2 = await AuditLog.objects.get(event_type="RELATION_POST_ADD") assert log2.event_log.get("parent_id") == cover.pk assert log2.event_log.get("child_id") == album.pk assert log2.event_log.get("relation_name") == "albums" assert log2.event_log.get("kwargs") == dict(index=0) await album.artists.add(artist, dummy="test") log3 = await AuditLog.objects.filter( event_type="RELATION_PRE_ADD", id__gt=log2.pk ).get() assert log3.event_log.get("parent_id") == album.pk assert log3.event_log.get("child_id") == artist.pk assert log3.event_log.get("relation_name") == "artists" assert log3.event_log.get("kwargs") == dict(dummy="test") log4 = await AuditLog.objects.get( event_type="RELATION_POST_ADD", id__gt=log3.pk ) assert log4.event_log.get("parent_id") == album.pk assert log4.event_log.get("child_id") == artist.pk assert log4.event_log.get("relation_name") == "artists" assert log4.event_log.get("kwargs") == dict() assert album.cover == cover assert len(album.artists) == 1 await cover.albums.remove(album) log = await 
AuditLog.objects.get(event_type="RELATION_PRE_REMOVE") assert log.event_log.get("parent_id") == cover.pk assert log.event_log.get("child_id") == album.pk assert log.event_log.get("relation_name") == "albums" assert log.event_log.get("kwargs") == dict() log2 = await AuditLog.objects.get(event_type="RELATION_POST_REMOVE") assert log2.event_log.get("parent_id") == cover.pk assert log2.event_log.get("child_id") == album.pk assert log2.event_log.get("relation_name") == "albums" assert log2.event_log.get("kwargs") == dict() await album.artists.remove(artist) log3 = await AuditLog.objects.filter( event_type="RELATION_PRE_REMOVE", id__gt=log2.pk ).get() assert log3.event_log.get("parent_id") == album.pk assert log3.event_log.get("child_id") == artist.pk assert log3.event_log.get("relation_name") == "artists" assert log3.event_log.get("kwargs") == dict() log4 = await AuditLog.objects.get( event_type="RELATION_POST_REMOVE", id__gt=log3.pk ) assert log4.event_log.get("parent_id") == album.pk assert log4.event_log.get("child_id") == artist.pk assert log4.event_log.get("relation_name") == "artists" assert log4.event_log.get("kwargs") == dict() await album.load_all() assert len(album.artists) == 0 assert album.cover is None ormar-0.12.2/tests/test_types.py000066400000000000000000000070201444363446500166470ustar00rootroot00000000000000from typing import Any, Optional, TYPE_CHECKING import databases import pytest import sqlalchemy import ormar from ormar.relations.querysetproxy import QuerysetProxy from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL) metadata = sqlalchemy.MetaData() class BaseMeta(ormar.ModelMeta): metadata = metadata database = database class Publisher(ormar.Model): class Meta(BaseMeta): tablename = "publishers" id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) class Author(ormar.Model): class Meta(BaseMeta): tablename = "authors" order_by = ["-name"] id: int = ormar.Integer(primary_key=True) name: 
str = ormar.String(max_length=100) publishers = ormar.ManyToMany(Publisher) class Book(ormar.Model): class Meta(BaseMeta): tablename = "books" order_by = ["year", "-ranking"] id: int = ormar.Integer(primary_key=True) author = ormar.ForeignKey(Author) title: str = ormar.String(max_length=100) year: int = ormar.Integer(nullable=True) ranking: int = ormar.Integer(nullable=True) @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) def assert_type(book: Book): _ = str(book) @pytest.mark.asyncio async def test_types() -> None: async with database: query = Book.objects publisher = await Publisher(name="Test publisher").save() author = await Author.objects.create(name="Test Author") await author.publishers.add(publisher) author2 = await Author.objects.select_related("publishers").get() publishers = author2.publishers publisher2 = await Publisher.objects.select_related("authors").get() authors = publisher2.authors assert authors[0] == author for author in authors: pass # if TYPE_CHECKING: # pragma: no cover # reveal_type(author) # iter of relation proxy book = await Book.objects.create(title="Test", author=author) book2 = await Book.objects.select_related("author").get() books = await Book.objects.select_related("author").all() author_books = await author.books.all() assert book.author.name == "Test Author" assert book2.author.name == "Test Author" # if TYPE_CHECKING: # pragma: no cover # reveal_type(publisher) # model method # reveal_type(publishers) # many to many # reveal_type(publishers[0]) # item in m2m list # reveal_type(next(p for p in publishers)) # item in m2m iterator # # getting relation without __getattribute__ # reveal_type(authors) # reverse many to many # TODO: wrong # reveal_type(book2) # queryset get # reveal_type(books) # queryset all # reveal_type(book) # queryset - create # reveal_type(query) # 
queryset itself # reveal_type(book.author) # fk # reveal_type(author.books) # reverse fk relation proxy # TODO: wrong # reveal_type(author) # another test for queryset get different model # reveal_type(book.author.name) # field on related model # reveal_type(author_books) # querysetproxy result for fk # TODO: wrong # reveal_type(author_books[0]) # item in qs proxy for fk # TODO: wrong assert_type(book) ormar-0.12.2/tests/test_utils/000077500000000000000000000000001444363446500162725ustar00rootroot00000000000000ormar-0.12.2/tests/test_utils/__init__.py000066400000000000000000000000001444363446500203710ustar00rootroot00000000000000ormar-0.12.2/tests/test_utils/test_models_helpers.py000066400000000000000000000007221444363446500227110ustar00rootroot00000000000000from ormar.models.helpers.models import group_related_list def test_group_related_list(): given = [ "friends__least_favourite_game", "least_favourite_game", "friends", "favourite_game", "friends__favourite_game", ] expected = { "least_favourite_game": [], "favourite_game": [], "friends": ["favourite_game", "least_favourite_game"], } assert group_related_list(given) == expected ormar-0.12.2/tests/test_utils/test_queryset_utils.py000066400000000000000000000143161444363446500230110ustar00rootroot00000000000000import databases import sqlalchemy import ormar from ormar.queryset.queries.prefetch_query import sort_models from ormar.queryset.utils import ( subtract_dict, translate_list_to_dict, update_dict_from_list, update, ) from tests.settings import DATABASE_URL def test_list_to_dict_translation(): tet_list = ["aa", "bb", "cc__aa", "cc__bb", "cc__aa__xx", "cc__aa__yy"] test = translate_list_to_dict(tet_list) assert test == { "aa": Ellipsis, "bb": Ellipsis, "cc": {"aa": {"xx": Ellipsis, "yy": Ellipsis}, "bb": Ellipsis}, } def test_updating_dict_with_list(): curr_dict = { "aa": Ellipsis, "bb": Ellipsis, "cc": {"aa": {"xx": Ellipsis, "yy": Ellipsis}, "bb": Ellipsis}, } list_to_update = ["ee", "bb__cc", 
"cc__aa__xx__oo", "cc__aa__oo"] test = update_dict_from_list(curr_dict, list_to_update) assert test == { "aa": Ellipsis, "bb": {"cc": Ellipsis}, "cc": { "aa": {"xx": {"oo": Ellipsis}, "yy": Ellipsis, "oo": Ellipsis}, "bb": Ellipsis, }, "ee": Ellipsis, } def test_updating_dict_inc_set_with_list(): curr_dict = { "aa": Ellipsis, "bb": Ellipsis, "cc": {"aa": {"xx", "yy"}, "bb": Ellipsis}, } list_to_update = ["uu", "bb__cc", "cc__aa__xx__oo", "cc__aa__oo"] test = update_dict_from_list(curr_dict, list_to_update) assert test == { "aa": Ellipsis, "bb": {"cc": Ellipsis}, "cc": { "aa": {"xx": {"oo": Ellipsis}, "yy": Ellipsis, "oo": Ellipsis}, "bb": Ellipsis, }, "uu": Ellipsis, } def test_updating_dict_inc_set_with_dict(): curr_dict = { "aa": Ellipsis, "bb": Ellipsis, "cc": {"aa": {"xx", "yy"}, "bb": Ellipsis}, } dict_to_update = { "uu": Ellipsis, "bb": {"cc", "dd"}, "cc": {"aa": {"xx": {"oo": Ellipsis}, "oo": Ellipsis}}, } test = update(curr_dict, dict_to_update) assert test == { "aa": Ellipsis, "bb": {"cc", "dd"}, "cc": { "aa": {"xx": {"oo": Ellipsis}, "yy": Ellipsis, "oo": Ellipsis}, "bb": Ellipsis, }, "uu": Ellipsis, } def test_subtracting_dict_inc_set_with_dict(): curr_dict = { "aa": Ellipsis, "bb": Ellipsis, "cc": {"aa": {"xx", "yy"}, "bb": Ellipsis}, } dict_to_update = { "uu": Ellipsis, "bb": {"cc", "dd"}, "cc": {"aa": {"xx": {"oo": Ellipsis}}, "bb": Ellipsis}, } test = subtract_dict(curr_dict, dict_to_update) assert test == {"aa": Ellipsis, "cc": {"aa": {"yy": Ellipsis}}} def test_updating_dict_inc_set_with_dict_inc_set(): curr_dict = { "aa": Ellipsis, "bb": Ellipsis, "cc": {"aa": {"xx", "yy"}, "bb": Ellipsis}, } dict_to_update = { "uu": Ellipsis, "bb": {"cc", "dd"}, "cc": {"aa": {"xx", "oo", "zz", "ii"}}, } test = update(curr_dict, dict_to_update) assert test == { "aa": Ellipsis, "bb": {"cc", "dd"}, "cc": {"aa": {"xx", "yy", "oo", "zz", "ii"}, "bb": Ellipsis}, "uu": Ellipsis, } def test_subtracting_dict_inc_set_with_dict_inc_set(): curr_dict = { "aa": Ellipsis, "bb": 
Ellipsis, "cc": {"aa": {"xx", "yy"}, "bb": Ellipsis}, "dd": {"aa", "bb"}, } dict_to_update = { "aa": Ellipsis, "bb": {"cc", "dd"}, "cc": {"aa": {"xx", "oo", "zz", "ii"}}, "dd": {"aa", "bb"}, } test = subtract_dict(curr_dict, dict_to_update) assert test == {"cc": {"aa": {"yy"}, "bb": Ellipsis}} def test_subtracting_with_set_and_dict(): curr_dict = { "translation": { "filters": { "values": Ellipsis, "reports": {"report": {"charts": {"chart": Ellipsis}}}, }, "translations": {"language": Ellipsis}, "filtervalues": { "filter": {"reports": {"report": {"charts": {"chart": Ellipsis}}}} }, }, "chart": { "reports": { "report": { "filters": { "filter": { "translation": { "translations": {"language": Ellipsis}, "filtervalues": Ellipsis, }, "values": { "translation": {"translations": {"language": Ellipsis}} }, } } } } }, } dict_to_update = { "chart": Ellipsis, "translation": {"filters", "filtervalues", "chartcolumns"}, } test = subtract_dict(curr_dict, dict_to_update) assert test == {"translation": {"translations": {"language": Ellipsis}}} database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class SortModel(ormar.Model): class Meta: tablename = "sorts" metadata = metadata database = database id: int = ormar.Integer(primary_key=True) name: str = ormar.String(max_length=100) sort_order: int = ormar.Integer() def test_sorting_models(): models = [ SortModel(id=1, name="Alice", sort_order=0), SortModel(id=2, name="Al", sort_order=1), SortModel(id=3, name="Zake", sort_order=1), SortModel(id=4, name="Will", sort_order=0), SortModel(id=5, name="Al", sort_order=2), SortModel(id=6, name="Alice", sort_order=2), ] orders_by = {"name": "asc", "none": {}, "sort_order": "desc"} models = sort_models(models, orders_by) assert models[5].name == "Zake" assert models[0].name == "Al" assert models[1].name == "Al" assert [model.id for model in models] == [5, 2, 6, 1, 4, 3] orders_by = {"name": "asc", "none": set("aa"), "id": "asc"} models = 
sort_models(models, orders_by) assert [model.id for model in models] == [2, 5, 1, 6, 4, 3] orders_by = {"sort_order": "asc", "none": ..., "id": "asc", "uu": 2, "aa": None} models = sort_models(models, orders_by) assert [model.id for model in models] == [1, 4, 2, 3, 5, 6]