==> django-postgres-extra-2.0.9/.circleci/config.yml <==
version: 2.1
executors:
python:
parameters:
version:
type: string
docker:
- image: python:<< parameters.version >>-buster
- image: postgres:13.0
environment:
POSTGRES_DB: 'psqlextra'
POSTGRES_USER: 'psqlextra'
POSTGRES_PASSWORD: 'psqlextra'
commands:
install-dependencies:
parameters:
extra:
type: string
steps:
- run:
name: Install packages
command: apt-get update && apt-get install -y --no-install-recommends postgresql-client-11 libpq-dev build-essential git
- run:
name: Install Python packages
command: pip install --progress-bar off '.[<< parameters.extra >>]'
run-tests:
parameters:
pyversion:
type: integer
steps:
- run:
name: Run tests
command: tox --listenvs | grep ^py<< parameters.pyversion >> | circleci tests split | xargs -n 1 tox -e
environment:
DATABASE_URL: 'postgres://psqlextra:psqlextra@localhost:5432/psqlextra'
jobs:
test-python36:
executor:
name: python
version: "3.6"
steps:
- checkout
- install-dependencies:
extra: test
- run-tests:
pyversion: 36
test-python37:
executor:
name: python
version: "3.7"
steps:
- checkout
- install-dependencies:
extra: test
- run-tests:
pyversion: 37
test-python38:
executor:
name: python
version: "3.8"
steps:
- checkout
- install-dependencies:
extra: test
- run-tests:
pyversion: 38
test-python39:
executor:
name: python
version: "3.9"
steps:
- checkout
- install-dependencies:
extra: test
- run-tests:
pyversion: 39
test-python310:
executor:
name: python
version: "3.10"
steps:
- checkout
- install-dependencies:
extra: test
- run-tests:
pyversion: 310
test-python311:
executor:
name: python
version: "3.11"
steps:
- checkout
- install-dependencies:
extra: test
- run-tests:
pyversion: 311
- store_test_results:
path: reports
- run:
name: Upload coverage report
command: coveralls
analysis:
executor:
name: python
version: "3.9"
steps:
- checkout
- install-dependencies:
extra: analysis, test
- run:
name: Verify
command: python setup.py verify
publish:
executor:
name: python
version: "3.9"
steps:
- checkout
- install-dependencies:
extra: publish
- run:
name: Set version number
command: echo "__version__ = \"${CIRCLE_TAG:1}\"" > psqlextra/_version.py
- run:
name: Build package
command: python -m build
- run:
name: Publish package
command: >
python -m twine upload
--username "__token__"
--password "${PYPI_API_TOKEN}"
--verbose
--non-interactive
--disable-progress-bar
dist/*
workflows:
build:
jobs:
- test-python36:
filters:
tags:
only: /.*/
branches:
only: /.*/
- test-python37:
filters:
tags:
only: /.*/
branches:
only: /.*/
- test-python38:
filters:
tags:
only: /.*/
branches:
only: /.*/
- test-python39:
filters:
tags:
only: /.*/
branches:
only: /.*/
- test-python310:
filters:
tags:
only: /.*/
branches:
only: /.*/
- test-python311:
filters:
tags:
only: /.*/
branches:
only: /.*/
- analysis:
filters:
tags:
only: /.*/
branches:
only: /.*/
- publish:
filters:
tags:
only: /^v.*/
branches:
ignore: /.*/
==> django-postgres-extra-2.0.9/.coveragerc <==
[run]
include = psqlextra/*
omit = *migrations*, *tests*
==> django-postgres-extra-2.0.9/.gitignore <==
# Ignore virtual environments
env/
.env/
# Ignore Python byte code cache
*.pyc
__pycache__
.cache
# Ignore coverage reports
.coverage
reports/
# Ignore build results
*.egg-info/
pip-wheel-metadata/
dist/
# Ignore stupid .DS_Store
.DS_Store
# Ignore benchmark results
.benchmarks/
# Ignore temporary tox environments
.tox/
.pytest_cache/
# Ignore PyCharm / IntelliJ files
.idea/
==> django-postgres-extra-2.0.9/.readthedocs.yml <==
version: 2
sphinx:
builder: html
configuration: docs/source/conf.py
python:
version: 3.7
install:
- method: pip
path: .
extra_requirements:
- docs
- test
==> django-postgres-extra-2.0.9/CONTRIBUTING.md <==
# Contributing
Contributions to `django-postgres-extra` are definitely welcome! Any contribution that implements a PostgreSQL feature in the Django ORM is welcome.
Please use GitHub pull requests to contribute changes.
Information on how to run tests and how to hack on the code can be found at the bottom of the [README](https://github.com/SectorLabs/django-postgres-extra#working-with-the-code).
If you're unsure whether your change would be a good fit for `django-postgres-extra`, please submit an issue with the [idea](https://github.com/SectorLabs/django-postgres-extra/labels/idea) label and we can talk about it.
## Requirements
* All contributions must pass our CI.
* Existing tests pass.
* PyLint passes.
* PEP8 passes.
* Features that allow creating custom indexes or fields must also implement the associated migrations. `django-postgres-extra` prides itself on the fact that it integrates smoothly with Django migrations. We'd like to keep it that way for all features.
* Sufficiently complicated changes must be accompanied by tests.
==> django-postgres-extra-2.0.9/LICENSE <==
MIT License
Copyright (c) 2017 Sector Labs
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==> django-postgres-extra-2.0.9/README.md <==
|                    |                         |                                                                                                                             |
|--------------------|-------------------------|-----------------------------------------------------------------------------------------------------------------------------|
| :white_check_mark: | **Tests**               | [CircleCI](https://circleci.com/gh/SectorLabs/django-postgres-extra/tree/master)                                             |
| :memo:             | **License**             | [MIT](http://doge.mit-license.org)                                                                                            |
| :package:          | **PyPi**                | [PyPi](https://pypi.python.org/pypi/django-postgres-extra)                                                                    |
| :four_leaf_clover: | **Code coverage**       | [Coveralls](https://coveralls.io/github/SectorLabs/django-postgres-extra?branch=master)                                       |
|                    | **Django Versions**     | 2.0, 2.1, 2.2, 3.0, 3.1, 3.2, 4.0, 4.1, 4.2, 5.0                                                                              |
|                    | **Python Versions**     | 3.6, 3.7, 3.8, 3.9, 3.10, 3.11                                                                                                |
|                    | **Psycopg Versions**    | 2, 3                                                                                                                          |
| :book:             | **Documentation**       | [Read The Docs](https://django-postgres-extra.readthedocs.io/en/master/)                                                      |
| :warning:          | **Upgrade**             | [Upgrade from v1.x](https://django-postgres-extra.readthedocs.io/en/master/major_releases.html#new-features)                  |
| :checkered_flag:   | **Installation**        | [Installation Guide](https://django-postgres-extra.readthedocs.io/en/master/installation.html)                                |
| :fire:             | **Features**            | [Features & Documentation](https://django-postgres-extra.readthedocs.io/en/master/index.html#features)                        |
| :droplet:          | **Future enhancements** | [Potential features](https://github.com/SectorLabs/django-postgres-extra/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement)   |
`django-postgres-extra` aims to make all of PostgreSQL's awesome features available through the Django ORM. We do this by taking care of all the hassle. As opposed to the many small packages that each try to bring a single feature to Django with minimal effort, ``django-postgres-extra`` goes the extra mile, with well-tested implementations, seamless migrations and much more.
By seamless, we mean that any features we add will work truly seamlessly. You should not have to manually modify your migrations to work with fields and objects provided by this package.
---
:warning: **This README is for v2. See the `v1` branch for v1.x.**
---
## Major features
[See the full list](http://django-postgres-extra.readthedocs.io/#features)
* **Native upserts**
* Single query
* Concurrency safe
* With bulk support (single query)
* **Extended support for HStoreField**
* Unique constraints
* Null constraints
* Select individual keys using ``.values()`` or ``.values_list()``
* **PostgreSQL 11.x declarative table partitioning**
* Supports both range and list partitioning
* **Faster deletes**
* Truncate tables (with cascade)
* **Indexes**
* Conditional unique index.
* Case insensitive unique index.
## Working with the code
### Prerequisites
* PostgreSQL 10 or newer.
* Django 2.0 or newer (including 3.x, 4.x).
* Python 3.6 or newer.
### Getting started
1. Clone the repository:
λ git clone https://github.com/SectorLabs/django-postgres-extra.git
2. Create a virtual environment:
λ cd django-postgres-extra
λ virtualenv env
λ source env/bin/activate
3. Create a postgres user for use in tests (skip if your default user is a postgres superuser):
λ createuser --superuser psqlextra --pwprompt
λ export DATABASE_URL=postgres://psqlextra:@localhost/psqlextra
Hint: if you're using virtualenvwrapper, you might find it beneficial to put
the ``export`` line in ``$VIRTUAL_ENV/bin/postactivate`` so that it's always
available when using this virtualenv.
4. Install the development/test dependencies:
λ pip install .[test] .[analysis]
5. Run the tests:
λ tox
6. Run the benchmarks:
λ py.test -c pytest-benchmark.ini
7. Auto-format code, sort imports and auto-fix linting errors:
λ python setup.py fix
==> django-postgres-extra-2.0.9/docs/.gitignore <==
build/
==> django-postgres-extra-2.0.9/docs/Makefile <==
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = source
BUILDDIR = build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
==> django-postgres-extra-2.0.9/docs/make.bat <==
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=source
set BUILDDIR=build
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
:end
popd
==> django-postgres-extra-2.0.9/docs/source/annotations.rst <==
.. include:: ./snippets/postgres_doc_links.rst
.. include:: ./snippets/manager_model_warning.rst
.. _annotations_page:
Annotations
===========
Renaming annotations
--------------------
Django does not allow you to create an annotation that conflicts with a field on the model. :meth:`psqlextra.query.PostgresQuerySet.rename_annotations` makes it possible to do just that.
.. code-block:: python
    from django.db import models
    from django.db.models.functions import Upper

    from psqlextra.models import PostgresModel

    class MyModel(PostgresModel):
        name = models.TextField()

    MyModel.objects.annotate(name=Upper('name'))

    # OR

    MyModel.objects.annotate(name_upper=Upper('name')).rename_annotations(name='name_upper')
==> django-postgres-extra-2.0.9/docs/source/api_reference.rst <==
API Reference
-------------
.. automodule:: psqlextra.manager
.. autoclass:: PostgresManager
:members:
.. automodule:: psqlextra.query
.. autoclass:: PostgresQuerySet
:members:
:exclude-members: annotate, rename_annotations
.. automodule:: psqlextra.models
:members:
.. automodule:: psqlextra.fields
.. autoclass:: HStoreField
:members:
:exclude-members: deconstruct, get_prep_value
.. automethod:: __init__
.. automodule:: psqlextra.expressions
.. autoclass:: HStoreRef
.. autoclass:: DateTimeEpoch
.. autoclass:: ExcludedCol
.. automodule:: psqlextra.indexes
.. autoclass:: UniqueIndex
.. autoclass:: ConditionalUniqueIndex
.. autoclass:: CaseInsensitiveUniqueIndex
.. automodule:: psqlextra.locking
:members:
.. automodule:: psqlextra.schema
:members:
.. automodule:: psqlextra.partitioning
:members:
.. automodule:: psqlextra.backend.migrations.operations
:members:
.. automodule:: psqlextra.types
:members:
:undoc-members:
.. automodule:: psqlextra.util
:members:
==> django-postgres-extra-2.0.9/docs/source/conf.py <==
import os
import sys
import sphinx_rtd_theme
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
sys.path.insert(0, os.path.abspath("../.."))
import django
django.setup()
project = "django-postgres-extra"
copyright = "2019-2021, Sector Labs"
author = "Sector Labs"
extensions = [
"sphinx_rtd_theme",
"sphinx.ext.intersphinx",
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
]
templates_path = ["_templates"]
exclude_patterns = []
html_theme = "sphinx_rtd_theme"
intersphinx_mapping = {
"django": ("https://docs.djangoproject.com/en/stable/", "https://docs.djangoproject.com/en/stable/_objects/"),
}
==> django-postgres-extra-2.0.9/docs/source/conflict_handling.rst <==
.. include:: ./snippets/postgres_doc_links.rst
.. include:: ./snippets/manager_model_warning.rst
.. _conflict_handling_page:
Conflict handling
=================
The :class:`~psqlextra.manager.PostgresManager` comes with full support for PostgreSQL's `ON CONFLICT`_ clause.
This is an extremely useful feature for doing concurrency safe inserts. Often, when you want to insert a row, you want to overwrite it if it already exists, or simply leave the existing data there. This would require a ``SELECT`` first and then possibly an ``INSERT``. Between those two queries, another process might make a change to the row.
The alternative of trying to insert, ignoring the error and then doing an ``UPDATE`` is also not good. That would result in a lot of write overhead (due to logging).
.. code-block:: python
from django.db import models
from psqlextra.models import PostgresModel
from psqlextra.query import ConflictAction
class MyModel(PostgresModel):
myfield = models.CharField(max_length=255, unique=True)
# insert or update if already exists, then fetch, all in a single query
obj2 = (
MyModel.objects
.on_conflict(['myfield'], ConflictAction.UPDATE)
.insert_and_get(myfield='beer')
)
# insert, or do nothing if it already exists, then fetch
obj1 = (
MyModel.objects
.on_conflict(['myfield'], ConflictAction.NOTHING)
.insert_and_get(myfield='beer')
)
# insert or update if already exists, then fetch only the primary key
id = (
MyModel.objects
.on_conflict(['myfield'], ConflictAction.UPDATE)
.insert(myfield='beer')
)
.. warning::
The standard Django methods for inserting/updating are not affected by :meth:`~psqlextra.query.PostgresQuerySet.on_conflict`. It was a conscious decision to not override or change their behavior. The following completely ignores the :meth:`~psqlextra.query.PostgresQuerySet.on_conflict`:
.. code-block:: python
obj = (
MyModel.objects
.on_conflict(['first_name', 'last_name'], ConflictAction.UPDATE)
.create(first_name='Henk', last_name='Jansen')
)
The same applies to methods such as :meth:`~django:django.db.models.query.QuerySet.update`, :meth:`~django:django.db.models.query.QuerySet.get_or_create` and :meth:`~django:django.db.models.query.QuerySet.update_or_create`.
Constraint specification
------------------------
The :meth:`~psqlextra.query.PostgresQuerySet.on_conflict` function's first parameter denotes the name of the column(s) in which the conflict might occur. Although you can specify multiple columns, these columns must together be covered by a single constraint, for example a :attr:`~django:django.db.models.Options.unique_together` constraint.
Multiple columns
****************
Specifying multiple columns is necessary in case of a constraint that spans multiple columns, such as when using Django's :attr:`~django:django.db.models.Options.unique_together`.
.. code-block:: python
from django.db import models
from psqlextra.models import PostgresModel
    class MyModel(PostgresModel):
class Meta:
unique_together = ('first_name', 'last_name',)
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
obj = (
MyModel.objects
.on_conflict(['first_name', 'last_name'], ConflictAction.UPDATE)
.insert_and_get(first_name='Henk', last_name='Jansen')
)
Specific constraint
*******************
Alternatively, instead of specifying the columns the constraint you're targeting applies to, you can also specify the exact constraint to use:
.. code-block:: python
from django.db import models
from psqlextra.models import PostgresModel
    class MyModel(PostgresModel):
class Meta:
constraints = [
models.UniqueConstraint(
name="myconstraint",
fields=["first_name", "last_name"]
),
]
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
    constraint = next(
        (
            constraint
            for constraint in MyModel._meta.constraints
            if constraint.name == "myconstraint"
        ),
        None,
    )
obj = (
MyModel.objects
.on_conflict(constraint, ConflictAction.UPDATE)
.insert_and_get(first_name='Henk', last_name='Jansen')
)
HStore keys
***********
Catching conflicts in columns with a ``UNIQUE`` constraint on a :class:`~psqlextra.fields.HStoreField` key is also supported:
.. code-block:: python
from django.db import models
from psqlextra.models import PostgresModel
from psqlextra.fields import HStoreField
    class MyModel(PostgresModel):
name = HStoreField(uniqueness=['en'])
id = (
MyModel.objects
.on_conflict([('name', 'en')], ConflictAction.NOTHING)
.insert(name={'en': 'Swen'})
)
This also applies to "unique together" constraints in a :class:`~psqlextra.fields.HStoreField` field:
.. code-block:: python
    class MyModel(PostgresModel):
name = HStoreField(uniqueness=[('en', 'ar')])
id = (
MyModel.objects
.on_conflict([('name', 'en'), ('name', 'ar')], ConflictAction.NOTHING)
.insert(name={'en': 'Swen'})
)
insert vs insert_and_get
------------------------
After specifying :meth:`~psqlextra.query.PostgresQuerySet.on_conflict` you can use either :meth:`~psqlextra.query.PostgresQuerySet.insert` or :meth:`~psqlextra.query.PostgresQuerySet.insert_and_get` to perform the insert.
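``insert`` returns only the primary key of the affected row, while ``insert_and_get`` returns a full model instance. A minimal sketch of the difference, reusing the ``MyModel`` with a unique ``myfield`` from the earlier examples:

.. code-block:: python

    # returns the primary key of the inserted/updated row
    pk = (
        MyModel.objects
        .on_conflict(['myfield'], ConflictAction.UPDATE)
        .insert(myfield='beer')
    )

    # returns a full model instance instead
    obj = (
        MyModel.objects
        .on_conflict(['myfield'], ConflictAction.UPDATE)
        .insert_and_get(myfield='beer')
    )
    print(obj.myfield)  # 'beer'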
Conflict actions
----------------
There are currently two actions that can be taken when encountering a conflict. The second parameter of :meth:`~psqlextra.query.PostgresQuerySet.on_conflict` allows you to specify what should happen.
ConflictAction.UPDATE
*********************
:attr:`psqlextra.types.ConflictAction.UPDATE`
* If the row does **not exist**, insert a new one.
* If the row **exists**, update it.
This is also known as an "upsert".
Condition
"""""""""
Optionally, a condition can be added. PostgreSQL will then only apply the update if the condition holds true. A condition is specified as a custom expression.
A row level lock is acquired before evaluating the condition and proceeding with the update.
.. note::
The update condition is translated as a condition for `ON CONFLICT`_. The PostgreSQL documentation states the following:
An expression that returns a value of type boolean. Only rows for which this expression returns true will be updated, although all rows will be locked when the ON CONFLICT DO UPDATE action is taken. Note that condition is evaluated last, after a conflict has been identified as a candidate to update.
.. code-block:: python
from psqlextra.expressions import CombinedExpression, ExcludedCol
pk = (
MyModel
.objects
.on_conflict(
['name'],
ConflictAction.UPDATE,
update_condition=CombinedExpression(
MyModel._meta.get_field('priority').get_col(MyModel._meta.db_table),
'>',
ExcludedCol('priority'),
)
)
.insert(
name='henk',
priority=1,
)
)
if pk:
print('update applied or inserted')
else:
print('condition was false-ish and no changes were made')
When writing expressions, refer to the data you're trying to upsert with the :class:`psqlextra.expressions.ExcludedCol` expression.
Alternatively, with Django 3.1 or newer, :class:`~django:django.db.models.Q` objects can be used instead:
.. code-block:: python
from django.db.models import Q
from psqlextra.expressions import ExcludedCol
Q(name=ExcludedCol('name'))
Q(name__isnull=True)
Q(name__gt=ExcludedCol('priority'))
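For instance, the expression-based condition from the example above can be written as a ``Q`` object (a sketch; ``priority__gt`` translates to ``priority > EXCLUDED.priority``):

.. code-block:: python

    from django.db.models import Q

    from psqlextra.expressions import ExcludedCol

    pk = (
        MyModel
        .objects
        .on_conflict(
            ['name'],
            ConflictAction.UPDATE,
            # only update when the existing priority is greater
            # than the incoming one
            update_condition=Q(priority__gt=ExcludedCol('priority')),
        )
        .insert(
            name='henk',
            priority=1,
        )
    )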
Update values
"""""""""""""
Optionally, the fields to update can be overridden. The default is to update the same fields that were specified in the rows to insert.
Refer to the insert values using the :class:`psqlextra.expressions.ExcludedCol` expression which translates to PostgreSQL's ``EXCLUDED.`` expression. All expressions and features that can be used with Django's :meth:`~django:django.db.models.query.QuerySet.update` can be used here.
.. warning::
Specifying an empty ``update_values`` (``{}``) will transform the query into :attr:`~psqlextra.types.ConflictAction.NOTHING`. Only ``None`` triggers the default behaviour of updating all the fields that were specified.
.. code-block:: python
from django.db.models import F
from psqlextra.expressions import ExcludedCol
(
MyModel
.objects
.on_conflict(
['name'],
ConflictAction.UPDATE,
update_values=dict(
name=ExcludedCol('name'),
count=F('count') + 1,
),
)
.insert(
name='henk',
count=0,
)
)
ConflictAction.NOTHING
**********************
:attr:`psqlextra.types.ConflictAction.NOTHING`
* If the row does **not exist**, insert a new one.
* If the row **exists**, do nothing.
This is preferable when the data you're about to insert is the same as the one that already exists. This is more performant because it avoids a write in case the row already exists.
.. warning::
When using :attr:`~psqlextra.types.ConflictAction.NOTHING`, PostgreSQL only returns the row(s) that were created. Conflicting rows are not returned. See example below:
.. code-block:: python
    # obj1 is _not_ None
    obj1 = MyModel.objects.on_conflict(['name'], ConflictAction.NOTHING).insert(name="me")

    # obj2 is None! The object already exists
    obj2 = MyModel.objects.on_conflict(['name'], ConflictAction.NOTHING).insert(name="me")
This applies to all methods: :meth:`~psqlextra.query.PostgresQuerySet.insert`, :meth:`~psqlextra.query.PostgresQuerySet.insert_and_get`, :meth:`~psqlextra.query.PostgresQuerySet.bulk_insert`
Bulk
----
:meth:`~psqlextra.query.PostgresQuerySet.bulk_insert` allows you to use conflict resolution for bulk inserts:
.. code-block:: python
from django.db import models
from psqlextra.models import PostgresModel
class MyModel(PostgresModel):
name = models.CharField(max_length=255, unique=True)
obj = (
MyModel.objects
.on_conflict(['name'], ConflictAction.UPDATE)
.bulk_insert([
dict(name='swen'),
dict(name='henk'),
dict(name='adela')
])
)
:meth:`~psqlextra.query.PostgresQuerySet.bulk_insert` uses a single query to insert all specified rows at once. It returns a ``list`` of ``dict`` with each ``dict`` being a merge of the ``dict`` passed in along with any index returned from Postgres.
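As a sketch, assuming an auto-generated integer primary key named ``id``, the example above would return something like:

.. code-block:: python

    [
        {'id': 1, 'name': 'swen'},
        {'id': 2, 'name': 'henk'},
        {'id': 3, 'name': 'adela'},
    ]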
.. note::
In order to stick to the "everything in one query" principle, various more advanced usages of :meth:`~psqlextra.query.PostgresQuerySet.bulk_insert` are impossible. It is not possible to have different rows specify different sets of columns. The following example does **not work**:
.. code-block:: python
from django.db import models
from psqlextra.models import PostgresModel
class MyModel(PostgresModel):
first_name = models.CharField(max_length=255, unique=True)
last_name = models.CharField(max_length=255, default='kooij')
    obj = (
        MyModel.objects
        .on_conflict(['first_name'], ConflictAction.UPDATE)
        .bulk_insert([
            dict(first_name='swen'),
            dict(first_name='henk', last_name='poepjes'),  # invalid, different set of columns
            dict(first_name='adela')
        ])
    )
An exception is thrown if this behavior is detected.
Shorthands
----------
The :meth:`~psqlextra.query.PostgresQuerySet.on_conflict`, :meth:`~psqlextra.query.PostgresQuerySet.insert` and :meth:`~psqlextra.query.PostgresQuerySet.insert_and_get` methods were only added in v1.6. Before that, only :attr:`~psqlextra.types.ConflictAction.UPDATE` was supported, in the following form:
.. code-block:: python
from django.db import models
from psqlextra.models import PostgresModel
class MyModel(PostgresModel):
myfield = models.CharField(max_length=255, unique=True)
    obj = (
        MyModel.objects
        .upsert_and_get(
            conflict_target=['myfield'],
            fields=dict(myfield='beer')
        )
    )

    id = (
        MyModel.objects
        .upsert(
            conflict_target=['myfield'],
            fields=dict(myfield='beer')
        )
    )

    (
        MyModel.objects
        .bulk_upsert(
            conflict_target=['myfield'],
            rows=[
                dict(myfield='beer'),
                dict(myfield='wine')
            ]
        )
    )
These shorthands still exist and **are not** deprecated. They behave exactly the same as :attr:`~psqlextra.types.ConflictAction.UPDATE` and are there for convenience. It is up to you to decide what to use.
==> django-postgres-extra-2.0.9/docs/source/deletion.rst <==
.. include:: ./snippets/postgres_doc_links.rst
Deletion
========
.. _truncate_page:
Truncate
--------
In standard Django, deleting all records in a table is quite slow and cumbersome. It requires retrieving all rows from the database and deleting them one by one (unless you use bulk delete). Postgres has a standard statement for emptying out a table: `TRUNCATE TABLE`_.
Using the :meth:`~psqlextra.manager.PostgresManager.truncate` method on the :class:`~psqlextra.manager.PostgresManager` allows you to delete all records in a table in the blink of an eye:
.. code-block:: python
from django.db import models
from psqlextra.models import PostgresModel
class MyModel(PostgresModel):
myfield = models.CharField(max_length=255, unique=True)
MyModel.objects.create(myfield="1")
MyModel.objects.truncate() # table is empty after this
print(MyModel.objects.count()) # zero records left
Cascade
*******
By default, Postgres will raise an error if any other table is referencing one of the rows you're trying to delete. One can tell Postgres to cascade the truncate operation to all related rows.
.. code-block:: python
from django.db import models
from psqlextra.models import PostgresModel
class MyModel1(PostgresModel):
myfield = models.CharField(max_length=255, unique=True)
class MyModel2(PostgresModel):
        mymodel1 = models.ForeignKey(MyModel1, on_delete=models.CASCADE)
obj1 = MyModel1.objects.create(myfield="1")
MyModel2.objects.create(mymodel1=obj1)
    MyModel1.objects.truncate(cascade=True)
print(MyModel1.objects.count()) # zero records left
print(MyModel2.objects.count()) # zero records left
==> django-postgres-extra-2.0.9/docs/source/expressions.rst <==
.. include:: ./snippets/postgres_doc_links.rst
.. include:: ./snippets/manager_model_warning.rst
.. _expressions_page:
Expressions
===========
Selecting an individual HStore key
----------------------------------
Use the :class:`~psqlextra.expressions.HStoreRef` expression to select an individual `hstore`_ key:
.. code-block:: python
from psqlextra.models import PostgresModel
from psqlextra.fields import HStoreField
from psqlextra.expressions import HStoreRef
class MyModel(PostgresModel):
bla = HStoreField()
MyModel.objects.create(bla={'a': '1', 'b': '2'})
# '1'
a = (
MyModel.objects
.annotate(a=HStoreRef('bla', 'a'))
.values_list('a', flat=True)
.first()
)
Selecting a datetime as a UNIX epoch timestamp
----------------------------------------------
Use the :class:`~psqlextra.expressions.DateTimeEpoch` expression to select the value of a :class:`~django:django.db.models.DateTimeField` as a UNIX epoch timestamp.
.. code-block:: python
    from django.db import models

    from psqlextra.models import PostgresModel
    from psqlextra.expressions import DateTimeEpoch

    class MyModel(PostgresModel):
        datetime = models.DateTimeField(auto_now_add=True)
MyModel.objects.create()
timestamp = (
MyModel.objects
.annotate(timestamp=DateTimeEpoch('datetime'))
.values_list('timestamp', flat=True)
.first()
)
Multi-field coalesce
--------------------
Use the :class:`~psqlextra.expressions.IsNotNone` expression to perform something similar to a `coalesce`, but with multiple fields. The first non-null value encountered is selected.
.. code-block:: python
    from django.db import models

    from psqlextra.models import PostgresModel
    from psqlextra.expressions import IsNotNone
class MyModel(PostgresModel):
name_1 = models.TextField(null=True)
name_2 = models.TextField(null=True)
name_3 = models.TextField(null=True)
MyModel.objects.create(name_3='test')
# 'test'
name = (
MyModel.objects
.annotate(name=IsNotNone('name_1', 'name_2', 'name_3', default='buh'))
.values_list('name', flat=True)
.first()
)
# 'buh'
name = (
MyModel.objects
.annotate(name=IsNotNone('name_1', 'name_2', default='buh'))
.values_list('name', flat=True)
.first()
)
Excluded column
---------------
Use the :class:`~psqlextra.expressions.ExcludedCol` expression when performing an upsert using `ON CONFLICT`_ to refer to a column/field in the data that is about to be upserted.
PostgreSQL makes the data to be upserted available in a special table named ``EXCLUDED``. This expression is used to refer to a column in that table.
.. code-block:: python
from django.db.models import Q
from psqlextra.expressions import ExcludedCol
(
MyModel
.objects
.on_conflict(
['name'],
ConflictAction.UPDATE,
# translates to `priority > EXCLUDED.priority`
update_condition=Q(priority__gt=ExcludedCol('priority')),
)
.insert(
name='henk',
priority=1,
)
)
==> django-postgres-extra-2.0.9/docs/source/hstore.rst <==
.. include:: ./snippets/postgres_doc_links.rst
.. include:: ./snippets/manager_model_warning.rst
.. _hstore_page:
HStore
======
:class:`psqlextra.fields.HStoreField` is based on Django's :class:`~django:django.contrib.postgres.fields.HStoreField` and therefore supports everything Django does natively and more.
Constraints
-----------
Unique
******
The ``uniqueness`` constraint can be added on one or more `hstore`_ keys, similar to how a ``UNIQUE`` constraint can be added to a column. Setting this option causes unique indexes to be created on the specified keys.
You can specify a ``list`` of strings to specify the keys that must be marked as unique:
.. code-block:: python
from psqlextra.fields import HStoreField
from psqlextra.models import PostgresModel
class MyModel(PostgresModel):
        myfield = HStoreField(uniqueness=['key1'])
MyModel.objects.create(myfield={'key1': 'value1'})
MyModel.objects.create(myfield={'key1': 'value1'})
The second :meth:`~django:django.db.models.query.QuerySet.create` call will fail with a :class:`~django:django.db.IntegrityError` because there's already a row with ``key1=value1``.
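If you need to handle that failure, catch the error as usual (a short sketch, assuming the model above):

.. code-block:: python

    from django.db import IntegrityError

    try:
        MyModel.objects.create(myfield={'key1': 'value1'})
    except IntegrityError:
        pass  # a row with key1=value1 already exists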
Uniqueness can also be enforced "together", similar to Django's :attr:`~django:django.db.models.Options.unique_together` by specifying a tuple of fields rather than a single string:
.. code-block:: python
myfield = HStoreField(uniqueness=[('key1', 'key2'), 'key3'])
In the example above, ``key1`` and ``key2`` must be unique **together**, and ``key3`` must be unique on its own. By default, none of the keys are marked as "unique".
Required
********
The ``required`` option can be added to ensure that the specified `hstore`_ keys are set for every row. This is similar to a ``NOT NULL`` constraint on a column. You can specify a list of `hstore`_ keys that are required:
.. code-block:: python
from psqlextra.fields import HStoreField
from psqlextra.models import PostgresModel
class MyModel(PostgresModel):
myfield = HStoreField(required=['key1'])
    MyModel.objects.create(myfield={'key1': None})
MyModel.objects.create(myfield={'key2': 'value1'})
Both calls to :meth:`~django:django.db.models.query.QuerySet.create` would fail in the example above since they do not provide a non-null value for ``key1``. By default, none of the keys are required.
==> django-postgres-extra-2.0.9/docs/source/index.rst <==
.. include:: ./snippets/postgres_doc_links.rst
Welcome
=======
``django-postgres-extra`` aims to make all of PostgreSQL's awesome features available through the Django ORM. We do this by taking care of all the hassle. As opposed to the many small packages that are available to try to bring a single feature to Django with minimal effort, ``django-postgres-extra`` goes the extra mile with well tested implementations, seamless migrations and much more.
By seamless, we mean that any features we add will work truly seamlessly. You should not have to manually modify your migrations to work with fields and objects provided by this package.
Features
--------
Explore the documentation to learn about all features:
* :ref:`Conflict handling <conflict_handling_page>`
Adds support for PostgreSQL's ``ON CONFLICT`` syntax for inserts. Support for ``DO UPDATE`` and ``DO NOTHING``. In other words: single-statement, atomic, concurrency safe upserts.
* :ref:`HStore <hstore_page>`
Built on top of Django's built-in support for `hstore`_ fields. Adds support for indices on keys and unique/required constraints. All of these features integrate well with Django's migrations system.
* :ref:`Partial unique index <conditional_unique_index_page>`
Partial (unique) index that only applies when a certain condition is true.
* :ref:`Case insensitive index <case_insensitive_unique_index_page>`
Case insensitive index, allows searching a column and ignoring the casing.
* :ref:`Table partitioning <table_partitioning_page>`
Adds support for PostgreSQL 11.x declarative table partitioning.
* :ref:`Truncating tables <truncate_page>`
Support for ``TRUNCATE TABLE`` statements (including cascading).
* :ref:`Locking models & tables <locking_page>`
Support for explicit table-level locks.
* :ref:`Creating/dropping schemas <schemas_page>`
Support for managing Postgres schemas.
.. toctree::
:maxdepth: 2
:caption: Overview
installation
managers_models
hstore
indexes
conflict_handling
deletion
table_partitioning
expressions
annotations
locking
schemas
settings
api_reference
major_releases
==> django-postgres-extra-2.0.9/docs/source/indexes.rst <==
.. _indexes_page:
Indexes
=======
.. _unique_index_page:
Unique Index
-----------------------------
The :class:`~psqlextra.indexes.UniqueIndex` lets you create a unique index. Normally Django only allows you to create unique indexes by specifying ``unique=True`` on the model field.
Although it can be used on any Django model, it is most useful on views and materialized views where ``unique=True`` does not work.
.. code-block:: python
from django.db import models
from psqlextra.indexes import UniqueIndex
class Model(models.Model):
class Meta:
indexes = [
UniqueIndex(fields=['name']),
]
name = models.CharField(max_length=255)
Model.objects.create(name='henk')
Model.objects.create(name='henk') # raises IntegrityError
.. _conditional_unique_index_page:
Conditional Unique Index
------------------------
The :class:`~psqlextra.indexes.ConditionalUniqueIndex` lets you create partial unique indexes in case you ever need :attr:`~django:django.db.models.Options.unique_together` constraints
on nullable columns.
.. warning::
In Django 3.1 or newer, you might want to use :class:`~django:django.db.models.UniqueConstraint` with its ``condition`` argument instead.
Before:
.. code-block:: python
from django.db import models
class Model(models.Model):
class Meta:
unique_together = ['a', 'b']
a = models.ForeignKey('some_model', null=True)
b = models.ForeignKey('some_other_model')
# Works like a charm!
b = B()
Model.objects.create(a=None, b=b)
Model.objects.create(a=None, b=b)
After:
.. code-block:: python
from django.db import models
from psqlextra.indexes import ConditionalUniqueIndex
class Model(models.Model):
class Meta:
indexes = [
ConditionalUniqueIndex(fields=['a', 'b'], condition='"a" IS NOT NULL'),
ConditionalUniqueIndex(fields=['b'], condition='"a" IS NULL')
]
a = models.ForeignKey('some_model', null=True)
b = models.ForeignKey('some_other_model')
# Integrity Error!
b = B()
Model.objects.create(a=None, b=b)
Model.objects.create(a=None, b=b)
.. _case_insensitive_unique_index_page:
Case Insensitive Unique Index
-----------------------------
The :class:`~psqlextra.indexes.CaseInsensitiveUniqueIndex` lets you create an index that ignores the casing for the specified field(s).
This makes the field(s) behave more like a text field in MySQL.
.. code-block:: python
from django.db import models
from psqlextra.indexes import CaseInsensitiveUniqueIndex
class Model(models.Model):
class Meta:
indexes = [
CaseInsensitiveUniqueIndex(fields=['name']),
]
name = models.CharField(max_length=255)
Model.objects.create(name='henk')
Model.objects.create(name='Henk') # raises IntegrityError
==> django-postgres-extra-2.0.9/docs/source/installation.rst <==
.. _installation:
Installation
============
1. Install the package from PyPi:
.. code-block:: bash
$ pip install django-postgres-extra
2. Add ``django.contrib.postgres`` and ``psqlextra`` to your ``INSTALLED_APPS``:
.. code-block:: python
INSTALLED_APPS = [
...
"django.contrib.postgres",
"psqlextra",
]
3. Set the database engine to ``psqlextra.backend``:
.. code-block:: python
DATABASES = {
"default": {
...
"ENGINE": "psqlextra.backend",
},
}
.. note::
Already using a custom back-end? Set :ref:`POSTGRES_EXTRA_DB_BACKEND_BASE <POSTGRES_EXTRA_DB_BACKEND_BASE>` to your custom back-end.
==> django-postgres-extra-2.0.9/docs/source/locking.rst <==
.. include:: ./snippets/postgres_doc_links.rst
.. _locking_page:
Locking
=======
`Explicit table-level locks`_ are supported through the :meth:`psqlextra.locking.postgres_lock_model` and :meth:`psqlextra.locking.postgres_lock_table` methods. All table-level lock modes are supported.
Locks are always bound to the current transaction and are released when the transaction is committed or rolled back. There is no support (in PostgreSQL) for explicitly releasing a lock.
.. warning::
Locks are only released when the *outer* transaction commits or when a nested transaction is rolled back. You can ensure that the transaction you created is the outermost one by passing the ``durable=True`` argument to ``transaction.atomic``.
.. note::
Use `django-pglocks <https://pypi.org/project/django-pglocks/>`_ if you need an advisory lock.
Locking a model
---------------
Use :class:`psqlextra.locking.PostgresTableLockMode` to indicate the type of lock to acquire.
.. code-block:: python
from django.db import transaction
    from psqlextra.locking import PostgresTableLockMode, postgres_lock_model
with transaction.atomic(durable=True):
postgres_lock_model(MyModel, PostgresTableLockMode.EXCLUSIVE)
    # locks are released here, when the transaction is committed
Locking a table
---------------
Use :meth:`psqlextra.locking.postgres_lock_table` to lock arbitrary tables in arbitrary schemas.
.. code-block:: python
from django.db import transaction
from psqlextra.locking import PostgresTableLockMode, postgres_lock_table
with transaction.atomic(durable=True):
postgres_lock_table("mytable", PostgresTableLockMode.EXCLUSIVE)
postgres_lock_table(
"tableinotherschema",
PostgresTableLockMode.EXCLUSIVE,
schema_name="myschema"
)
    # locks are released here, when the transaction is committed
==> django-postgres-extra-2.0.9/docs/source/major_releases.rst <==
Major releases
==============
1.x
---
* First release.
2.x
---
New features
************
* Support for PostgreSQL 11.x declarative table partitioning.
* Support for ``TRUNCATE TABLE``
* Case insensitive index
Other changes
*************
* Uses Django 2.x's mechanism for overriding queries and compilers. ``django-postgres-extra`` is extensible in the same way that Django is extensible now.
* Removes hacks because Django 2.x is more extensible.
Breaking changes
****************
* Removes support for ``psqlextra.signals``. Switch to standard Django signals.
* Inserts with ``ConflictAction.NOTHING`` only returns new rows. Conflicting rows are not returned.
* Drop support for Python 3.5.
* Drop support for Django 1.x.
* Removes ``psqlextra.expressions.Min``, ``psqlextra.expressions.Max``, these are natively supported by Django.
FAQ
***
1. Why was ``psqlextra.signals`` removed?
In order to make ``psqlextra.signals.update`` work, ``django-postgres-extra`` hooked into Django's :meth:`django:django.db.models.query.QuerySet.update` method to add a ``RETURNING id`` clause to the statement. This slowed down all update queries, even if no signal handler was registered. To fix the performance impact, a breaking change was needed.
The feature had little to do with PostgreSQL itself. This package focuses on making PostgreSQL specific features available in Django.
The fact that signals were a rarely used feature that slowed down unrelated queries was enough motivation to permanently remove them.
2. Why are inserts with ``ConflictAction.NOTHING`` not returning conflicting rows anymore?
This is standard PostgreSQL behavior. ``django-postgres-extra`` v1.x tried to work around this by doing a void ``ON CONFLICT UPDATE``. This trick only worked when inserting one row.
The work-around had a significant performance impact and was confusing when performing bulk inserts. In that case, only one row would be returned.
To avoid further confusion, ``ConflictAction.NOTHING`` now follows standard PostgreSQL behavior.
3. Why was support dropped for Python 3.5?
Python 3.6 added support for dataclasses.
4. Why was support dropped for Django 1.x?
Mainstream support for Django 1.11, the last 1.x release, ended in December 2017. Supporting both Django 1.x and Django 2.x was a major pain point. Dropping support for 1.x simplifies ``django-postgres-extra`` and speeds up the development of new features.
==> django-postgres-extra-2.0.9/docs/source/managers_models.rst <==
.. _managers_models:
Managers & Models
=================
:class:`~psqlextra.manager.PostgresManager` exposes a lot of functionality. Your model must use this manager in order to use most of this package's functionality.
There are four ways to do this:
* Inherit your model from :class:`psqlextra.models.PostgresModel`:
.. code-block:: python
from psqlextra.models import PostgresModel
class MyModel(PostgresModel):
myfield = models.CharField(max_length=255)
* Override default manager with :class:`psqlextra.manager.PostgresManager`:
.. code-block:: python
from django.db import models
from psqlextra.manager import PostgresManager
class MyModel(models.Model):
# override default django manager
objects = PostgresManager()
myfield = models.CharField(max_length=255)
* Provide :class:`psqlextra.manager.PostgresManager` as a custom manager:
.. code-block:: python
from django.db import models
from psqlextra.manager import PostgresManager
class MyModel(models.Model):
    # custom manager name
beer = PostgresManager()
myfield = models.CharField(max_length=255)
# use like this:
MyModel.beer.upsert(..)
# not like this:
MyModel.objects.upsert(..) # error!
* Use the :meth:`psqlextra.util.postgres_manager` on the fly:
This allows the manager to be used **anywhere** on **any** model, but only within the context. This is especially useful if you want to do upserts into Django's :class:`~django:django.db.models.ManyToManyField` generated :attr:`~django:django.db.models.ManyToManyField.through` table:
.. code-block:: python
from django.db import models
from psqlextra.util import postgres_manager
class MyModel(models.Model):
myself = models.ManyToManyField('self')
# within the context, you can access psqlextra features
with postgres_manager(MyModel.myself.through) as manager:
manager.upsert(...)
==> django-postgres-extra-2.0.9/docs/source/schemas.rst <==
.. include:: ./snippets/postgres_doc_links.rst
.. _schemas_page:
Schema
======
The :class:`~psqlextra.schema.PostgresSchema` class provides basic schema management functionality.
Django does **NOT** support custom schemas. This module does not attempt to solve that problem.
This module merely allows you to create/drop schemas and to execute raw SQL in a schema. It is not an attempt at bringing multi-schema support to Django.
Reference an existing schema
----------------------------
.. code-block:: python
    from psqlextra.schema import PostgresSchema
schema = PostgresSchema("myschema")
with schema.connection.cursor() as cursor:
cursor.execute("SELECT * FROM tablethatexistsinmyschema")
Checking if a schema exists
---------------------------
.. code-block:: python
    from psqlextra.schema import PostgresSchema
schema = PostgresSchema("myschema")
if PostgresSchema.exists("myschema"):
print("exists!")
else:
        print("does not exist!")
Creating a new schema
---------------------
With a custom name
******************
.. code-block:: python
    from psqlextra.schema import PostgresSchema
# will raise an error if the schema already exists
schema = PostgresSchema.create("myschema")
Re-create if necessary with a custom name
*****************************************
.. warning::
If the schema already exists and it is non-empty or something is referencing it, it will **NOT** be dropped. Specify ``cascade=True`` to drop all of the schema's contents and **anything referencing it**.
.. code-block:: python
    from psqlextra.schema import PostgresSchema
# will drop existing schema named `myschema` if it
# exists and re-create it
schema = PostgresSchema.drop_and_create("myschema")
# will drop the schema and cascade it to its contents
# and anything referencing the schema
schema = PostgresSchema.drop_and_create("otherschema", cascade=True)
With a time-based name
**********************
.. warning::
The time-based suffix is precise up to the second. If two threads or processes both try to create a time-based schema name with the same suffix in the same second, they will have conflicts.
.. code-block:: python
    from psqlextra.schema import PostgresSchema
    # schema name will be "myprefix_<time-based suffix>"
schema = PostgresSchema.create_time_based("myprefix")
print(schema.name)
With a random name
******************
An 8-character suffix is appended. Entropy is dependent on your system. See :func:`os.urandom` for more information.
.. code-block:: python
    from psqlextra.schema import PostgresSchema
# schema name will be "myprefix_<8 random characters>"
schema = PostgresSchema.create_random("myprefix")
print(schema.name)
Temporary schema with random name
*********************************
Use the :meth:`~psqlextra.schema.postgres_temporary_schema` context manager to create a schema with a random name. The schema will only exist within the context manager.
By default, the schema is not dropped if an exception occurs in the context manager. This prevents unexpected data loss. Specify ``drop_on_throw=True`` to drop the schema if an exception occurs.
Without an outer transaction, the temporary schema might not be dropped when your program exits unexpectedly (for example, if it is killed with SIGKILL). Wrap the creation of the schema in a transaction to make sure the schema is cleaned up when an error occurs or your program exits suddenly.
.. warning::
By default, the drop will fail if the schema is not empty or there is anything referencing the schema. Specify ``cascade=True`` to drop all of the schema's contents and **anything referencing it**.
.. code-block:: python
    from psqlextra.schema import postgres_temporary_schema
with postgres_temporary_schema("myprefix") as schema:
pass
with postgres_temporary_schema("otherprefix", drop_on_throw=True) as schema:
raise ValueError("drop it like it's hot")
with postgres_temporary_schema("greatprefix", cascade=True) as schema:
with schema.connection.cursor() as cursor:
            cursor.execute(f"CREATE TABLE {schema.name}.mytable AS SELECT 'hello'")
with postgres_temporary_schema("amazingprefix", drop_on_throw=True, cascade=True) as schema:
with schema.connection.cursor() as cursor:
            cursor.execute(f"CREATE TABLE {schema.name}.mytable AS SELECT 'hello'")
raise ValueError("oops")
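A minimal sketch of the advice above: wrapping the temporary schema in an outer transaction ensures it is cleaned up even if the program dies halfway through.

.. code-block:: python

    from django.db import transaction

    from psqlextra.schema import postgres_temporary_schema

    with transaction.atomic(durable=True):
        with postgres_temporary_schema("myprefix", drop_on_throw=True, cascade=True) as schema:
            with schema.connection.cursor() as cursor:
                cursor.execute(f"CREATE TABLE {schema.name}.mytable AS SELECT 'hello'")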
Deleting a schema
-----------------
Any schema can be dropped, including ones not created by :class:`~psqlextra.schema.PostgresSchema`.
The ``public`` schema cannot be dropped. This is a Postgres built-in and it is almost always a mistake to drop it. A :class:`~django.core.exceptions.SuspiciousOperation` error will be raised if you attempt to drop the ``public`` schema.
.. warning::
By default, the drop will fail if the schema is not empty or there is anything referencing the schema. Specify ``cascade=True`` to drop all of the schema's contents and **anything referencing it**.
.. code-block:: python
    from psqlextra.schema import PostgresSchema

    PostgresSchema.drop("myprefix")
    PostgresSchema.drop("myprefix", cascade=True)
==> django-postgres-extra-2.0.9/docs/source/settings.rst <==
.. _settings:
Settings
========
.. _POSTGRES_EXTRA_DB_BACKEND_BASE:
* ``POSTGRES_EXTRA_DB_BACKEND_BASE``
``DATABASES[db_name]['ENGINE']`` must be set to ``"psqlextra.backend"``. If you're already using a custom back-end, set ``POSTGRES_EXTRA_DB_BACKEND_BASE`` to your custom back-end. This will instruct ``django-postgres-extra`` to wrap the back-end you specified.
A good example of where this might be needed is if you are using the PostGIS back-end: ``django.contrib.gis.db.backends.postgis``.
**Default value**: ``django.db.backends.postgresql``
.. warning::
The custom back-end you specify must derive from the standard ``django.db.backends.postgresql``.
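A sketch of the PostGIS case described above; the ``ENGINE`` stays ``psqlextra.backend`` and the base back-end is swapped via this setting:

.. code-block:: python

    # settings.py
    POSTGRES_EXTRA_DB_BACKEND_BASE = "django.contrib.gis.db.backends.postgis"

    DATABASES = {
        "default": {
            # ...
            "ENGINE": "psqlextra.backend",
        },
    }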
.. _POSTGRES_EXTRA_AUTO_EXTENSION_SET_UP:
* ``POSTGRES_EXTRA_AUTO_EXTENSION_SET_UP``
You can stop ``django-postgres-extra`` from automatically trying to enable the ``hstore`` extension on your database. Enabling extensions using ``CREATE EXTENSION`` requires superuser permissions. Disable this behaviour if you are not connecting to your database server using a superuser.
**Default value:** ``True``
.. note::
If set to ``False``, you must ensure that the ``hstore`` extension is enabled on your database manually. If not enabled, any ``hstore`` related functionality will not work.
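For example, when connecting as a non-superuser (a sketch; you then have to run ``CREATE EXTENSION IF NOT EXISTS hstore;`` on the database yourself):

.. code-block:: python

    # settings.py
    POSTGRES_EXTRA_AUTO_EXTENSION_SET_UP = False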
.. _POSTGRES_EXTRA_ANNOTATE_SQL:
* ``POSTGRES_EXTRA_ANNOTATE_SQL``
If set to ``True``, will append a comment to all SQL queries with the path and line number that the query was made from.
Format: ``/* <path>:<line number> */``
This can be useful when debugging queries found in PostgreSQL's ``pg_stat_activity`` or in its query log.
==> django-postgres-extra-2.0.9/docs/source/snippets/manager_model_warning.rst <==
.. warning::
In order for any of the features described below to work, you must use the :class:`~psqlextra.manager.PostgresManager` or inherit your models from :class:`~psqlextra.models.PostgresModel`. Read more about this in :ref:`managers_models`.
==> django-postgres-extra-2.0.9/docs/source/snippets/postgres_doc_links.rst <==
.. _ON CONFLICT: https://www.postgresql.org/docs/11/sql-insert.html#SQL-ON-CONFLICT
.. _TRUNCATE TABLE: https://www.postgresql.org/docs/9.1/sql-truncate.html
.. _hstore: https://www.postgresql.org/docs/11/hstore.html
.. _PostgreSQL Declarative Table Partitioning: https://www.postgresql.org/docs/current/ddl-partitioning.html#DDL-PARTITIONING-DECLARATIVE
.. _Explicit table-level locks: https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-TABLES
==> django-postgres-extra-2.0.9/docs/source/table_partitioning.rst <==
.. include:: ./snippets/postgres_doc_links.rst
.. warning::
Table partitioning is a relatively new and advanced PostgreSQL feature. It gives you plenty of ways to shoot yourself in the foot.
We HIGHLY RECOMMEND you only use this feature if you're already deeply familiar with table partitioning and aware of its advantages and disadvantages.
Do study the PostgreSQL documentation carefully.
.. _table_partitioning_page:
Table partitioning
==================
:class:`~psqlextra.models.PostgresPartitionedModel` adds support for `PostgreSQL Declarative Table Partitioning`_.
The following partitioning methods are available:
* ``PARTITION BY RANGE``
* ``PARTITION BY LIST``
* ``PARTITION BY HASH``
.. note::
Although table partitioning is available in PostgreSQL 10.x, it is highly recommended you use PostgreSQL 11.x. Table partitioning got a major upgrade in PostgreSQL 11.x.
PostgreSQL 10.x does not support creating foreign keys to/from partitioned tables and does not automatically create an index across all partitions.
Creating partitioned tables
---------------------------
Partitioned tables are declared like regular Django models with a special base class and two extra options to set the partitioning method and key. Once declared, they behave like regular Django models.
Declaring the model
*******************
Inherit your model from :class:`psqlextra.models.PostgresPartitionedModel` and declare a child class named ``PartitioningMeta``. On the meta class, specify the partitioning method and key.
* Use :attr:`psqlextra.types.PostgresPartitioningMethod.RANGE` to ``PARTITION BY RANGE``
* Use :attr:`psqlextra.types.PostgresPartitioningMethod.LIST` to ``PARTITION BY LIST``
* Use :attr:`psqlextra.types.PostgresPartitioningMethod.HASH` to ``PARTITION BY HASH``
.. code-block:: python
from django.db import models
from psqlextra.types import PostgresPartitioningMethod
from psqlextra.models import PostgresPartitionedModel
class MyModel(PostgresPartitionedModel):
class PartitioningMeta:
method = PostgresPartitioningMethod.RANGE
key = ["timestamp"]
name = models.TextField()
timestamp = models.DateTimeField()
Generating a migration
**********************
Run the following command to automatically generate a migration:
.. code-block:: bash
python manage.py pgmakemigrations
This will generate a migration that creates the partitioned table with a default partition.
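As a rough sketch of what ``pgmakemigrations`` generates for the model above (the real file is generated for you and may differ in detail), the migration uses this package's special operations rather than Django's :class:`~django:django.db.migrations.operations.CreateModel`:

.. code-block:: python

    from django.db import migrations, models

    from psqlextra.backend.migrations.operations import (
        PostgresAddDefaultPartition,
        PostgresCreatePartitionedModel,
    )
    from psqlextra.types import PostgresPartitioningMethod


    class Migration(migrations.Migration):
        operations = [
            # creates the partitioned table itself
            PostgresCreatePartitionedModel(
                name="mymodel",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    ("name", models.TextField()),
                    ("timestamp", models.DateTimeField()),
                ],
                partitioning_options={
                    "method": PostgresPartitioningMethod.RANGE,
                    "key": ["timestamp"],
                },
            ),
            # adds the default partition that catches unmatched rows
            PostgresAddDefaultPartition(
                model_name="mymodel",
                name="default",
            ),
        ]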
.. warning::
Always use ``python manage.py pgmakemigrations`` for partitioned models.
The model must be created by the :class:`~psqlextra.backend.migrations.operations.PostgresCreatePartitionedModel` operation.
Do not use the standard ``python manage.py makemigrations`` command for partitioned models. Django will issue a standard :class:`~django:django.db.migrations.operations.CreateModel` operation. Doing this will not create a partitioned table and all subsequent operations will fail.
Automatically managing partitions
---------------------------------
The ``python manage.py pgpartition`` command can help you automatically create new partitions ahead of time and delete old ones for time-based partitioning.
You can run this command manually as needed, schedule it to run periodically, or run it every time you release a new version of your app.
.. warning::
We DO NOT recommend that you set up this command to automatically delete partitions without manual review.
Specify ``--skip-delete`` to not delete partitions automatically. Periodically run the command manually without the ``--yes`` flag to review the partitions that are due for deletion.
Command-line options
********************
==================== ============== ================ ====================================================================================================
Long flag            Short flag     Default          Description
==================== ============== ================ ====================================================================================================
``--yes``            ``-y``         ``False``        Specifies yes to all questions. You will NOT be asked for confirmation before partition deletion.
``--using``          ``-u``         ``'default'``    Optional name of the database connection to use.
``--skip-create``                   ``False``        Whether to skip creating partitions.
``--skip-delete``                   ``False``        Whether to skip deleting partitions.
==================== ============== ================ ====================================================================================================
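For example, to let a scheduled job create upcoming partitions unattended while leaving deletions for manual review (using only the flags documented above):

.. code-block:: bash

   python manage.py pgpartition --yes --skip-delete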
Configuration
*************
To use the command, declare an instance of :class:`psqlextra.partitioning.PostgresPartitioningManager` and set ``PSQLEXTRA_PARTITIONING_MANAGER`` to the dotted import path of that instance.
For example:
.. code-block:: python
# myapp/partitioning.py
from psqlextra.partitioning import PostgresPartitioningManager
manager = PostgresPartitioningManager(...)
# myapp/settings.py
PSQLEXTRA_PARTITIONING_MANAGER = 'myapp.partitioning.manager'
Time-based partitioning
~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: python
from dateutil.relativedelta import relativedelta
from psqlextra.partitioning import (
PostgresPartitioningManager,
PostgresCurrentTimePartitioningStrategy,
PostgresTimePartitionSize,
partition_by_current_time,
)
from psqlextra.partitioning.config import PostgresPartitioningConfig
manager = PostgresPartitioningManager([
# 3 partitions ahead, each partition is one month
# delete partitions older than 6 months
# partitions will be named `[table_name]_[year]_[3-letter month name]`.
PostgresPartitioningConfig(
model=MyPartitionedModel,
strategy=PostgresCurrentTimePartitioningStrategy(
size=PostgresTimePartitionSize(months=1),
count=3,
max_age=relativedelta(months=6),
),
),
# 6 partitions ahead, each partition is two weeks
# delete partitions older than 8 months
# partitions will be named `[table_name]_[year]_week_[week number]`.
PostgresPartitioningConfig(
model=MyPartitionedModel,
strategy=PostgresCurrentTimePartitioningStrategy(
size=PostgresTimePartitionSize(weeks=2),
count=6,
max_age=relativedelta(months=8),
),
),
# 12 partitions ahead, each partition is 5 days
# old partitions are never deleted, `max_age` is not set
# partitions will be named `[table_name]_[year]_[month]_[month day number]`.
PostgresPartitioningConfig(
model=MyPartitionedModel,
strategy=PostgresCurrentTimePartitioningStrategy(
size=PostgresTimePartitionSize(days=5),
count=12,
),
),
])
Changing a time partitioning strategy
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When switching partitioning strategies, you might encounter the problem that partitions for part of a particular range already exist.
To work around this, you can use :class:`psqlextra.partitioning.PostgresTimePartitioningStrategy` and specify the ``start_datetime`` parameter. No partitions will then be created before the given date/time.
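For example (a sketch; this assumes :class:`psqlextra.partitioning.PostgresTimePartitioningStrategy` accepts the same ``size``, ``count`` and ``max_age`` arguments as the current-time strategy shown earlier):

.. code-block:: python

   from datetime import datetime

   from dateutil.relativedelta import relativedelta

   from psqlextra.partitioning import (
       PostgresPartitioningManager,
       PostgresTimePartitioningStrategy,
       PostgresTimePartitionSize,
   )
   from psqlextra.partitioning.config import PostgresPartitioningConfig

   manager = PostgresPartitioningManager([
       PostgresPartitioningConfig(
           model=MyPartitionedModel,
           strategy=PostgresTimePartitioningStrategy(
               # no partitions will be created before this point
               start_datetime=datetime(2020, 1, 1),
               size=PostgresTimePartitionSize(months=1),
               count=3,
               max_age=relativedelta(months=6),
           ),
       ),
   ])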
Custom strategy
~~~~~~~~~~~~~~~
You can create a custom partitioning strategy by implementing the :class:`psqlextra.partitioning.PostgresPartitioningStrategy` interface.
You can look at :class:`psqlextra.partitioning.PostgresCurrentTimePartitioningStrategy` as an example.
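A minimal sketch of what such a strategy could look like. This assumes the interface consists of ``to_create``/``to_delete`` generators (mirroring what the built-in time-based strategies produce) and that :class:`psqlextra.partitioning.PostgresTimePartition` is available to describe a partition; verify these names against your installed version before relying on them:

.. code-block:: python

   from datetime import datetime

   from psqlextra.partitioning import (
       PostgresPartitioningStrategy,
       PostgresTimePartition,
       PostgresTimePartitionSize,
   )

   class SingleMonthStrategy(PostgresPartitioningStrategy):
       """Hypothetical strategy: one fixed monthly partition, never deletes."""

       def to_create(self):
           # Yield the partitions that should exist.
           yield PostgresTimePartition(
               start_datetime=datetime(2020, 1, 1),
               size=PostgresTimePartitionSize(months=1),
           )

       def to_delete(self):
           # Never delete any partitions.
           return iter(())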
Manually managing partitions
----------------------------
If you are using list or hash partitioning, you most likely have a fixed number of partitions that can be created up front, either through migrations or with the schema editor.
Using migration operations
**************************
Adding a range partition
~~~~~~~~~~~~~~~~~~~~~~~~
Use the :class:`~psqlextra.backend.migrations.operations.PostgresAddRangePartition` operation to add a new range partition. Only use this operation when your partitioned model uses :attr:`psqlextra.types.PostgresPartitioningMethod.RANGE`.
.. code-block:: python
from django.db import migrations, models
from psqlextra.backend.migrations.operations import PostgresAddRangePartition
class Migration(migrations.Migration):
operations = [
PostgresAddRangePartition(
model_name="mypartitionedmodel",
name="pt1",
from_values="2019-01-01",
to_values="2019-02-01",
),
]
Adding a list partition
~~~~~~~~~~~~~~~~~~~~~~~
Use the :class:`~psqlextra.backend.migrations.operations.PostgresAddListPartition` operation to add a new list partition. Only use this operation when your partitioned model uses :attr:`psqlextra.types.PostgresPartitioningMethod.LIST`.
.. code-block:: python
from django.db import migrations, models
from psqlextra.backend.migrations.operations import PostgresAddListPartition
class Migration(migrations.Migration):
operations = [
PostgresAddListPartition(
model_name="mypartitionedmodel",
name="pt1",
values=["car", "boat"],
),
]
Adding a hash partition
~~~~~~~~~~~~~~~~~~~~~~~
Use the :class:`~psqlextra.backend.migrations.operations.PostgresAddHashPartition` operation to add a new hash partition. Only use this operation when your partitioned model uses :attr:`psqlextra.types.PostgresPartitioningMethod.HASH`.
.. code-block:: python
from django.db import migrations, models
from psqlextra.backend.migrations.operations import PostgresAddHashPartition
class Migration(migrations.Migration):
operations = [
PostgresAddHashPartition(
model_name="mypartitionedmodel",
name="pt1",
modulus=3,
remainder=1,
),
]
Adding a default partition
~~~~~~~~~~~~~~~~~~~~~~~~~~
Use the :class:`~psqlextra.backend.migrations.operations.PostgresAddDefaultPartition` operation to add a new default partition.
Note that you can only have one default partition per partitioned table/model. An error will be raised if you try to create a second default partition.
If you used ``python manage.py pgmakemigrations`` to generate a migration for your newly created partitioned model, you do not need this operation. This operation is added automatically when you create a new partitioned model.
.. code-block:: python
from django.db import migrations, models
from psqlextra.backend.migrations.operations import PostgresAddDefaultPartition
class Migration(migrations.Migration):
operations = [
PostgresAddDefaultPartition(
model_name="mypartitionedmodel",
name="default",
),
]
Deleting a default partition
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Use the :class:`~psqlextra.backend.migrations.operations.PostgresDeleteDefaultPartition` operation to delete an existing default partition.
.. warning::
Deleting the default partition and leaving your model without a default partition can be dangerous. Rows that do not fit in any other partition will fail to be inserted.
.. code-block:: python
from django.db import migrations, models
from psqlextra.backend.migrations.operations import PostgresDeleteDefaultPartition
class Migration(migrations.Migration):
operations = [
PostgresDeleteDefaultPartition(
model_name="mypartitionedmodel",
name="pt1",
),
]
Deleting a range partition
~~~~~~~~~~~~~~~~~~~~~~~~~~
Use the :class:`psqlextra.backend.migrations.operations.PostgresDeleteRangePartition` operation to delete an existing range partition. Only use this operation when your partitioned model uses :attr:`psqlextra.types.PostgresPartitioningMethod.RANGE`.
.. code-block:: python
from django.db import migrations, models
from psqlextra.backend.migrations.operations import PostgresDeleteRangePartition
class Migration(migrations.Migration):
operations = [
PostgresDeleteRangePartition(
model_name="mypartitionedmodel",
name="pt1",
),
]
Deleting a list partition
~~~~~~~~~~~~~~~~~~~~~~~~~
Use the :class:`psqlextra.backend.migrations.operations.PostgresDeleteListPartition` operation to delete an existing list partition. Only use this operation when your partitioned model uses :attr:`psqlextra.types.PostgresPartitioningMethod.LIST`.
.. code-block:: python
from django.db import migrations, models
from psqlextra.backend.migrations.operations import PostgresDeleteListPartition
class Migration(migrations.Migration):
operations = [
PostgresDeleteListPartition(
model_name="mypartitionedmodel",
name="pt1",
),
]
Deleting a hash partition
~~~~~~~~~~~~~~~~~~~~~~~~~
Use the :class:`psqlextra.backend.migrations.operations.PostgresDeleteHashPartition` operation to delete an existing hash partition. Only use this operation when your partitioned model uses :attr:`psqlextra.types.PostgresPartitioningMethod.HASH`.
.. code-block:: python
from django.db import migrations, models
from psqlextra.backend.migrations.operations import PostgresDeleteHashPartition
class Migration(migrations.Migration):
operations = [
PostgresDeleteHashPartition(
model_name="mypartitionedmodel",
name="pt1",
),
]
Using the schema editor
***********************
Use the :class:`psqlextra.backend.PostgresSchemaEditor` to manage partitions directly in a more imperative fashion. The schema editor is used by the migration operations described above.
Adding a range partition
~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: python
from django.db import connection
connection.schema_editor().add_range_partition(
model=MyPartitionedModel,
name="pt1",
from_values="2019-01-01",
to_values="2019-02-01",
)
Adding a list partition
~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: python
from django.db import connection
connection.schema_editor().add_list_partition(
model=MyPartitionedModel,
name="pt1",
values=["car", "boat"],
)
Adding a hash partition
~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: python
from django.db import connection
connection.schema_editor().add_hash_partition(
model=MyPartitionedModel,
name="pt1",
modulus=3,
remainder=1,
)
Adding a default partition
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: python
from django.db import connection
connection.schema_editor().add_default_partition(
model=MyPartitionedModel,
name="default",
)
Deleting a partition
~~~~~~~~~~~~~~~~~~~~
.. code-block:: python
from django.db import connection
connection.schema_editor().delete_partition(
model=MyPartitionedModel,
name="default",
)
django-postgres-extra-2.0.9/manage.py 0000775 0000000 0000000 00000000407 14634267343 0017573 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault(
'DJANGO_SETTINGS_MODULE',
'settings'
)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
django-postgres-extra-2.0.9/psqlextra/ 0000775 0000000 0000000 00000000000 14634267343 0020010 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/__init__.py 0000664 0000000 0000000 00000000442 14634267343 0022121 0 ustar 00root root 0000000 0000000 import django
from ._version import __version__
if django.VERSION < (3, 2): # pragma: no cover
default_app_config = "psqlextra.apps.PostgresExtraAppConfig"
__all__ = [
"default_app_config",
"__version__",
]
else:
__all__ = [
"__version__",
]
django-postgres-extra-2.0.9/psqlextra/_version.py 0000664 0000000 0000000 00000000031 14634267343 0022200 0 ustar 00root root 0000000 0000000 __version__ = "2.0.9rc4"
django-postgres-extra-2.0.9/psqlextra/apps.py 0000664 0000000 0000000 00000000334 14634267343 0021325 0 ustar 00root root 0000000 0000000 from django.apps import AppConfig
class PostgresExtraAppConfig(AppConfig):
name = "psqlextra"
verbose_name = "PostgreSQL Extra"
def ready(self) -> None:
from .lookups import InValuesLookup # noqa
django-postgres-extra-2.0.9/psqlextra/backend/ 0000775 0000000 0000000 00000000000 14634267343 0021377 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/backend/__init__.py 0000664 0000000 0000000 00000000000 14634267343 0023476 0 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/backend/base.py 0000664 0000000 0000000 00000007374 14634267343 0022676 0 ustar 00root root 0000000 0000000 import logging
from typing import TYPE_CHECKING
from django.conf import settings
from django.contrib.postgres.signals import (
get_hstore_oids,
register_type_handlers,
)
from django.db import ProgrammingError
from . import base_impl
from .introspection import PostgresIntrospection
from .operations import PostgresOperations
from .schema import PostgresSchemaEditor
from django.db.backends.postgresql.base import ( # isort:skip
DatabaseWrapper as PostgresDatabaseWrapper,
)
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
class Wrapper(PostgresDatabaseWrapper):
pass
else:
Wrapper = base_impl.backend()
class DatabaseWrapper(Wrapper):
"""Wraps the standard PostgreSQL database back-end.
Overrides the schema editor with our custom schema editor and makes
sure the `hstore` extension is enabled.
"""
SchemaEditorClass = PostgresSchemaEditor # type: ignore[assignment]
introspection_class = PostgresIntrospection
ops_class = PostgresOperations
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Some base back-ends such as the PostGIS back-end don't properly
# set `ops_class` and `introspection_class` and initialize these
# classes themselves.
#
# This can lead to broken functionality. We fix this automatically.
if not isinstance(self.introspection, self.introspection_class):
self.introspection = self.introspection_class(self)
if not isinstance(self.ops, self.ops_class):
self.ops = self.ops_class(self)
for expected_compiler_class in self.ops.compiler_classes:
compiler_class = self.ops.compiler(expected_compiler_class.__name__)
if not issubclass(compiler_class, expected_compiler_class):
logger.warning(
"Compiler '%s.%s' is not properly deriving from '%s.%s'."
% (
compiler_class.__module__,
compiler_class.__name__,
expected_compiler_class.__module__,
expected_compiler_class.__name__,
)
)
def prepare_database(self):
"""Ran to prepare the configured database.
This is where we enable the `hstore` extension if it wasn't
enabled yet.
"""
super().prepare_database()
setup_ext = getattr(
settings, "POSTGRES_EXTRA_AUTO_EXTENSION_SET_UP", True
)
if not setup_ext:
return False
with self.cursor() as cursor:
try:
cursor.execute("CREATE EXTENSION IF NOT EXISTS hstore")
except ProgrammingError: # permission denied
logger.warning(
'Failed to create "hstore" extension. '
"Tables with hstore columns may fail to migrate. "
"If hstore is needed, make sure you are connected "
"to the database as a superuser "
"or add the extension manually.",
exc_info=True,
)
return
# Clear old (non-existent), stale oids.
get_hstore_oids.cache_clear()
# Verify that we (and Django) can find the OIDs
# for hstore.
oids, _ = get_hstore_oids(self.alias)
if not oids:
logger.warning(
'"hstore" extension was created, but we cannot find the oids'
"in the database. Something went wrong.",
)
return
# We must trigger Django into registering the type handlers now
# so that any subsequent code can properly use the newly
# registered types.
register_type_handlers(self)
django-postgres-extra-2.0.9/psqlextra/backend/base_impl.py 0000664 0000000 0000000 00000006753 14634267343 0023717 0 ustar 00root root 0000000 0000000 import importlib
from typing import Type

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.backends.postgresql.base import DatabaseWrapper
from django.db.backends.postgresql.introspection import ( # type: ignore[import]
DatabaseIntrospection,
)
from django.db.backends.postgresql.operations import DatabaseOperations
from django.db.backends.postgresql.schema import ( # type: ignore[import]
DatabaseSchemaEditor,
)
from django.db.backends.postgresql.base import ( # isort:skip
DatabaseWrapper as Psycopg2DatabaseWrapper,
)
def base_backend_instance():
"""Gets an instance of the base class for the custom database back-end.
This should be the Django PostgreSQL back-end. However,
some people are already using a custom back-end from
another package. We are nice people and expose an option
that allows them to configure the back-end we base upon.
As long as the specified base eventually also has
the PostgreSQL back-end as a base, then everything should
work as intended.
We create an instance to inspect what classes to subclass
because not all back-ends set properties such as `ops_class`
properly. The PostGIS back-end is a good example.
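For example, in settings.py (the PostGIS back-end is shown purely as
an illustration of a valid value):
POSTGRES_EXTRA_DB_BACKEND_BASE = "django.contrib.gis.db.backends.postgis"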
"""
base_class_name = getattr(
settings,
"POSTGRES_EXTRA_DB_BACKEND_BASE",
"django.db.backends.postgresql",
)
base_class_module = importlib.import_module(base_class_name + ".base")
base_class = getattr(base_class_module, "DatabaseWrapper", None)
if not base_class:
raise ImproperlyConfigured(
(
"'%s' is not a valid database back-end."
" The module does not define a DatabaseWrapper class."
" Check the value of POSTGRES_EXTRA_DB_BACKEND_BASE."
)
% base_class_name
)
if not issubclass(base_class, Psycopg2DatabaseWrapper):
raise ImproperlyConfigured(
(
"'%s' is not a valid database back-end."
" It does inherit from the PostgreSQL back-end."
" Check the value of POSTGRES_EXTRA_DB_BACKEND_BASE."
)
% base_class_name
)
base_instance = base_class(connections.databases[DEFAULT_DB_ALIAS])
if base_instance.connection:
raise ImproperlyConfigured(
(
"'%s' establishes a connection during initialization."
" This is not expected and can lead to more connections"
" being established than neccesarry."
)
% base_class_name
)
return base_instance
def backend() -> Type[DatabaseWrapper]:
"""Gets the base class for the database back-end."""
return base_backend_instance().__class__
def schema_editor() -> Type[DatabaseSchemaEditor]:
"""Gets the base class for the schema editor.
We have to use the configured base back-end's schema editor for
this.
"""
return base_backend_instance().SchemaEditorClass
def introspection() -> Type[DatabaseIntrospection]:
"""Gets the base class for the introspection class.
We have to use the configured base back-end's introspection class
for this.
"""
return base_backend_instance().introspection.__class__
def operations() -> Type[DatabaseOperations]:
"""Gets the base class for the operations class.
We have to use the configured base back-end's operations class for
this.
"""
return base_backend_instance().ops.__class__
django-postgres-extra-2.0.9/psqlextra/backend/introspection.py 0000664 0000000 0000000 00000023306 14634267343 0024655 0 ustar 00root root 0000000 0000000 from dataclasses import dataclass
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
from django.db.backends.postgresql.introspection import ( # type: ignore[import]
DatabaseIntrospection,
)
from psqlextra.types import PostgresPartitioningMethod
from . import base_impl
PARTITIONING_STRATEGY_TO_METHOD = {
"r": PostgresPartitioningMethod.RANGE,
"l": PostgresPartitioningMethod.LIST,
"h": PostgresPartitioningMethod.HASH,
}
@dataclass
class PostgresIntrospectedPartitionTable:
"""Data container for information about a partition."""
name: str
full_name: str
comment: Optional[str]
@dataclass
class PostgresIntrospectedPartitonedTable:
"""Data container for information about a partitioned table."""
name: str
method: PostgresPartitioningMethod
key: List[str]
partitions: List[PostgresIntrospectedPartitionTable]
def partition_by_name(
self, name: str
) -> Optional[PostgresIntrospectedPartitionTable]:
"""Finds the partition with the specified name."""
return next(
(
partition
for partition in self.partitions
if partition.name == name
),
None,
)
if TYPE_CHECKING:
class Introspection(DatabaseIntrospection):
pass
else:
Introspection = base_impl.introspection()
class PostgresIntrospection(Introspection):
"""Adds introspection features specific to PostgreSQL."""
# TODO: This class is a mess, both here and in
# the base.
#
# Some methods return untyped dicts, some named tuples,
# some flat lists of strings. It's horribly inconsistent.
#
# Most methods are poorly named. For example, `get_table_description`
# does not return a complete table description. It merely returns
# the columns.
#
# We do our best in this class to stay consistent with
# the base in Django by respecting its naming scheme
# and commonly used return types. Creating an API that
# matches the look and feel of the Django base class
# is more important than fixing those issues.
def get_partitioned_tables(
self, cursor
) -> List[PostgresIntrospectedPartitonedTable]:
"""Gets a list of partitioned tables."""
cursor.execute(
"""
SELECT
pg_class.relname,
pg_partitioned_table.partstrat
FROM
pg_partitioned_table
JOIN
pg_class
ON
pg_class.oid = pg_partitioned_table.partrelid
"""
)
return [
PostgresIntrospectedPartitonedTable(
name=row[0],
method=PARTITIONING_STRATEGY_TO_METHOD[row[1]],
key=self.get_partition_key(cursor, row[0]),
partitions=self.get_partitions(cursor, row[0]),
)
for row in cursor.fetchall()
]
def get_partitioned_table(self, cursor, table_name: str):
"""Gets a single partitioned table."""
return next(
(
table
for table in self.get_partitioned_tables(cursor)
if table.name == table_name
),
None,
)
def get_partitions(
self, cursor, table_name
) -> List[PostgresIntrospectedPartitionTable]:
"""Gets a list of partitions belonging to the specified partitioned
table."""
sql = """
SELECT
child.relname,
pg_description.description
FROM pg_inherits
JOIN
pg_class parent
ON
pg_inherits.inhparent = parent.oid
JOIN
pg_class child
ON
pg_inherits.inhrelid = child.oid
JOIN
pg_namespace nmsp_parent
ON
nmsp_parent.oid = parent.relnamespace
JOIN
pg_namespace nmsp_child
ON
nmsp_child.oid = child.relnamespace
LEFT JOIN
pg_description
ON
pg_description.objoid = child.oid
WHERE
parent.relname = %s
"""
cursor.execute(sql, (table_name,))
return [
PostgresIntrospectedPartitionTable(
name=row[0].replace(f"{table_name}_", ""),
full_name=row[0],
comment=row[1] or None,
)
for row in cursor.fetchall()
]
def get_partition_key(self, cursor, table_name: str) -> List[str]:
"""Gets the partition key for the specified partitioned table.
Returns:
A list of column names that are part of the
partition key.
"""
sql = """
SELECT
col.column_name
FROM
(SELECT partrelid,
partnatts,
CASE partstrat
WHEN 'l' THEN 'list'
WHEN 'r' THEN 'range'
WHEN 'h' THEN 'hash'
END AS partition_strategy,
Unnest(partattrs) column_index
FROM pg_partitioned_table) pt
JOIN
pg_class par
ON par.oid = pt.partrelid
JOIN
information_schema.COLUMNS col
ON
col.table_schema = par.relnamespace :: regnamespace :: text
AND col.table_name = par.relname
AND ordinal_position = pt.column_index
WHERE
table_name = %s
"""
cursor.execute(sql, (table_name,))
return [row[0] for row in cursor.fetchall()]
def get_columns(self, cursor, table_name: str):
return self.get_table_description(cursor, table_name)
def get_schema_list(self, cursor) -> List[str]:
"""A flat list of available schemas."""
cursor.execute(
"""
SELECT
schema_name
FROM
information_schema.schemata
""",
tuple(),
)
return [name for name, in cursor.fetchall()]
def get_constraints(self, cursor, table_name: str):
"""Retrieve any constraints or keys (unique, pk, fk, check, index)
across one or more columns.
Also retrieve the definition of expression-based indexes.
"""
constraints = super().get_constraints(cursor, table_name)
# standard Django implementation does not return the definition
# for indexes, only for constraints, let's patch that up
cursor.execute(
"SELECT indexname, indexdef FROM pg_indexes WHERE tablename = %s",
(table_name,),
)
for index_name, definition in cursor.fetchall():
# PostgreSQL 13 or older won't give a definition if the
# index is actually a primary key.
constraint = constraints.get(index_name)
if not constraint:
continue
if constraint.get("definition") is None:
constraint["definition"] = definition
return constraints
def get_table_locks(self, cursor) -> List[Tuple[str, str, str]]:
cursor.execute(
"""
SELECT
n.nspname,
t.relname,
l.mode
FROM pg_locks l
INNER JOIN pg_class t ON t.oid = l.relation
INNER JOIN pg_namespace n ON n.oid = t.relnamespace
WHERE t.relnamespace >= 2200
ORDER BY n.nspname, t.relname, l.mode
"""
)
return cursor.fetchall()
def get_storage_settings(self, cursor, table_name: str) -> Dict[str, str]:
sql = """
SELECT
unnest(c.reloptions || array(select 'toast.' || x from pg_catalog.unnest(tc.reloptions) x))
FROM
pg_catalog.pg_class c
LEFT JOIN
pg_catalog.pg_class tc ON (c.reltoastrelid = tc.oid)
LEFT JOIN
pg_catalog.pg_am am ON (c.relam = am.oid)
WHERE
c.relname::text = %s
AND pg_catalog.pg_table_is_visible(c.oid)
"""
cursor.execute(sql, (table_name,))
storage_settings = {}
for row in cursor.fetchall():
# It's hard to believe, but storage settings are really
# represented as `key=value` strings in Postgres.
# See: https://www.postgresql.org/docs/current/catalog-pg-class.html
name, value = row[0].split("=")
storage_settings[name] = value
return storage_settings
def get_relations(self, cursor, table_name: str):
"""Gets a dictionary {field_name: (field_name_other_table,
other_table)} representing all relations in the specified table.
This is overriden because the query in Django does not handle
relations between tables in different schemas properly.
"""
cursor.execute(
"""
SELECT a1.attname, c2.relname, a2.attname
FROM pg_constraint con
LEFT JOIN pg_class c1 ON con.conrelid = c1.oid
LEFT JOIN pg_class c2 ON con.confrelid = c2.oid
LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]
LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]
WHERE
con.conrelid = %s::regclass AND
con.contype = 'f' AND
pg_catalog.pg_table_is_visible(c1.oid)
""",
[table_name],
)
return {row[0]: (row[2], row[1]) for row in cursor.fetchall()}
django-postgres-extra-2.0.9/psqlextra/backend/migrations/ 0000775 0000000 0000000 00000000000 14634267343 0023553 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/backend/migrations/README.md 0000664 0000000 0000000 00000006674 14634267343 0025047 0 ustar 00root root 0000000 0000000 ## What's up with the shady patch functions?
Django currently does not provide a way to extend certain classes that are used when auto-generating migrations using `makemigrations`. The patch functions use Python's standard mocking framework to direct certain functions to a custom implementation.
These patches allow `django-postgres-extra` to let Django auto-generate migrations for `PostgresPartitionedModel`, `PostgresViewModel` and `PostgresMaterializedView`.
None of the patches fundamentally change how Django works. They let Django do most of the work and only customize it for Postgres-specific models. All of the patches call the original implementation and then adjust its results instead of copying the entire implementation.
### Using the patches
The patches are all context managers. The top level `postgres_patched_migrations` context manager applies all patches for the duration of the context.
This is used in the custom `pgmakemigrations` command to extend the migration autodetector for `PostgresPartitionedModel`, `PostgresViewModel` and `PostgresMaterializedView`.
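A minimal usage sketch (`call_command` is Django's standard API for invoking management commands; shown here only to illustrate how the context manager wraps the autodetection step):

```python
from django.core.management import call_command

from psqlextra.backend.migrations import postgres_patched_migrations

with postgres_patched_migrations():
    # While the patches are active, the autodetector emits the
    # psqlextra operations for partitioned and view models.
    call_command("makemigrations")
```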
### Patches
#### Autodetector patch
* Patches `django.db.migrations.autodetector.MigrationAutodetector.add_operation`
This function is called every time the autodetector adds a new operation. For example, if Django detects a new model, `add_operation` is called with a new `CreateModel` operation instance.
The patch hooks into the `add_operation` function to transform the following operations:
* `CreateModel` into a `PostgresCreatePartitionedModel` operation if the model is a `PostgresPartitionedModel` and adds a `PostgresAddDefaultPartition` operation to create a default partition.
* `DeleteModel` into a `PostgresDeletePartitionedModel` operation if the model is a `PostgresPartitionedModel`.
* `CreateModel` into a `PostgresCreateViewModel` operation if the model is a `PostgresViewModel`.
* `DeleteModel` into a `PostgresDeleteViewModel` operation if the model is a `PostgresViewModel`.
* `CreateModel` into a `PostgresCreateMaterializedViewModel` operation if the model is a `PostgresMaterializedViewModel`.
* `DeleteModel` into a `PostgresDeleteMaterializedViewModel` operation if the model is a `PostgresMaterializedViewModel`.
* `AddField` into an `ApplyState` operation if the model is a `PostgresViewModel` or `PostgresMaterializedViewModel`.
* `AlterField` into an `ApplyState` operation if the model is a `PostgresViewModel` or `PostgresMaterializedViewModel`.
* `RenameField` into an `ApplyState` operation if the model is a `PostgresViewModel` or `PostgresMaterializedViewModel`.
* `RemoveField` into an `ApplyState` operation if the model is a `PostgresViewModel` or `PostgresMaterializedViewModel`.
#### ProjectState patch
* Patches `django.db.migrations.state.ProjectState.from_apps`
This function is called to build up the current migration state from all the installed apps. For each model, a `ModelState` is created.
The patch hooks into the `from_apps` function to transform the following:
* Create `PostgresPartitionedModelState` from the model if the model is a `PostgresPartitionedModel`.
* Create `PostgresViewModelState` from the model if the model is a `PostgresViewModel`.
* Create `PostgresMaterializedViewModelState` from the model if the model is a `PostgresMaterializedViewModel`.
These custom model states are needed to track partitioning and view options (`PartitioningMeta` and `ViewMeta`) in migrations. Without them, the partitioning and view options would not end up in migrations.
django-postgres-extra-2.0.9/psqlextra/backend/migrations/__init__.py 0000664 0000000 0000000 00000000147 14634267343 0025666 0 ustar 00root root 0000000 0000000 from .patched_migrations import postgres_patched_migrations
__all__ = ["postgres_patched_migrations"]
django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/ 0000775 0000000 0000000 00000000000 14634267343 0025736 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/__init__.py 0000664 0000000 0000000 00000002701 14634267343 0030047 0 ustar 00root root 0000000 0000000 from .add_default_partition import PostgresAddDefaultPartition
from .add_hash_partition import PostgresAddHashPartition
from .add_list_partition import PostgresAddListPartition
from .add_range_partition import PostgresAddRangePartition
from .apply_state import ApplyState
from .create_materialized_view_model import PostgresCreateMaterializedViewModel
from .create_partitioned_model import PostgresCreatePartitionedModel
from .create_view_model import PostgresCreateViewModel
from .delete_default_partition import PostgresDeleteDefaultPartition
from .delete_hash_partition import PostgresDeleteHashPartition
from .delete_list_partition import PostgresDeleteListPartition
from .delete_materialized_view_model import PostgresDeleteMaterializedViewModel
from .delete_partitioned_model import PostgresDeletePartitionedModel
from .delete_range_partition import PostgresDeleteRangePartition
from .delete_view_model import PostgresDeleteViewModel
__all__ = [
"ApplyState",
"PostgresAddHashPartition",
"PostgresAddListPartition",
"PostgresAddRangePartition",
"PostgresAddDefaultPartition",
"PostgresDeleteDefaultPartition",
"PostgresDeleteHashPartition",
"PostgresDeleteListPartition",
"PostgresDeleteRangePartition",
"PostgresCreatePartitionedModel",
"PostgresDeletePartitionedModel",
"PostgresCreateViewModel",
"PostgresCreateMaterializedViewModel",
"PostgresDeleteViewModel",
"PostgresDeleteMaterializedViewModel",
]
django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/add_default_partition.py 0000664 0000000 0000000 00000002526 14634267343 0032642 0 ustar 00root root 0000000 0000000 from psqlextra.backend.migrations.state import PostgresPartitionState
from .partition import PostgresPartitionOperation
class PostgresAddDefaultPartition(PostgresPartitionOperation):
"""Adds a new default partition to a :see:PartitionedPostgresModel."""
def state_forwards(self, app_label, state):
model_state = state.models[(app_label, self.model_name_lower)]
model_state.add_partition(
PostgresPartitionState(
app_label=app_label, model_name=self.model_name, name=self.name
)
)
state.reload_model(app_label, self.model_name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.add_default_partition(model, self.name)
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_partition(model, self.name)
def describe(self) -> str:
return "Creates default partition '%s' on %s" % (
self.name,
self.model_name,
)
django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/add_hash_partition.py 0000664 0000000 0000000 00000004706 14634267343 0032143 0 ustar 00root root 0000000 0000000 from psqlextra.backend.migrations.state import PostgresHashPartitionState
from .partition import PostgresPartitionOperation
class PostgresAddHashPartition(PostgresPartitionOperation):
"""Adds a new hash partition to a :see:PartitionedPostgresModel.
Each partition will hold the rows for which the hash value of the
partition key divided by the specified modulus will produce the
specified remainder.
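For example, with modulus=3, three partitions with remainders 0, 1 and
2 together cover all rows of the partitioned table.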
"""
def __init__(
self, model_name: str, name: str, modulus: int, remainder: int
):
"""Initializes new instance of :see:AddHashPartition.
Arguments:
model_name:
The name of the :see:PartitionedPostgresModel.
name:
The name to give to the new partition table.
modulus:
Integer value by which the key is divided.
remainder:
The remainder of the hash value when divided by modulus.
"""
super().__init__(model_name, name)
self.modulus = modulus
self.remainder = remainder
def state_forwards(self, app_label, state):
model = state.models[(app_label, self.model_name_lower)]
model.add_partition(
PostgresHashPartitionState(
app_label=app_label,
model_name=self.model_name,
name=self.name,
modulus=self.modulus,
remainder=self.remainder,
)
)
state.reload_model(app_label, self.model_name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.add_hash_partition(
model, self.name, self.modulus, self.remainder
)
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_partition(model, self.name)
def deconstruct(self):
name, args, kwargs = super().deconstruct()
kwargs["modulus"] = self.modulus
kwargs["remainder"] = self.remainder
return name, args, kwargs
def describe(self) -> str:
return "Creates hash partition %s on %s" % (self.name, self.model_name)
django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/add_list_partition.py 0000664 0000000 0000000 00000004001 14634267343 0032157 0 ustar 00root root 0000000 0000000 from psqlextra.backend.migrations.state import PostgresListPartitionState
from .partition import PostgresPartitionOperation
class PostgresAddListPartition(PostgresPartitionOperation):
"""Adds a new list partition to a :see:PartitionedPostgresModel."""
def __init__(self, model_name, name, values):
"""Initializes new instance of :see:AddListPartition.
Arguments:
model_name:
The name of the :see:PartitionedPostgresModel.
name:
The name to give to the new partition table.
values:
Partition key values that should be
stored in this partition.
"""
super().__init__(model_name, name)
self.values = values
def state_forwards(self, app_label, state):
model = state.models[(app_label, self.model_name_lower)]
model.add_partition(
PostgresListPartitionState(
app_label=app_label,
model_name=self.model_name,
name=self.name,
values=self.values,
)
)
state.reload_model(app_label, self.model_name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.add_list_partition(model, self.name, self.values)
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_partition(model, self.name)
def deconstruct(self):
name, args, kwargs = super().deconstruct()
kwargs["values"] = self.values
return name, args, kwargs
def describe(self) -> str:
return "Creates list partition %s on %s" % (self.name, self.model_name)
django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/add_range_partition.py 0000664 0000000 0000000 00000004673 14634267343 0032317 0 ustar 00root root 0000000 0000000 from psqlextra.backend.migrations.state import PostgresRangePartitionState
from .partition import PostgresPartitionOperation
class PostgresAddRangePartition(PostgresPartitionOperation):
"""Adds a new range partition to a :see:PartitionedPostgresModel."""
def __init__(self, model_name: str, name: str, from_values, to_values):
"""Initializes new instance of :see:AddRangePartition.
Arguments:
model_name:
The name of the :see:PartitionedPostgresModel.
name:
The name to give to the new partition table.
from_values:
Start of the partitioning key range of
values that need to be stored in this
partition.
to_values:
End of the partitioning key range of
values that need to be stored in this
partition.
"""
super().__init__(model_name, name)
self.from_values = from_values
self.to_values = to_values
def state_forwards(self, app_label, state):
model = state.models[(app_label, self.model_name_lower)]
model.add_partition(
PostgresRangePartitionState(
app_label=app_label,
model_name=self.model_name,
name=self.name,
from_values=self.from_values,
to_values=self.to_values,
)
)
state.reload_model(app_label, self.model_name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.add_range_partition(
model, self.name, self.from_values, self.to_values
)
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_partition(model, self.name)
def deconstruct(self):
name, args, kwargs = super().deconstruct()
kwargs["from_values"] = self.from_values
kwargs["to_values"] = self.to_values
return name, args, kwargs
def describe(self) -> str:
return "Creates range partition %s on %s" % (self.name, self.model_name)
django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/apply_state.py 0000664 0000000 0000000 00000002360 14634267343 0030636 0 ustar 00root root 0000000 0000000 from django.db.migrations.operations.base import Operation
class ApplyState(Operation):
"""Takes an abritrary operation and migrates the project state but does not
apply the operation to the database.
This is very similar to the :see:RunSQL `state_operations`
parameter. This is useful if you want to tell Django that an
operation was applied without actually applying it.
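For example (model and field names are hypothetical):
ApplyState(state_operation=AddField("mymodel", "name", models.TextField()))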
"""
reduces_to_sql = False
def __init__(self, state_operation: Operation) -> None:
self.state_operation = state_operation
def deconstruct(self):
kwargs = {"state_operation": self.state_operation}
return (self.__class__.__qualname__, [], kwargs)
@property
def reversible(self):
return True
def state_forwards(self, app_label, state):
self.state_operation.state_forwards(app_label, state)
def state_backwards(self, app_label, state):
self.state_operation.state_backwards(app_label, state)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
pass
def describe(self):
return "Apply state: " + self.state_operation.describe()
create_materialized_view_model.py 0000664 0000000 0000000 00000004326 14634267343 0034445 0 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations from django.db.migrations.operations.models import CreateModel
from psqlextra.backend.migrations.state import (
PostgresMaterializedViewModelState,
)
class PostgresCreateMaterializedViewModel(CreateModel):
"""Creates the model as a native PostgreSQL 11.x materialzed view."""
serialization_expand_args = [
"fields",
"options",
"managers",
"view_options",
]
def __init__(
self,
name,
fields,
options=None,
view_options=None,
bases=None,
managers=None,
):
super().__init__(name, fields, options, bases, managers)
self.view_options = view_options or {}
def state_forwards(self, app_label, state):
state.add_model(
PostgresMaterializedViewModelState(
app_label=app_label,
name=self.name,
fields=list(self.fields),
options=dict(self.options),
bases=tuple(self.bases),
managers=list(self.managers),
view_options=dict(self.view_options),
)
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""Apply this migration operation forwards."""
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_materialized_view_model(model)
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
"""Apply this migration operation backwards."""
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_materialized_view_model(model)
def deconstruct(self):
name, args, kwargs = super().deconstruct()
if self.view_options:
kwargs["view_options"] = self.view_options
return name, args, kwargs
def describe(self):
"""Gets a human readable text describing this migration."""
description = super().describe()
description = description.replace("model", "materialized view model")
return description
django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/create_partitioned_model.py 0000664 0000000 0000000 00000005523 14634267343 0033342 0 ustar 00root root 0000000 0000000 from django.db.migrations.operations.models import CreateModel
from psqlextra.backend.migrations.state import PostgresPartitionedModelState
class PostgresCreatePartitionedModel(CreateModel):
"""Creates the model as a native PostgreSQL 11.x partitioned table."""
serialization_expand_args = [
"fields",
"options",
"managers",
"partitioning_options",
]
def __init__(
self,
name,
fields,
options=None,
partitioning_options=None,
bases=None,
managers=None,
):
super().__init__(name, fields, options, bases, managers)
self.partitioning_options = partitioning_options or {}
def state_forwards(self, app_label, state):
state.add_model(
PostgresPartitionedModelState(
app_label=app_label,
name=self.name,
fields=list(self.fields),
options=dict(self.options),
bases=tuple(self.bases),
managers=list(self.managers),
partitioning_options=dict(self.partitioning_options),
)
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""Apply this migration operation forwards."""
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_partitioned_model(model)
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
"""Apply this migration operation backwards."""
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_partitioned_model(model)
def deconstruct(self):
name, args, kwargs = super().deconstruct()
if self.partitioning_options:
kwargs["partitioning_options"] = self.partitioning_options
return name, args, kwargs
def describe(self):
"""Gets a human readable text describing this migration."""
description = super().describe()
description = description.replace("model", "partitioned model")
return description
def reduce(self, *args, **kwargs):
result = super().reduce(*args, **kwargs)
# replace CreateModel operation with PostgresCreatePartitionedModel
if isinstance(result, list) and result:
for i, op in enumerate(result):
if isinstance(op, CreateModel):
_, args, kwargs = op.deconstruct()
result[i] = PostgresCreatePartitionedModel(
*args,
**kwargs,
partitioning_options=self.partitioning_options
)
return result
django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/create_view_model.py 0000664 0000000 0000000 00000004166 14634267343 0031774 0 ustar 00root root 0000000 0000000 from django.db.migrations.operations.models import CreateModel
from psqlextra.backend.migrations.state import PostgresViewModelState
class PostgresCreateViewModel(CreateModel):
"""Creates the model as a native PostgreSQL 11.x view."""
serialization_expand_args = [
"fields",
"options",
"managers",
"view_options",
]
def __init__(
self,
name,
fields,
options=None,
view_options=None,
bases=None,
managers=None,
):
super().__init__(name, fields, options, bases, managers)
self.view_options = view_options or {}
def state_forwards(self, app_label, state):
state.add_model(
PostgresViewModelState(
app_label=app_label,
name=self.name,
fields=list(self.fields),
options=dict(self.options),
bases=tuple(self.bases),
managers=list(self.managers),
view_options=dict(self.view_options),
)
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""Apply this migration operation forwards."""
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_view_model(model)
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
"""Apply this migration operation backwards."""
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_view_model(model)
def deconstruct(self):
name, args, kwargs = super().deconstruct()
if self.view_options:
kwargs["view_options"] = self.view_options
return name, args, kwargs
def describe(self):
"""Gets a human readable text describing this migration."""
description = super().describe()
description = description.replace("model", "view model")
return description
django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/delete_default_partition.py 0000664 0000000 0000000 00000001471 14634267343 0033352 0 ustar 00root root 0000000 0000000 from .delete_partition import PostgresDeletePartition
class PostgresDeleteDefaultPartition(PostgresDeletePartition):
"""Deletes a default partition that's part of a.
:see:PartitionedPostgresModel.
"""
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
model = to_state.apps.get_model(app_label, self.model_name)
model_state = to_state.models[(app_label, self.model_name_lower)]
if self.allow_migrate_model(schema_editor.connection.alias, model):
partition_state = model_state.partitions[self.name]
schema_editor.add_default_partition(model, partition_state.name)
def describe(self) -> str:
return "Deletes default partition '%s' on %s" % (
self.name,
self.model_name,
)
django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/delete_hash_partition.py 0000664 0000000 0000000 00000001660 14634267343 0032651 0 ustar 00root root 0000000 0000000 from .delete_partition import PostgresDeletePartition
class PostgresDeleteHashPartition(PostgresDeletePartition):
"""Deletes a hash partition that's part of a.
:see:PartitionedPostgresModel.
"""
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
model = to_state.apps.get_model(app_label, self.model_name)
model_state = to_state.models[(app_label, self.model_name_lower)]
if self.allow_migrate_model(schema_editor.connection.alias, model):
partition_state = model_state.partitions[self.name]
schema_editor.add_hash_partition(
model,
partition_state.name,
partition_state.modulus,
partition_state.remainder,
)
def describe(self) -> str:
return "Deletes hash partition '%s' on %s" % (
self.name,
self.model_name,
)
django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/delete_list_partition.py 0000664 0000000 0000000 00000001543 14634267343 0032701 0 ustar 00root root 0000000 0000000 from .delete_partition import PostgresDeletePartition
class PostgresDeleteListPartition(PostgresDeletePartition):
"""Deletes a list partition that's part of a.
:see:PartitionedPostgresModel.
"""
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
model = to_state.apps.get_model(app_label, self.model_name)
model_state = to_state.models[(app_label, self.model_name_lower)]
if self.allow_migrate_model(schema_editor.connection.alias, model):
partition_state = model_state.partitions[self.name]
schema_editor.add_list_partition(
model, partition_state.name, partition_state.values
)
def describe(self) -> str:
return "Deletes list partition '%s' on %s" % (
self.name,
self.model_name,
)
delete_materialized_view_model.py 0000664 0000000 0000000 00000002134 14634267343 0034437 0 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations from django.db.migrations.operations.models import DeleteModel
class PostgresDeleteMaterializedViewModel(DeleteModel):
"""Deletes the specified materialized view model."""
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""Apply this migration operation forwards."""
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_materialized_view_model(model)
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
"""Apply this migration operation backwards."""
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_materialized_view_model(model)
def describe(self):
"""Gets a human readable text describing this migration."""
description = super().describe()
description = description.replace("model", "materialized view model")
return description
django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/delete_partition.py 0000664 0000000 0000000 00000002346 14634267343 0031650 0 ustar 00root root 0000000 0000000 from .partition import PostgresPartitionOperation
class PostgresDeletePartition(PostgresPartitionOperation):
"""Deletes a partition that's part of a :see:PartitionedPostgresModel."""
def state_forwards(self, app_label, state):
model = state.models[(app_label, self.model_name_lower)]
model.delete_partition(self.name)
state.reload_model(app_label, self.model_name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_partition(model, self.name)
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
model = to_state.apps.get_model(app_label, self.model_name)
model_state = to_state.models[(app_label, self.model_name_lower)]
if self.allow_migrate_model(schema_editor.connection.alias, model):
partition_state = model_state.partitions[self.name]
schema_editor.add_default_partition(model, partition_state.name)
def describe(self) -> str:
return "Deletes partition %s on %s" % (self.name, self.model_name)
django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/delete_partitioned_model.py 0000664 0000000 0000000 00000002077 14634267343 0033342 0 ustar 00root root 0000000 0000000 from django.db.migrations.operations.models import DeleteModel
class PostgresDeletePartitionedModel(DeleteModel):
"""Deletes the specified partitioned model."""
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""Apply this migration operation forwards."""
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_partitioned_model(model)
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
"""Apply this migration operation backwards."""
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_partitioned_model(model)
def describe(self):
"""Gets a human readable text describing this migration."""
description = super().describe()
description = description.replace("model", "partitioned model")
return description
django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/delete_range_partition.py 0000664 0000000 0000000 00000001670 14634267343 0033023 0 ustar 00root root 0000000 0000000 from .delete_partition import PostgresDeletePartition
class PostgresDeleteRangePartition(PostgresDeletePartition):
"""Deletes a range partition that's part of a.
:see:PartitionedPostgresModel.
"""
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
model = to_state.apps.get_model(app_label, self.model_name)
model_state = to_state.models[(app_label, self.model_name_lower)]
if self.allow_migrate_model(schema_editor.connection.alias, model):
partition_state = model_state.partitions[self.name]
schema_editor.add_range_partition(
model,
partition_state.name,
partition_state.from_values,
partition_state.to_values,
)
def describe(self) -> str:
return "Deletes range partition '%s' on %s" % (
self.name,
self.model_name,
)
django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/delete_view_model.py 0000664 0000000 0000000 00000002034 14634267343 0031763 0 ustar 00root root 0000000 0000000 from django.db.migrations.operations.models import DeleteModel
class PostgresDeleteViewModel(DeleteModel):
"""Deletes the specified view model."""
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""Apply this migration operation forwards."""
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_view_model(model)
def database_backwards(
self, app_label, schema_editor, from_state, to_state
):
"""Apply this migration operation backwards."""
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_view_model(model)
def describe(self):
"""Gets a human readable text describing this migration."""
description = super().describe()
description = description.replace("model", "view model")
return description
django-postgres-extra-2.0.9/psqlextra/backend/migrations/operations/partition.py 0000664 0000000 0000000 00000001664 14634267343 0030330 0 ustar 00root root 0000000 0000000 from django.db.migrations.operations.base import Operation
class PostgresPartitionOperation(Operation):
def __init__(self, model_name: str, name: str) -> None:
"""Initializes new instance of :see:AddDefaultPartition.
Arguments:
model_name:
The name of the :see:PartitionedPostgresModel.
name:
The name to give to the new partition table.
"""
self.model_name = model_name
self.model_name_lower = model_name.lower()
self.name = name
def deconstruct(self):
kwargs = {"model_name": self.model_name, "name": self.name}
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, *args, **kwargs):
pass
def state_backwards(self, *args, **kwargs):
pass
def reduce(self, *args, **kwargs):
# PartitionOperation doesn't break migrations optimizations
return True
django-postgres-extra-2.0.9/psqlextra/backend/migrations/patched_autodetector.py 0000664 0000000 0000000 00000027160 14634267343 0030325 0 ustar 00root root 0000000 0000000 from contextlib import contextmanager
from unittest import mock
import django
from django.db.migrations import (
AddField,
AlterField,
CreateModel,
DeleteModel,
RemoveField,
RenameField,
)
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.operations.fields import FieldOperation
from psqlextra.models import (
PostgresMaterializedViewModel,
PostgresPartitionedModel,
PostgresViewModel,
)
from psqlextra.types import PostgresPartitioningMethod
from . import operations
from .state import (
PostgresMaterializedViewModelState,
PostgresPartitionedModelState,
PostgresViewModelState,
)
# original `MigrationAutodetector.add_operation`
# function, saved here so the patched version can
# call the original
add_operation = MigrationAutodetector.add_operation
class AddOperationHandler:
"""Handler for when operations are being added to a new migration.
This is where we intercept operations such as
:see:CreateModel to replace it with our own.
"""
def __init__(self, autodetector, app_label, args, kwargs):
self.autodetector = autodetector
self.app_label = app_label
self.args = args
self.kwargs = kwargs
def add(self, operation):
"""Adds the specified operation to the list of operations to execute in
the migration."""
return add_operation(
self.autodetector,
self.app_label,
operation,
*self.args,
**self.kwargs,
)
def add_field(self, operation: AddField):
"""Adds the specified :see:AddField operation to the list of operations
to execute in the migration."""
return self._transform_view_field_operations(operation)
def remove_field(self, operation: RemoveField):
"""Adds the specified :see:RemoveField operation to the list of
operations to execute in the migration."""
return self._transform_view_field_operations(operation)
def alter_field(self, operation: AlterField):
"""Adds the specified :see:AlterField operation to the list of
operations to execute in the migration."""
return self._transform_view_field_operations(operation)
def rename_field(self, operation: RenameField):
"""Adds the specified :see:RenameField operation to the list of
operations to execute in the migration."""
return self._transform_view_field_operations(operation)
def _transform_view_field_operations(self, operation: FieldOperation):
"""Transforms operations on fields on a (materialized) view into state
only operations.
One cannot add/remove/delete fields on a (materialized) view,
however, we do want Django's migration system to keep track of
these kind of changes to the model. The :see:ApplyState
operation just tells Django the operation was applied without
actually applying it.
"""
if django.VERSION >= (4, 0):
model_identifier = (self.app_label, operation.model_name.lower())
model_state = (
self.autodetector.to_state.models.get(model_identifier)
or self.autodetector.from_state.models[model_identifier]
)
if isinstance(model_state, PostgresViewModelState):
return self.add(
operations.ApplyState(state_operation=operation)
)
else:
model = self.autodetector.new_apps.get_model(
self.app_label, operation.model_name
)
if issubclass(model, PostgresViewModel):
return self.add(
operations.ApplyState(state_operation=operation)
)
return self.add(operation)
def add_create_model(self, operation: CreateModel):
"""Adds the specified :see:CreateModel operation to the list of
operations to execute in the migration."""
if django.VERSION >= (4, 0):
model_state = self.autodetector.to_state.models[
self.app_label, operation.name.lower()
]
if isinstance(model_state, PostgresPartitionedModelState):
return self.add_create_partitioned_model(operation)
elif isinstance(model_state, PostgresMaterializedViewModelState):
return self.add_create_materialized_view_model(operation)
elif isinstance(model_state, PostgresViewModelState):
return self.add_create_view_model(operation)
else:
model = self.autodetector.new_apps.get_model(
self.app_label, operation.name
)
if issubclass(model, PostgresPartitionedModel):
return self.add_create_partitioned_model(operation)
elif issubclass(model, PostgresMaterializedViewModel):
return self.add_create_materialized_view_model(operation)
elif issubclass(model, PostgresViewModel):
return self.add_create_view_model(operation)
return self.add(operation)
def add_delete_model(self, operation: DeleteModel):
"""Adds the specified :see:Deletemodel operation to the list of
operations to execute in the migration."""
if django.VERSION >= (4, 0):
model_state = self.autodetector.from_state.models[
self.app_label, operation.name.lower()
]
if isinstance(model_state, PostgresPartitionedModelState):
return self.add_delete_partitioned_model(operation)
elif isinstance(model_state, PostgresMaterializedViewModelState):
return self.add_delete_materialized_view_model(operation)
elif isinstance(model_state, PostgresViewModelState):
return self.add_delete_view_model(operation)
else:
model = self.autodetector.old_apps.get_model(
self.app_label, operation.name
)
if issubclass(model, PostgresPartitionedModel):
return self.add_delete_partitioned_model(operation)
elif issubclass(model, PostgresMaterializedViewModel):
return self.add_delete_materialized_view_model(operation)
elif issubclass(model, PostgresViewModel):
return self.add_delete_view_model(operation)
return self.add(operation)
def add_create_partitioned_model(self, operation: CreateModel):
"""Adds a :see:PostgresCreatePartitionedModel operation to the list of
operations to execute in the migration."""
if django.VERSION >= (4, 0):
model_state = self.autodetector.to_state.models[
self.app_label, operation.name.lower()
]
partitioning_options = model_state.partitioning_options
else:
model = self.autodetector.new_apps.get_model(
self.app_label, operation.name
)
partitioning_options = model._partitioning_meta.original_attrs
_, args, kwargs = operation.deconstruct()
if partitioning_options["method"] != PostgresPartitioningMethod.HASH:
self.add(
operations.PostgresAddDefaultPartition(
model_name=operation.name, name="default"
)
)
partitioned_kwargs = {
**kwargs,
"partitioning_options": partitioning_options,
}
self.add(
operations.PostgresCreatePartitionedModel(
*args,
**partitioned_kwargs,
)
)
def add_delete_partitioned_model(self, operation: DeleteModel):
"""Adds a :see:PostgresDeletePartitionedModel operation to the list of
operations to execute in the migration."""
_, args, kwargs = operation.deconstruct()
return self.add(
operations.PostgresDeletePartitionedModel(*args, **kwargs)
)
def add_create_view_model(self, operation: CreateModel):
"""Adds a :see:PostgresCreateViewModel operation to the list of
operations to execute in the migration."""
if django.VERSION >= (4, 0):
model_state = self.autodetector.to_state.models[
self.app_label, operation.name.lower()
]
view_options = model_state.view_options
else:
model = self.autodetector.new_apps.get_model(
self.app_label, operation.name
)
view_options = model._view_meta.original_attrs
_, args, kwargs = operation.deconstruct()
view_kwargs = {**kwargs, "view_options": view_options}
self.add(operations.PostgresCreateViewModel(*args, **view_kwargs))
def add_delete_view_model(self, operation: DeleteModel):
"""Adds a :see:PostgresDeleteViewModel operation to the list of
operations to execute in the migration."""
_, args, kwargs = operation.deconstruct()
return self.add(operations.PostgresDeleteViewModel(*args, **kwargs))
def add_create_materialized_view_model(self, operation: CreateModel):
"""Adds a :see:PostgresCreateMaterializedViewModel operation to the
list of operations to execute in the migration."""
if django.VERSION >= (4, 0):
model_state = self.autodetector.to_state.models[
self.app_label, operation.name.lower()
]
view_options = model_state.view_options
else:
model = self.autodetector.new_apps.get_model(
self.app_label, operation.name
)
view_options = model._view_meta.original_attrs
_, args, kwargs = operation.deconstruct()
view_kwargs = {**kwargs, "view_options": view_options}
self.add(
operations.PostgresCreateMaterializedViewModel(
*args,
**view_kwargs,
)
)
def add_delete_materialized_view_model(self, operation: DeleteModel):
"""Adds a :see:PostgresDeleteMaterializedViewModel operation to the
list of operations to execute in the migration."""
_, args, kwargs = operation.deconstruct()
return self.add(
operations.PostgresDeleteMaterializedViewModel(*args, **kwargs)
)
@contextmanager
def patched_autodetector():
"""Patches the standard Django :seee:MigrationAutodetector for the duration
of the context.
The patch intercepts the `add_operation` function to
customize how new operations are added.
We have to do this because there is no way in Django
to extend the auto detector otherwise.
"""
autodetector_module_path = "django.db.migrations.autodetector"
autodetector_class_path = (
f"{autodetector_module_path}.MigrationAutodetector"
)
add_operation_path = f"{autodetector_class_path}.add_operation"
def _patched(autodetector, app_label, operation, *args, **kwargs):
handler = AddOperationHandler(autodetector, app_label, args, kwargs)
if isinstance(operation, CreateModel):
return handler.add_create_model(operation)
if isinstance(operation, DeleteModel):
return handler.add_delete_model(operation)
if isinstance(operation, AddField):
return handler.add_field(operation)
if isinstance(operation, RemoveField):
return handler.remove_field(operation)
if isinstance(operation, AlterField):
return handler.alter_field(operation)
if isinstance(operation, RenameField):
return handler.rename_field(operation)
return handler.add(operation)
with mock.patch(add_operation_path, new=_patched):
yield
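# Hedged usage sketch: the patch only applies inside the context, so an
# autodetector constructed and run within it routes every `add_operation`
# call through `AddOperationHandler` (`loader` is an assumed MigrationLoader):
#
#   with patched_autodetector():
#       autodetector = MigrationAutodetector(from_state, to_state)
#       changes = autodetector.changes(graph=loader.graph)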
django-postgres-extra-2.0.9/psqlextra/backend/migrations/patched_migrations.py 0000664 0000000 0000000 00000000770 14634267343 0027775 0 ustar 00root root 0000000 0000000 from contextlib import contextmanager
from .patched_autodetector import patched_autodetector
from .patched_project_state import patched_project_state
@contextmanager
def postgres_patched_migrations():
"""Patches migration related classes/functions to extend how Django
generates and applies migrations.
This adds support for automatically detecting changes in Postgres
specific models.
"""
with patched_project_state():
with patched_autodetector():
yield
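# Illustrative sketch, assuming a `makemigrations`-style flow ("myapp" is
# a placeholder); both patches are active for the duration of the block:
#
#   with postgres_patched_migrations():
#       call_command("makemigrations", "myapp")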
django-postgres-extra-2.0.9/psqlextra/backend/migrations/patched_project_state.py 0000664 0000000 0000000 00000004167 14634267343 0030473 0 ustar 00root root 0000000 0000000 from contextlib import contextmanager
from unittest import mock
from django.db.migrations.state import ProjectState
from psqlextra.models import (
PostgresMaterializedViewModel,
PostgresPartitionedModel,
PostgresViewModel,
)
from .state import (
PostgresMaterializedViewModelState,
PostgresPartitionedModelState,
PostgresViewModelState,
)
# original `ProjectState.from_apps` function,
# saved here so the patched version can call
# the original
original_from_apps = ProjectState.from_apps
def project_state_from_apps(apps):
"""Creates a :see:ProjectState instance from the specified list of apps."""
project_state = original_from_apps(apps)
for model in apps.get_models(include_swapped=True):
model_state = None
# For some of our custom models, use the more specific model
# state. For everything else, business as usual.
if issubclass(model, PostgresPartitionedModel):
model_state = PostgresPartitionedModelState.from_model(model)
elif issubclass(model, PostgresMaterializedViewModel):
model_state = PostgresMaterializedViewModelState.from_model(model)
elif issubclass(model, PostgresViewModel):
model_state = PostgresViewModelState.from_model(model)
else:
continue
model_state_key = (model_state.app_label, model_state.name_lower)
project_state.models[model_state_key] = model_state
return project_state
@contextmanager
def patched_project_state():
"""Patches the standard Django :see:ProjectState.from_apps for the duration
of the context.
The patch intercepts the `from_apps` function to control
how model state is created. We want to use our custom
model state classes for certain types of models.
We have to do this because there is no way in Django
to extend the project state otherwise.
"""
from_apps_module_path = "django.db.migrations.state"
from_apps_class_path = f"{from_apps_module_path}.ProjectState"
from_apps_path = f"{from_apps_class_path}.from_apps"
with mock.patch(from_apps_path, new=project_state_from_apps):
yield
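# Sketch of the effect (model/app names illustrative): within the context,
# building project state from the app registry yields the custom states:
#
#   with patched_project_state():
#       state = ProjectState.from_apps(apps)
#   # state.models[("myapp", "mypartitionedmodel")] is now a
#   # PostgresPartitionedModelState instead of a plain ModelState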
django-postgres-extra-2.0.9/psqlextra/backend/migrations/state/ 0000775 0000000 0000000 00000000000 14634267343 0024673 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/backend/migrations/state/__init__.py 0000664 0000000 0000000 00000001052 14634267343 0027002 0 ustar 00root root 0000000 0000000 from .materialized_view import PostgresMaterializedViewModelState
from .partitioning import (
PostgresHashPartitionState,
PostgresListPartitionState,
PostgresPartitionedModelState,
PostgresPartitionState,
PostgresRangePartitionState,
)
from .view import PostgresViewModelState
__all__ = [
"PostgresPartitionState",
"PostgresRangePartitionState",
"PostgresHashPartitionState",
"PostgresListPartitionState",
"PostgresPartitionedModelState",
"PostgresViewModelState",
"PostgresMaterializedViewModelState",
]
django-postgres-extra-2.0.9/psqlextra/backend/migrations/state/materialized_view.py 0000664 0000000 0000000 00000000765 14634267343 0030761 0 ustar 00root root 0000000 0000000 from typing import Type
from psqlextra.models import PostgresMaterializedViewModel
from .view import PostgresViewModelState
class PostgresMaterializedViewModelState(PostgresViewModelState):
"""Represents the state of a :see:PostgresMaterializedViewModel in the
migrations."""
@classmethod
def _get_base_model_class(cls) -> Type[PostgresMaterializedViewModel]:
"""Gets the class to use as a base class for rendered models."""
return PostgresMaterializedViewModel
django-postgres-extra-2.0.9/psqlextra/backend/migrations/state/model.py 0000664 0000000 0000000 00000010006 14634267343 0026342 0 ustar 00root root 0000000 0000000 from collections.abc import Mapping
from typing import Tuple, Type, cast
from django.db.migrations.state import ModelState
from django.db.models import Model
from psqlextra.models import PostgresModel
class PostgresModelState(ModelState):
"""Base for custom model states.
We need this base class to create some hooks into rendering models,
creating new states and cloning state. Most of the logic resides
here in the base class. Our derived classes implement the `_pre_*`
methods.
"""
@classmethod
def from_model( # type: ignore[override]
cls, model: Type[PostgresModel], *args, **kwargs
) -> "PostgresModelState":
"""Creates a new :see:PostgresModelState object from the specified
model.
We override this so derived classes get the chance to attach
additional information to the newly created model state.
We also need to patch up the base class for the model.
"""
model_state = super().from_model(
cast(Type[Model], model), *args, **kwargs
)
model_state = cls._pre_new(
model, cast("PostgresModelState", model_state)
)
# Django does not add abstract bases as a base in migrations
# because it assumes the base does not add anything important
# to a migration. But it does, so we replace the Model
# base with the actual base.
bases: Tuple[Type[Model], ...] = tuple()
for base in model_state.bases:
if issubclass(base, Model):
bases += (cls._get_base_model_class(),)
else:
bases += (base,)
model_state.bases = cast(Tuple[Type[Model]], bases)
return model_state
def clone(self) -> "PostgresModelState":
"""Gets an exact copy of this :see:PostgresModelState."""
model_state = super().clone()
return self._pre_clone(cast(PostgresModelState, model_state))
def render(self, apps):
"""Renders this state into an actual model."""
# TODO: figure out a way to do this without pretty much
# copying the base class's implementation
try:
bases = tuple(
(apps.get_model(base) if isinstance(base, str) else base)
for base in self.bases
)
except LookupError:
# TODO: this should be an InvalidBaseError
raise ValueError(
"Cannot resolve one or more bases from %r" % (self.bases,)
)
if isinstance(self.fields, Mapping):
# In Django 3.1 `self.fields` became a `dict`
fields = {
name: field.clone() for name, field in self.fields.items()
}
else:
# In Django < 3.1 `self.fields` is a list of (name, field) tuples
fields = {name: field.clone() for name, field in self.fields}
meta = type(
"Meta",
(),
{"app_label": self.app_label, "apps": apps, **self.options},
)
attributes = {
**fields,
"Meta": meta,
"__module__": "__fake__",
**dict(self.construct_managers()),
}
return type(*self._pre_render(self.name, bases, attributes))
@classmethod
def _pre_new(
cls,
model: Type[PostgresModel],
model_state: "PostgresModelState",
) -> "PostgresModelState":
"""Called when a new model state is created from the specified
model."""
return model_state
def _pre_clone(
self, model_state: "PostgresModelState"
) -> "PostgresModelState":
"""Called when this model state is cloned."""
return model_state
def _pre_render(self, name: str, bases, attributes):
"""Called when this model state is rendered into a model."""
return name, bases, attributes
@classmethod
def _get_base_model_class(cls) -> Type[PostgresModel]:
"""Gets the class to use as a base class for rendered models."""
return PostgresModel
django-postgres-extra-2.0.9/psqlextra/backend/migrations/state/partitioning.py 0000664 0000000 0000000 00000010135 14634267343 0027754 0 ustar 00root root 0000000 0000000 from typing import Dict, List, Type
from psqlextra.models import PostgresPartitionedModel
from .model import PostgresModelState
class PostgresPartitionState:
"""Represents the state of a partition for a :see:PostgresPartitionedModel
during a migration."""
def __init__(self, app_label: str, model_name: str, name: str) -> None:
self.app_label = app_label
self.model_name = model_name
self.name = name
class PostgresRangePartitionState(PostgresPartitionState):
"""Represents the state of a range partition for a
:see:PostgresPartitionedModel during a migration."""
def __init__(
self, app_label: str, model_name: str, name: str, from_values, to_values
):
super().__init__(app_label, model_name, name)
self.from_values = from_values
self.to_values = to_values
class PostgresListPartitionState(PostgresPartitionState):
"""Represents the state of a list partition for a
:see:PostgresPartitionedModel during a migration."""
def __init__(self, app_label: str, model_name: str, name: str, values):
super().__init__(app_label, model_name, name)
self.values = values
class PostgresHashPartitionState(PostgresPartitionState):
"""Represents the state of a hash partition for a
:see:PostgresPartitionedModel during a migration."""
def __init__(
self,
app_label: str,
model_name: str,
name: str,
modulus: int,
remainder: int,
):
super().__init__(app_label, model_name, name)
self.modulus = modulus
self.remainder = remainder
class PostgresPartitionedModelState(PostgresModelState):
"""Represents the state of a :see:PostgresPartitionedModel in the
migrations."""
def __init__(
self,
*args,
partitions: List[PostgresPartitionState] = [],
partitioning_options={},
**kwargs
):
"""Initializes a new instance of :see:PostgresPartitionedModelState.
Arguments:
partitioning_options:
Dictionary of options for partitioning.
See: PostgresPartitionedModelMeta for a list.
"""
super().__init__(*args, **kwargs)
self.partitions: Dict[str, PostgresPartitionState] = {
partition.name: partition for partition in partitions
}
self.partitioning_options = dict(partitioning_options)
def add_partition(self, partition: PostgresPartitionState):
"""Adds a partition to this partitioned model state."""
self.partitions[partition.name] = partition
def delete_partition(self, name: str):
"""Deletes a partition from this partitioned model state."""
del self.partitions[name]
@classmethod
def _pre_new( # type: ignore[override]
cls,
model: PostgresPartitionedModel,
model_state: "PostgresPartitionedModelState",
) -> "PostgresPartitionedModelState":
"""Called when a new model state is created from the specified
model."""
model_state.partitions = dict()
model_state.partitioning_options = dict(
model._partitioning_meta.original_attrs
)
return model_state
def _pre_clone( # type: ignore[override]
self, model_state: "PostgresPartitionedModelState"
) -> "PostgresPartitionedModelState":
"""Called when this model state is cloned."""
model_state.partitions = dict(self.partitions)
model_state.partitioning_options = dict(self.partitioning_options)
return model_state
def _pre_render(self, name: str, bases, attributes):
"""Called when this model state is rendered into a model."""
partitioning_meta = type(
"PartitioningMeta", (), dict(self.partitioning_options)
)
return (
name,
bases,
{**attributes, "PartitioningMeta": partitioning_meta},
)
@classmethod
def _get_base_model_class(cls) -> Type[PostgresPartitionedModel]:
"""Gets the class to use as a base class for rendered models."""
return PostgresPartitionedModel
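# Illustrative example (values made up): partition states are plain value
# holders, keyed by name on the partitioned model state:
#
#   partition = PostgresRangePartitionState(
#       app_label="myapp", model_name="MyModel", name="pt_2021",
#       from_values=["2021-01-01"], to_values=["2022-01-01"],
#   )
#   model_state.add_partition(partition)
#   # model_state.partitions["pt_2021"] is partition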
django-postgres-extra-2.0.9/psqlextra/backend/migrations/state/view.py 0000664 0000000 0000000 00000003306 14634267343 0026221 0 ustar 00root root 0000000 0000000 from typing import Type
from psqlextra.models import PostgresViewModel
from .model import PostgresModelState
class PostgresViewModelState(PostgresModelState):
"""Represents the state of a :see:PostgresViewModel in the migrations."""
def __init__(self, *args, view_options={}, **kwargs):
"""Initializes a new instance of :see:PostgresViewModelState.
Arguments:
view_options:
Dictionary of options for views.
See: PostgresViewModelMeta for a list.
"""
super().__init__(*args, **kwargs)
self.view_options = dict(view_options)
@classmethod
def _pre_new( # type: ignore[override]
cls,
model: Type[PostgresViewModel],
model_state: "PostgresViewModelState",
) -> "PostgresViewModelState":
"""Called when a new model state is created from the specified
model."""
model_state.view_options = dict(model._view_meta.original_attrs)
return model_state
def _pre_clone( # type: ignore[override]
self, model_state: "PostgresViewModelState"
) -> "PostgresViewModelState":
"""Called when this model state is cloned."""
model_state.view_options = dict(self.view_options)
return model_state
def _pre_render(self, name: str, bases, attributes):
"""Called when this model state is rendered into a model."""
view_meta = type("ViewMeta", (), dict(self.view_options))
return name, bases, {**attributes, "ViewMeta": view_meta}
@classmethod
def _get_base_model_class(cls) -> Type[PostgresViewModel]:
"""Gets the class to use as a base class for rendered models."""
return PostgresViewModel
django-postgres-extra-2.0.9/psqlextra/backend/operations.py 0000664 0000000 0000000 00000000772 14634267343 0024142 0 ustar 00root root 0000000 0000000 from psqlextra.compiler import (
SQLAggregateCompiler,
SQLCompiler,
SQLDeleteCompiler,
SQLInsertCompiler,
SQLUpdateCompiler,
)
from . import base_impl
class PostgresOperations(base_impl.operations()): # type: ignore[misc]
"""Simple operations specific to PostgreSQL."""
compiler_module = "psqlextra.compiler"
compiler_classes = [
SQLCompiler,
SQLDeleteCompiler,
SQLAggregateCompiler,
SQLUpdateCompiler,
SQLInsertCompiler,
]
django-postgres-extra-2.0.9/psqlextra/backend/schema.py 0000664 0000000 0000000 00000112055 14634267343 0023215 0 ustar 00root root 0000000 0000000 from typing import TYPE_CHECKING, Any, List, Optional, Type, cast
from unittest import mock
import django
from django.core.exceptions import (
FieldDoesNotExist,
ImproperlyConfigured,
SuspiciousOperation,
)
from django.db import transaction
from django.db.backends.ddl_references import Statement
from django.db.backends.postgresql.schema import ( # type: ignore[import]
DatabaseSchemaEditor,
)
from django.db.models import Field, Model
from psqlextra.settings import (
postgres_prepend_local_search_path,
postgres_reset_local_search_path,
)
from psqlextra.type_assertions import is_sql_with_params
from psqlextra.types import PostgresPartitioningMethod
from . import base_impl
from .introspection import PostgresIntrospection
from .side_effects import (
HStoreRequiredSchemaEditorSideEffect,
HStoreUniqueSchemaEditorSideEffect,
)
if TYPE_CHECKING:
class SchemaEditor(DatabaseSchemaEditor):
pass
else:
SchemaEditor = base_impl.schema_editor()
class PostgresSchemaEditor(SchemaEditor):
"""Schema editor that adds extra methods for PostgreSQL specific features
and hooks into existing implementations to add side effects specific to
PostgreSQL."""
sql_add_pk = "ALTER TABLE %s ADD PRIMARY KEY (%s)"
sql_create_fk_not_valid = f"{SchemaEditor.sql_create_fk} NOT VALID"
sql_validate_fk = "ALTER TABLE %s VALIDATE CONSTRAINT %s"
sql_create_sequence_with_owner = "CREATE SEQUENCE %s OWNED BY %s.%s"
sql_alter_table_storage_setting = "ALTER TABLE %s SET (%s = %s)"
sql_reset_table_storage_setting = "ALTER TABLE %s RESET (%s)"
sql_alter_table_schema = "ALTER TABLE %s SET SCHEMA %s"
sql_create_schema = "CREATE SCHEMA %s"
sql_delete_schema = "DROP SCHEMA %s"
sql_delete_schema_cascade = "DROP SCHEMA %s CASCADE"
sql_create_view = "CREATE VIEW %s AS (%s)"
sql_replace_view = "CREATE OR REPLACE VIEW %s AS (%s)"
sql_drop_view = "DROP VIEW IF EXISTS %s"
sql_create_materialized_view = (
"CREATE MATERIALIZED VIEW %s AS (%s) WITH DATA"
)
sql_drop_materialized_view = "DROP MATERIALIZED VIEW %s"
sql_refresh_materialized_view = "REFRESH MATERIALIZED VIEW %s"
sql_refresh_materialized_view_concurrently = (
"REFRESH MATERIALIZED VIEW CONCURRENTLY %s"
)
sql_partition_by = " PARTITION BY %s (%s)"
sql_add_default_partition = "CREATE TABLE %s PARTITION OF %s DEFAULT"
sql_add_hash_partition = "CREATE TABLE %s PARTITION OF %s FOR VALUES WITH (MODULUS %s, REMAINDER %s)"
sql_add_range_partition = (
"CREATE TABLE %s PARTITION OF %s FOR VALUES FROM (%s) TO (%s)"
)
sql_add_list_partition = (
"CREATE TABLE %s PARTITION OF %s FOR VALUES IN (%s)"
)
sql_delete_partition = "DROP TABLE %s"
sql_table_comment = "COMMENT ON TABLE %s IS %s"
side_effects: List[DatabaseSchemaEditor] = [
cast(DatabaseSchemaEditor, HStoreUniqueSchemaEditorSideEffect()),
cast(DatabaseSchemaEditor, HStoreRequiredSchemaEditorSideEffect()),
]
def __init__(self, connection, collect_sql=False, atomic=True):
super().__init__(connection, collect_sql, atomic)
for side_effect in self.side_effects:
side_effect.execute = self.execute
side_effect.quote_name = self.quote_name
self.deferred_sql = []
self.introspection = PostgresIntrospection(self.connection)
def create_schema(self, name: str) -> None:
"""Creates a Postgres schema."""
self.execute(self.sql_create_schema % self.quote_name(name))
def delete_schema(self, name: str, cascade: bool) -> None:
"""Drops a Postgres schema."""
sql = (
self.sql_delete_schema
if not cascade
else self.sql_delete_schema_cascade
)
self.execute(sql % self.quote_name(name))
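# Rough sketch of the SQL the two methods above emit (schema name made up):
#
#   editor.create_schema("archive")                # CREATE SCHEMA "archive"
#   editor.delete_schema("archive", cascade=True)  # DROP SCHEMA "archive" CASCADE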
def create_model(self, model: Type[Model]) -> None:
"""Creates a new model."""
super().create_model(model)
for side_effect in self.side_effects:
side_effect.create_model(model)
def delete_model(self, model: Type[Model]) -> None:
"""Drops/deletes an existing model."""
for side_effect in self.side_effects:
side_effect.delete_model(model)
super().delete_model(model)
def clone_model_structure_to_schema(
self, model: Type[Model], *, schema_name: str
) -> None:
"""Creates a clone of the columns for the specified model in a separate
schema.
The table will have exactly the same name as the model table
in the default schema. It will have none of the constraints,
foreign keys and indexes.
Use this to create a temporary clone of a model table to
replace the original model table later on. The lack of
indices and constraints allows for greater write speeds.
The original model table will be unaffected.
Arguments:
model:
Model to clone the table of into the
specified schema.
schema_name:
Name of the schema to create the cloned
table in.
"""
table_name = model._meta.db_table
quoted_table_name = self.quote_name(model._meta.db_table)
quoted_schema_name = self.quote_name(schema_name)
quoted_table_fqn = f"{quoted_schema_name}.{quoted_table_name}"
self.execute(
self.sql_create_table
% {
"table": quoted_table_fqn,
"definition": f"LIKE {quoted_table_name} INCLUDING ALL EXCLUDING CONSTRAINTS EXCLUDING INDEXES",
}
)
# Copy sequences
#
# Django 4.0 and older do not use IDENTITY so Postgres does
# not copy the sequences into the new table. We do it manually.
if django.VERSION < (4, 1):
with self.connection.cursor() as cursor:
sequences = self.introspection.get_sequences(cursor, table_name)
for sequence in sequences:
if sequence["table"] != table_name:
continue
quoted_sequence_name = self.quote_name(sequence["name"])
quoted_sequence_fqn = (
f"{quoted_schema_name}.{quoted_sequence_name}"
)
quoted_column_name = self.quote_name(sequence["column"])
self.execute(
self.sql_create_sequence_with_owner
% (
quoted_sequence_fqn,
quoted_table_fqn,
quoted_column_name,
)
)
self.execute(
self.sql_alter_column
% {
"table": quoted_table_fqn,
"changes": self.sql_alter_column_default
% {
"column": quoted_column_name,
"default": "nextval('%s')" % quoted_sequence_fqn,
},
}
)
# Copy storage settings
#
# Postgres only copies column-level storage options, not
# the table-level storage options.
with self.connection.cursor() as cursor:
storage_settings = self.introspection.get_storage_settings(
cursor, model._meta.db_table
)
for setting_name, setting_value in storage_settings.items():
self.alter_table_storage_setting(
quoted_table_fqn, setting_name, setting_value
)
def clone_model_constraints_and_indexes_to_schema(
self, model: Type[Model], *, schema_name: str
) -> None:
"""Adds the constraints, foreign keys and indexes to a model table that
was cloned into a separate table without them by
`clone_model_structure_to_schema`.
Arguments:
model:
Model for which the cloned table was created.
schema_name:
Name of the schema in which the cloned table
resides.
"""
with postgres_prepend_local_search_path(
[schema_name], using=self.connection.alias
):
for constraint in model._meta.constraints:
self.add_constraint(model, constraint) # type: ignore[attr-defined]
for index in model._meta.indexes:
self.add_index(model, index)
if model._meta.unique_together:
self.alter_unique_together(
model, tuple(), model._meta.unique_together
)
if model._meta.index_together:
self.alter_index_together(
model, tuple(), model._meta.index_together
)
for field in model._meta.local_concrete_fields: # type: ignore[attr-defined]
# Django creates primary keys that were added to the model later
# with a custom name. We want the name as it was created originally.
if field.primary_key:
with postgres_reset_local_search_path(
using=self.connection.alias
):
[primary_key_name] = self._constraint_names( # type: ignore[attr-defined]
model, primary_key=True
)
self.execute(
self.sql_create_pk
% {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(primary_key_name),
"columns": self.quote_name(
field.db_column or field.attname
),
}
)
continue
# Django creates foreign keys in a single statement, which acquires
# an AccessExclusiveLock on the referenced table. We want to avoid
# that and create the FK as NOT VALID. We can run VALIDATE in
# a separate transaction later to validate the entries without
# acquiring an AccessExclusiveLock.
if field.remote_field:
with postgres_reset_local_search_path(
using=self.connection.alias
):
[fk_name] = self._constraint_names( # type: ignore[attr-defined]
model, [field.column], foreign_key=True
)
sql = Statement(
self.sql_create_fk_not_valid,
table=self.quote_name(model._meta.db_table),
name=self.quote_name(fk_name),
column=self.quote_name(field.column),
to_table=self.quote_name(
field.target_field.model._meta.db_table
),
to_column=self.quote_name(field.target_field.column),
deferrable=self.connection.ops.deferrable_sql(),
)
self.execute(sql)
# It's hard to alter a field's check because it is defined
# by the field class, not the field instance. Handle this
# manually.
field_check = field.db_parameters(self.connection).get("check")
if field_check:
with postgres_reset_local_search_path(
using=self.connection.alias
):
[field_check_name] = self._constraint_names( # type: ignore[attr-defined]
model,
[field.column],
check=True,
exclude={
constraint.name
for constraint in model._meta.constraints
},
)
self.execute(
self._create_check_sql( # type: ignore[attr-defined]
model, field_check_name, field_check
)
)
# Clone the field and alter its state to match our current
# table definition. This will cause Django to see the missing
# indices and create them.
if field.remote_field:
# We add the foreign key constraint ourselves with NOT VALID,
# hence, we specify `db_constraint=False` on both old/new.
# Django won't touch the foreign key constraint.
old_field = self._clone_model_field(
field, db_index=False, unique=False, db_constraint=False
)
new_field = self._clone_model_field(
field, db_constraint=False
)
self.alter_field(model, old_field, new_field)
else:
old_field = self._clone_model_field(
field, db_index=False, unique=False
)
new_field = self._clone_model_field(field)
self.alter_field(model, old_field, new_field)
def clone_model_foreign_keys_to_schema(
self, model: Type[Model], schema_name: str
) -> None:
"""Validates the foreign keys in the cloned model table created by
`clone_model_structure_to_schema` and
`clone_model_constraints_and_indexes_to_schema`.
Do NOT run this in the same transaction as the
foreign keys were added to the table. It WILL
acquire a long-lived AccessExclusiveLock.
Arguments:
model:
Model for which the cloned table was created.
schema_name:
Name of the schema in which the cloned table
resides.
"""
constraint_names = self._constraint_names(model, foreign_key=True) # type: ignore[attr-defined]
with postgres_prepend_local_search_path(
[schema_name], using=self.connection.alias
):
for fk_name in constraint_names:
self.execute(
self.sql_validate_fk
% (
self.quote_name(model._meta.db_table),
self.quote_name(fk_name),
)
)
def alter_table_storage_setting(
self, table_name: str, name: str, value: str
) -> None:
"""Alters a storage setting for a table.
See: https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-STORAGE-PARAMETERS
Arguments:
table_name:
Name of the table to alter the setting for.
name:
Name of the setting to alter.
value:
Value to alter the setting to.
Note that this is always a string, even if it looks
like a number or a boolean. That's how Postgres
stores storage settings internally.
"""
self.execute(
self.sql_alter_table_storage_setting
% (self.quote_name(table_name), name, value)
)
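# Illustrative call and the resulting SQL (names/values made up):
#
#   editor.alter_table_storage_setting("mytable", "fillfactor", "70")
#   # ALTER TABLE "mytable" SET (fillfactor = 70)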
def alter_model_storage_setting(
self, model: Type[Model], name: str, value: str
) -> None:
"""Alters a storage setting for the model's table.
See: https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-STORAGE-PARAMETERS
Arguments:
model:
Model whose table setting to alter.
name:
Name of the setting to alter.
value:
Value to alter the setting to.
Note that this is always a string, even if it looks
like a number or a boolean. That's how Postgres
stores storage settings internally.
"""
self.alter_table_storage_setting(model._meta.db_table, name, value)
def reset_table_storage_setting(self, table_name: str, name: str) -> None:
"""Resets a table's storage setting to the database or server default.
See: https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-STORAGE-PARAMETERS
Arguments:
table_name:
Name of the table to reset the setting for.
name:
Name of the setting to reset.
"""
self.execute(
self.sql_reset_table_storage_setting
% (self.quote_name(table_name), name)
)
def reset_model_storage_setting(
self, model: Type[Model], name: str
) -> None:
"""Resets a model's table storage setting to the database or server
default.
See: https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-STORAGE-PARAMETERS
Arguments:
model:
Model whose table setting to reset.
name:
Name of the setting to reset.
"""
self.reset_table_storage_setting(model._meta.db_table, name)
def alter_table_schema(self, table_name: str, schema_name: str) -> None:
"""Moves the specified table into the specified schema.
WARNING: Moving models into a different schema than the default
will break querying the model.
Arguments:
table_name:
Name of the table to move into the specified schema.
schema_name:
Name of the schema to move the table to.
"""
self.execute(
self.sql_alter_table_schema
% (self.quote_name(table_name), self.quote_name(schema_name))
)
def alter_model_schema(self, model: Type[Model], schema_name: str) -> None:
"""Moves the specified model's table into the specified schema.
WARNING: Moving models into a different schema than the default
will break querying the model.
Arguments:
model:
Model of which to move the table.
schema_name:
Name of the schema to move the model's table to.
"""
self.execute(
self.sql_alter_table_schema
% (
self.quote_name(model._meta.db_table),
self.quote_name(schema_name),
)
)
def refresh_materialized_view_model(
self, model: Type[Model], concurrently: bool = False
) -> None:
"""Refreshes a materialized view."""
sql_template = (
self.sql_refresh_materialized_view_concurrently
if concurrently
else self.sql_refresh_materialized_view
)
sql = sql_template % self.quote_name(model._meta.db_table)
self.execute(sql)
def create_view_model(self, model: Type[Model]) -> None:
"""Creates a new view model."""
self._create_view_model(self.sql_create_view, model)
def replace_view_model(self, model: Type[Model]) -> None:
"""Replaces a view model with a newer version.
This is used to alter the backing query of a view.
"""
self._create_view_model(self.sql_replace_view, model)
def delete_view_model(self, model: Type[Model]) -> None:
"""Deletes a view model."""
sql = self.sql_drop_view % self.quote_name(model._meta.db_table)
self.execute(sql)
def create_materialized_view_model(self, model: Type[Model]) -> None:
"""Creates a new materialized view model."""
self._create_view_model(self.sql_create_materialized_view, model)
def replace_materialized_view_model(self, model: Type[Model]) -> None:
"""Replaces a materialized view with a newer version.
This is used to alter the backing query of a materialized view.
Replacing a materialized view is a lot trickier than a normal view.
For normal views we can use `CREATE OR REPLACE VIEW`, but for
materialized views, we have to drop the old view, create the new
one and re-create all indexes and constraints.
This operation is atomic as it runs in a transaction.
"""
with self.connection.cursor() as cursor:
constraints = self.introspection.get_constraints(
cursor, model._meta.db_table
)
with transaction.atomic():
self.delete_materialized_view_model(model)
self.create_materialized_view_model(model)
for constraint_name, constraint_options in constraints.items():
if not constraint_options["definition"]:
raise SuspiciousOperation(
"Table %s has a constraint '%s' that no definition could be generated for"
% (model._meta.db_table, constraint_name)
)
self.execute(constraint_options["definition"])
def delete_materialized_view_model(self, model: Type[Model]) -> None:
"""Deletes a materialized view model."""
sql = self.sql_drop_materialized_view % self.quote_name(
model._meta.db_table
)
self.execute(sql)
def create_partitioned_model(self, model: Type[Model]) -> None:
"""Creates a new partitioned model."""
meta = self._partitioning_properties_for_model(model)
# Get the SQL statement that Django creates for normal
# table creations.
sql, params = self._extract_sql(self.create_model, model)
partitioning_key_sql = ", ".join(
self.quote_name(field_name) for field_name in meta.key
)
# create a composite key that includes the partitioning key
sql = sql.replace(" PRIMARY KEY", "")
if model._meta.pk and model._meta.pk.name not in meta.key:
sql = sql[:-1] + ", PRIMARY KEY (%s, %s))" % (
self.quote_name(model._meta.pk.name),
partitioning_key_sql,
)
else:
sql = sql[:-1] + ", PRIMARY KEY (%s))" % (partitioning_key_sql,)
# extend the standard CREATE TABLE statement with
# 'PARTITION BY ...'
sql += self.sql_partition_by % (
meta.method.upper(),
partitioning_key_sql,
)
self.execute(sql, params)
def delete_partitioned_model(self, model: Type[Model]) -> None:
"""Drops the specified partitioned model."""
return self.delete_model(model)
def add_range_partition(
self,
model: Type[Model],
name: str,
from_values: Any,
to_values: Any,
comment: Optional[str] = None,
) -> None:
"""Creates a new range partition for the specified partitioned model.
Arguments:
model:
Partitioned model to create a partition for.
name:
Name to give to the new partition.
Final name will be "{table_name}_{partition_name}"
from_values:
Start of the partitioning key range of
values that need to be stored in this
partition.
to_values:
End of the partitioning key range of
values that need to be stored in this
partition.
comment:
Optionally, a comment to add on this
partition table.
"""
# asserts the model is a model set up for partitioning
self._partitioning_properties_for_model(model)
table_name = self.create_partition_table_name(model, name)
sql = self.sql_add_range_partition % (
self.quote_name(table_name),
self.quote_name(model._meta.db_table),
"%s",
"%s",
)
with transaction.atomic():
self.execute(sql, (from_values, to_values))
if comment:
self.set_comment_on_table(table_name, comment)
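# Hedged usage sketch (model and bounds are illustrative):
#
#   editor.add_range_partition(
#       MyPartitionedModel, "pt_2021",
#       from_values="2021-01-01", to_values="2022-01-01",
#   )
#   # CREATE TABLE "mypartitionedmodel_pt_2021"
#   # PARTITION OF "mypartitionedmodel"
#   # FOR VALUES FROM ('2021-01-01') TO ('2022-01-01')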
def add_list_partition(
self,
model: Type[Model],
name: str,
values: List[Any],
comment: Optional[str] = None,
) -> None:
"""Creates a new list partition for the specified partitioned model.
Arguments:
model:
Partitioned model to create a partition for.
name:
Name to give to the new partition.
Final name will be "{table_name}_{partition_name}"
values:
Partition key values that should be
stored in this partition.
comment:
Optionally, a comment to add on this
partition table.
"""
# asserts the model is a model set up for partitioning
self._partitioning_properties_for_model(model)
table_name = self.create_partition_table_name(model, name)
sql = self.sql_add_list_partition % (
self.quote_name(table_name),
self.quote_name(model._meta.db_table),
",".join(["%s" for _ in range(len(values))]),
)
with transaction.atomic():
self.execute(sql, values)
if comment:
self.set_comment_on_table(table_name, comment)
def add_hash_partition(
self,
model: Type[Model],
name: str,
modulus: int,
remainder: int,
comment: Optional[str] = None,
) -> None:
"""Creates a new hash partition for the specified partitioned model.
Arguments:
model:
Partitioned model to create a partition for.
name:
Name to give to the new partition.
Final name will be "{table_name}_{partition_name}"
modulus:
Integer value by which the key is divided.
remainder:
The remainder of the hash value when divided by modulus.
comment:
Optionally, a comment to add on this partition table.
"""
# asserts the model is a model set up for partitioning
self._partitioning_properties_for_model(model)
table_name = self.create_partition_table_name(model, name)
sql = self.sql_add_hash_partition % (
self.quote_name(table_name),
self.quote_name(model._meta.db_table),
"%s",
"%s",
)
with transaction.atomic():
self.execute(sql, (modulus, remainder))
if comment:
self.set_comment_on_table(table_name, comment)
def add_default_partition(
self, model: Type[Model], name: str, comment: Optional[str] = None
) -> None:
"""Creates a new default partition for the specified partitioned model.
A default partition is the partition that rows are routed to
when no more specific partition matches.
Arguments:
model:
Partitioned model to create a partition for.
name:
Name to give to the new partition.
Final name will be "{table_name}_{partition_name}"
comment:
Optionally, a comment to add on this
partition table.
"""
# asserts the model is a model set up for partitioning
self._partitioning_properties_for_model(model)
table_name = self.create_partition_table_name(model, name)
sql = self.sql_add_default_partition % (
self.quote_name(table_name),
self.quote_name(model._meta.db_table),
)
with transaction.atomic():
self.execute(sql)
if comment:
self.set_comment_on_table(table_name, comment)
def delete_partition(self, model: Type[Model], name: str) -> None:
"""Deletes the partition with the specified name."""
sql = self.sql_delete_partition % self.quote_name(
self.create_partition_table_name(model, name)
)
self.execute(sql)
def alter_db_table(
self, model: Type[Model], old_db_table: str, new_db_table: str
) -> None:
"""Alters a table/model."""
super().alter_db_table(model, old_db_table, new_db_table)
for side_effect in self.side_effects:
side_effect.alter_db_table(model, old_db_table, new_db_table)
def add_field(self, model: Type[Model], field: Field) -> None:
"""Adds a new field to an exisiting model."""
super().add_field(model, field)
for side_effect in self.side_effects:
side_effect.add_field(model, field)
def remove_field(self, model: Type[Model], field: Field) -> None:
"""Removes a field from an existing model."""
for side_effect in self.side_effects:
side_effect.remove_field(model, field)
super().remove_field(model, field)
def alter_field(
self,
model: Type[Model],
old_field: Field,
new_field: Field,
strict: bool = False,
) -> None:
"""Alters an existing field on an existing model."""
super().alter_field(model, old_field, new_field, strict)
for side_effect in self.side_effects:
side_effect.alter_field(model, old_field, new_field, strict)
def vacuum_table(
self,
table_name: str,
columns: List[str] = [],
*,
full: bool = False,
freeze: bool = False,
verbose: bool = False,
analyze: bool = False,
disable_page_skipping: bool = False,
skip_locked: bool = False,
index_cleanup: bool = False,
truncate: bool = False,
parallel: Optional[int] = None,
) -> None:
"""Runs the VACUUM statement on the specified table with the specified
options.
Arguments:
table_name:
Name of the table to run VACUUM on.
columns:
Optionally, a list of columns to vacuum. If not
specified, all columns are vacuumed.
"""
if self.connection.in_atomic_block:
raise SuspiciousOperation("Vacuum cannot be done in a transaction")
options = []
if full:
options.append("FULL")
if freeze:
options.append("FREEZE")
if verbose:
options.append("VERBOSE")
if analyze:
options.append("ANALYZE")
if disable_page_skipping:
options.append("DISABLE_PAGE_SKIPPING")
if skip_locked:
options.append("SKIP_LOCKED")
if index_cleanup:
options.append("INDEX_CLEANUP")
if truncate:
options.append("TRUNCATE")
if parallel is not None:
options.append(f"PARALLEL {parallel}")
sql = "VACUUM"
if options:
options_sql = ", ".join(options)
sql += f" ({options_sql})"
sql += f" {self.quote_name(table_name)}"
if columns:
columns_sql = ", ".join(
[self.quote_name(column) for column in columns]
)
sql += f" ({columns_sql})"
self.execute(sql)
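# Illustrative example of the SQL built above (table/column names made up):
#
#   editor.vacuum_table("mytable", ["col1"], full=True, analyze=True)
#   # VACUUM (FULL, ANALYZE) "mytable" ("col1")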
def vacuum_model(
self, model: Type[Model], fields: List[Field] = [], **kwargs
) -> None:
"""Runs the VACUUM statement on the table of the specified model with
the specified options.
Arguments:
model:
Model whose table to run VACUUM on.
fields:
Optionally, a list of fields to vacuum. If not
specified, all fields are vacuumed.
"""
columns = [
field.column
for field in fields
if getattr(field, "concrete", False) and field.column
]
self.vacuum_table(model._meta.db_table, columns, **kwargs)
def set_comment_on_table(self, table_name: str, comment: str) -> None:
"""Sets the comment on the specified table."""
sql = self.sql_table_comment % (self.quote_name(table_name), "%s")
self.execute(sql, (comment,))
def _create_view_model(self, sql: str, model: Type[Model]) -> None:
"""Creates a new view model using the specified SQL query."""
meta = self._view_properties_for_model(model)
with self.connection.cursor() as cursor:
view_sql = cursor.mogrify(*meta.query)
if isinstance(view_sql, bytes):
view_sql = view_sql.decode("utf-8")
self.execute(sql % (self.quote_name(model._meta.db_table), view_sql))
def _extract_sql(self, method, *args):
"""Calls the specified method with the specified arguments and
intercepts the SQL statement it WOULD execute.
We use this to figure out the exact SQL statement Django would
execute. We can then make a small modification and execute it
ourselves.
"""
with mock.patch.object(self, "execute") as execute:
method(*args)
return tuple(execute.mock_calls[0])[1]
@staticmethod
def _view_properties_for_model(model: Type[Model]):
"""Gets the view options for the specified model.
Raises:
ImproperlyConfigured:
When the specified model is not set up
as a view.
"""
meta = getattr(model, "_view_meta", None)
if not meta:
raise ImproperlyConfigured(
(
"Model '%s' is not properly configured to be a view."
" Create the `ViewMeta` class as a child of '%s'."
)
% (model.__name__, model.__name__)
)
if not is_sql_with_params(meta.query):
raise ImproperlyConfigured(
(
"Model '%s' is not properly configured to be a view."
" Set the `query` and `key` attribute on the"
" `ViewMeta` class as a child of '%s'"
)
% (model.__name__, model.__name__)
)
return meta
@staticmethod
def _partitioning_properties_for_model(model: Type[Model]):
"""Gets the partitioning options for the specified model.
Raises:
ImproperlyConfigured:
When the specified model is not set up
for partitioning.
"""
meta = getattr(model, "_partitioning_meta", None)
if not meta:
raise ImproperlyConfigured(
(
"Model '%s' is not properly configured to be partitioned."
" Create the `PartitioningMeta` class as a child of '%s'."
)
% (model.__name__, model.__name__)
)
if not meta.method or not meta.key:
raise ImproperlyConfigured(
(
"Model '%s' is not properly configured to be partitioned."
" Set the `method` and `key` attributes on the"
" `PartitioningMeta` class as a child of '%s'"
)
% (model.__name__, model.__name__)
)
if meta.method not in PostgresPartitioningMethod:
raise ImproperlyConfigured(
(
"Model '%s' is not properly configured to be partitioned."
" '%s' is not a member of the PostgresPartitioningMethod enum."
)
% (model.__name__, meta.method)
)
if not isinstance(meta.key, list):
raise ImproperlyConfigured(
(
"Model '%s' is not properly configured to be partitioned."
" Partitioning key should be a list (of field names or values,"
" depending on the partitioning method)."
)
% model.__name__
)
try:
for field_name in meta.key:
model._meta.get_field(field_name)
except FieldDoesNotExist:
raise ImproperlyConfigured(
(
"Model '%s' is not properly configured to be partitioned."
" Field '%s' in partitioning key %s is not a valid field on"
" '%s'."
)
% (model.__name__, field_name, meta.key, model.__name__)
)
return meta
def create_partition_table_name(self, model: Type[Model], name: str) -> str:
return "%s_%s" % (model._meta.db_table.lower(), name.lower())
def _clone_model_field(self, field: Field, **overrides) -> Field:
"""Clones the specified model field and overrides its kwargs with the
specified overrides.
The cloned field will not be contributed to the model.
"""
_, _, field_args, field_kwargs = field.deconstruct()
cloned_field_args = field_args[:]
cloned_field_kwargs = {**field_kwargs, **overrides}
cloned_field = field.__class__(
*cloned_field_args, **cloned_field_kwargs
)
cloned_field.model = field.model
cloned_field.set_attributes_from_name(field.name)
if cloned_field.remote_field and field.remote_field:
cloned_field.remote_field.model = field.remote_field.model
cloned_field.set_attributes_from_rel() # type: ignore[attr-defined]
return cloned_field
django-postgres-extra-2.0.9/psqlextra/backend/side_effects/ 0000775 0000000 0000000 00000000000 14634267343 0024022 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/backend/side_effects/__init__.py 0000664 0000000 0000000 00000000345 14634267343 0026135 0 ustar 00root root 0000000 0000000 from .hstore_required import HStoreRequiredSchemaEditorSideEffect
from .hstore_unique import HStoreUniqueSchemaEditorSideEffect
__all__ = [
"HStoreUniqueSchemaEditorSideEffect",
"HStoreRequiredSchemaEditorSideEffect",
]
django-postgres-extra-2.0.9/psqlextra/backend/side_effects/hstore_required.py 0000664 0000000 0000000 00000013322 14634267343 0027601 0 ustar 00root root 0000000 0000000 from psqlextra.fields import HStoreField
class HStoreRequiredSchemaEditorSideEffect:
sql_hstore_required_create = (
"ALTER TABLE {table} "
"ADD CONSTRAINT {name} "
"CHECK (({field}->'{key}') "
"IS NOT NULL)"
)
sql_hstore_required_rename = (
"ALTER TABLE {table} "
"RENAME CONSTRAINT "
"{old_name} "
"TO "
"{new_name}"
)
sql_hstore_required_drop = (
"ALTER TABLE {table} " "DROP CONSTRAINT IF EXISTS {name}"
)
def create_model(self, model):
"""Ran when a new model is created."""
for field in model._meta.local_fields:
if not isinstance(field, HStoreField):
continue
self.add_field(model, field)
def delete_model(self, model):
"""Ran when a model is being deleted."""
for field in model._meta.local_fields:
if not isinstance(field, HStoreField):
continue
self.remove_field(model, field)
def alter_db_table(self, model, old_db_table, new_db_table):
"""Ran when the name of a model is changed."""
for field in model._meta.local_fields:
if not isinstance(field, HStoreField):
continue
for key in self._iterate_required_keys(field):
self._rename_hstore_required(
old_db_table, new_db_table, field, field, key
)
def add_field(self, model, field):
"""Ran when a field is added to a model."""
for key in self._iterate_required_keys(field):
self._create_hstore_required(model._meta.db_table, field, key)
def remove_field(self, model, field):
"""Ran when a field is removed from a model."""
for key in self._iterate_required_keys(field):
self._drop_hstore_required(model._meta.db_table, field, key)
def alter_field(self, model, old_field, new_field, strict=False):
"""Ran when the configuration on a field changed."""
is_old_field_hstore = isinstance(old_field, HStoreField)
is_new_field_hstore = isinstance(new_field, HStoreField)
if not is_old_field_hstore and not is_new_field_hstore:
return
old_required = getattr(old_field, "required", []) or []
new_required = getattr(new_field, "required", []) or []
# handle field renames before moving on
if str(old_field.column) != str(new_field.column):
for key in self._iterate_required_keys(old_field):
self._rename_hstore_required(
model._meta.db_table,
model._meta.db_table,
old_field,
new_field,
key,
)
# drop the constraints for keys that have been removed
for key in old_required:
if key not in new_required:
self._drop_hstore_required(model._meta.db_table, old_field, key)
# create new constraints for keys that have been added
for key in new_required:
if key not in old_required:
self._create_hstore_required(
model._meta.db_table, new_field, key
)
def _create_hstore_required(self, table_name, field, key):
"""Creates a REQUIRED CONSTRAINT for the specified hstore key."""
name = self._required_constraint_name(table_name, field, key)
sql = self.sql_hstore_required_create.format(
name=self.quote_name(name),
table=self.quote_name(table_name),
field=self.quote_name(field.column),
key=key,
)
self.execute(sql)
def _rename_hstore_required(
self, old_table_name, new_table_name, old_field, new_field, key
):
"""Renames an existing REQUIRED CONSTRAINT for the specified hstore
key."""
old_name = self._required_constraint_name(
old_table_name, old_field, key
)
new_name = self._required_constraint_name(
new_table_name, new_field, key
)
sql = self.sql_hstore_required_rename.format(
table=self.quote_name(new_table_name),
old_name=self.quote_name(old_name),
new_name=self.quote_name(new_name),
)
self.execute(sql)
def _drop_hstore_required(self, table_name, field, key):
"""Drops a REQUIRED CONSTRAINT for the specified hstore key."""
name = self._required_constraint_name(table_name, field, key)
sql = self.sql_hstore_required_drop.format(
table=self.quote_name(table_name), name=self.quote_name(name)
)
self.execute(sql)
@staticmethod
def _required_constraint_name(table: str, field, key):
"""Gets the name for a CONSTRAINT that applies to a single hstore key.
Arguments:
table:
The name of the table the field is
a part of.
field:
The hstore field the CHECK
constraint applies to.
key:
The name of the hstore key
to create the name for.
Returns:
The name for the CHECK constraint.
"""
return "{table}_{field}_required_{postfix}".format(
table=table, field=field.column, postfix=key
)
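# Illustrative result (names made up): for a field with column "title" and
# key "en", the generated name is "mytable_title_required_en".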
@staticmethod
def _iterate_required_keys(field):
"""Iterates over the keys marked as "required" in the specified field.
Arguments:
field:
The field of which key's to
iterate over.
"""
required_keys = getattr(field, "required", None)
if not required_keys:
return
for key in required_keys:
yield key
django-postgres-extra-2.0.9/psqlextra/backend/side_effects/hstore_unique.py 0000664 0000000 0000000 00000013613 14634267343 0027272 0 ustar 00root root 0000000 0000000 from psqlextra.fields import HStoreField
class HStoreUniqueSchemaEditorSideEffect:
sql_hstore_unique_create = (
"CREATE UNIQUE INDEX IF NOT EXISTS " "{name} ON {table} " "({columns})"
)
sql_hstore_unique_rename = (
"ALTER INDEX " "{old_name} " "RENAME TO " "{new_name}"
)
sql_hstore_unique_drop = "DROP INDEX IF EXISTS {name}"
def create_model(self, model):
"""Ran when a new model is created."""
for field in model._meta.local_fields:
if not isinstance(field, HStoreField):
continue
self.add_field(model, field)
def delete_model(self, model):
"""Ran when a model is being deleted."""
for field in model._meta.local_fields:
if not isinstance(field, HStoreField):
continue
self.remove_field(model, field)
def alter_db_table(self, model, old_db_table, new_db_table):
"""Ran when the name of a model is changed."""
for field in model._meta.local_fields:
if not isinstance(field, HStoreField):
continue
for keys in self._iterate_uniqueness_keys(field):
self._rename_hstore_unique(
old_db_table, new_db_table, field, field, keys
)
def add_field(self, model, field):
"""Ran when a field is added to a model."""
for keys in self._iterate_uniqueness_keys(field):
self._create_hstore_unique(model, field, keys)
def remove_field(self, model, field):
"""Ran when a field is removed from a model."""
for keys in self._iterate_uniqueness_keys(field):
self._drop_hstore_unique(model, field, keys)
def alter_field(self, model, old_field, new_field, strict=False):
"""Ran when the configuration on a field changed."""
is_old_field_hstore = isinstance(old_field, HStoreField)
is_new_field_hstore = isinstance(new_field, HStoreField)
if not is_old_field_hstore and not is_new_field_hstore:
return
old_uniqueness = getattr(old_field, "uniqueness", []) or []
new_uniqueness = getattr(new_field, "uniqueness", []) or []
# handle field renames before moving on
if str(old_field.column) != str(new_field.column):
for keys in self._iterate_uniqueness_keys(old_field):
self._rename_hstore_unique(
model._meta.db_table,
model._meta.db_table,
old_field,
new_field,
keys,
)
# drop the indexes for keys that have been removed
for keys in old_uniqueness:
if keys not in new_uniqueness:
self._drop_hstore_unique(
model, old_field, self._compose_keys(keys)
)
# create new indexes for keys that have been added
for keys in new_uniqueness:
if keys not in old_uniqueness:
self._create_hstore_unique(
model, new_field, self._compose_keys(keys)
)
def _create_hstore_unique(self, model, field, keys):
"""Creates a UNIQUE constraint for the specified hstore keys."""
name = self._unique_constraint_name(model._meta.db_table, field, keys)
columns = ["(%s->'%s')" % (field.column, key) for key in keys]
sql = self.sql_hstore_unique_create.format(
name=self.quote_name(name),
table=self.quote_name(model._meta.db_table),
columns=",".join(columns),
)
self.execute(sql)
def _rename_hstore_unique(
self, old_table_name, new_table_name, old_field, new_field, keys
):
"""Renames an existing UNIQUE constraint for the specified hstore
keys."""
old_name = self._unique_constraint_name(old_table_name, old_field, keys)
new_name = self._unique_constraint_name(new_table_name, new_field, keys)
sql = self.sql_hstore_unique_rename.format(
old_name=self.quote_name(old_name),
new_name=self.quote_name(new_name),
)
self.execute(sql)
def _drop_hstore_unique(self, model, field, keys):
"""Drops a UNIQUE constraint for the specified hstore keys."""
name = self._unique_constraint_name(model._meta.db_table, field, keys)
sql = self.sql_hstore_unique_drop.format(name=self.quote_name(name))
self.execute(sql)
@staticmethod
def _unique_constraint_name(table: str, field, keys):
"""Gets the name for a UNIQUE INDEX that applies to one or more keys in
a hstore field.
Arguments:
table:
The name of the table the field is
a part of.
field:
The hstore field to create a
UNIQUE INDEX for.
keys:
The name(s) of the hstore key(s)
to create the name for.
This can be a single key name or
a tuple of multiple key names.
Returns:
The name for the UNIQUE index.
"""
postfix = "_".join(keys)
return "{table}_{field}_unique_{postfix}".format(
table=table, field=field.column, postfix=postfix
)
def _iterate_uniqueness_keys(self, field):
"""Iterates over the keys marked as "unique" in the specified field.
Arguments:
field:
The field whose keys to
iterate over.
"""
uniqueness = getattr(field, "uniqueness", None)
if not uniqueness:
return
for keys in uniqueness:
composed_keys = self._compose_keys(keys)
yield composed_keys
@staticmethod
def _compose_keys(constraint):
"""Turns a string into a list of string or returns it as a list."""
if isinstance(constraint, str):
return [constraint]
return constraint
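# Usage sketch (illustrative, not part of this module; model and app names
# are hypothetical): declaring unique hstore keys on a model.
#
#     from psqlextra.fields import HStoreField
#     from psqlextra.models import PostgresModel
#
#     class Product(PostgresModel):
#         title = HStoreField(uniqueness=["en", ("sku", "variant")])
#
# For the tuple, the side effects above create a single index over both keys,
# roughly:
#
#     CREATE UNIQUE INDEX IF NOT EXISTS "myapp_product_title_unique_sku_variant"
#     ON "myapp_product" ((title->'sku'),(title->'variant'))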
django-postgres-extra-2.0.9/psqlextra/compiler.py 0000664 0000000 0000000 00000040404 14634267343 0022176 0 ustar 00root root 0000000 0000000 import inspect
import os
import sys
from collections.abc import Iterable
from typing import TYPE_CHECKING, Tuple, Union, cast
import django
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.db.models import Expression, Model, Q
from django.db.models.fields.related import RelatedField
from django.db.models.sql import compiler as django_compiler
from .expressions import HStoreValue
from .types import ConflictAction
if TYPE_CHECKING:
from .sql import PostgresInsertQuery
def append_caller_to_sql(sql):
"""Append the caller to SQL queries.
Adds the calling file and function as an SQL comment to each query.
Examples:
INSERT INTO "tests_47ee19d1" ("id", "title")
VALUES (1, 'Test')
RETURNING "tests_47ee19d1"."id"
/* 998020 test_append_caller_to_sql_crud .../django-postgres-extra/tests/test_append_caller_to_sql.py 55 */
SELECT "tests_47ee19d1"."id", "tests_47ee19d1"."title"
FROM "tests_47ee19d1"
WHERE "tests_47ee19d1"."id" = 1
LIMIT 1
/* 998020 test_append_caller_to_sql_crud .../django-postgres-extra/tests/test_append_caller_to_sql.py 69 */
UPDATE "tests_47ee19d1"
SET "title" = 'success'
WHERE "tests_47ee19d1"."id" = 1
/* 998020 test_append_caller_to_sql_crud .../django-postgres-extra/tests/test_append_caller_to_sql.py 64 */
DELETE FROM "tests_47ee19d1"
WHERE "tests_47ee19d1"."id" IN (1)
/* 998020 test_append_caller_to_sql_crud .../django-postgres-extra/tests/test_append_caller_to_sql.py 74 */
Slow and blocking queries can easily be traced back to their originator
within the source code using the "pg_stat_activity" table.
Set "POSTGRES_EXTRA_ANNOTATE_SQL" to True in your Django settings to enable this feature.
"""
if not getattr(settings, "POSTGRES_EXTRA_ANNOTATE_SQL", None):
return sql
try:
# Search for the first non-Django caller
stack = inspect.stack()
for stack_frame in stack[1:]:
frame_filename = stack_frame[1]
frame_line = stack_frame[2]
frame_function = stack_frame[3]
if "/django/" in frame_filename or "/psqlextra/" in frame_filename:
continue
return f"{sql} /* {os.getpid()} {frame_function} {frame_filename} {frame_line} */"
# Django internal commands (like migrations) end up here
return f"{sql} /* {os.getpid()} {sys.argv[0]} */"
except Exception:
# Don't break anything just because this convenience function ran into an unexpected situation
return sql
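# Usage sketch (illustrative): this is a plain Django setting, read via
# `getattr(settings, ...)` above, so it is enabled from the project's
# settings module.
#
#     # settings.py
#     POSTGRES_EXTRA_ANNOTATE_SQL = True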
class SQLCompiler(django_compiler.SQLCompiler): # type: ignore [attr-defined]
def as_sql(self, *args, **kwargs):
sql, params = super().as_sql(*args, **kwargs)
return append_caller_to_sql(sql), params
class SQLDeleteCompiler(django_compiler.SQLDeleteCompiler): # type: ignore [name-defined]
def as_sql(self, *args, **kwargs):
sql, params = super().as_sql(*args, **kwargs)
return append_caller_to_sql(sql), params
class SQLAggregateCompiler(django_compiler.SQLAggregateCompiler): # type: ignore [name-defined]
def as_sql(self, *args, **kwargs):
sql, params = super().as_sql(*args, **kwargs)
return append_caller_to_sql(sql), params
class SQLUpdateCompiler(django_compiler.SQLUpdateCompiler): # type: ignore [name-defined]
"""Compiler for SQL UPDATE statements that allows us to use expressions
inside HStore values.
Like:
.update(name=dict(en=F('test')))
"""
def as_sql(self, *args, **kwargs):
self._prepare_query_values()
sql, params = super().as_sql(*args, **kwargs)
return append_caller_to_sql(sql), params
def _prepare_query_values(self):
"""Extra prep on query values by converting dictionaries into
:see:HStoreValue expressions.
This allows putting expressions in a dictionary. The
:see:HStoreValue will take care of resolving the expressions
inside the dictionary.
"""
if not self.query.values:
return
new_query_values = []
for field, model, val in self.query.values:
if not isinstance(val, dict):
new_query_values.append((field, model, val))
continue
if not self._does_dict_contain_expression(val):
new_query_values.append((field, model, val))
continue
expression = HStoreValue(dict(val))
new_query_values.append((field, model, expression))
self.query.values = new_query_values
@staticmethod
def _does_dict_contain_expression(data: dict) -> bool:
"""Gets whether the specified dictionary contains any expressions that
need to be resolved."""
for value in data.values():
if hasattr(value, "resolve_expression"):
return True
if hasattr(value, "as_sql"):
return True
return False
class SQLInsertCompiler(django_compiler.SQLInsertCompiler): # type: ignore [name-defined]
"""Compiler for SQL INSERT statements."""
def as_sql(self, *args, **kwargs):
"""Builds the SQL INSERT statement."""
queries = [
(append_caller_to_sql(sql), params)
for sql, params in super().as_sql(*args, **kwargs)
]
return queries
class PostgresInsertOnConflictCompiler(django_compiler.SQLInsertCompiler): # type: ignore [name-defined]
"""Compiler for SQL INSERT statements."""
query: "PostgresInsertQuery"
def __init__(self, *args, **kwargs):
"""Initializes a new instance of
:see:PostgresInsertOnConflictCompiler."""
super().__init__(*args, **kwargs)
self.qn = self.connection.ops.quote_name
def as_sql(self, return_id=False, *args, **kwargs):
"""Builds the SQL INSERT statement."""
queries = [
self._rewrite_insert(sql, params, return_id)
for sql, params in super().as_sql(*args, **kwargs)
]
return queries
def _rewrite_insert(self, sql, params, return_id=False):
"""Rewrites a formed SQL INSERT query to include the ON CONFLICT
clause.
Arguments:
sql:
The SQL INSERT query to rewrite.
params:
The parameters passed to the query.
return_id:
Whether to only return the ID or all
columns.
Returns:
A tuple of the rewritten SQL query and new params.
"""
returning = (
self.qn(self.query.model._meta.pk.attname) if return_id else "*"
)
(sql, params) = self._rewrite_insert_on_conflict(
sql, params, self.query.conflict_action.value, returning
)
return append_caller_to_sql(sql), params
def _rewrite_insert_on_conflict(
self, sql, params, conflict_action: ConflictAction, returning
):
"""Rewrites a normal SQL INSERT query to add the 'ON CONFLICT'
clause."""
# build the conflict target, the columns to watch
# for conflicts
on_conflict_clause = self._build_on_conflict_clause()
index_predicate = self.query.index_predicate # type: ignore[attr-defined]
update_condition = self.query.conflict_update_condition # type: ignore[attr-defined]
rewritten_sql = f"{sql} {on_conflict_clause}"
if index_predicate:
expr_sql, expr_params = self._compile_expression(index_predicate)
rewritten_sql += f" WHERE {expr_sql}"
params += tuple(expr_params)
# Fallback in case the user didn't specify any update values. We can still
# make the query work if we switch to ConflictAction.NOTHING
if (
conflict_action == ConflictAction.UPDATE.value
and not self.query.update_values
):
conflict_action = ConflictAction.NOTHING.value
rewritten_sql += f" DO {conflict_action}"
if conflict_action == ConflictAction.UPDATE.value:
set_sql, sql_params = self._build_set_statement()
rewritten_sql += f" SET {set_sql}"
params += sql_params
if update_condition:
expr_sql, expr_params = self._compile_expression(
update_condition
)
rewritten_sql += f" WHERE {expr_sql}"
params += tuple(expr_params)
rewritten_sql += f" RETURNING {returning}"
return (rewritten_sql, params)
def _build_set_statement(self) -> Tuple[str, tuple]:
"""Builds the SET statement for the ON CONFLICT DO UPDATE clause.
This uses the update compiler to provide full compatibility with
Django's standard `update(...)`.
"""
# Local import to work around the circular dependency between
# the compiler and the queries.
from .sql import PostgresUpdateQuery
query = cast(PostgresUpdateQuery, self.query.chain(PostgresUpdateQuery))
query.add_update_values(self.query.update_values)
sql, params = query.get_compiler(self.connection.alias).as_sql()
return sql.split("SET")[1].split(" WHERE")[0], tuple(params)
def _build_on_conflict_clause(self):
if django.VERSION >= (2, 2):
from django.db.models.constraints import BaseConstraint
from django.db.models.indexes import Index
if isinstance(
self.query.conflict_target, BaseConstraint
) or isinstance(self.query.conflict_target, Index):
return "ON CONFLICT ON CONSTRAINT %s" % self.qn(
self.query.conflict_target.name
)
conflict_target = self._build_conflict_target()
return f"ON CONFLICT {conflict_target}"
def _build_conflict_target(self):
"""Builds the `conflict_target` for the ON CONFLICT clause."""
if not isinstance(self.query.conflict_target, Iterable):
raise SuspiciousOperation(
(
"%s is not a valid conflict target, specify "
"a list of column names, or tuples with column "
"names and hstore key."
)
% str(self.query.conflict_target)
)
conflict_target = self._build_conflict_target_by_index()
if conflict_target:
return conflict_target
return self._build_conflict_target_by_fields()
def _build_conflict_target_by_fields(self):
"""Builds the `conflict_target` for the ON CONFLICT clauses by matching
the fields specified in the specified conflict target against the
model's fields.
This requires some special handling because the fields names
might not be same as the column names.
"""
conflict_target = []
for field_name in self.query.conflict_target:
self._assert_valid_field(field_name)
# special handling for hstore keys
if isinstance(field_name, tuple):
conflict_target.append(
"(%s->'%s')"
% (self._format_field_name(field_name), field_name[1])
)
else:
conflict_target.append(self._format_field_name(field_name))
return "(%s)" % ",".join(conflict_target)
def _build_conflict_target_by_index(self):
"""Builds the `conflict_target` for the ON CONFLICT clause by trying to
find an index that matches the specified conflict target on the query.
Conflict targets must match some unique constraint, usually this
is a `UNIQUE INDEX`.
"""
matching_index = next(
(
index
for index in self.query.model._meta.indexes
if list(index.fields) == list(self.query.conflict_target)
),
None,
)
if not matching_index:
return None
with self.connection.schema_editor() as schema_editor:
stmt = matching_index.create_sql(self.query.model, schema_editor)
return "(%s)" % stmt.parts["columns"]
def _get_model_field(self, name: str):
"""Gets the field on a model with the specified name.
Arguments:
name:
The name of the field to look for.
This can be either the actual field name or
the name of the column; both will work :)
Returns:
The field with the specified name or None if
no such field exists.
"""
field_name = self._normalize_field_name(name)
if not self.query.model:
return None
# 'pk' has special meaning and always refers to the primary
# key of a model, we have to respect this de-facto standard behaviour
if field_name == "pk" and self.query.model._meta.pk:
return self.query.model._meta.pk
for field in self.query.model._meta.local_concrete_fields: # type: ignore[attr-defined]
if field.name == field_name or field.column == field_name:
return field
return None
def _format_field_name(self, field_name) -> str:
"""Formats a field's name for usage in SQL.
Arguments:
field_name:
The field name to format.
Returns:
The specified field name formatted for
usage in SQL.
"""
field = self._get_model_field(field_name)
return self.qn(field.column)
def _format_field_value(self, field_name) -> str:
"""Formats a field's value for usage in SQL.
Arguments:
field_name:
The name of the field to format
the value of.
Returns:
The field's value formatted for usage
in SQL.
"""
field_name = self._normalize_field_name(field_name)
field = self._get_model_field(field_name)
value = getattr(self.query.objs[0], field.attname)
if isinstance(field, RelatedField) and isinstance(value, Model):
value = value.pk
return django_compiler.SQLInsertCompiler.prepare_value( # type: ignore[attr-defined]
self,
field,
# Note: this deliberately doesn't use `pre_save_val` as we don't
# want things like auto_now on DateTimeField (etc.) to change the
# value. We rely on pre_save having already been done by the
# underlying compiler so that things like FileField have already had
# the opportunity to save out their data.
value,
)
def _compile_expression(
self, expression: Union[Expression, Q, str]
) -> Tuple[str, Union[tuple, list]]:
"""Compiles an expression, Q object or raw SQL string into SQL and
tuple of parameters."""
if isinstance(expression, Q):
if django.VERSION < (3, 1):
raise SuspiciousOperation(
"Q objects in psqlextra can only be used with Django 3.1 and newer"
)
return self.query.build_where(expression).as_sql(
self, self.connection
)
elif isinstance(expression, Expression):
return self.compile(expression)
return expression, tuple()
def _assert_valid_field(self, field_name: str):
"""Asserts that a field with the specified name exists on the model and
raises :see:SuspiciousOperation if it does not."""
field_name = self._normalize_field_name(field_name)
if self._get_model_field(field_name):
return
raise SuspiciousOperation(
(
"%s is not a valid conflict target, specify "
"a list of column names, or tuples with column "
"names and hstore key."
)
% str(field_name)
)
@staticmethod
def _normalize_field_name(field_name: str) -> str:
"""Normalizes a field name into a string by extracting the field name
if it was specified as a reference to a HStore key (as a tuple).
Arguments:
field_name:
The field name to normalize.
Returns:
The normalized field name.
"""
if isinstance(field_name, tuple):
field_name, _ = field_name
return field_name
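# Usage sketch (illustrative): the compiler above is driven by the
# manager-level `on_conflict(...)` API. `Product` is a hypothetical model
# using psqlextra's PostgresManager.
#
#     from psqlextra.types import ConflictAction
#
#     obj = (
#         Product.objects
#         .on_conflict(["sku"], ConflictAction.UPDATE)
#         .insert_and_get(sku="ko-01", price=10)
#     )
#
# This produces roughly:
#
#     INSERT INTO "myapp_product" ("sku", "price") VALUES ('ko-01', 10)
#     ON CONFLICT ("sku") DO UPDATE SET "price" = EXCLUDED."price" RETURNING ...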
django-postgres-extra-2.0.9/psqlextra/error.py 0000664 0000000 0000000 00000003311 14634267343 0021511 0 ustar 00root root 0000000 0000000 from typing import TYPE_CHECKING, Optional, Type, Union
from django import db
if TYPE_CHECKING:
from psycopg2 import Error as _Psycopg2Error
Psycopg2Error: Optional[Type[_Psycopg2Error]]
from psycopg import Error as _Psycopg3Error
Psycopg3Error: Optional[Type[_Psycopg3Error]]
try:
from psycopg2 import Error as Psycopg2Error # type: ignore[no-redef]
except ImportError:
Psycopg2Error = None # type: ignore[misc]
try:
from psycopg import Error as Psycopg3Error # type: ignore[no-redef]
except ImportError:
Psycopg3Error = None # type: ignore[misc]
def extract_postgres_error(
error: db.Error,
) -> Optional[Union["_Psycopg2Error", "_Psycopg3Error"]]:
"""Extracts the underlying :see:psycopg2.Error from the specified Django
database error.
As per PEP-249, Django wraps all database errors in its own
exception. We can extract the underlying database error by examining
the cause of the error.
"""
if (Psycopg2Error and not isinstance(error.__cause__, Psycopg2Error)) and (
Psycopg3Error and not isinstance(error.__cause__, Psycopg3Error)
):
return None
return error.__cause__
def extract_postgres_error_code(error: db.Error) -> Optional[str]:
"""Extracts the underlying Postgres error code.
As per PEP-249, Django wraps all database errors in its own
exception. We can extract the underlying database error by examining
the cause of the error.
"""
cause = error.__cause__
if not cause:
return None
if Psycopg2Error and isinstance(cause, Psycopg2Error):
return cause.pgcode
if Psycopg3Error and isinstance(cause, Psycopg3Error):
return cause.sqlstate
return None
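# Usage sketch (illustrative; the helper functions are hypothetical):
# extracting the SQLSTATE from a wrapped error. "23505" is PostgreSQL's
# unique_violation code.
#
#     from django import db
#
#     try:
#         do_insert()  # hypothetical helper that violates a unique constraint
#     except db.IntegrityError as err:
#         if extract_postgres_error_code(err) == "23505":
#             handle_duplicate()  # hypothetical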
django-postgres-extra-2.0.9/psqlextra/expressions.py 0000664 0000000 0000000 00000015264 14634267343 0022754 0 ustar 00root root 0000000 0000000 from typing import Union
from django.db.models import CharField, Field, expressions
class HStoreValue(expressions.Expression):
"""Represents a HStore value.
The base PostgreSQL implementation Django provides always
represents HStore values as dictionaries, which doesn't work if
you want to use expressions inside hstore values.
"""
def __init__(self, value):
"""Initializes a new instance."""
self.value = value
def resolve_expression(self, *args, **kwargs):
"""Resolves expressions inside the dictionary."""
result = dict()
for key, value in self.value.items():
if hasattr(value, "resolve_expression"):
result[key] = value.resolve_expression(*args, **kwargs)
else:
result[key] = value
return HStoreValue(result)
def as_sql(self, compiler, connection):
"""Compiles the HStore value into SQL.
Compiles expressions contained in the values
of HStore entries as well.
Given a dictionary like:
dict(key1='val1', key2='val2')
The resulting SQL will be:
hstore(hstore('key1', 'val1'), hstore('key2', 'val2'))
"""
sql = []
params = []
for key, value in self.value.items():
if hasattr(value, "as_sql"):
inner_sql, inner_params = value.as_sql(compiler, connection)
sql.append(f"hstore(%s, {inner_sql})")
params.append(key)
params.extend(inner_params)
elif value is not None:
sql.append("hstore(%s, %s)")
params.append(key)
params.append(str(value))
else:
sql.append("hstore(%s, NULL)")
params.append(key)
return " || ".join(sql), params
class HStoreColumn(expressions.Col):
"""HStoreColumn expression.
Generates expressions like:
[db table].[column]->'[hstore key]'
"""
contains_column_references = True
def __init__(self, alias, target, hstore_key):
"""Initializes a new instance of :see:HStoreColumn.
Arguments:
alias:
The table name.
target:
The field instance.
hstore_key:
The name of the hstore key to include
in the expression.
"""
super().__init__(alias, target, output_field=target)
self.alias, self.target, self.hstore_key = alias, target, hstore_key
def __repr__(self):
"""Gets a textual representation of this expresion."""
return "{}({}, {}->'{}')".format(
self.__class__.__name__, self.alias, self.target, self.hstore_key
)
def as_sql(self, compiler, connection):
"""Compiles this expression into SQL."""
qn = compiler.quote_name_unless_alias
return (
"%s.%s->'%s'"
% (qn(self.alias), qn(self.target.column), self.hstore_key),
[],
)
def relabeled_clone(self, relabels):
"""Gets a re-labeled clone of this expression."""
return self.__class__(
relabels.get(self.alias, self.alias),
self.target,
self.hstore_key,
self.output_field,
)
class HStoreRef(expressions.F):
"""Inline reference to a HStore key.
Allows selecting individual keys in annotations.
"""
def __init__(self, name: str, key: str):
"""Initializes a new instance of :see:HStoreRef.
Arguments:
name:
The name of the column/field to resolve.
key:
The name of the HStore key to select.
"""
super().__init__(name)
self.key = key
def resolve_expression(self, *args, **kwargs):
"""Resolves the expression into a :see:HStoreColumn expression."""
original_expression: expressions.Col = super().resolve_expression( # type: ignore[annotation-unchecked]
*args, **kwargs
)
expression = HStoreColumn(
original_expression.alias, original_expression.target, self.key
)
return expression
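# Usage sketch (illustrative; model and field names are hypothetical):
# selecting a single hstore key in an annotation.
#
#     Product.objects.annotate(title_en=HStoreRef("title", "en"))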
class DateTimeEpochColumn(expressions.Col):
"""Gets the date/time column as a UNIX epoch timestamp."""
contains_column_references = True
def as_sql(self, compiler, connection):
"""Compiles this expression into SQL."""
sql, params = super().as_sql(compiler, connection)
return "EXTRACT(epoch FROM {})".format(sql), params
def get_group_by_cols(self):
return []
class DateTimeEpoch(expressions.F):
"""Gets the date/time column as a UNIX epoch timestamp."""
contains_aggregate = False
def resolve_expression(self, *args, **kwargs):
original_expression = super().resolve_expression(*args, **kwargs)
expression = DateTimeEpochColumn(
original_expression.alias, original_expression.target
)
return expression
def IsNotNone(*fields, default=None):
"""Selects whichever field is not None, in the specified order.
Arguments:
fields:
The fields to attempt to get a value from,
in order.
default:
The value to return in case all values are None.
Returns:
A Case-When expression that tries each field and
returns the specified default value when all of
them are None.
"""
when_clauses = [
expressions.When(
~expressions.Q(**{field: None}), then=expressions.F(field)
)
for field in reversed(fields)
]
return expressions.Case(
*when_clauses,
default=expressions.Value(default),
output_field=CharField(),
)
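# Usage sketch (illustrative; model and field names are hypothetical): pick
# the first non-NULL of several columns, similar to SQL's COALESCE.
#
#     User.objects.annotate(
#         display_name=IsNotNone("nickname", "full_name", default="anonymous")
#     )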
class ExcludedCol(expressions.Expression):
"""References a column in PostgreSQL's special EXCLUDED column, which is
used in upserts to refer to the data about to be inserted/updated.
See: https://www.postgresql.org/docs/current/sql-insert.html#SQL-ON-CONFLICT
"""
def __init__(self, field_or_name: Union[Field, str]):
# We support both field classes or just field names here. We prefer
# fields because when the expression is compiled, it might need
# the field information to figure out the correct placeholder.
# Even though that isn't required for this particular expression.
if isinstance(field_or_name, Field):
super().__init__(field_or_name)
self.name = field_or_name.column
else:
super().__init__(None)
self.name = field_or_name
def as_sql(self, compiler, connection):
quoted_name = connection.ops.quote_name(self.name)
return f"EXCLUDED.{quoted_name}", tuple()
django-postgres-extra-2.0.9/psqlextra/fields/ 0000775 0000000 0000000 00000000000 14634267343 0021256 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/fields/__init__.py 0000664 0000000 0000000 00000000101 14634267343 0023357 0 ustar 00root root 0000000 0000000 from .hstore_field import HStoreField
__all__ = ["HStoreField"]
django-postgres-extra-2.0.9/psqlextra/fields/hstore_field.py 0000664 0000000 0000000 00000004424 14634267343 0024303 0 ustar 00root root 0000000 0000000 from typing import List, Optional, Tuple, Union
from django.contrib.postgres.fields import HStoreField as DjangoHStoreField
from django.db.models.expressions import Expression
from django.db.models.fields import Field
class HStoreField(DjangoHStoreField):
"""Improved version of Django's :see:HStoreField that adds support for
database-level constraints.
Notes:
- For the implementation of uniqueness, see the
custom database back-end.
"""
def __init__(
self,
*args,
uniqueness: Optional[List[Union[str, Tuple[str, ...]]]] = None,
required: Optional[List[str]] = None,
**kwargs
):
"""Initializes a new instance of :see:HStoreField.
Arguments:
uniqueness:
List of keys to enforce as unique. Use tuples
to enforce multiple keys together to be unique.
required:
List of keys that should be enforced as required.
"""
super(HStoreField, self).__init__(*args, **kwargs)
self.uniqueness = uniqueness
self.required = required
def get_prep_value(self, value):
"""Override the base class so it doesn't cast all values to strings.
psqlextra supports expressions in hstore fields, so casting all
values to strings is a bad idea.
"""
value = Field.get_prep_value(self, value)
if isinstance(value, dict):
prep_value = {}
for key, val in value.items():
if isinstance(val, Expression):
prep_value[key] = val
elif val is not None:
prep_value[key] = str(val)
else:
prep_value[key] = val
value = prep_value
if isinstance(value, list):
value = [str(item) for item in value]
return value
def deconstruct(self):
"""Gets the values to pass to :see:__init__ when re-creating this
object."""
name, path, args, kwargs = super(HStoreField, self).deconstruct()
if self.uniqueness is not None:
kwargs["uniqueness"] = self.uniqueness
if self.required is not None:
kwargs["required"] = self.required
return name, path, args, kwargs
django-postgres-extra-2.0.9/psqlextra/indexes/ 0000775 0000000 0000000 00000000000 14634267343 0021447 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/indexes/__init__.py 0000664 0000000 0000000 00000000413 14634267343 0023556 0 ustar 00root root 0000000 0000000 from .case_insensitive_unique_index import CaseInsensitiveUniqueIndex
from .conditional_unique_index import ConditionalUniqueIndex
from .unique_index import UniqueIndex
__all__ = [
"UniqueIndex",
"ConditionalUniqueIndex",
"CaseInsensitiveUniqueIndex",
]
django-postgres-extra-2.0.9/psqlextra/indexes/case_insensitive_unique_index.py 0000664 0000000 0000000 00000002534 14634267343 0030135 0 ustar 00root root 0000000 0000000 from django.db.models.indexes import Index
class CaseInsensitiveUniqueIndex(Index):
sql_create_unique_index = (
"CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
)
def create_sql(self, model, schema_editor, using="", **kwargs):
statement = super().create_sql(model, schema_editor, using)
statement.template = self.sql_create_unique_index
column_collection = statement.parts["columns"]
statement.parts["columns"] = ", ".join(
[
"LOWER(%s)" % self._quote_column(column_collection, column, idx)
for idx, column in enumerate(column_collection.columns)
]
)
return statement
def deconstruct(self):
"""Serializes the :see:CaseInsensitiveUniqueIndex for the migrations
file."""
_, args, kwargs = super().deconstruct()
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
path = path.replace("django.db.models.indexes", "django.db.models")
return path, args, kwargs
@staticmethod
def _quote_column(column_collection, column, idx):
quoted_name = column_collection.quote_name(column)
try:
return quoted_name + column_collection.col_suffixes[idx]
except IndexError:
return column_collection.quote_name(column)
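# Usage sketch (illustrative; model and index names are hypothetical):
# enforce case-insensitive uniqueness on a column.
#
#     class User(models.Model):
#         email = models.CharField(max_length=255)
#
#         class Meta:
#             indexes = [
#                 CaseInsensitiveUniqueIndex(fields=["email"], name="user_email_ci"),
#             ]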
django-postgres-extra-2.0.9/psqlextra/indexes/conditional_unique_index.py 0000664 0000000 0000000 00000004103 14634267343 0027077 0 ustar 00root root 0000000 0000000 import django
from django.db.models.indexes import Index
class ConditionalUniqueIndex(Index):
"""Creates a partial unique index based on a given condition.
Useful, for example, if you need a unique combination of foreign keys, but also want to allow
NULL as a valid value. In that case, you can just use:
>>> class Meta:
>>> indexes = [
>>> ConditionalUniqueIndex(fields=['a', 'b', 'c'], condition='"c" IS NOT NULL'),
>>> ConditionalUniqueIndex(fields=['a', 'b'], condition='"c" IS NULL')
>>> ]
"""
sql_create_index = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s WHERE %(condition)s"
def __init__(self, condition: str, fields=[], name=None):
"""Initializes a new instance of :see:ConditionalUniqueIndex."""
super().__init__(fields=fields, name=name)
self._condition = condition
def create_sql(self, model, schema_editor, using="", **kwargs):
"""Creates the actual SQL used when applying the migration."""
if django.VERSION >= (2, 0):
statement = super().create_sql(model, schema_editor, using)
statement.template = self.sql_create_index
statement.parts["condition"] = self._condition
return statement
else:
sql_create_index = self.sql_create_index
sql_parameters = {
**Index.get_sql_create_template_values(
self, model, schema_editor, using
),
"condition": self._condition,
}
return sql_create_index % sql_parameters
def deconstruct(self):
"""Serializes the :see:ConditionalUniqueIndex for the migrations
file."""
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
path = path.replace("django.db.models.indexes", "django.db.models")
return (
path,
(),
{
"fields": self.fields,
"name": self.name,
"condition": self._condition,
},
)
django-postgres-extra-2.0.9/psqlextra/indexes/unique_index.py 0000664 0000000 0000000 00000001033 14634267343 0024513 0 ustar 00root root 0000000 0000000 import django
from django.db.models.indexes import Index
class UniqueIndex(Index):
def create_sql(self, *args, **kwargs):
if django.VERSION >= (2, 0):
statement = super().create_sql(*args, **kwargs)
statement.template = self._rewrite_sql(statement.template)
return statement
sql = super().create_sql(*args, **kwargs)
return self._rewrite_sql(sql)
@staticmethod
def _rewrite_sql(sql: str) -> str:
return sql.replace("CREATE INDEX", "CREATE UNIQUE INDEX")
django-postgres-extra-2.0.9/psqlextra/introspect/ 0000775 0000000 0000000 00000000000 14634267343 0022202 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/introspect/__init__.py 0000664 0000000 0000000 00000000337 14634267343 0024316 0 ustar 00root root 0000000 0000000 from .fields import inspect_model_local_concrete_fields
from .models import model_from_cursor, models_from_cursor
__all__ = [
"models_from_cursor",
"model_from_cursor",
"inspect_model_local_concrete_fields",
]
django-postgres-extra-2.0.9/psqlextra/introspect/fields.py 0000664 0000000 0000000 00000001322 14634267343 0024020 0 ustar 00root root 0000000 0000000 from typing import List, Type
from django.db.models import Field, Model
def inspect_model_local_concrete_fields(model: Type[Model]) -> List[Field]:
"""Gets a complete list of local and concrete fields on a model, these are
fields that directly map to a database colmn directly on the table backing
the model.
This is similar to Django's `Meta.local_concrete_fields`, which is a
private API. This method utilizes only public APIs.
"""
local_concrete_fields = []
for field in model._meta.get_fields(include_parents=False):
if isinstance(field, Field) and field.column and not field.many_to_many:
local_concrete_fields.append(field)
return local_concrete_fields
django-postgres-extra-2.0.9/psqlextra/introspect/models.py 0000664 0000000 0000000 00000012161 14634267343 0024040 0 ustar 00root root 0000000 0000000 from typing import (
Any,
Dict,
Generator,
Iterable,
List,
Optional,
Type,
TypeVar,
cast,
)
from django.core.exceptions import FieldDoesNotExist
from django.db import connection, models
from django.db.models import Field, Model
from django.db.models.expressions import Expression
from .fields import inspect_model_local_concrete_fields
TModel = TypeVar("TModel", bound=models.Model)
def _construct_model(
model: Type[TModel],
columns: Iterable[str],
values: Iterable[Any],
*,
apply_converters: bool = True
) -> TModel:
fields_by_name_and_column = {}
for concrete_field in inspect_model_local_concrete_fields(model):
fields_by_name_and_column[concrete_field.attname] = concrete_field
if concrete_field.db_column:
fields_by_name_and_column[concrete_field.db_column] = concrete_field
indexable_columns = list(columns)
row = {}
for index, value in enumerate(values):
column = indexable_columns[index]
try:
field: Optional[Field] = cast(Field, model._meta.get_field(column))
except FieldDoesNotExist:
field = fields_by_name_and_column.get(column)
if not field:
continue
field_column_expression = field.get_col(model._meta.db_table)
if apply_converters:
converters = cast(Expression, field).get_db_converters(
connection
) + connection.ops.get_db_converters(field_column_expression)
converted_value = value
for converter in converters:
converted_value = converter(
converted_value,
field_column_expression,
connection,
)
else:
converted_value = value
row[field.attname] = converted_value
instance = model(**row)
instance._state.adding = False
instance._state.db = connection.alias
return instance
def models_from_cursor(
model: Type[TModel], cursor, *, related_fields: List[str] = []
) -> Generator[TModel, None, None]:
"""Fetches all rows from a cursor and converts the values into model
instances.
This is roughly what Django does internally when you do queries. This
goes further than `Model.from_db` as it also applies converters to make
sure that values are converted into their Python equivalent.
Use this when you've outgrown the ORM and you are writing performant
queries yourself and you need to map the results back into ORM objects.
Arguments:
model:
Model to construct.
cursor:
Cursor to read the rows from.
related_fields:
List of ForeignKey/OneToOneField names that were joined
into the raw query. Use this to achieve the same thing
that Django's `.select_related()` does.
Field names should be specified in the order that they
are SELECT'd in.
"""
columns = [col[0] for col in cursor.description]
field_offset = len(inspect_model_local_concrete_fields(model))
rows = cursor.fetchmany()
while rows:
for values in rows:
instance = _construct_model(
model, columns[:field_offset], values[:field_offset]
)
for index, related_field_name in enumerate(related_fields):
related_model = model._meta.get_field(
related_field_name
).related_model
if not related_model:
continue
related_field_count = len(
inspect_model_local_concrete_fields(related_model)
)
# autopep8: off
related_columns = columns[
field_offset : field_offset + related_field_count # noqa
]
related_values = values[
field_offset : field_offset + related_field_count # noqa
]
# autopep8: on
if (
not related_columns
or not related_values
or all([value is None for value in related_values])
):
continue
related_instance = _construct_model(
cast(Type[Model], related_model),
related_columns,
related_values,
)
instance._state.fields_cache[related_field_name] = related_instance # type: ignore
field_offset += len(
inspect_model_local_concrete_fields(related_model)
)
yield instance
rows = cursor.fetchmany()
def model_from_cursor(
model: Type[TModel], cursor, *, related_fields: List[str] = []
) -> Optional[TModel]:
return next(
models_from_cursor(model, cursor, related_fields=related_fields), None
)
def model_from_dict(
model: Type[TModel], row: Dict[str, Any], *, apply_converters: bool = True
) -> TModel:
return _construct_model(
model, row.keys(), row.values(), apply_converters=apply_converters
)
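# Usage sketch (illustrative; model, table, and column names are
# hypothetical): mapping a hand-written query back to model instances,
# including a joined relation in the style of `.select_related()`.
#
#     from django.db import connection
#
#     with connection.cursor() as cursor:
#         cursor.execute(
#             'SELECT "shop_order".*, "shop_customer".* FROM "shop_order"'
#             ' INNER JOIN "shop_customer"'
#             ' ON "shop_customer"."id" = "shop_order"."customer_id"'
#         )
#         for order in models_from_cursor(Order, cursor, related_fields=["customer"]):
#             ...  # order.customer is populated without extra queries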
django-postgres-extra-2.0.9/psqlextra/locking.py 0000664 0000000 0000000 00000005476 14634267343 0022024 0 ustar 00root root 0000000 0000000 from enum import Enum
from typing import Optional, Type
from django.db import DEFAULT_DB_ALIAS, connections, models
class PostgresTableLockMode(Enum):
"""List of table locking modes.
See: https://www.postgresql.org/docs/current/explicit-locking.html
"""
ACCESS_SHARE = "ACCESS SHARE"
ROW_SHARE = "ROW SHARE"
ROW_EXCLUSIVE = "ROW EXCLUSIVE"
SHARE_UPDATE_EXCLUSIVE = "SHARE UPDATE EXCLUSIVE"
SHARE = "SHARE"
SHARE_ROW_EXCLUSIVE = "SHARE ROW EXCLUSIVE"
EXCLUSIVE = "EXCLUSIVE"
ACCESS_EXCLUSIVE = "ACCESS EXCLUSIVE"
@property
def alias(self) -> str:
return (
"".join([word.title() for word in self.name.lower().split("_")])
+ "Lock"
)
def postgres_lock_table(
table_name: str,
lock_mode: PostgresTableLockMode,
*,
schema_name: Optional[str] = None,
using: str = DEFAULT_DB_ALIAS,
) -> None:
"""Locks the specified table with the specified mode.
The lock is held until the end of the current transaction.
Arguments:
table_name:
Unquoted table name to acquire the lock on.
lock_mode:
Type of lock to acquire.
schema_name:
Optionally, the unquoted name of the schema
the table to lock is in. If not specified,
the table name is resolved by PostgreSQL
using its ``search_path``.
using:
Optional name of the database connection to use.
"""
connection = connections[using]
with connection.cursor() as cursor:
quoted_fqn = connection.ops.quote_name(table_name)
if schema_name:
quoted_fqn = (
connection.ops.quote_name(schema_name) + "." + quoted_fqn
)
cursor.execute(f"LOCK TABLE {quoted_fqn} IN {lock_mode.value} MODE")
def postgres_lock_model(
model: Type[models.Model],
lock_mode: PostgresTableLockMode,
*,
using: str = DEFAULT_DB_ALIAS,
schema_name: Optional[str] = None,
) -> None:
"""Locks the specified model with the specified mode.
The lock is held until the end of the current transaction.
Arguments:
model:
The model of which to lock the table.
lock_mode:
Type of lock to acquire.
schema_name:
Optionally, the unquoted name of the schema
the table to lock is in. If not specified,
the table name is resolved by PostgreSQL
using its ``search_path``.
Django models always reside in the default
("public") schema. You should not specify
this unless you're doing something special.
using:
Optional name of the database connection to use.
"""
postgres_lock_table(
model._meta.db_table, lock_mode, schema_name=schema_name, using=using
)
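# Usage sketch (illustrative; `Invoice` is a hypothetical model): the lock
# only makes sense inside a transaction, since it is released when the
# transaction ends.
#
#     from django.db import transaction
#
#     with transaction.atomic():
#         postgres_lock_model(Invoice, PostgresTableLockMode.EXCLUSIVE)
#         ...  # do work while holding the lock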
django-postgres-extra-2.0.9/psqlextra/lookups.py 0000664 0000000 0000000 00000002053 14634267343 0022056 0 ustar 00root root 0000000 0000000 from django.db.models import lookups
from django.db.models.fields import Field, related_lookups
from django.db.models.fields.related import ForeignObject
class InValuesLookupMixin:
"""Performs a `lhs IN VALUES ((a), (b), (c))` lookup.
This can be significantly faster than a normal `IN (a, b, c)`. The
latter sometimes causes the Postgres query planner to do a sequential
scan.
"""
def as_sql(self, compiler, connection):
if not self.rhs_is_direct_value():
return super().as_sql(compiler, connection)
lhs, lhs_params = self.process_lhs(compiler, connection)
_, rhs_params = self.process_rhs(compiler, connection)
rhs = ",".join([f"(%s)" for _ in rhs_params]) # noqa: F541
return f"{lhs} IN (VALUES {rhs})", lhs_params + list(rhs_params)
@Field.register_lookup
class InValuesLookup(InValuesLookupMixin, lookups.In):
lookup_name = "invalues"
@ForeignObject.register_lookup
class InValuesRelatedLookup(InValuesLookupMixin, related_lookups.RelatedIn):
lookup_name = "invalues"
django-postgres-extra-2.0.9/psqlextra/management/ 0000775 0000000 0000000 00000000000 14634267343 0022124 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/management/__init__.py 0000664 0000000 0000000 00000000000 14634267343 0024223 0 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/management/commands/ 0000775 0000000 0000000 00000000000 14634267343 0023725 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/management/commands/__init__.py 0000664 0000000 0000000 00000000000 14634267343 0026024 0 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/management/commands/pgmakemigrations.py 0000664 0000000 0000000 00000000652 14634267343 0027643 0 ustar 00root root 0000000 0000000 from django.core.management.commands import ( # type: ignore[attr-defined]
makemigrations,
)
from psqlextra.backend.migrations import postgres_patched_migrations
class Command(makemigrations.Command):
help = "Creates new PostgreSQL specific migration(s) for apps."
def handle(self, *app_labels, **options):
with postgres_patched_migrations():
return super().handle(*app_labels, **options)
django-postgres-extra-2.0.9/psqlextra/management/commands/pgpartition.py 0000664 0000000 0000000 00000006560 14634267343 0026646 0 ustar 00root root 0000000 0000000 import sys
from typing import Optional
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils.module_loading import import_string
from psqlextra.partitioning import PostgresPartitioningError
class Command(BaseCommand):
"""Create new partitions and delete old ones according to the configured
partitioning strategies."""
help = "Create new partitions and delete old ones using the configured partitioning manager. The PSQLEXTRA_PARTITIONING_MANAGER setting must be configured."
def add_arguments(self, parser):
parser.add_argument(
"--dry",
"-d",
action="store_true",
help="When specified, no partition will be created/deleted. Just a simulation.",
required=False,
default=False,
)
parser.add_argument(
"--yes",
"-y",
action="store_true",
help="Answer yes to all questions. WARNING: You will not be asked before deleting a partition.",
required=False,
default=False,
)
parser.add_argument(
"--using",
"-u",
help="Optional name of the database connection to use.",
default="default",
)
parser.add_argument(
"--skip-create",
action="store_true",
help="Do not create partitions.",
required=False,
default=False,
)
parser.add_argument(
"--skip-delete",
action="store_true",
help="Do not delete partitions.",
required=False,
default=False,
)
def handle( # type: ignore[override]
self,
dry: bool,
yes: bool,
using: Optional[str],
skip_create: bool,
skip_delete: bool,
*args,
**kwargs,
):
partitioning_manager = self._partitioning_manager()
plan = partitioning_manager.plan(
skip_create=skip_create, skip_delete=skip_delete, using=using
)
creations_count = len(plan.creations)
deletions_count = len(plan.deletions)
if creations_count == 0 and deletions_count == 0:
print("Nothing to be done.")
return
plan.print()
if dry:
return
if not yes:
sys.stdout.write("Do you want to proceed? (y/N) ")
if not self._ask_for_confirmation():
print("Operation aborted.")
return
plan.apply(using=using)
print("Operations applied.")
@staticmethod
def _ask_for_confirmation() -> bool:
answer = input("").lower()
if not answer:
return False
if answer[0] == "y" or answer == "yes":
return True
return False
@staticmethod
def _partitioning_manager():
partitioning_manager = getattr(
settings, "PSQLEXTRA_PARTITIONING_MANAGER", None
)
if not partitioning_manager:
raise PostgresPartitioningError(
"You must configure the PSQLEXTRA_PARTITIONING_MANAGER setting "
"for automatic partitioning to work."
)
if isinstance(partitioning_manager, str):
partitioning_manager = import_string(partitioning_manager)
return partitioning_manager
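# Usage sketch (illustrative; the dotted path is hypothetical): point the
# command at a partitioning manager in your Django settings, then run it.
#
#     # settings.py
#     PSQLEXTRA_PARTITIONING_MANAGER = "myapp.partitioning.manager"
#
#     $ python manage.py pgpartition --dry   # preview the plan
#     $ python manage.py pgpartition --yes   # apply without prompting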
django-postgres-extra-2.0.9/psqlextra/management/commands/pgrefreshmv.py 0000664 0000000 0000000 00000003024 14634267343 0026626 0 ustar 00root root 0000000 0000000 from django.apps import apps
from django.core.management.base import BaseCommand
from django.db.utils import NotSupportedError, OperationalError
from psqlextra.models import PostgresMaterializedViewModel
class Command(BaseCommand):
"""Refreshes a :see:PostgresMaterializedViewModel."""
help = "Refreshes the specified materialized view."
def add_arguments(self, parser):
parser.add_argument(
"app_label",
type=str,
help="Label of the app the materialized view model is in.",
)
parser.add_argument(
"model_name",
type=str,
help="Name of the materialized view model to refresh.",
)
parser.add_argument(
"--concurrently",
"-c",
action="store_true",
help="Whether to refresh the materialized view model concurrently.",
required=False,
default=False,
)
def handle(self, *app_labels, **options):
app_label = options.get("app_label")
model_name = options.get("model_name")
concurrently = options.get("concurrently")
model = apps.get_model(app_label, model_name)
if not model:
raise OperationalError(f"Cannot find a model named '{model_name}'")
if not issubclass(model, PostgresMaterializedViewModel):
raise NotSupportedError(
f"Model {model.__name__} is not a `PostgresMaterializedViewModel`"
)
model.refresh(concurrently=concurrently)
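# Usage sketch (illustrative; app and model names are hypothetical):
#
#     $ python manage.py pgrefreshmv myapp MyMaterializedView --concurrently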
django-postgres-extra-2.0.9/psqlextra/manager/ 0000775 0000000 0000000 00000000000 14634267343 0021422 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/manager/__init__.py 0000664 0000000 0000000 00000000405 14634267343 0023532 0 ustar 00root root 0000000 0000000 # this should not be here, but there are users depending
# on this being here, so let's leave it here so we don't
# break them
from psqlextra.query import PostgresQuerySet
from .manager import PostgresManager
__all__ = ["PostgresManager", "PostgresQuerySet"]
django-postgres-extra-2.0.9/psqlextra/manager/manager.py 0000664 0000000 0000000 00000004025 14634267343 0023407 0 ustar 00root root 0000000 0000000 from typing import Optional
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import connections
from django.db.models import Manager
from psqlextra.query import PostgresQuerySet
class PostgresManager(Manager.from_queryset(PostgresQuerySet)): # type: ignore[misc]
"""Adds support for PostgreSQL specifics."""
use_in_migrations = True
def __init__(self, *args, **kwargs):
"""Initializes a new instance of :see:PostgresManager."""
super().__init__(*args, **kwargs)
# make sure our back-end is set for at least one db and refuse to proceed otherwise
has_psqlextra_backend = any(
[
db_settings
for db_settings in settings.DATABASES.values()
if "psqlextra" in db_settings["ENGINE"]
]
)
if not has_psqlextra_backend:
raise ImproperlyConfigured(
(
"Could not locate the 'psqlextra.backend'. "
"django-postgres-extra cannot function without "
"the 'psqlextra.backend'. Set DATABASES.ENGINE."
)
)
def truncate(
self, cascade: bool = False, using: Optional[str] = None
) -> None:
"""Truncates this model/table using the TRUNCATE statement.
This DELETES ALL ROWS. No signals will be fired.
See: https://www.postgresql.org/docs/9.1/sql-truncate.html
Arguments:
cascade:
Whether to delete dependent rows. If set to
False, an error will be raised if there
are rows in other tables referencing
the rows you're trying to delete.
"""
connection = connections[using or "default"]
table_name = connection.ops.quote_name(self.model._meta.db_table)
with connection.cursor() as cursor:
sql = "TRUNCATE TABLE %s" % table_name
if cascade:
sql += " CASCADE"
cursor.execute(sql)
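# Usage sketch (illustrative; `Product` is a hypothetical model using
# PostgresManager):
#
#     Product.objects.truncate()              # TRUNCATE TABLE "myapp_product"
#     Product.objects.truncate(cascade=True)  # TRUNCATE TABLE "myapp_product" CASCADE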
django-postgres-extra-2.0.9/psqlextra/models/ 0000775 0000000 0000000 00000000000 14634267343 0021273 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/models/__init__.py 0000664 0000000 0000000 00000000427 14634267343 0023407 0 ustar 00root root 0000000 0000000 from .base import PostgresModel
from .partitioned import PostgresPartitionedModel
from .view import PostgresMaterializedViewModel, PostgresViewModel
__all__ = [
"PostgresModel",
"PostgresViewModel",
"PostgresMaterializedViewModel",
"PostgresPartitionedModel",
]
django-postgres-extra-2.0.9/psqlextra/models/base.py 0000664 0000000 0000000 00000000575 14634267343 0022566 0 ustar 00root root 0000000 0000000 from typing import Any
from django.db import models
from django.db.models import Manager
from psqlextra.manager import PostgresManager
class PostgresModel(models.Model):
"""Base class for for taking advantage of PostgreSQL specific features."""
class Meta:
abstract = True
base_manager_name = "objects"
objects: "Manager[Any]" = PostgresManager()
django-postgres-extra-2.0.9/psqlextra/models/options.py 0000664 0000000 0000000 00000001722 14634267343 0023342 0 ustar 00root root 0000000 0000000 from typing import Dict, List, Optional, Union
from psqlextra.types import PostgresPartitioningMethod, SQLWithParams
class PostgresPartitionedModelOptions:
"""Container for :see:PostgresPartitionedModel options.
This is where attributes copied from the model's `PartitioningMeta`
are held.
"""
def __init__(self, method: PostgresPartitioningMethod, key: List[str]):
self.method = method
self.key = key
self.original_attrs: Dict[
str, Union[PostgresPartitioningMethod, List[str]]
] = dict(method=method, key=key)
class PostgresViewOptions:
"""Container for :see:PostgresView and :see:PostgresMaterializedView
options.
This is where attributes copied from the model's `ViewMeta` are
held.
"""
def __init__(self, query: Optional[SQLWithParams]):
self.query = query
self.original_attrs: Dict[str, Optional[SQLWithParams]] = dict(
query=self.query
)
django-postgres-extra-2.0.9/psqlextra/models/partitioned.py 0000664 0000000 0000000 00000002703 14634267343 0024171 0 ustar 00root root 0000000 0000000 from typing import Iterable
from django.db.models.base import ModelBase
from psqlextra.types import PostgresPartitioningMethod
from .base import PostgresModel
from .options import PostgresPartitionedModelOptions
class PostgresPartitionedModelMeta(ModelBase):
"""Custom meta class for :see:PostgresPartitionedModel.
This meta class extracts attributes from the inner
`PartitioningMeta` class and copies it onto a `_partitioning_meta`
attribute. This is similar to how Django's `_meta` works.
"""
default_method = PostgresPartitioningMethod.RANGE
default_key: Iterable[str] = []
def __new__(cls, name, bases, attrs, **kwargs):
new_class = super().__new__(cls, name, bases, attrs, **kwargs)
meta_class = attrs.pop("PartitioningMeta", None)
method = getattr(meta_class, "method", None)
key = getattr(meta_class, "key", None)
partitioning_meta = PostgresPartitionedModelOptions(
method=method or cls.default_method, key=key or cls.default_key
)
new_class.add_to_class("_partitioning_meta", partitioning_meta)
return new_class
class PostgresPartitionedModel(
PostgresModel, metaclass=PostgresPartitionedModelMeta
):
"""Base class for taking advantage of PostgreSQL's 11.x native support for
table partitioning."""
_partitioning_meta: PostgresPartitionedModelOptions
class Meta:
abstract = True
base_manager_name = "objects"
django-postgres-extra-2.0.9/psqlextra/models/view.py 0000664 0000000 0000000 00000011234 14634267343 0022620 0 ustar 00root root 0000000 0000000 from typing import TYPE_CHECKING, Any, Callable, Optional, Union, cast
from django.core.exceptions import ImproperlyConfigured
from django.db import connections
from django.db.models import Model
from django.db.models.base import ModelBase
from django.db.models.query import QuerySet
from psqlextra.type_assertions import is_query_set, is_sql, is_sql_with_params
from psqlextra.types import SQL, SQLWithParams
from .base import PostgresModel
from .options import PostgresViewOptions
if TYPE_CHECKING:
from psqlextra.backend.schema import PostgresSchemaEditor
ViewQueryValue = Union[QuerySet, SQLWithParams, SQL]
ViewQuery = Optional[Union[ViewQueryValue, Callable[[], ViewQueryValue]]]
class PostgresViewModelMeta(ModelBase):
"""Custom meta class for :see:PostgresView and
:see:PostgresMaterializedView.
This meta class extracts attributes from the inner
`ViewMeta` class and copies it onto a `_view_meta`
attribute. This is similar to how Django's `_meta` works.
"""
def __new__(cls, name, bases, attrs, **kwargs):
new_class = super().__new__(cls, name, bases, attrs, **kwargs)
meta_class = attrs.pop("ViewMeta", None)
view_query = getattr(meta_class, "query", None)
sql_with_params = cls._view_query_as_sql_with_params(
new_class, view_query
)
view_meta = PostgresViewOptions(query=sql_with_params)
new_class.add_to_class("_view_meta", view_meta)
return new_class
@staticmethod
def _view_query_as_sql_with_params(
model: Model, view_query: ViewQuery
) -> Optional[SQLWithParams]:
"""Gets the query associated with the view as a raw SQL query with bind
parameters.
The query can be specified as a query set, raw SQL with params
or without params. The query can also be specified as a callable
which returns any of the above.
When copying the meta options from the model, we convert any
of the above into a raw SQL query with bind parameters. We do
this because it is what the SQL driver understands and
it can easily be serialized into a migration.
"""
# might be a callable to support delayed imports
view_query = view_query() if callable(view_query) else view_query
# make sure we don't do a boolean check on query sets,
# because that might evaluate the query set
if not is_query_set(view_query) and not view_query:
return None
is_valid_view_query = (
is_query_set(view_query)
or is_sql_with_params(view_query)
or is_sql(view_query)
)
if not is_valid_view_query:
raise ImproperlyConfigured(
(
"Model '%s' is not properly configured to be a view."
" Set the `query` attribute on the `ViewMeta` class"
" to be a valid `django.db.models.query.QuerySet`"
" SQL string, or tuple of SQL string and params."
)
% (model.__name__)
)
# querysets can easily be converted into sql, params
if is_query_set(view_query):
return cast("QuerySet[Any]", view_query).query.sql_with_params()
# query was already specified in the target format
if is_sql_with_params(view_query):
return cast(SQLWithParams, view_query)
view_query_sql = cast(str, view_query)
return view_query_sql, tuple()
class PostgresViewModel(PostgresModel, metaclass=PostgresViewModelMeta):
"""Base class for creating a model that is a view."""
_view_meta: PostgresViewOptions
class Meta:
abstract = True
base_manager_name = "objects"
class PostgresMaterializedViewModel(
PostgresViewModel, metaclass=PostgresViewModelMeta
):
"""Base class for creating a model that is a materialized view."""
class Meta:
abstract = True
base_manager_name = "objects"
@classmethod
def refresh(
cls, concurrently: bool = False, using: Optional[str] = None
) -> None:
"""Refreshes this materialized view.
Arguments:
concurrently:
Whether to tell PostgreSQL to refresh this
materialized view concurrently.
using:
Optionally, the name of the database connection
to use for refreshing the materialized view.
"""
conn_name = using or "default"
with connections[conn_name].schema_editor() as schema_editor:
cast(
"PostgresSchemaEditor", schema_editor
).refresh_materialized_view_model(cls, concurrently)
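# Usage sketch (illustrative; `Product` and the view model are hypothetical):
# a materialized view defined by a queryset. The metaclass above converts the
# queryset into raw SQL with bind parameters; a callable is used here to
# support delayed imports, as noted in the docstring.
#
#     class ExpensiveProduct(PostgresMaterializedViewModel):
#         class ViewMeta:
#             query = lambda: Product.objects.filter(price__gt=100)
#
#     ExpensiveProduct.refresh(concurrently=True)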
django-postgres-extra-2.0.9/psqlextra/partitioning/ 0000775 0000000 0000000 00000000000 14634267343 0022517 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/partitioning/__init__.py 0000664 0000000 0000000 00000002277 14634267343 0024640 0 ustar 00root root 0000000 0000000 from .config import PostgresPartitioningConfig
from .current_time_strategy import PostgresCurrentTimePartitioningStrategy
from .error import PostgresPartitioningError
from .manager import PostgresPartitioningManager
from .partition import PostgresPartition
from .plan import PostgresModelPartitioningPlan, PostgresPartitioningPlan
from .range_partition import PostgresRangePartition
from .range_strategy import PostgresRangePartitioningStrategy
from .shorthands import partition_by_current_time
from .strategy import PostgresPartitioningStrategy
from .time_partition import PostgresTimePartition
from .time_partition_size import PostgresTimePartitionSize
from .time_strategy import PostgresTimePartitioningStrategy
__all__ = [
"PostgresPartitioningManager",
"partition_by_current_time",
"PostgresPartitioningError",
"PostgresPartitioningPlan",
"PostgresModelPartitioningPlan",
"PostgresPartition",
"PostgresRangePartition",
"PostgresTimePartition",
"PostgresPartitioningStrategy",
"PostgresTimePartitioningStrategy",
"PostgresRangePartitioningStrategy",
"PostgresCurrentTimePartitioningStrategy",
"PostgresPartitioningConfig",
"PostgresTimePartitionSize",
]
django-postgres-extra-2.0.9/psqlextra/partitioning/config.py 0000664 0000000 0000000 00000001000 14634267343 0024325 0 ustar 00root root 0000000 0000000 from typing import Type
from psqlextra.models import PostgresPartitionedModel
from .strategy import PostgresPartitioningStrategy
class PostgresPartitioningConfig:
"""Configuration for partitioning a specific model according to the
specified strategy."""
def __init__(
self,
model: Type[PostgresPartitionedModel],
strategy: PostgresPartitioningStrategy,
) -> None:
self.model = model
self.strategy = strategy
__all__ = ["PostgresPartitioningConfig"]
django-postgres-extra-2.0.9/psqlextra/partitioning/constants.py 0000664 0000000 0000000 00000000531 14634267343 0025104 0 ustar 00root root 0000000 0000000 # comment placed on partition tables created by the partitioner
# partition tables that do not have this comment will _never_
# be deleted by the partitioner. This is a safety mechanism so
# manually created partitions aren't accidentally cleaned up
AUTO_PARTITIONED_COMMENT = "psqlextra_auto_partitioned"
__all__ = ["AUTO_PARTITIONED_COMMENT"]
django-postgres-extra-2.0.9/psqlextra/partitioning/current_time_strategy.py 0000664 0000000 0000000 00000004666 14634267343 0027527 0 ustar 00root root 0000000 0000000 from datetime import datetime, timezone
from typing import Generator, Optional
from dateutil.relativedelta import relativedelta
from .range_strategy import PostgresRangePartitioningStrategy
from .time_partition import PostgresTimePartition
from .time_partition_size import PostgresTimePartitionSize
class PostgresCurrentTimePartitioningStrategy(
PostgresRangePartitioningStrategy
):
"""Implments a time based partitioning strategy where each partition
contains values for a specific time period.
All buckets will be equal in size and start at the start of the
unit. With monthly partitioning, partitions start on the 1st and
with weekly partitioning, partitions start on monday.
"""
def __init__(
self,
size: PostgresTimePartitionSize,
count: int,
max_age: Optional[relativedelta] = None,
name_format: Optional[str] = None,
) -> None:
"""Initializes a new instance of :see:PostgresTimePartitioningStrategy.
Arguments:
size:
The size of each partition.
count:
The amount of partitions to create ahead
from the current date/time.
max_age:
Maximum age of a partition. Partitions
older than this are deleted during
auto cleanup.
name_format:
Optional datetime format string passed to
datetime.strftime to generate partition names.
"""
self.size = size
self.count = count
self.max_age = max_age
self.name_format = name_format
def to_create(self) -> Generator[PostgresTimePartition, None, None]:
current_datetime = self.size.start(self.get_start_datetime())
for _ in range(self.count):
yield PostgresTimePartition(
start_datetime=current_datetime,
size=self.size,
name_format=self.name_format,
)
current_datetime += self.size.as_delta()
def to_delete(self) -> Generator[PostgresTimePartition, None, None]:
if not self.max_age:
return
current_datetime = self.size.start(
self.get_start_datetime() - self.max_age
)
while True:
yield PostgresTimePartition(
start_datetime=current_datetime,
size=self.size,
name_format=self.name_format,
)
current_datetime -= self.size.as_delta()
def get_start_datetime(self) -> datetime:
return datetime.now(timezone.utc)
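# Usage sketch: a strategy that keeps three monthly partitions ahead
# of the current date and deletes partitions older than six months:
#
#     strategy = PostgresCurrentTimePartitioningStrategy(
#         size=PostgresTimePartitionSize(months=1),
#         count=3,
#         max_age=relativedelta(months=6),
#     )
#     [p.name() for p in strategy.to_create()]  # e.g. ["2024_jan", "2024_feb", "2024_mar"]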
django-postgres-extra-2.0.9/psqlextra/partitioning/error.py 0000664 0000000 0000000 00000000321 14634267343 0024216 0 ustar 00root root 0000000 0000000 class PostgresPartitioningError(RuntimeError):
"""Raised when the partitioning configuration is broken or automatically
creating/deleting partitions fails."""
__all__ = ["PostgresPartitioningError"]
django-postgres-extra-2.0.9/psqlextra/partitioning/manager.py 0000664 0000000 0000000 00000010735 14634267343 0024511 0 ustar 00root root 0000000 0000000 from typing import List, Optional, Tuple, Type
from django.db import connections
from psqlextra.models import PostgresPartitionedModel
from .config import PostgresPartitioningConfig
from .constants import AUTO_PARTITIONED_COMMENT
from .error import PostgresPartitioningError
from .partition import PostgresPartition
from .plan import PostgresModelPartitioningPlan, PostgresPartitioningPlan
PartitionList = List[Tuple[PostgresPartitionedModel, List[PostgresPartition]]]
class PostgresPartitioningManager:
"""Helps managing partitions by automatically creating new partitions and
deleting old ones according to the configuration."""
def __init__(self, configs: List[PostgresPartitioningConfig]) -> None:
self.configs = configs
self._validate_configs(self.configs)
def plan(
self,
skip_create: bool = False,
skip_delete: bool = False,
using: Optional[str] = None,
) -> PostgresPartitioningPlan:
"""Plans which partitions should be deleted/created.
Arguments:
skip_create:
If set to True, no partitions will be marked
for creation, regardless of the configuration.
skip_delete:
If set to True, no partitions will be marked
for deletion, regardless of the configuration.
using:
Optional name of the database connection to use.
Returns:
A plan describing what partitions would be created
and deleted if the plan is applied.
"""
model_plans = []
for config in self.configs:
model_plan = self._plan_for_config(
config,
skip_create=skip_create,
skip_delete=skip_delete,
using=using,
)
if not model_plan:
continue
model_plans.append(model_plan)
return PostgresPartitioningPlan(model_plans)
def find_config_for_model(
self, model: PostgresPartitionedModel
) -> Optional[PostgresPartitioningConfig]:
"""Finds the partitioning config for the specified model."""
return next(
(config for config in self.configs if config.model == model), None
)
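# Usage sketch (assumption: "MyPartitionedModel" is a hypothetical
# PostgresPartitionedModel; partition_by_current_time comes from
# psqlextra.partitioning.shorthands):
#
#     manager = PostgresPartitioningManager([
#         partition_by_current_time(MyPartitionedModel, count=3, months=1),
#     ])
#     plan = manager.plan()
#     plan.print()   # show which partitions would be created/deleted
#     plan.apply()   # actually create/delete them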
def _plan_for_config(
self,
config: PostgresPartitioningConfig,
skip_create: bool = False,
skip_delete: bool = False,
using: Optional[str] = None,
) -> Optional[PostgresModelPartitioningPlan]:
"""Creates a partitioning plan for one partitioning config."""
connection = connections[using or "default"]
table = self._get_partitioned_table(connection, config.model)
model_plan = PostgresModelPartitioningPlan(config)
if not skip_create:
for partition in config.strategy.to_create():
if table.partition_by_name(name=partition.name()):
continue
model_plan.creations.append(partition)
if not skip_delete:
for partition in config.strategy.to_delete():
introspected_partition = table.partition_by_name(
name=partition.name()
)
if not introspected_partition:
break
if introspected_partition.comment != AUTO_PARTITIONED_COMMENT:
continue
model_plan.deletions.append(partition)
if len(model_plan.creations) == 0 and len(model_plan.deletions) == 0:
return None
return model_plan
@staticmethod
def _get_partitioned_table(
connection, model: Type[PostgresPartitionedModel]
):
with connection.cursor() as cursor:
table = connection.introspection.get_partitioned_table(
cursor, model._meta.db_table
)
if not table:
raise PostgresPartitioningError(
f"Model {model.__name__}, with table "
f"{model._meta.db_table} does not exists in the "
"database. Did you run `python manage.py migrate`?"
)
return table
@staticmethod
def _validate_configs(configs: List[PostgresPartitioningConfig]):
"""Ensures there is only one config per model."""
models = set([config.model.__name__ for config in configs])
if len(models) != len(configs):
raise PostgresPartitioningError(
"Only one partitioning config per model is allowed"
)
django-postgres-extra-2.0.9/psqlextra/partitioning/partition.py 0000664 0000000 0000000 00000001773 14634267343 0025112 0 ustar 00root root 0000000 0000000 from abc import abstractmethod
from typing import Optional, Type
from psqlextra.backend.schema import PostgresSchemaEditor
from psqlextra.models import PostgresPartitionedModel
class PostgresPartition:
"""Base class for a PostgreSQL table partition."""
@abstractmethod
def name(self) -> str:
"""Generates/computes the name for this partition."""
@abstractmethod
def create(
self,
model: Type[PostgresPartitionedModel],
schema_editor: PostgresSchemaEditor,
comment: Optional[str] = None,
) -> None:
"""Creates this partition in the database."""
@abstractmethod
def delete(
self,
model: Type[PostgresPartitionedModel],
schema_editor: PostgresSchemaEditor,
) -> None:
"""Deletes this partition from the database."""
def deconstruct(self) -> dict:
"""Deconstructs this partition into a dict of attributes/fields."""
return {"name": self.name()}
__all__ = ["PostgresPartition"]
django-postgres-extra-2.0.9/psqlextra/partitioning/plan.py 0000664 0000000 0000000 00000007270 14634267343 0024031 0 ustar 00root root 0000000 0000000 from dataclasses import dataclass, field
from typing import TYPE_CHECKING, List, Optional, cast
from django.db import connections, transaction
from .config import PostgresPartitioningConfig
from .constants import AUTO_PARTITIONED_COMMENT
from .partition import PostgresPartition
if TYPE_CHECKING:
from psqlextra.backend.schema import PostgresSchemaEditor
@dataclass
class PostgresModelPartitioningPlan:
"""Describes the partitions that are going to be created/deleted for a
particular partitioning config.
A "partitioning config" applies to one model.
"""
config: PostgresPartitioningConfig
creations: List[PostgresPartition] = field(default_factory=list)
deletions: List[PostgresPartition] = field(default_factory=list)
def apply(self, using: Optional[str]) -> None:
"""Applies this partitioning plan by creating and deleting the planned
partitions.
Applying the plan runs in a transaction.
Arguments:
using:
Optional name of the database connection to use.
"""
connection = connections[using or "default"]
with transaction.atomic():
with connection.schema_editor() as schema_editor:
for partition in self.creations:
partition.create(
self.config.model,
cast("PostgresSchemaEditor", schema_editor),
comment=AUTO_PARTITIONED_COMMENT,
)
for partition in self.deletions:
partition.delete(
self.config.model,
cast("PostgresSchemaEditor", schema_editor),
)
def print(self) -> None:
"""Prints this model plan to the terminal in a readable format."""
print(f"{self.config.model.__name__}:")
for partition in self.deletions:
print(" - %s" % partition.name())
for key, value in partition.deconstruct().items():
print(f" {key}: {value}")
for partition in self.creations:
print(" + %s" % partition.name())
for key, value in partition.deconstruct().items():
print(f" {key}: {value}")
@dataclass
class PostgresPartitioningPlan:
"""Describes the partitions that are going to be created/deleted."""
model_plans: List[PostgresModelPartitioningPlan]
@property
def creations(self) -> List[PostgresPartition]:
"""Gets a complete flat list of the partitions that are going to be
created."""
creations = []
for model_plan in self.model_plans:
creations.extend(model_plan.creations)
return creations
@property
def deletions(self) -> List[PostgresPartition]:
"""Gets a complete flat list of the partitions that are going to be
deleted."""
deletions = []
for model_plan in self.model_plans:
deletions.extend(model_plan.deletions)
return deletions
def apply(self, using: Optional[str] = None) -> None:
"""Applies this plan by creating/deleting all planned partitions."""
for model_plan in self.model_plans:
model_plan.apply(using=using)
def print(self) -> None:
"""Prints this plan to the terminal in a readable format."""
for model_plan in self.model_plans:
model_plan.print()
print("")
create_count = len(self.creations)
delete_count = len(self.deletions)
print(f"{delete_count} partitions will be deleted")
print(f"{create_count} partitions will be created")
__all__ = ["PostgresPartitioningPlan", "PostgresModelPartitioningPlan"]
django-postgres-extra-2.0.9/psqlextra/partitioning/range_partition.py 0000664 0000000 0000000 00000002412 14634267343 0026255 0 ustar 00root root 0000000 0000000 from typing import Any, Optional, Type
from psqlextra.backend.schema import PostgresSchemaEditor
from psqlextra.models import PostgresPartitionedModel
from .partition import PostgresPartition
class PostgresRangePartition(PostgresPartition):
"""Base class for a PostgreSQL table partition in a range partitioned
table."""
def __init__(self, from_values: Any, to_values: Any) -> None:
self.from_values = from_values
self.to_values = to_values
def deconstruct(self) -> dict:
return {
**super().deconstruct(),
"from_values": self.from_values,
"to_values": self.to_values,
}
def create(
self,
model: Type[PostgresPartitionedModel],
schema_editor: PostgresSchemaEditor,
comment: Optional[str] = None,
) -> None:
schema_editor.add_range_partition(
model=model,
name=self.name(),
from_values=self.from_values,
to_values=self.to_values,
comment=comment,
)
def delete(
self,
model: Type[PostgresPartitionedModel],
schema_editor: PostgresSchemaEditor,
) -> None:
schema_editor.delete_partition(model, self.name())
__all__ = ["PostgresRangePartition"]
django-postgres-extra-2.0.9/psqlextra/partitioning/range_strategy.py 0000664 0000000 0000000 00000000417 14634267343 0026111 0 ustar 00root root 0000000 0000000 from .strategy import PostgresPartitioningStrategy
class PostgresRangePartitioningStrategy(PostgresPartitioningStrategy):
"""Base class for implementing a partitioning strategy for a range
partitioned table."""
__all__ = ["PostgresRangePartitioningStrategy"]
django-postgres-extra-2.0.9/psqlextra/partitioning/shorthands.py 0000664 0000000 0000000 00000004120 14634267343 0025243 0 ustar 00root root 0000000 0000000 from typing import Optional, Type
from dateutil.relativedelta import relativedelta
from psqlextra.models import PostgresPartitionedModel
from .config import PostgresPartitioningConfig
from .current_time_strategy import PostgresCurrentTimePartitioningStrategy
from .time_partition_size import PostgresTimePartitionSize
def partition_by_current_time(
model: Type[PostgresPartitionedModel],
count: int,
years: Optional[int] = None,
months: Optional[int] = None,
weeks: Optional[int] = None,
days: Optional[int] = None,
max_age: Optional[relativedelta] = None,
name_format: Optional[str] = None,
) -> PostgresPartitioningConfig:
"""Short-hand for generating a partitioning config that partitions the
specified model by time.
One specifies one of the `years`, `months`, `weeks`
or `days` parameters to indicate the size of each
partition. These parameters cannot be combined.
Arguments:
model:
The partitioned model to create a
partitioning config for.
count:
The amount of partitions to create ahead of
the current date/time.
years:
The amount of years each partition should contain.
months:
The amount of months each partition should contain.
weeks:
The amount of weeks each partition should contain.
days:
The amount of days each partition should contain.
max_age:
The maximum age of a partition (calculated from the
start of the partition).
Partitions older than this are deleted during
a delete/cleanup run.
name_format:
The datetime format which is being passed to datetime.strftime
to generate the partition name.
"""
size = PostgresTimePartitionSize(
years=years, months=months, weeks=weeks, days=days
)
return PostgresPartitioningConfig(
model=model,
strategy=PostgresCurrentTimePartitioningStrategy(
size=size,
count=count,
max_age=max_age,
name_format=name_format,
),
)
__all__ = ["partition_by_current_time"]
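# Usage sketch (assumption: "Metric" is a hypothetical partitioned model):
#
#     config = partition_by_current_time(
#         Metric,
#         count=6,                          # create 6 partitions ahead
#         weeks=1,                          # each partition spans one week
#         max_age=relativedelta(weeks=12),  # drop partitions older than ~3 months
#         name_format="%Y_week_%W",
#     )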
django-postgres-extra-2.0.9/psqlextra/partitioning/strategy.py 0000664 0000000 0000000 00000001136 14634267343 0024734 0 ustar 00root root 0000000 0000000 from abc import abstractmethod
from typing import Generator
from .partition import PostgresPartition
class PostgresPartitioningStrategy:
"""Base class for implementing a partitioning strategy for a partitioned
table."""
@abstractmethod
def to_create(
self,
) -> Generator[PostgresPartition, None, None]:
"""Generates a list of partitions to be created."""
@abstractmethod
def to_delete(
self,
) -> Generator[PostgresPartition, None, None]:
"""Generates a list of partitions to be deleted."""
__all__ = ["PostgresPartitioningStrategy"]
django-postgres-extra-2.0.9/psqlextra/partitioning/time_partition.py 0000664 0000000 0000000 00000003251 14634267343 0026121 0 ustar 00root root 0000000 0000000 from datetime import datetime
from typing import Optional
from .error import PostgresPartitioningError
from .range_partition import PostgresRangePartition
from .time_partition_size import (
PostgresTimePartitionSize,
PostgresTimePartitionUnit,
)
class PostgresTimePartition(PostgresRangePartition):
"""Time-based range table partition.
:see:PostgresTimePartitioningStrategy for more info.
"""
_unit_name_format = {
PostgresTimePartitionUnit.YEARS: "%Y",
PostgresTimePartitionUnit.MONTHS: "%Y_%b",
PostgresTimePartitionUnit.WEEKS: "%Y_week_%W",
PostgresTimePartitionUnit.DAYS: "%Y_%b_%d",
}
def __init__(
self,
size: PostgresTimePartitionSize,
start_datetime: datetime,
name_format: Optional[str] = None,
) -> None:
end_datetime = start_datetime + size.as_delta()
super().__init__(
from_values=start_datetime.strftime("%Y-%m-%d"),
to_values=end_datetime.strftime("%Y-%m-%d"),
)
self.size = size
self.start_datetime = start_datetime
self.end_datetime = end_datetime
self.name_format = name_format
def name(self) -> str:
name_format = self.name_format or self._unit_name_format.get(
self.size.unit
)
if not name_format:
raise PostgresPartitioningError("Unknown size/unit")
return self.start_datetime.strftime(name_format).lower()
def deconstruct(self) -> dict:
return {
**super().deconstruct(),
"size_unit": self.size.unit.value,
"size_value": self.size.value,
}
__all__ = ["PostgresTimePartition"]
django-postgres-extra-2.0.9/psqlextra/partitioning/time_partition_size.py 0000664 0000000 0000000 00000005557 14634267343 0027166 0 ustar 00root root 0000000 0000000 import enum
from datetime import date, datetime
from typing import Optional, Union
from dateutil.relativedelta import relativedelta
from .error import PostgresPartitioningError
class PostgresTimePartitionUnit(enum.Enum):
YEARS = "years"
MONTHS = "months"
WEEKS = "weeks"
DAYS = "days"
class PostgresTimePartitionSize:
"""Size of a time-based range partition table."""
unit: PostgresTimePartitionUnit
value: int
def __init__(
self,
years: Optional[int] = None,
months: Optional[int] = None,
weeks: Optional[int] = None,
days: Optional[int] = None,
) -> None:
sizes = [years, months, weeks, days]
if not any(sizes):
raise PostgresPartitioningError("Partition cannot be 0 in size.")
if len([size for size in sizes if size and size > 0]) > 1:
raise PostgresPartitioningError(
"Partition can only have on size unit."
)
if years:
self.unit = PostgresTimePartitionUnit.YEARS
self.value = years
elif months:
self.unit = PostgresTimePartitionUnit.MONTHS
self.value = months
elif weeks:
self.unit = PostgresTimePartitionUnit.WEEKS
self.value = weeks
elif days:
self.unit = PostgresTimePartitionUnit.DAYS
self.value = days
else:
raise PostgresPartitioningError(
"Unsupported time partitioning unit"
)
def as_delta(self) -> relativedelta:
if self.unit == PostgresTimePartitionUnit.YEARS:
return relativedelta(years=self.value)
if self.unit == PostgresTimePartitionUnit.MONTHS:
return relativedelta(months=self.value)
if self.unit == PostgresTimePartitionUnit.WEEKS:
return relativedelta(weeks=self.value)
if self.unit == PostgresTimePartitionUnit.DAYS:
return relativedelta(days=self.value)
raise PostgresPartitioningError(
"Unsupported time partitioning unit: %s" % self.unit
)
def start(self, dt: datetime) -> datetime:
if self.unit == PostgresTimePartitionUnit.YEARS:
return self._ensure_datetime(dt.replace(month=1, day=1))
if self.unit == PostgresTimePartitionUnit.MONTHS:
return self._ensure_datetime(dt.replace(day=1))
if self.unit == PostgresTimePartitionUnit.WEEKS:
return self._ensure_datetime(dt - relativedelta(days=dt.weekday()))
return self._ensure_datetime(dt)
@staticmethod
def _ensure_datetime(dt: Union[date, datetime]) -> datetime:
return datetime(year=dt.year, month=dt.month, day=dt.day)
def __repr__(self) -> str:
return "PostgresTimePartitionSize<%s, %s>" % (self.unit, self.value)
__all__ = ["PostgresTimePartitionUnit", "PostgresTimePartitionSize"]
django-postgres-extra-2.0.9/psqlextra/partitioning/time_strategy.py 0000664 0000000 0000000 00000001264 14634267343 0025754 0 ustar 00root root 0000000 0000000 from datetime import datetime
from typing import Optional
from dateutil.relativedelta import relativedelta
from .current_time_strategy import PostgresCurrentTimePartitioningStrategy
from .time_partition_size import PostgresTimePartitionSize
class PostgresTimePartitioningStrategy(
PostgresCurrentTimePartitioningStrategy
):
def __init__(
self,
start_datetime: datetime,
size: PostgresTimePartitionSize,
count: int,
max_age: Optional[relativedelta] = None,
) -> None:
super().__init__(size, count, max_age)
self.start_datetime = start_datetime
def get_start_datetime(self) -> datetime:
return self.start_datetime
django-postgres-extra-2.0.9/psqlextra/py.typed 0000664 0000000 0000000 00000000000 14634267343 0021475 0 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/psqlextra/query.py 0000664 0000000 0000000 00000051435 14634267343 0021537 0 ustar 00root root 0000000 0000000 from collections import OrderedDict
from itertools import chain
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generic,
Iterable,
List,
Optional,
Tuple,
TypeVar,
Union,
)
from django.core.exceptions import SuspiciousOperation
from django.db import models, router
from django.db.backends.utils import CursorWrapper
from django.db.models import Expression, Q, QuerySet
from django.db.models.fields import NOT_PROVIDED
from .expressions import ExcludedCol
from .introspect import model_from_cursor, models_from_cursor
from .sql import PostgresInsertQuery, PostgresQuery
from .types import ConflictAction
if TYPE_CHECKING:
from django.db.models.constraints import BaseConstraint
from django.db.models.indexes import Index
ConflictTarget = Union[List[Union[str, Tuple[str]]], "BaseConstraint", "Index"]
TModel = TypeVar("TModel", bound=models.Model, covariant=True)
if TYPE_CHECKING:
from typing_extensions import Self
QuerySetBase = QuerySet[TModel]
else:
QuerySetBase = QuerySet
class PostgresQuerySet(QuerySetBase, Generic[TModel]):
"""Adds support for PostgreSQL specifics."""
def __init__(self, model=None, query=None, using=None, hints=None):
"""Initializes a new instance of :see:PostgresQuerySet."""
super().__init__(model, query, using, hints)
self.query = query or PostgresQuery(self.model)
self.conflict_target = None
self.conflict_action = None
self.conflict_update_condition = None
self.index_predicate = None
self.update_values = None
def annotate(self, **annotations) -> "Self": # type: ignore[valid-type, override]
"""Custom version of the standard annotate function that allows using
field names as annotated fields.
Normally, the annotate function doesn't allow you to use the
name of an existing field on the model as the alias name. This
version of the function does allow that.
This is done by temporarily renaming the fields in order to avoid the
check for conflicts that the base class does.
We rename all fields instead of the ones that already exist because
the annotations are stored in an OrderedDict. Renaming only the
conflicts will mess up the order.
"""
fields = {field.name: field for field in self.model._meta.get_fields()}
new_annotations = OrderedDict()
renames = {}
for name, value in annotations.items():
if name in fields:
new_name = "%s_new" % name
new_annotations[new_name] = value
renames[new_name] = name
else:
new_annotations[name] = value
# run the base class's annotate function
result = super().annotate(**new_annotations)
# rename the annotations back to as specified
result.rename_annotations(**renames)
return result
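# Usage sketch (assumption: "MyModel" uses PostgresManager and has a
# concrete field "name"):
#
#     from django.db.models import F
#
#     MyModel.objects.annotate(name=F("other__name"))
#
# The stock QuerySet.annotate would raise ValueError here because
# "name" conflicts with an existing field; this override permits it.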
def rename_annotations(self, **annotations):
"""Renames the aliases for the specified annotations:
.annotate(myfield=F('somestuf__myfield'))
.rename_annotations(myfield='field')
Arguments:
annotations:
The annotations to rename. Mapping the
old name to the new name.
"""
self.query.rename_annotations(annotations)
return self
def on_conflict(
self,
fields: ConflictTarget,
action: ConflictAction,
index_predicate: Optional[Union[Expression, Q, str]] = None,
update_condition: Optional[Union[Expression, Q, str]] = None,
update_values: Optional[Dict[str, Union[Any, Expression]]] = None,
):
"""Sets the action to take when conflicts arise when attempting to
insert/create a new row.
Arguments:
fields:
The fields the conflicts can occur in.
action:
The action to take when the conflict occurs.
index_predicate:
The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking
conflicts)
update_condition:
Only update if this SQL expression evaluates to true.
update_values:
Optionally, values/expressions to use when rows
conflict. If not specified, all columns specified
in the rows are updated with the values you specified.
"""
self.conflict_target = fields
self.conflict_action = action
self.conflict_update_condition = update_condition
self.index_predicate = index_predicate
self.update_values = update_values
return self
def bulk_insert(
self,
rows: Iterable[Dict[str, Any]],
return_model: bool = False,
using: Optional[str] = None,
):
"""Creates multiple new records in the database.
This allows specifying custom conflict behavior using .on_conflict().
If no special behavior was specified, this uses the normal Django create(..)
Arguments:
rows:
An iterable of dictionaries, where each dictionary
describes the fields to insert.
return_model (default: False):
If model instances should be returned rather than
just dicts.
using:
Optional name of the database connection to use for
this query.
Returns:
A list of either dicts of the rows inserted (including the pk), or
model instances of the rows inserted, with defaults applied for
any fields not specified.
"""
def is_empty(r):
# returns True only when the iterable yields no items
return all([False for _ in r])
if not rows or is_empty(rows):
return []
if not self.conflict_target and not self.conflict_action:
# no special action required, use the standard Django bulk_create(..)
return super().bulk_create(
[self.model(**fields) for fields in rows]
)
deduped_rows = rows
# when we do a ConflictAction.NOTHING, we are actually
# doing a ON CONFLICT DO UPDATE with a trick to avoid
# touching conflicting rows... however, ON CONFLICT UPDATE
# barfs when you specify the exact same row twice:
#
# > "cannot affect row a second time"
#
# we filter out the duplicates here to make sure we maintain
# the same behaviour as the real ON CONFLICT DO NOTHING
if self.conflict_action == ConflictAction.NOTHING:
deduped_rows = []
for row in rows:
if row in deduped_rows:
continue
deduped_rows.append(row)
compiler = self._build_insert_compiler(deduped_rows, using=using)
with compiler.connection.cursor() as cursor:
for sql, params in compiler.as_sql(return_id=not return_model):
cursor.execute(sql, params)
if return_model:
return list(models_from_cursor(self.model, cursor))
return self._consume_cursor_as_dicts(
cursor, original_rows=deduped_rows
)
def insert(self, using: Optional[str] = None, **fields):
"""Creates a new record in the database.
This allows specifying custom conflict behavior using .on_conflict().
If no special behavior was specified, this uses the normal Django create(..)
Arguments:
fields:
The fields of the row to create.
using:
The name of the database connection
to use for this query.
Returns:
The primary key of the record that was created.
"""
if self.conflict_target or self.conflict_action:
if not self.model or not self.model.pk:
return None
compiler = self._build_insert_compiler([fields], using=using)
with compiler.connection.cursor() as cursor:
for sql, params in compiler.as_sql(return_id=True):
cursor.execute(sql, params)
row = cursor.fetchone()
if not row:
return None
return row[0]
# no special action required, use the standard Django create(..)
return super().create(**fields).pk
def insert_and_get(self, using: Optional[str] = None, **fields):
"""Creates a new record in the database and then gets the entire row.
This allows specifying custom conflict behavior using .on_conflict().
If no special behavior was specified, this uses the normal Django create(..)
Arguments:
fields:
The fields of the row to create.
using:
The name of the database connection
to use for this query.
Returns:
The model instance representing the row that was created.
"""
if not self.conflict_target and not self.conflict_action:
# no special action required, use the standard Django create(..)
return super().create(**fields)
compiler = self._build_insert_compiler([fields], using=using)
with compiler.connection.cursor() as cursor:
for sql, params in compiler.as_sql(return_id=False):
cursor.execute(sql, params)
return model_from_cursor(self.model, cursor)
def upsert(
self,
conflict_target: ConflictTarget,
fields: dict,
index_predicate: Optional[Union[Expression, Q, str]] = None,
using: Optional[str] = None,
update_condition: Optional[Union[Expression, Q, str]] = None,
update_values: Optional[Dict[str, Union[Any, Expression]]] = None,
) -> int:
"""Creates a new record or updates the existing one with the specified
data.
Arguments:
conflict_target:
Fields to pass into the ON CONFLICT clause.
fields:
Fields to insert/update.
index_predicate:
The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking
conflicts)
using:
The name of the database connection to
use for this query.
update_condition:
Only update if this SQL expression evaluates to true.
update_values:
Optionally, values/expressions to use when rows
conflict. If not specified, all columns specified
in the rows are updated with the values you specified.
Returns:
The primary key of the row that was created/updated.
"""
self.on_conflict(
conflict_target,
# update on conflict, unless the caller explicitly passed a
# falsy update_condition, in which case conflicting rows are
# left untouched (ON CONFLICT DO NOTHING)
ConflictAction.UPDATE
if (update_condition or update_condition is None)
else ConflictAction.NOTHING,
index_predicate=index_predicate,
update_condition=update_condition,
update_values=update_values,
)
kwargs = {**fields, "using": using}
return self.insert(**kwargs)
def upsert_and_get(
self,
conflict_target: ConflictTarget,
fields: dict,
index_predicate: Optional[Union[Expression, Q, str]] = None,
using: Optional[str] = None,
update_condition: Optional[Union[Expression, Q, str]] = None,
update_values: Optional[Dict[str, Union[Any, Expression]]] = None,
):
"""Creates a new record or updates the existing one with the specified
data and then gets the row.
Arguments:
conflict_target:
Fields to pass into the ON CONFLICT clause.
fields:
Fields to insert/update.
index_predicate:
The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking
conflicts)
using:
The name of the database connection to
use for this query.
update_condition:
Only update if this SQL expression evaluates to true.
update_values:
Optionally, values/expressions to use when rows
conflict. If not specified, all columns specified
in the rows are updated with the values you specified.
Returns:
The model instance representing the row
that was created/updated.
"""
self.on_conflict(
conflict_target,
ConflictAction.UPDATE,
index_predicate=index_predicate,
update_condition=update_condition,
update_values=update_values,
)
kwargs = {**fields, "using": using}
return self.insert_and_get(**kwargs)
def bulk_upsert(
self,
conflict_target: ConflictTarget,
rows: Iterable[Dict],
index_predicate: Optional[Union[Expression, Q, str]] = None,
return_model: bool = False,
using: Optional[str] = None,
update_condition: Optional[Union[Expression, Q, str]] = None,
update_values: Optional[Dict[str, Union[Any, Expression]]] = None,
):
"""Creates a set of new records or updates the existing ones with the
specified data.
Arguments:
conflict_target:
Fields to pass into the ON CONFLICT clause.
rows:
Rows to upsert.
index_predicate:
The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking
conflicts)
return_model (default: False):
If model instances should be returned rather than
just dicts.
using:
The name of the database connection to use
for this query.
update_condition:
Only update if this SQL expression evaluates to true.
update_values:
Optionally, values/expressions to use when rows
conflict. If not specified, all columns specified
in the rows are updated with the values you specified.
Returns:
A list of either dicts of the rows upserted (including the pk), or
model instances of the rows upserted.
"""
self.on_conflict(
conflict_target,
ConflictAction.UPDATE,
index_predicate=index_predicate,
update_condition=update_condition,
update_values=update_values,
)
return self.bulk_insert(rows, return_model, using=using)
@staticmethod
def _consume_cursor_as_dicts(
cursor: CursorWrapper, *, original_rows: Iterable[Dict[str, Any]]
) -> List[dict]:
cursor_description = cursor.description
return [
{
**original_row,
**{
column.name: row[column_index]
for column_index, column in enumerate(cursor_description)
if row
},
}
for original_row, row in zip(original_rows, cursor)
]
def _build_insert_compiler(
self, rows: Iterable[Dict], using: Optional[str] = None
):
"""Builds the SQL compiler for a insert query.
Arguments:
rows:
An iterable of dictionaries, where each entry
describes a record to insert.
using:
The name of the database connection to use
for this query.
Returns:
The SQL compiler for the insert.
"""
# ask the db router which connection to use
using = (
using or self._db or router.db_for_write(self.model, **self._hints) # type: ignore[attr-defined]
)
# create model objects, we also have to detect cases
# such as:
# [dict(first_name='swen'), dict(first_name='swen', last_name='kooij')]
# we need to be certain that each row specifies the exact same
# amount of fields/columns
objs = []
rows_iter = iter(rows)
first_row = next(rows_iter)
field_count = len(first_row)
for index, row in enumerate(chain([first_row], rows_iter)):
if field_count != len(row):
raise SuspiciousOperation(
(
"In bulk upserts, you cannot have rows with different field "
"configurations. Row {0} has a different field config than "
"the first row."
).format(index)
)
obj = self.model(**row.copy())
obj._state.db = using
obj._state.adding = False
objs.append(obj)
# get the fields to be used during update/insert
insert_fields, update_values = self._get_upsert_fields(first_row)
# allow the user to override what should happen on update
if self.update_values is not None:
update_values = self.update_values
# build a normal insert query
query = PostgresInsertQuery(self.model)
query.conflict_action = self.conflict_action
query.conflict_target = self.conflict_target
query.conflict_update_condition = self.conflict_update_condition
query.index_predicate = self.index_predicate
query.insert_on_conflict_values(objs, insert_fields, update_values)
compiler = query.get_compiler(using)
return compiler
def _is_magical_field(self, model_instance, field, is_insert: bool):
"""Verifies whether this field is gonna modify something on its own.
"Magical" means that a field modifies the field value
during the pre_save.
Arguments:
model_instance:
The model instance the field is defined on.
field:
The field to check for whether it
is magical.
is_insert:
Whether to pretend this is an insert.
Returns:
True when this field modifies something.
"""
# does this field modify something upon insert?
old_value = getattr(model_instance, field.name, None)
field.pre_save(model_instance, is_insert)
new_value = getattr(model_instance, field.name, None)
return old_value != new_value
def _get_upsert_fields(self, kwargs):
"""Gets the fields to use in an upsert.
This does some nice magic. We'll split the fields into
a group of "insert fields" and "update fields":
INSERT INTO bla ("val1", "val2") ON CONFLICT DO UPDATE SET val1 = EXCLUDED.val1
^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^
insert_fields update_fields
Often, fields appear in both lists. But, for example,
a :see:DateTime field with `auto_now_add=True` set, will
only appear in "insert_fields", since it won't be set
on existing rows.
Other than that, the user specifies a list of fields
in the upsert() call. That might not be all fields. The
user could decide to leave out optional fields. If we
end up doing an update, we don't want to overwrite
those non-specified fields.
We cannot just take the list of fields the user
specifies, because as mentioned, some fields
make modifications to the model on their own.
We'll have to detect which fields make modifications
and include them in the list of insert/update fields.
"""
model_instance = self.model(**kwargs)
insert_fields = []
update_values = {}
for field in model_instance._meta.local_concrete_fields:
has_default = field.default != NOT_PROVIDED
if field.name in kwargs or field.column in kwargs:
insert_fields.append(field)
update_values[field.name] = ExcludedCol(field)
continue
elif has_default:
insert_fields.append(field)
continue
# special handling for 'pk' which always refers to
# the primary key, so if the user specifies `pk`
# instead of a concrete field, we have to handle that
if field.primary_key is True and "pk" in kwargs:
insert_fields.append(field)
update_values[field.name] = ExcludedCol(field)
continue
if self._is_magical_field(model_instance, field, is_insert=True):
insert_fields.append(field)
if self._is_magical_field(model_instance, field, is_insert=False):
update_values[field.name] = ExcludedCol(field)
return insert_fields, update_values
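# Usage sketch (assumption: "MyModel" is a hypothetical model whose
# default manager is psqlextra.manager.PostgresManager):
#
#     from psqlextra.types import ConflictAction
#
#     pk = (
#         MyModel.objects
#         .on_conflict(["name"], ConflictAction.UPDATE)
#         .insert(name="beer", price=10)
#     )
#
#     obj = MyModel.objects.upsert_and_get(
#         conflict_target=["name"],
#         fields=dict(name="beer", price=11),
#     )
#
#     MyModel.objects.bulk_upsert(
#         conflict_target=["name"],
#         rows=[dict(name="beer", price=12), dict(name="wine", price=9)],
#     )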
django-postgres-extra-2.0.9/psqlextra/schema.py 0000664 0000000 0000000 00000015663 14634267343 0021635 0 ustar 00root root 0000000 0000000 import os
from contextlib import contextmanager
from typing import TYPE_CHECKING, Generator, cast
from django.core.exceptions import SuspiciousOperation, ValidationError
from django.db import DEFAULT_DB_ALIAS, connections, transaction
from django.utils import timezone
if TYPE_CHECKING:
from psqlextra.backend.introspection import PostgresIntrospection
from psqlextra.backend.schema import PostgresSchemaEditor
class PostgresSchema:
"""Represents a Postgres schema.
See: https://www.postgresql.org/docs/current/ddl-schemas.html
"""
NAME_MAX_LENGTH = 63
name: str
default: "PostgresSchema"
def __init__(self, name: str) -> None:
self.name = name
@classmethod
def create(
cls, name: str, *, using: str = DEFAULT_DB_ALIAS
) -> "PostgresSchema":
"""Creates a new schema with the specified name.
This throws if the schema already exists as that is most likely
a problem that requires careful handling. Pretending everything
is ok might cause the caller to overwrite data, thinking it got
an empty schema.
Arguments:
name:
The name to give to the new schema (max 63 characters).
using:
Optional name of the database connection to use.
"""
if len(name) > cls.NAME_MAX_LENGTH:
raise ValidationError(
f"Schema name '{name}' is longer than Postgres's limit of {cls.NAME_MAX_LENGTH} characters"
)
with connections[using].schema_editor() as schema_editor:
cast("PostgresSchemaEditor", schema_editor).create_schema(name)
return cls(name)
@classmethod
def create_time_based(
cls, prefix: str, *, using: str = DEFAULT_DB_ALIAS
) -> "PostgresSchema":
"""Creates a new schema with a time-based suffix.
The time is precise up to the second. Creating
multiple time based schema in the same second
WILL lead to conflicts.
Arguments:
prefix:
Name to prefix the final name with. The name plus
prefix cannot be longer than 63 characters.
using:
Name of the database connection to use.
"""
suffix = timezone.now().strftime("%Y%m%d%H%M%S")
name = cls._create_generated_name(prefix, suffix)
return cls.create(name, using=using)
@classmethod
def create_random(
cls, prefix: str, *, using: str = DEFAULT_DB_ALIAS
) -> "PostgresSchema":
"""Creates a new schema with a random suffix.
Arguments:
prefix:
Name to prefix the final name with. The name plus
prefix cannot be longer than 63 characters.
using:
Name of the database connection to use.
"""
suffix = os.urandom(4).hex()
name = cls._create_generated_name(prefix, suffix)
return cls.create(name, using=using)
@classmethod
def delete_and_create(
cls, name: str, *, cascade: bool = False, using: str = DEFAULT_DB_ALIAS
) -> "PostgresSchema":
"""Deletes the schema if it exists before re-creating it.
Arguments:
name:
Name of the schema to delete+create (max 63 characters).
cascade:
Whether to delete the contents of the schema
and anything that references it if it exists.
using:
Optional name of the database connection to use.
"""
with transaction.atomic(using=using):
cls(name).delete(cascade=cascade, using=using)
return cls.create(name, using=using)
@classmethod
def exists(cls, name: str, *, using: str = DEFAULT_DB_ALIAS) -> bool:
"""Gets whether a schema with the specified name exists.
Arguments:
name:
Name of the schema to check of whether it
exists.
using:
Optional name of the database connection to use.
"""
connection = connections[using]
with connection.cursor() as cursor:
return name in cast(
"PostgresIntrospection", connection.introspection
).get_schema_list(cursor)
def delete(
self, *, cascade: bool = False, using: str = DEFAULT_DB_ALIAS
) -> None:
"""Deletes the schema and optionally deletes the contents of the schema
and anything that references it.
Arguments:
cascade:
Cascade the delete to the contents of the schema
and anything that references it.
If not set, the schema will refuse to be deleted
unless it is empty and there are no remaining
references.
"""
if self.name == "public":
raise SuspiciousOperation(
"Pretty sure you are about to make a mistake by trying to drop the 'public' schema. I have stopped you. Thank me later."
)
with connections[using].schema_editor() as schema_editor:
cast("PostgresSchemaEditor", schema_editor).delete_schema(
self.name, cascade=cascade
)
@classmethod
def _create_generated_name(cls, prefix: str, suffix: str) -> str:
separator = "_"
generated_name = f"{prefix}{separator}{suffix}"
max_prefix_length = cls.NAME_MAX_LENGTH - len(suffix) - len(separator)
if len(generated_name) > cls.NAME_MAX_LENGTH:
raise ValidationError(
f"Schema prefix '{prefix}' is longer than {max_prefix_length} characters. Together with the separator and generated suffix of {len(suffix)} characters, the name would exceed Postgres's limit of {cls.NAME_MAX_LENGTH} characters."
)
return generated_name
PostgresSchema.default = PostgresSchema("public")
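# Usage sketch:
#
#     schema = PostgresSchema.create("myschema")
#     PostgresSchema.exists("myschema")  # True
#     schema.delete(cascade=True)
#
#     PostgresSchema.create_random("staging")  # e.g. named "staging_a1b2c3d4"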
@contextmanager
def postgres_temporary_schema(
prefix: str,
*,
cascade: bool = False,
delete_on_throw: bool = False,
using: str = DEFAULT_DB_ALIAS,
) -> Generator[PostgresSchema, None, None]:
"""Creates a temporary schema that only lives in the context of this
context manager.
Arguments:
prefix:
Name to prefix the final name with.
cascade:
Whether to cascade the delete when dropping the
schema. If enabled, the contents of the schema
are deleted as well as anything that references
the schema.
delete_on_throw:
Whether to automatically drop the schema if
any error occurs within the context manager.
using:
Optional name of the database connection to use.
"""
schema = PostgresSchema.create_random(prefix, using=using)
try:
yield schema
except Exception as e:
if delete_on_throw:
schema.delete(cascade=cascade, using=using)
raise e
schema.delete(cascade=cascade, using=using)
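# Usage sketch:
#
#     with postgres_temporary_schema("scratch", delete_on_throw=True) as schema:
#         ...  # use schema.name, e.g. "scratch_1a2b3c4d"
#     # the schema has been dropped at this point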
django-postgres-extra-2.0.9/psqlextra/settings.py 0000664 0000000 0000000 00000007252 14634267343 0022230 0 ustar 00root root 0000000 0000000 from contextlib import contextmanager
from typing import Generator, List, Optional, Union
from django.core.exceptions import SuspiciousOperation
from django.db import DEFAULT_DB_ALIAS, connections
@contextmanager
def postgres_set_local(
*,
using: str = DEFAULT_DB_ALIAS,
**options: Optional[Union[str, int, float, List[str]]],
) -> Generator[None, None, None]:
"""Sets the specified PostgreSQL options using SET LOCAL so that they apply
to the current transaction only.
The effect is undone when the context manager exits.
See https://www.postgresql.org/docs/current/runtime-config-client.html
for an overview of all available options.
"""
connection = connections[using]
qn = connection.ops.quote_name
if not connection.in_atomic_block:
raise SuspiciousOperation(
"SET LOCAL makes no sense outside a transaction. Start a transaction first."
)
sql = []
params: List[Union[str, int, float, List[str]]] = []
for name, value in options.items():
if value is None:
sql.append(f"SET LOCAL {qn(name)} TO DEFAULT")
continue
# Settings that accept a list of values are actually
# stored as string lists. We cannot just pass a list
# of values. We have to create the comma separated
# string ourselves.
if isinstance(value, list) or isinstance(value, tuple):
placeholder = ", ".join(["%s" for _ in value])
params.extend(value)
else:
placeholder = "%s"
params.append(value)
sql.append(f"SET LOCAL {qn(name)} = {placeholder}")
with connection.cursor() as cursor:
cursor.execute(
"SELECT name, setting FROM pg_settings WHERE name = ANY(%s)",
(list(options.keys()),),
)
original_values = dict(cursor.fetchall())
cursor.execute("; ".join(sql), params)
yield
# Put everything back to how it was. DEFAULT is
# not good enough as an outer SET LOCAL might
# have set a different value.
with connection.cursor() as cursor:
sql = []
params = []
for name, value in options.items():
original_value = original_values.get(name)
if original_value:
sql.append(f"SET LOCAL {qn(name)} = {original_value}")
else:
sql.append(f"SET LOCAL {qn(name)} TO DEFAULT")
cursor.execute("; ".join(sql), params)
@contextmanager
def postgres_set_local_search_path(
search_path: List[str], *, using: str = DEFAULT_DB_ALIAS
) -> Generator[None, None, None]:
"""Sets the search path to the specified schemas."""
with postgres_set_local(search_path=search_path, using=using):
yield
@contextmanager
def postgres_prepend_local_search_path(
search_path: List[str], *, using: str = DEFAULT_DB_ALIAS
) -> Generator[None, None, None]:
"""Prepends the current local search path with the specified schemas."""
connection = connections[using]
with connection.cursor() as cursor:
cursor.execute("SHOW search_path")
[
original_search_path,
] = cursor.fetchone()
placeholders = ", ".join(["%s" for _ in search_path])
cursor.execute(
f"SET LOCAL search_path = {placeholders}, {original_search_path}",
tuple(search_path),
)
yield
cursor.execute(f"SET LOCAL search_path = {original_search_path}")
@contextmanager
def postgres_reset_local_search_path(
*, using: str = DEFAULT_DB_ALIAS
) -> Generator[None, None, None]:
"""Resets the local search path to the default."""
with postgres_set_local(search_path=None, using=using):
yield
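# Usage sketch (assumption: schemas "tenant_a" and "public" exist):
#
#     with transaction.atomic():
#         with postgres_set_local_search_path(["tenant_a", "public"]):
#             ...  # unqualified table names resolve in tenant_a first
#         with postgres_prepend_local_search_path(["tenant_a"]):
#             ...  # tenant_a is prepended to the existing search path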
django-postgres-extra-2.0.9/psqlextra/sql.py 0000664 0000000 0000000 00000017265 14634267343 0021174 0 ustar 00root root 0000000 0000000 from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union
import django
from django.core.exceptions import SuspiciousOperation
from django.db import connections, models
from django.db.models import Expression, sql
from django.db.models.constants import LOOKUP_SEP
from .compiler import PostgresInsertOnConflictCompiler
from .compiler import SQLUpdateCompiler as PostgresUpdateCompiler
from .expressions import HStoreColumn
from .fields import HStoreField
from .types import ConflictAction
class PostgresQuery(sql.Query):
select: Tuple[Expression, ...]
def chain(self, klass=None):
"""Chains this query to another.
We override this so that we can make sure our subclassed query
classes are used.
"""
if klass == sql.UpdateQuery:
return super().chain(PostgresUpdateQuery)
if klass == sql.InsertQuery:
return super().chain(PostgresInsertQuery)
return super().chain(klass)
def rename_annotations(self, annotations) -> None:
"""Renames the aliases for the specified annotations:
.annotate(myfield=F('somestuf__myfield'))
.rename_annotations(myfield='field')
Arguments:
annotations:
The annotations to rename. Mapping the
old name to the new name.
"""
# safety check only, make sure there are no renames
# left that cannot be mapped back to the original name
for old_name, new_name in annotations.items():
annotation = self.annotations.get(old_name)
if not annotation:
raise SuspiciousOperation(
(
'Cannot rename annotation "{old_name}" to "{new_name}", because there'
' is no annotation named "{old_name}".'
).format(old_name=old_name, new_name=new_name)
)
# rebuild the annotations according to the original order
new_annotations = OrderedDict()
for old_name, annotation in self.annotations.items():
new_name = annotations.get(old_name)
new_annotations[new_name or old_name] = annotation
if new_name and self.annotation_select_mask:
# It's a set in all versions prior to Django 5.x
# and a list in Django 5.x and newer.
# https://github.com/django/django/commit/d6b6e5d0fd4e6b6d0183b4cf6e4bd4f9afc7bf67
if isinstance(self.annotation_select_mask, set):
self.annotation_select_mask.discard(old_name)
self.annotation_select_mask.add(new_name)
elif isinstance(self.annotation_select_mask, list):
self.annotation_select_mask.remove(old_name)
self.annotation_select_mask.append(new_name)
self.annotations.clear()
self.annotations.update(new_annotations)
def add_fields(self, field_names, *args, **kwargs) -> None:
"""Adds the given (model) fields to the select set.
The field names are added in the order specified. This overrides
the base class's add_fields method. This is called by the
.values() or .values_list() method of the query set. It
instructs the ORM to only select certain values. A lot of
processing is necessary because it can be used to easily do
joins. For example, `my_fk__name` pulls in the `name` field in
foreign key `my_fk`. In our case, we want to be able to do
`title__en`, where `title` is an HStoreField and `en` a key. This
doesn't really involve a join. We iterate over the specified
field names and filter out the ones that refer to HStoreField
and compile it into an expression which is added to the list of
to be selected fields using `self.add_select`.
"""
# django knows how to do all of this natively from v2.1
# see: https://github.com/django/django/commit/20bab2cf9d02a5c6477d8aac066a635986e0d3f3
if django.VERSION >= (2, 1):
return super().add_fields(field_names, *args, **kwargs)
select = []
field_names_without_hstore = []
for name in field_names:
parts = name.split(LOOKUP_SEP)
# it cannot be a special hstore thing if there's no __ in it
if len(parts) > 1:
column_name, hstore_key = parts[:2]
is_hstore, field = self._is_hstore_field(column_name)
if self.model and is_hstore:
select.append(
HStoreColumn(
self.model._meta.db_table
or self.model.__class__.__name__,
field,
hstore_key,
)
)
continue
field_names_without_hstore.append(name)
super().add_fields(field_names_without_hstore, *args, **kwargs)
if len(select) > 0:
self.set_select(list(self.select + tuple(select)))
def _is_hstore_field(
self, field_name: str
) -> Tuple[bool, Optional[models.Field]]:
"""Gets whether the field with the specified name is a HStoreField.
Returns a tuple of a boolean indicating whether the field
with the specified name is an HStoreField, and the field
instance.
"""
if not self.model:
return (False, None)
field_instance = None
for field in self.model._meta.local_concrete_fields: # type: ignore[attr-defined]
if field.name == field_name or field.column == field_name:
field_instance = field
break
return isinstance(field_instance, HStoreField), field_instance
class PostgresInsertQuery(sql.InsertQuery):
"""Insert query using PostgreSQL."""
def __init__(self, *args, **kwargs):
"""Initializes a new instance :see:PostgresInsertQuery."""
super(PostgresInsertQuery, self).__init__(*args, **kwargs)
self.conflict_target = []
self.conflict_action = ConflictAction.UPDATE
self.conflict_update_condition = None
self.index_predicate = None
self.update_values = {}
def insert_on_conflict_values(
self,
objs: List,
insert_fields: List,
update_values: Dict[str, Union[Any, Expression]] = {},
):
"""Sets the values to be used in this query.
Insert fields are fields that are definitely
going to be inserted, and if an existing row
is found, are going to be overwritten with the
specified value.
Update fields are fields that should be overwritten
in case an update takes place rather than an insert.
If we're dealing with an INSERT, these will not be used.
Arguments:
objs:
The objects to apply this query to.
insert_fields:
The fields to use in the INSERT statement
update_values:
Expressions/values to use when a conflict
occurs and an UPDATE is performed.
"""
self.insert_values(insert_fields, objs, raw=False)
self.update_values = update_values
def get_compiler(self, using=None, connection=None):
if using:
connection = connections[using]
return PostgresInsertOnConflictCompiler(self, connection, using)
class PostgresUpdateQuery(sql.UpdateQuery):
"""Update query using PostgreSQL."""
def get_compiler(self, using=None, connection=None):
if using:
connection = connections[using]
return PostgresUpdateCompiler(self, connection, using)
django-postgres-extra-2.0.9/psqlextra/type_assertions.py 0000664 0000000 0000000 00000001424 14634267343 0023616 0 ustar 00root root 0000000 0000000 from collections.abc import Iterable
from typing import Any
from django.db.models.query import QuerySet
def is_query_set(value: Any) -> bool:
"""Gets whether the specified value is a :see:QuerySet."""
return isinstance(value, QuerySet) # type: ignore[misc]
def is_sql(value: Any) -> bool:
"""Gets whether the specified value could be a raw SQL query."""
return isinstance(value, str)
def is_sql_with_params(value: Any) -> bool:
"""Gets whether the specified value is a tuple of a SQL query (as a string)
and a tuple of bind parameters."""
return (
isinstance(value, tuple)
and len(value) == 2
and is_sql(value[0])
and isinstance(value[1], Iterable)
and not isinstance(value[1], (str, bytes, bytearray))
)
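# Sketch:
#
#     is_sql("SELECT 1")                         # True
#     is_sql_with_params(("SELECT %s", (1,)))    # True
#     is_sql_with_params(("SELECT %s", "oops"))  # False: params must be a
#                                                # non-string iterable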
django-postgres-extra-2.0.9/psqlextra/types.py 0000664 0000000 0000000 00000001643 14634267343 0021532 0 ustar 00root root 0000000 0000000 from enum import Enum
from typing import Any, Dict, List, Tuple, Union
SQL = str
SQLWithParams = Tuple[str, Union[Tuple[Any, ...], Dict[str, Any]]]
class StrEnum(str, Enum):
@classmethod
def all(cls) -> List["StrEnum"]:
return [choice for choice in cls]
@classmethod
def values(cls) -> List[str]:
return [choice.value for choice in cls]
def __str__(self) -> str:
return str(self.value)
class ConflictAction(Enum):
"""Possible actions to take on a conflict."""
NOTHING = "NOTHING"
UPDATE = "UPDATE"
@classmethod
def all(cls) -> List["ConflictAction"]:
return [choice for choice in cls]
def __str__(self) -> str:
return self.value
class PostgresPartitioningMethod(StrEnum):
"""Methods of partitioning supported by PostgreSQL 11.x native support for
table partitioning."""
RANGE = "range"
LIST = "list"
HASH = "hash"
django-postgres-extra-2.0.9/psqlextra/util.py 0000664 0000000 0000000 00000001022 14634267343 0021332 0 ustar 00root root 0000000 0000000 from contextlib import contextmanager
from typing import Generator, Type
from django.db import models
from .manager import PostgresManager
@contextmanager
def postgres_manager(
model: Type[models.Model],
) -> Generator[PostgresManager, None, None]:
"""Allows you to use the :see:PostgresManager with the specified model
instance on the fly.
Arguments:
model:
The model or model instance to use this on.
"""
manager = PostgresManager()
manager.model = model
yield manager
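# Usage sketch (assumption: "MyModel" is a hypothetical model without
# a PostgresManager of its own):
#
#     with postgres_manager(MyModel) as manager:
#         manager.upsert(
#             conflict_target=["name"],
#             fields=dict(name="beer", price=10),
#         )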
django-postgres-extra-2.0.9/pyproject.toml 0000664 0000000 0000000 00000000616 14634267343 0020704 0 ustar 00root root 0000000 0000000 [tool.black]
line-length = 80
exclude = '''
(
/(
| .env
| env
| venv
| tests/snapshots
)/
)
'''
[tool.mypy]
python_version = "3.8"
plugins = ["mypy_django_plugin.main"]
mypy_path = ["stubs", "."]
exclude = "(env|build|dist|migrations)"
[[tool.mypy.overrides]]
module = [
"psycopg.*"
]
ignore_missing_imports = true
[tool.django-stubs]
django_settings_module = "settings"
django-postgres-extra-2.0.9/pytest-benchmark.ini 0000664 0000000 0000000 00000000120 14634267343 0021737 0 ustar 00root root 0000000 0000000 [pytest]
DJANGO_SETTINGS_MODULE=settings
testpaths=tests
addopts=-m "benchmark"
django-postgres-extra-2.0.9/pytest.ini 0000664 0000000 0000000 00000000221 14634267343 0020011 0 ustar 00root root 0000000 0000000 [pytest]
DJANGO_SETTINGS_MODULE=settings
testpaths=tests
addopts=-m "not benchmark"
junit_family=legacy
filterwarnings =
ignore::UserWarning
django-postgres-extra-2.0.9/requirements-all.txt 0000664 0000000 0000000 00000000052 14634267343 0022014 0 ustar 00root root 0000000 0000000 -e .
-e .[test]
-e .[analysis]
-e .[docs]
django-postgres-extra-2.0.9/settings.py 0000664 0000000 0000000 00000000710 14634267343 0020175 0 ustar 00root root 0000000 0000000 import dj_database_url
DEBUG = True
TEMPLATE_DEBUG = True
SECRET_KEY = 'this is my secret key' # NOQA
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
DATABASES = {
'default': dj_database_url.config(default='postgres:///psqlextra'),
}
DATABASES['default']['ENGINE'] = 'psqlextra.backend'
LANGUAGE_CODE = 'en'
LANGUAGES = (
('en', 'English'),
('ro', 'Romanian'),
('nl', 'Dutch')
)
INSTALLED_APPS = (
'psqlextra',
'tests',
)
django-postgres-extra-2.0.9/setup.cfg 0000664 0000000 0000000 00000000455 14634267343 0017612 0 ustar 00root root 0000000 0000000 [flake8]
ignore = E252,E501,W503
exclude = env,.tox,.git,config/settings,*/migrations/*,*/static/CACHE/*,docs,node_modules
[isort]
line_length=80
multi_line_output=3
lines_between_types=1
include_trailing_comma=True
known_third_party=pytest,freezegun
float_to_top=true
skip_glob=tests/snapshots/*.py
django-postgres-extra-2.0.9/setup.py 0000664 0000000 0000000 00000015615 14634267343 0017507 0 ustar 00root root 0000000 0000000 import distutils.cmd
import os
import subprocess
from setuptools import find_packages, setup
exec(open("psqlextra/_version.py").read())
class BaseCommand(distutils.cmd.Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def create_command(text, commands):
"""Creates a custom setup.py command."""
class CustomCommand(BaseCommand):
description = text
def run(self):
for cmd in commands:
subprocess.check_call(cmd)
return CustomCommand
with open(
os.path.join(os.path.dirname(__file__), "README.md"), encoding="utf-8"
) as readme:
README = readme.read().split("h1>\n", 2)[1]
setup(
name="django-postgres-extra",
version=__version__,
packages=find_packages(exclude=["tests"]),
package_data={"psqlextra": ["py.typed"]},
include_package_data=True,
license="MIT License",
description="Bringing all of PostgreSQL's awesomeness to Django.",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/SectorLabs/django-postgres-extra",
author="Sector Labs",
author_email="open-source@sectorlabs.ro",
keywords=["django", "postgres", "extra", "hstore", "ltree"],
classifiers=[
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
python_requires=">=3.6",
install_requires=[
"Django>=2.0,<6.0",
"python-dateutil>=2.8.0,<=3.0.0",
],
extras_require={
':python_version <= "3.6"': ["dataclasses"],
"docs": ["Sphinx==2.2.0", "sphinx-rtd-theme==0.4.3", "docutils<0.18"],
"test": [
"psycopg2>=2.8.4,<3.0.0",
"dj-database-url==0.5.0",
"pytest==6.2.5",
"pytest-benchmark==3.4.1",
"pytest-django==4.4.0",
"pytest-cov==3.0.0",
"pytest-lazy-fixture==0.6.3",
"pytest-freezegun==0.4.2",
"tox==3.24.4",
"freezegun==1.1.0",
"coveralls==3.3.0",
"snapshottest==0.6.0",
],
"analysis": [
"black==22.3.0",
"flake8==4.0.1",
"autoflake==1.4",
"autopep8==1.6.0",
"isort==5.10.0",
"docformatter==1.4",
"mypy==1.2.0; python_version > '3.6'",
"mypy==0.971; python_version <= '3.6'",
"django-stubs==1.16.0; python_version > '3.6'",
"django-stubs==1.9.0; python_version <= '3.6'",
"typing-extensions==4.5.0; python_version > '3.6'",
"typing-extensions==4.1.0; python_version <= '3.6'",
"types-dj-database-url==1.3.0.0",
"types-psycopg2==2.9.21.9",
"types-python-dateutil==2.8.19.12",
],
"publish": [
"build==0.7.0",
"twine==3.7.1",
],
},
cmdclass={
"lint": create_command(
"Lints the code",
[
[
"flake8",
"--builtin=__version__",
"setup.py",
"psqlextra",
"tests",
]
],
),
"lint_fix": create_command(
"Lints the code",
[
[
"autoflake",
"--remove-all",
"-i",
"-r",
"setup.py",
"psqlextra",
"tests",
],
["autopep8", "-i", "-r", "setup.py", "psqlextra", "tests"],
],
),
"lint_types": create_command(
"Type-checks the code",
[
[
"mypy",
"--package",
"psqlextra",
"--pretty",
"--show-error-codes",
],
],
),
"format": create_command(
"Formats the code", [["black", "setup.py", "psqlextra", "tests"]]
),
"format_verify": create_command(
"Checks if the code is auto-formatted",
[["black", "--check", "setup.py", "psqlextra", "tests"]],
),
"format_docstrings": create_command(
"Auto-formats doc strings", [["docformatter", "-r", "-i", "."]]
),
"format_docstrings_verify": create_command(
"Verifies that doc strings are properly formatted",
[["docformatter", "-r", "-c", "."]],
),
"sort_imports": create_command(
"Automatically sorts imports",
[
["isort", "setup.py"],
["isort", "psqlextra"],
["isort", "tests"],
],
),
"sort_imports_verify": create_command(
"Verifies all imports are properly sorted.",
[
["isort", "-c", "setup.py"],
["isort", "-c", "psqlextra"],
["isort", "-c", "tests"],
],
),
"fix": create_command(
"Automatically format code and fix linting errors",
[
["python", "setup.py", "format"],
["python", "setup.py", "format_docstrings"],
["python", "setup.py", "sort_imports"],
["python", "setup.py", "lint_fix"],
["python", "setup.py", "lint"],
["python", "setup.py", "lint_types"],
],
),
"verify": create_command(
"Verifies whether the code is auto-formatted and has no linting errors",
[
["python", "setup.py", "format_verify"],
["python", "setup.py", "format_docstrings_verify"],
["python", "setup.py", "sort_imports_verify"],
["python", "setup.py", "lint"],
["python", "setup.py", "lint_types"],
],
),
"test": create_command(
"Runs all the tests",
[
[
"pytest",
"--cov=psqlextra",
"--cov-report=term",
"--cov-report=xml:reports/xml",
"--cov-report=html:reports/html",
"--junitxml=reports/junit/tests.xml",
"--reuse-db",
]
],
),
},
)
django-postgres-extra-2.0.9/tests/ 0000775 0000000 0000000 00000000000 14634267343 0017127 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/tests/__init__.py 0000664 0000000 0000000 00000000000 14634267343 0021226 0 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/tests/benchmarks/ 0000775 0000000 0000000 00000000000 14634267343 0021244 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/tests/benchmarks/__init__.py 0000664 0000000 0000000 00000000000 14634267343 0023343 0 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/tests/benchmarks/test_insert_nothing.py 0000664 0000000 0000000 00000002646 14634267343 0025717 0 ustar 00root root 0000000 0000000 import uuid
import pytest
from django.db import models, transaction
from django.db.utils import IntegrityError
from psqlextra.query import ConflictAction
from ..fake_model import get_fake_model
@pytest.mark.benchmark()
def test_insert_nothing_traditional(benchmark):
model = get_fake_model(
{"field": models.CharField(max_length=255, unique=True)}
)
random_value = str(uuid.uuid4())[:8]
model.objects.create(field=random_value)
def _traditional_insert(model, random_value):
"""Performs a concurrency safe insert the traditional way."""
try:
with transaction.atomic():
return model.objects.create(field=random_value)
except IntegrityError:
return model.objects.filter(field=random_value).first()
benchmark(_traditional_insert, model, random_value)
@pytest.mark.benchmark()
def test_insert_nothing_native(benchmark):
model = get_fake_model(
{"field": models.CharField(max_length=255, unique=True)}
)
random_value = str(uuid.uuid4())[:8]
model.objects.create(field=random_value)
def _native_insert(model, random_value):
"""Performs a concurrency safeinsert using the native PostgreSQL
conflict resolution."""
return model.objects.on_conflict(
["field"], ConflictAction.NOTHING
).insert_and_get(field=random_value)
benchmark(_native_insert, model, random_value)
django-postgres-extra-2.0.9/tests/benchmarks/test_upsert.py 0000664 0000000 0000000 00000002602 14634267343 0024177 0 ustar 00root root 0000000 0000000 import uuid
import pytest
from django.db import models, transaction
from django.db.utils import IntegrityError
from ..fake_model import get_fake_model
@pytest.mark.benchmark()
def test_upsert_traditional(benchmark):
model = get_fake_model(
{"field": models.CharField(max_length=255, unique=True)}
)
random_value = str(uuid.uuid4())[:8]
model.objects.create(field=random_value)
def _traditional_upsert(model, random_value):
"""Performs a concurrency safe upsert the traditional way."""
try:
with transaction.atomic():
return model.objects.create(field=random_value)
except IntegrityError:
model.objects.update(field=random_value)
return model.objects.get(field=random_value)
benchmark(_traditional_upsert, model, random_value)
@pytest.mark.benchmark()
def test_upsert_native(benchmark):
model = get_fake_model(
{"field": models.CharField(max_length=255, unique=True)}
)
random_value = str(uuid.uuid4())[:8]
model.objects.create(field=random_value)
def _native_upsert(model, random_value):
"""Performs a concurrency safe upsert using the native PostgreSQL
upsert."""
return model.objects.upsert_and_get(
conflict_target=["field"], fields=dict(field=random_value)
)
benchmark(_native_upsert, model, random_value)
django-postgres-extra-2.0.9/tests/benchmarks/test_upsert_bulk.py 0000664 0000000 0000000 00000003343 14634267343 0025217 0 ustar 00root root 0000000 0000000 import uuid
import pytest
from django.db import models
from psqlextra.query import ConflictAction
from ..fake_model import get_fake_model
ROW_COUNT = 10000
@pytest.mark.benchmark()
def test_upsert_bulk_naive(benchmark):
model = get_fake_model(
{"field": models.CharField(max_length=255, unique=True)}
)
rows = []
random_values = []
for i in range(0, ROW_COUNT):
random_value = str(uuid.uuid4())
random_values.append(random_value)
rows.append(model(field=random_value))
model.objects.bulk_create(rows)
def _naive_upsert(model, random_values):
"""Performs a concurrency safe upsert one row at a time, using the
native PostgreSQL upsert for each individual row."""
rows = [dict(field=random_value) for random_value in random_values]
for row in rows:
model.objects.on_conflict(["field"], ConflictAction.UPDATE).insert(
**row
)
benchmark(_naive_upsert, model, random_values)
@pytest.mark.benchmark()
def test_upsert_bulk_native(benchmark):
model = get_fake_model(
{"field": models.CharField(max_length=255, unique=True)}
)
rows = []
random_values = []
for i in range(0, ROW_COUNT):
random_value = str(uuid.uuid4())
random_values.append(random_value)
rows.append(model(field=random_value))
model.objects.bulk_create(rows)
def _native_upsert(model, random_values):
"""Performs a concurrency safe upsert using the native PostgreSQL
upsert."""
rows = [dict(field=random_value) for random_value in random_values]
model.objects.on_conflict(["field"], ConflictAction.UPDATE).bulk_insert(
rows
)
benchmark(_native_upsert, model, random_values)
django-postgres-extra-2.0.9/tests/conftest.py 0000664 0000000 0000000 00000002550 14634267343 0021330 0 ustar 00root root 0000000 0000000 import pytest
from django.contrib.postgres.signals import register_type_handlers
from django.db import connection
from .fake_model import define_fake_app
@pytest.fixture(scope="function", autouse=True)
def database_access(db):
"""Automatically enable database access for all tests."""
# enable the hstore extension on our database because
# our tests rely on it...
with connection.schema_editor() as schema_editor:
schema_editor.execute("CREATE EXTENSION IF NOT EXISTS hstore")
register_type_handlers(schema_editor.connection)
@pytest.fixture
def fake_app():
"""Creates a fake Django app and deletes it at the end of the test."""
with define_fake_app() as fake_app:
yield fake_app
@pytest.fixture
def postgres_server_version(db) -> int:
"""Gets the PostgreSQL server version."""
return connection.cursor().connection.info.server_version
@pytest.fixture(autouse=True)
def _apply_postgres_version_marker(request, postgres_server_version):
"""Skip tests based on Postgres server version number marker condition."""
marker = request.node.get_closest_marker("postgres_version")
if not marker:
return
lt = marker.kwargs.get("lt")
if lt and postgres_server_version < lt:
pytest.skip(
f"Server version is {postgres_server_version}, the test needs {lt} or newer."
)
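# A hypothetical test using the marker handled above; the test is skipped
# when the server reports a version number older than `lt`:
#
#   @pytest.mark.postgres_version(lt=130000)
#   def test_needs_postgres_13():
#       ...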
django-postgres-extra-2.0.9/tests/db_introspection.py 0000664 0000000 0000000 00000005474 14634267343 0023060 0 ustar 00root root 0000000 0000000 """Thin wrappers over functions in connection.introspection that don't require
creating a cursor.
This makes test code less verbose and easier to read/write.
"""
from contextlib import contextmanager
from typing import Optional
from django.db import connection
from psqlextra.settings import postgres_set_local
@contextmanager
def introspect(schema_name: Optional[str] = None):
with postgres_set_local(search_path=schema_name or None):
with connection.cursor() as cursor:
yield connection.introspection, cursor
def table_names(
include_views: bool = True, *, schema_name: Optional[str] = None
):
"""Gets a flat list of tables in the default database."""
with introspect(schema_name) as (introspection, cursor):
return introspection.table_names(cursor, include_views)
def get_partitioned_table(
table_name: str,
*,
schema_name: Optional[str] = None,
):
"""Gets the definition of a partitioned table in the default database."""
with introspect(schema_name) as (introspection, cursor):
return introspection.get_partitioned_table(cursor, table_name)
def get_partitions(
table_name: str,
*,
schema_name: Optional[str] = None,
):
"""Gets a list of partitions for the specified partitioned table in the
default database."""
with introspect(schema_name) as (introspection, cursor):
return introspection.get_partitions(cursor, table_name)
def get_columns(
table_name: str,
*,
schema_name: Optional[str] = None,
):
"""Gets a list of columns for the specified table."""
with introspect(schema_name) as (introspection, cursor):
return introspection.get_columns(cursor, table_name)
def get_relations(
table_name: str,
*,
schema_name: Optional[str] = None,
):
"""Gets a list of relations for the specified table."""
with introspect(schema_name) as (introspection, cursor):
return introspection.get_relations(cursor, table_name)
def get_constraints(
table_name: str,
*,
schema_name: Optional[str] = None,
):
"""Gets a list of constraints and indexes for the specified table."""
with introspect(schema_name) as (introspection, cursor):
return introspection.get_constraints(cursor, table_name)
def get_sequences(
table_name: str,
*,
schema_name: Optional[str] = None,
):
"""Gets a list of sequences own by the specified table."""
with introspect(schema_name) as (introspection, cursor):
return introspection.get_sequences(cursor, table_name)
def get_storage_settings(table_name: str, *, schema_name: Optional[str] = None):
"""Gets a list of all storage settings that have been set on the specified
table."""
with introspect(schema_name) as (introspection, cursor):
return introspection.get_storage_settings(cursor, table_name)
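# A minimal usage sketch of these wrappers (the table name is
# hypothetical); each call opens a fresh cursor through introspect():
#
#   assert "tests_mymodel" in table_names(include_views=False)
#   columns = get_columns("tests_mymodel")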
django-postgres-extra-2.0.9/tests/fake_model.py 0000664 0000000 0000000 00000007705 14634267343 0021600 0 ustar 00root root 0000000 0000000 import os
import sys
import uuid
from contextlib import contextmanager
from typing import Type
from django.apps import AppConfig, apps
from django.db import connection, models
from psqlextra.models import (
PostgresMaterializedViewModel,
PostgresModel,
PostgresPartitionedModel,
PostgresViewModel,
)
def define_fake_model(
fields=None, model_base=PostgresModel, meta_options={}, **attributes
):
"""Defines a fake model (but does not create it in the database)."""
name = str(uuid.uuid4()).replace("-", "")[:8].title()
attributes = {
"app_label": meta_options.get("app_label") or "tests",
"__module__": __name__,
"__name__": name,
"Meta": type("Meta", (object,), meta_options),
**attributes,
}
if fields:
attributes.update(fields)
model = type(name, (model_base,), attributes)
apps.app_configs[attributes["app_label"]].models[name] = model
return model
def undefine_fake_model(model: Type[models.Model]) -> None:
"""Removes the fake model from the app registry."""
app_label = model._meta.app_label or "tests"
app_models = apps.app_configs[app_label].models
for model_name in [model.__name__, model.__name__.lower()]:
if model_name in app_models:
del app_models[model_name]
def define_fake_view_model(
fields=None, view_options={}, meta_options={}, model_base=PostgresViewModel
):
"""Defines a fake view model."""
model = define_fake_model(
fields=fields,
model_base=model_base,
meta_options=meta_options,
ViewMeta=type("ViewMeta", (object,), view_options),
)
return model
def define_fake_materialized_view_model(
fields=None,
view_options={},
meta_options={},
model_base=PostgresMaterializedViewModel,
):
"""Defines a fake materialized view model."""
model = define_fake_model(
fields=fields,
model_base=model_base,
meta_options=meta_options,
ViewMeta=type("ViewMeta", (object,), view_options),
)
return model
def define_fake_partitioned_model(
fields=None, partitioning_options={}, meta_options={}
):
"""Defines a fake partitioned model."""
model = define_fake_model(
fields=fields,
model_base=PostgresPartitionedModel,
meta_options=meta_options,
PartitioningMeta=type(
"PartitioningMeta", (object,), partitioning_options
),
)
return model
def get_fake_partitioned_model(
fields=None, partitioning_options={}, meta_options={}
):
"""Defines a fake partitioned model and creates it in the database."""
model = define_fake_partitioned_model(
fields, partitioning_options, meta_options
)
with connection.schema_editor() as schema_editor:
schema_editor.create_model(model)
return model
def get_fake_model(fields=None, model_base=PostgresModel, meta_options={}):
"""Defines a fake model and creates it in the database."""
model = define_fake_model(fields, model_base, meta_options)
with connection.schema_editor() as schema_editor:
schema_editor.create_model(model)
return model
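# A minimal usage sketch, mirroring how the tests call this helper; the
# field dict below is an example, not a fixed schema:
#
#   model = get_fake_model({"name": models.CharField(max_length=255)})
#   model.objects.create(name="henk")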
def delete_fake_model(model: Type[models.Model]) -> None:
"""Deletes a fake model from the database and the internal app registry."""
undefine_fake_model(model)
with connection.schema_editor() as schema_editor:
schema_editor.delete_model(model)
@contextmanager
def define_fake_app():
"""Creates and registers a fake Django app."""
name = "app_" + str(uuid.uuid4()).replace("-", "")[:8]
app_config_cls = type(
name + "Config",
(AppConfig,),
{"name": name, "path": os.path.dirname(__file__)},
)
app_config = app_config_cls(name, "")
app_config.apps = apps
app_config.models = {}
apps.app_configs[name] = app_config
sys.modules[name] = {}
try:
yield app_config
finally:
del apps.app_configs[name]
del sys.modules[name]
django-postgres-extra-2.0.9/tests/migrations.py 0000664 0000000 0000000 00000016513 14634267343 0021663 0 ustar 00root root 0000000 0000000 from contextlib import contextmanager
from typing import List
from unittest import mock
from django.apps import apps
from django.db import connection, migrations
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import NonInteractiveMigrationQuestioner
from django.db.migrations.state import ProjectState
from psqlextra.backend.schema import PostgresSchemaEditor
from .fake_model import define_fake_model
@contextmanager
def filtered_schema_editor(*filters: List[str]):
"""Gets a schema editor, but filters executed SQL statements based on the
specified text filters.
Arguments:
filters:
List of strings to filter SQL
statements on.
"""
with connection.schema_editor() as schema_editor:
wrapper_for = schema_editor.execute
with mock.patch.object(
PostgresSchemaEditor, "execute", wraps=wrapper_for
) as execute:
filter_results = {}
yield filter_results
for filter_text in filters:
filter_results[filter_text] = [
call for call in execute.mock_calls if filter_text in str(call)
]
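# A minimal usage sketch; statements matching each filter string are
# collected once the block exits (the filter shown is one the tests use):
#
#   with filtered_schema_editor("CREATE UNIQUE INDEX") as calls:
#       ...  # apply migrations here
#   create_index_calls = calls["CREATE UNIQUE INDEX"]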
def apply_migration(operations, state=None, backwards: bool = False):
"""Executes the specified migration operations using the specified schema
editor.
Arguments:
operations:
The migration operations to execute.
state:
The project state to use during the
migrations.
backwards:
Whether to apply the operations
in reverse (backwards).
"""
state = state or migrations.state.ProjectState.from_apps(apps)
class Migration(migrations.Migration):
pass
Migration.operations = operations
migration = Migration("migration", "tests")
executor = MigrationExecutor(connection)
if not backwards:
executor.apply_migration(state, migration)
else:
executor.unapply_migration(state, migration)
return migration
def make_migration(app_label="tests", from_state=None, to_state=None):
"""Generates migrations based on the specified app's state."""
app_labels = [app_label]
loader = MigrationLoader(None, ignore_no_migrations=True)
loader.check_consistent_history(connection)
questioner = NonInteractiveMigrationQuestioner(
specified_apps=app_labels, dry_run=False
)
autodetector = MigrationAutodetector(
from_state or loader.project_state(),
to_state or ProjectState.from_apps(apps),
questioner,
)
changes = autodetector.changes(
graph=loader.graph,
trim_to_apps=app_labels or None,
convert_apps=app_labels or None,
migration_name="test",
)
changes_for_app = changes.get(app_label)
if not changes_for_app or len(changes_for_app) == 0:
return None
return changes_for_app[0]
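# A minimal usage sketch, assuming model state for the "tests" app changed
# between the two project states; None is returned when nothing changed:
#
#   migration = make_migration(app_label="tests")
#   if migration is not None:
#       apply_migration(migration.operations)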
@contextmanager
def create_drop_model(field, filters: List[str]):
"""Creates and drops a model with the specified field.
Arguments:
field:
The field to include on the
model to create and drop.
filters:
List of strings to filter
SQL statements on.
"""
model = define_fake_model({"title": field})
with filtered_schema_editor(*filters) as calls:
apply_migration(
[
migrations.CreateModel(
model.__name__, fields=[("title", field.clone())]
),
migrations.DeleteModel(model.__name__),
]
)
yield calls
@contextmanager
def alter_db_table(field, filters: List[str]):
"""Creates a model with the specified field and then renames the database
table.
Arguments:
field:
The field to include into the
model.
filters:
List of strings to filter
SQL statements on.
"""
model = define_fake_model()
state = migrations.state.ProjectState.from_apps(apps)
apply_migration(
[
migrations.CreateModel(
model.__name__, fields=[("title", field.clone())]
)
],
state,
)
with filtered_schema_editor(*filters) as calls:
apply_migration(
[migrations.AlterModelTable(model.__name__, "NewTableName")], state
)
yield calls
@contextmanager
def add_field(field, filters: List[str]):
"""Adds the specified field to a model.
Arguments:
field:
The field to add to a model.
filters:
List of strings to filter
SQL statements on.
"""
model = define_fake_model()
state = migrations.state.ProjectState.from_apps(apps)
apply_migration([migrations.CreateModel(model.__name__, fields=[])], state)
with filtered_schema_editor(*filters) as calls:
apply_migration(
[migrations.AddField(model.__name__, "title", field)], state
)
yield calls
@contextmanager
def remove_field(field, filters: List[str]):
"""Removes the specified field from a model.
Arguments:
field:
The field to remove from a model.
filters:
List of strings to filter
SQL statements on.
"""
model = define_fake_model({"title": field})
state = migrations.state.ProjectState.from_apps(apps)
apply_migration(
[
migrations.CreateModel(
model.__name__, fields=[("title", field.clone())]
)
],
state,
)
with filtered_schema_editor(*filters) as calls:
apply_migration(
[migrations.RemoveField(model.__name__, "title")], state
)
yield calls
@contextmanager
def alter_field(old_field, new_field, filters: List[str]):
"""Alters a field from one state to the other.
Arguments:
old_field:
The field before altering it.
new_field:
The field after altering it.
filters:
List of strings to filter
SQL statements on.
"""
model = define_fake_model({"title": old_field})
state = migrations.state.ProjectState.from_apps(apps)
apply_migration(
[
migrations.CreateModel(
model.__name__, fields=[("title", old_field.clone())]
)
],
state,
)
with filtered_schema_editor(*filters) as calls:
apply_migration(
[migrations.AlterField(model.__name__, "title", new_field)], state
)
yield calls
@contextmanager
def rename_field(field, filters: List[str]):
"""Renames a field from one name to the other.
Arguments:
field:
Field to be renamed.
filters:
List of strings to filter
SQL statements on.
"""
model = define_fake_model({"title": field})
state = migrations.state.ProjectState.from_apps(apps)
apply_migration(
[
migrations.CreateModel(
model.__name__, fields=[("title", field.clone())]
)
],
state,
)
with filtered_schema_editor(*filters) as calls:
apply_migration(
[migrations.RenameField(model.__name__, "title", "newtitle")], state
)
yield calls
django-postgres-extra-2.0.9/tests/snapshots/ 0000775 0000000 0000000 00000000000 14634267343 0021151 5 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/tests/snapshots/__init__.py 0000664 0000000 0000000 00000000000 14634267343 0023250 0 ustar 00root root 0000000 0000000 django-postgres-extra-2.0.9/tests/snapshots/snap_test_management_command_partition.py 0000664 0000000 0000000 00000006756 14634267343 0031524 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import GenericRepr, Snapshot
snapshots = Snapshot()
snapshots['test_management_command_partition_auto_confirm[--yes] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nOperations applied.\\n', err='')")
snapshots['test_management_command_partition_auto_confirm[-y] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nOperations applied.\\n', err='')")
snapshots['test_management_command_partition_confirm_no[NO] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')")
snapshots['test_management_command_partition_confirm_no[N] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')")
snapshots['test_management_command_partition_confirm_no[No] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')")
snapshots['test_management_command_partition_confirm_no[n] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')")
snapshots['test_management_command_partition_confirm_no[no] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')")
snapshots['test_management_command_partition_confirm_yes[YES] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operations applied.\\n', err='')")
snapshots['test_management_command_partition_confirm_yes[Y] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operations applied.\\n', err='')")
snapshots['test_management_command_partition_confirm_yes[y] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operations applied.\\n', err='')")
snapshots['test_management_command_partition_confirm_yes[yes] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operations applied.\\n', err='')")
snapshots['test_management_command_partition_dry_run[--dry] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\n', err='')")
snapshots['test_management_command_partition_dry_run[-d] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\n', err='')")
django-postgres-extra-2.0.9/tests/test_append_caller_to_sql.py 0000664 0000000 0000000 00000004334 14634267343 0024716 0 ustar 00root root 0000000 0000000 import pytest
from django.db import connection, models
from django.test.utils import CaptureQueriesContext, override_settings
from psqlextra.compiler import append_caller_to_sql
from .fake_model import get_fake_model
class psqlextraSimulated:
def callMockedClass(self):
return MockedClass().mockedMethod()
class MockedClass:
def mockedMethod(self):
return append_caller_to_sql("sql")
def mockedFunction():
return append_caller_to_sql("sql")
@override_settings(POSTGRES_EXTRA_ANNOTATE_SQL=False)
def test_disable_append_caller_to_sql():
commented_sql = mockedFunction()
assert commented_sql == "sql"
@pytest.mark.parametrize(
"entry_point",
[
MockedClass().mockedMethod,
psqlextraSimulated().callMockedClass,
],
)
@override_settings(POSTGRES_EXTRA_ANNOTATE_SQL=True)
def test_append_caller_to_sql_class(entry_point):
commented_sql = entry_point()
assert commented_sql.startswith("sql /* ")
assert "mockedMethod" in commented_sql
assert __file__ in commented_sql
@override_settings(POSTGRES_EXTRA_ANNOTATE_SQL=True)
def test_append_caller_to_sql_function():
commented_sql = mockedFunction()
assert commented_sql.startswith("sql /* ")
assert "mockedFunction" in commented_sql
assert __file__ in commented_sql
@override_settings(POSTGRES_EXTRA_ANNOTATE_SQL=True)
def test_append_caller_to_sql_crud():
model = get_fake_model(
{
"title": models.CharField(max_length=255, null=True),
}
)
obj = None
with CaptureQueriesContext(connection) as queries:
obj = model.objects.create(
id=1,
title="Test",
)
assert "test_append_caller_to_sql_crud " in queries[0]["sql"]
obj.title = "success"
with CaptureQueriesContext(connection) as queries:
obj.save()
assert "test_append_caller_to_sql_crud " in queries[0]["sql"]
with CaptureQueriesContext(connection) as queries:
assert model.objects.filter(id=obj.id)[0].id == obj.id
assert "test_append_caller_to_sql_crud " in queries[0]["sql"]
with CaptureQueriesContext(connection) as queries:
obj.delete()
assert "test_append_caller_to_sql_crud " in queries[0]["sql"]
django-postgres-extra-2.0.9/tests/test_case_insensitive_unique_index.py 0000664 0000000 0000000 00000004055 14634267343 0026654 0 ustar 00root root 0000000 0000000 import pytest
from django.db import IntegrityError, connection, models
from django.db.migrations import AddIndex, CreateModel
from psqlextra.indexes import CaseInsensitiveUniqueIndex
from psqlextra.models import PostgresModel
from .fake_model import get_fake_model
from .migrations import apply_migration, filtered_schema_editor
def test_ciui_migrations():
"""Tests whether migrations for case sensitive indexes are being created as
expected."""
index_1 = CaseInsensitiveUniqueIndex(
fields=["name", "other_name"], name="index1"
)
ops = [
CreateModel(
name="mymodel",
fields=[
("name", models.CharField(max_length=255)),
("other_name", models.CharField(max_length=255)),
],
),
AddIndex(model_name="mymodel", index=index_1),
]
with filtered_schema_editor("CREATE UNIQUE INDEX") as calls:
apply_migration(ops)
sql = str([call[0] for _, call, _ in calls["CREATE UNIQUE INDEX"]][0])
expected_sql = 'CREATE UNIQUE INDEX "index1" ON "tests_mymodel" (LOWER("name"), LOWER("other_name"))'
assert sql == expected_sql
def test_ciui():
"""Tests whether the case insensitive unique index works as expected."""
index_1 = CaseInsensitiveUniqueIndex(fields=["name"], name="index1")
model = get_fake_model(
{"name": models.CharField(max_length=255)}, PostgresModel
)
with connection.schema_editor() as schema_editor:
schema_editor.add_index(model, index_1)
model.objects.create(name="henk")
with pytest.raises(IntegrityError):
model.objects.create(name="Henk")
def test_ciui_on_conflict():
"""Tests wether fields with a :see:CaseInsensitiveUniqueIndex can be used
as a conflict target."""
index_1 = CaseInsensitiveUniqueIndex(fields=["name"], name="index1")
model = get_fake_model(
{"name": models.CharField(max_length=255)},
PostgresModel,
{"indexes": [index_1]},
)
model.objects.upsert(conflict_target=["name"], fields=dict(name="henk"))
django-postgres-extra-2.0.9/tests/test_conditional_unique_index.py 0000664 0000000 0000000 00000011203 14634267343 0025615 0 ustar 00root root 0000000 0000000 import pytest
from django.db import IntegrityError, models, transaction
from django.db.migrations import AddIndex, CreateModel
from psqlextra.indexes import ConditionalUniqueIndex
from .fake_model import get_fake_model
from .migrations import apply_migration, filtered_schema_editor
def test_cui_deconstruct():
"""Tests whether the :see:ConditionalUniqueIndex's deconstruct() method
works properly."""
original_kwargs = dict(
condition="field IS NULL", name="great_index", fields=["field", "build"]
)
_, _, new_kwargs = ConditionalUniqueIndex(**original_kwargs).deconstruct()
for key, value in original_kwargs.items():
assert new_kwargs[key] == value
def test_cui_migrations():
"""Tests whether the migrations are properly generated and executed."""
index_1 = ConditionalUniqueIndex(
fields=["name", "other_name"],
condition='"name" IS NOT NULL',
name="index1",
)
index_2 = ConditionalUniqueIndex(
fields=["other_name"], condition='"name" IS NULL', name="index2"
)
ops = [
CreateModel(
name="mymodel",
fields=[
("id", models.IntegerField(primary_key=True)),
("name", models.CharField(max_length=255, null=True)),
("other_name", models.CharField(max_length=255)),
],
options={
# "indexes": [index_1, index_2],
},
),
AddIndex(model_name="mymodel", index=index_1),
AddIndex(model_name="mymodel", index=index_2),
]
with filtered_schema_editor("CREATE UNIQUE INDEX") as calls:
apply_migration(ops)
calls = [call[0] for _, call, _ in calls["CREATE UNIQUE INDEX"]]
db_table = "tests_mymodel"
query = 'CREATE UNIQUE INDEX "index1" ON "{0}" ("name", "other_name") WHERE "name" IS NOT NULL'
assert str(calls[0]) == query.format(db_table)
query = 'CREATE UNIQUE INDEX "index2" ON "{0}" ("other_name") WHERE "name" IS NULL'
assert str(calls[1]) == query.format(db_table)
def test_cui_upserting():
"""Tests upserting respects the :see:ConditionalUniqueIndex rules."""
model = get_fake_model(
fields={
"a": models.IntegerField(),
"b": models.IntegerField(null=True),
"c": models.IntegerField(),
},
meta_options={
"indexes": [
ConditionalUniqueIndex(
fields=["a", "b"], condition='"b" IS NOT NULL'
),
ConditionalUniqueIndex(fields=["a"], condition='"b" IS NULL'),
]
},
)
model.objects.upsert(
conflict_target=["a"],
index_predicate='"b" IS NULL',
fields=dict(a=1, c=1),
)
assert model.objects.all().count() == 1
assert model.objects.filter(a=1, c=1).count() == 1
model.objects.upsert(
conflict_target=["a"],
index_predicate='"b" IS NULL',
fields=dict(a=1, c=2),
)
assert model.objects.all().count() == 1
assert model.objects.filter(a=1, c=1).count() == 0
assert model.objects.filter(a=1, c=2).count() == 1
model.objects.upsert(
conflict_target=["a", "b"],
index_predicate='"b" IS NOT NULL',
fields=dict(a=1, b=1, c=1),
)
assert model.objects.all().count() == 2
assert model.objects.filter(a=1, c=2).count() == 1
assert model.objects.filter(a=1, b=1, c=1).count() == 1
model.objects.upsert(
conflict_target=["a", "b"],
index_predicate='"b" IS NOT NULL',
fields=dict(a=1, b=1, c=2),
)
assert model.objects.all().count() == 2
assert model.objects.filter(a=1, c=1).count() == 0
assert model.objects.filter(a=1, b=1, c=2).count() == 1
def test_cui_inserting():
"""Tests inserting respects the :see:ConditionalUniqueIndex rules."""
model = get_fake_model(
fields={
"a": models.IntegerField(),
"b": models.IntegerField(null=True),
"c": models.IntegerField(),
},
meta_options={
"indexes": [
ConditionalUniqueIndex(
fields=["a", "b"], condition='"b" IS NOT NULL'
),
ConditionalUniqueIndex(fields=["a"], condition='"b" IS NULL'),
]
},
)
model.objects.create(a=1, c=1)
with transaction.atomic():
with pytest.raises(IntegrityError):
model.objects.create(a=1, c=2)
model.objects.create(a=2, c=1)
model.objects.create(a=1, b=1, c=1)
with transaction.atomic():
with pytest.raises(IntegrityError):
model.objects.create(a=1, b=1, c=2)
model.objects.create(a=1, b=2, c=1)
django-postgres-extra-2.0.9/tests/test_db_backend.py 0000664 0000000 0000000 00000000540 14634267343 0022573 0 ustar 00root root 0000000 0000000 from django.db import connection
def test_db_backend_hstore_extension_enabled():
"""Tests whether the `hstore` extension was enabled automatically."""
with connection.cursor() as cursor:
cursor.execute(
("SELECT count(*) FROM pg_extension " "WHERE extname = 'hstore'")
)
assert cursor.fetchone()[0] == 1
django-postgres-extra-2.0.9/tests/test_hstore_autodetect.py 0000664 0000000 0000000 00000005171 14634267343 0024271 0 ustar 00root root 0000000 0000000 from django.db import migrations
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.state import ProjectState
from psqlextra.fields import HStoreField
def _make_project_state(model_states):
"""Shortcut to make :see:ProjectState from a list of predefined models."""
project_state = ProjectState()
for model_state in model_states:
project_state.add_model(model_state.clone())
return project_state
def _detect_changes(before_states, after_states):
"""Uses the migration autodetector to detect changes in the specified
project states."""
return MigrationAutodetector(
_make_project_state(before_states), _make_project_state(after_states)
)._detect_changes()
def _assert_autodetector(changes, expected):
"""Asserts whether the results of the auto detector are as expected."""
assert "tests" in changes
assert len("tests") > 0
operations = changes["tests"][0].operations
for i, expected_operation in enumerate(expected):
real_operation = operations[i]
_, _, real_args, real_kwargs = real_operation.field.deconstruct()
(
_,
_,
expected_args,
expected_kwargs,
) = expected_operation.field.deconstruct()
assert real_args == expected_args
assert real_kwargs == expected_kwargs
def test_hstore_autodetect_uniqueness():
"""Tests whether changes in the `uniqueness` option are properly detected
by the auto detector."""
before = [
migrations.state.ModelState(
"tests", "Model1", [("title", HStoreField())]
)
]
after = [
migrations.state.ModelState(
"tests", "Model1", [("title", HStoreField(uniqueness=["en"]))]
)
]
changes = _detect_changes(before, after)
_assert_autodetector(
changes,
[
migrations.AlterField(
"Model1", "title", HStoreField(uniqueness=["en"])
)
],
)
def test_hstore_autodetect_required():
"""Tests whether changes in the `required` option are properly detected by
the auto detector."""
before = [
migrations.state.ModelState(
"tests", "Model1", [("title", HStoreField())]
)
]
after = [
migrations.state.ModelState(
"tests", "Model1", [("title", HStoreField(required=["en"]))]
)
]
changes = _detect_changes(before, after)
_assert_autodetector(
changes,
[
migrations.AlterField(
"Model1", "title", HStoreField(required=["en"])
)
],
)
django-postgres-extra-2.0.9/tests/test_hstore_field.py 0000664 0000000 0000000 00000001743 14634267343 0023214 0 ustar 00root root 0000000 0000000 import pytest
from psqlextra.fields import HStoreField
def test_hstore_field_deconstruct():
"""Tests whether the :see:HStoreField's deconstruct() method works
properly."""
original_kwargs = dict(uniqueness=["beer", "other"], required=[])
_, _, _, new_kwargs = HStoreField(**original_kwargs).deconstruct()
for key, value in original_kwargs.items():
assert new_kwargs[key] == value
@pytest.mark.parametrize(
"input,output",
[
(dict(key1=1, key2=2), dict(key1="1", key2="2")),
(dict(key1="1", key2="2"), dict(key1="1", key2="2")),
(
dict(key1=1, key2=None, key3="3"),
dict(key1="1", key2=None, key3="3"),
),
([1, 2, 3], ["1", "2", "3"]),
(["1", "2", "3"], ["1", "2", "3"]),
],
)
def test_hstore_field_get_prep_value(input, output):
"""Tests whether the :see:HStoreField's get_prep_value method works
properly."""
assert HStoreField().get_prep_value(input) == output
django-postgres-extra-2.0.9/tests/test_hstore_required.py 0000664 0000000 0000000 00000010320 14634267343 0023740 0 ustar 00root root 0000000 0000000 import pytest
from django.db.utils import IntegrityError
from psqlextra.fields import HStoreField
from . import migrations
from .fake_model import get_fake_model
def test_hstore_required_migration_create_drop_model():
"""Tests whether constraints are properly created and dropped when creating
and dropping a model."""
required = ["beer", "cookies"]
test = migrations.create_drop_model(
HStoreField(required=required), ["ADD CONSTRAINT", "DROP CONSTRAINT"]
)
with test as calls:
assert len(calls["ADD CONSTRAINT"]) == len(required)
assert len(calls["DROP CONSTRAINT"]) == len(required)
def test_hstore_required_migration_alter_db_table():
"""Tests whether constraints are renamed properly when renaming the
database table."""
test = migrations.alter_db_table(
HStoreField(required=["beer", "cookie"]),
["RENAME CONSTRAINT", "ADD CONSTRAINT", "DROP CONSTRAINT"],
)
with test as calls:
assert len(calls["RENAME CONSTRAINT"]) == 2
assert len(calls.get("ADD CONSTRAINT", [])) == 0
assert len(calls.get("DROP CONSTRAINT", [])) == 0
def test_hstore_required_add_field():
"""Tests whether adding a field properly creates the constraints."""
test = migrations.add_field(
HStoreField(required=["beer"]), ["ADD CONSTRAINT", "DROP CONSTRAINT"]
)
with test as calls:
assert len(calls.get("ADD CONSTRAINT", [])) == 1
assert len(calls.get("DROP CONSTRAINT", [])) == 0
def test_hstore_required_remove_field():
"""Tests whether removing a field properly removes the constraint."""
test = migrations.remove_field(
HStoreField(required=["beer"]), ["ADD CONSTRAINT", "DROP CONSTRAINT"]
)
with test as calls:
assert len(calls.get("ADD CONSTRAINT", [])) == 0
assert len(calls.get("DROP CONSTRAINT", [])) == 1
def test_hstore_required_alter_field_nothing():
"""Tests whether no constraints are dropped when not changing anything in
the required."""
test = migrations.alter_field(
HStoreField(required=["beer"]),
HStoreField(required=["beer"]),
["ADD CONSTRAINT", "DROP CONSTRAINT"],
)
with test as calls:
assert len(calls.get("ADD CONSTRAINT", [])) == 0
assert len(calls.get("DROP CONSTRAINT", [])) == 0
def test_hstore_required_alter_field_add():
"""Tests whether only one constraint is created when adding another key to
the required."""
test = migrations.alter_field(
HStoreField(required=["beer"]),
HStoreField(required=["beer", "beer1"]),
["ADD CONSTRAINT", "DROP CONSTRAINT"],
)
with test as calls:
assert len(calls.get("ADD CONSTRAINT", [])) == 1
assert len(calls.get("DROP CONSTRAINT", [])) == 0
def test_hstore_required_alter_field_remove():
"""Tests whether one constraint is dropped when removing a key from
required."""
test = migrations.alter_field(
HStoreField(required=["beer"]),
HStoreField(required=[]),
["ADD CONSTRAINT", "DROP CONSTRAINT"],
)
with test as calls:
assert len(calls.get("ADD CONSTRAINT", [])) == 0
assert len(calls.get("DROP CONSTRAINT", [])) == 1
def test_hstore_required_rename_field():
"""Tests whether renaming a field doesn't cause the constraint to be re-
created."""
test = migrations.rename_field(
HStoreField(required=["beer", "cookies"]),
["RENAME CONSTRAINT", "ADD CONSTRAINT", "DROP CONSTRAINT"],
)
with test as calls:
assert len(calls.get("RENAME CONSTRAINT", [])) == 2
assert len(calls.get("ADD CONSTRAINT", [])) == 0
assert len(calls.get("DROP CONSTRAINT", [])) == 0
def test_hstore_required_required_enforcement():
"""Tests whether the constraints are actually properly enforced."""
model = get_fake_model({"title": HStoreField(required=["en"])})
with pytest.raises(IntegrityError):
model.objects.create(title={"ar": "hello"})
def test_hstore_required_no_required():
"""Tests whether setting `required` to False casues no requiredness
constraints to be added."""
model = get_fake_model({"title": HStoreField(required=False)})
model.objects.create(title={"ar": "hello"})
django-postgres-extra-2.0.9/tests/test_hstore_unique.py 0000664 0000000 0000000 00000013110 14634267343 0023426 0 ustar 00root root 0000000 0000000 import pytest
from django.db import transaction
from django.db.utils import IntegrityError
from psqlextra.fields import HStoreField
from . import migrations
from .fake_model import get_fake_model
def test_hstore_unique_migration_create_drop_model():
"""Tests whether indexes are properly created and dropped when creating and
dropping a model."""
uniqueness = ["beer", "cookies"]
test = migrations.create_drop_model(
HStoreField(uniqueness=uniqueness), ["CREATE UNIQUE", "DROP INDEX"]
)
with test as calls:
assert len(calls["CREATE UNIQUE"]) == len(uniqueness)
assert len(calls["DROP INDEX"]) == len(uniqueness)
def test_hstore_unique_migration_alter_db_table():
"""Tests whether indexes are renamed properly when renaming the database
table."""
test = migrations.alter_db_table(
HStoreField(uniqueness=["beer", "cookie"]),
["RENAME TO", "CREATE INDEX", "DROP INDEX"],
)
with test as calls:
# 1 rename for table, 2 for hstore keys
assert len(calls["RENAME TO"]) == 3
assert len(calls.get("CREATE UNIQUE", [])) == 0
assert len(calls.get("DROP INDEX", [])) == 0
def test_hstore_unique_add_field():
"""Tests whether adding a field properly creates the indexes."""
test = migrations.add_field(
HStoreField(uniqueness=["beer"]), ["CREATE UNIQUE", "DROP INDEX"]
)
with test as calls:
assert len(calls.get("CREATE UNIQUE", [])) == 1
assert len(calls.get("DROP INDEX", [])) == 0
def test_hstore_unique_remove_field():
"""Tests whether removing a field properly removes the index."""
test = migrations.remove_field(
HStoreField(uniqueness=["beer"]), ["CREATE UNIQUE", "DROP INDEX"]
)
with test as calls:
assert len(calls.get("CREATE UNIQUE", [])) == 0
assert len(calls.get("DROP INDEX", [])) == 1
def test_hstore_unique_alter_field_nothing():
"""Tests whether no indexes are dropped when not changing anything in the
uniqueness."""
test = migrations.alter_field(
HStoreField(uniqueness=["beer"]),
HStoreField(uniqueness=["beer"]),
["CREATE UNIQUE", "DROP INDEX"],
)
with test as calls:
assert len(calls.get("CREATE UNIQUE", [])) == 0
assert len(calls.get("DROP INDEX", [])) == 0
def test_hstore_unique_alter_field_add():
"""Tests whether only one index is created when adding another key to the
uniqueness."""
test = migrations.alter_field(
HStoreField(uniqueness=["beer"]),
HStoreField(uniqueness=["beer", "beer1"]),
["CREATE UNIQUE", "DROP INDEX"],
)
with test as calls:
assert len(calls.get("CREATE UNIQUE", [])) == 1
assert len(calls.get("DROP INDEX", [])) == 0
def test_hstore_unique_alter_field_remove():
"""Tests whether one index is dropped when removing a key from
uniqueness."""
test = migrations.alter_field(
HStoreField(uniqueness=["beer"]),
HStoreField(uniqueness=[]),
["CREATE UNIQUE", "DROP INDEX"],
)
with test as calls:
assert len(calls.get("CREATE UNIQUE", [])) == 0
assert len(calls.get("DROP INDEX", [])) == 1
def test_hstore_unique_alter_field_add_together():
"""Tests whether adding one index is created when adding a "unique
together"."""
test = migrations.alter_field(
HStoreField(uniqueness=["beer"]),
HStoreField(uniqueness=["beer", ("beer1", "beer2")]),
["CREATE UNIQUE", "DROP INDEX"],
)
with test as calls:
assert len(calls.get("CREATE UNIQUE", [])) == 1
assert len(calls.get("DROP INDEX", [])) == 0
def test_hstore_unique_alter_field_remove_together():
"""Tests whether adding one index is dropped when adding a "unique
together"."""
test = migrations.alter_field(
HStoreField(uniqueness=[("beer1", "beer2")]),
HStoreField(uniqueness=[]),
["CREATE UNIQUE", "DROP INDEX"],
)
with test as calls:
assert len(calls.get("CREATE UNIQUE", [])) == 0
assert len(calls.get("DROP INDEX", [])) == 1
def test_hstore_unique_rename_field():
"""Tests whether renaming a field doesn't cause the index to be re-
created."""
test = migrations.rename_field(
HStoreField(uniqueness=["beer", "cookies"]),
["RENAME TO", "CREATE INDEX", "DROP INDEX"],
)
with test as calls:
assert len(calls.get("RENAME TO", [])) == 2
assert len(calls.get("CREATE UNIQUE", [])) == 0
assert len(calls.get("DROP INDEX", [])) == 0
def test_hstore_unique_enforcement():
"""Tests whether the constraints are actually properly enforced."""
model = get_fake_model({"title": HStoreField(uniqueness=["en"])})
# should pass, table is empty and 'ar' does not have to be unique
model.objects.create(title={"en": "unique", "ar": "notunique"})
model.objects.create(title={"en": "elseunique", "ar": "notunique"})
# this should fail, key 'en' must be unique
with transaction.atomic():
with pytest.raises(IntegrityError):
model.objects.create(title={"en": "unique", "ar": "notunique"})
def test_hstore_unique_enforcement_together():
"""Tests whether unique_together style constraints are enforced
properly."""
model = get_fake_model({"title": HStoreField(uniqueness=[("en", "ar")])})
model.objects.create(title={"en": "unique", "ar": "notunique"})
with transaction.atomic():
with pytest.raises(IntegrityError):
model.objects.create(title={"en": "unique", "ar": "notunique"})
model.objects.create(title={"en": "notunique", "ar": "unique"})
django-postgres-extra-2.0.9/tests/test_insert.py 0000664 0000000 0000000 00000007327 14634267343 0022055 0 ustar 00root root 0000000 0000000 from django.db import models
from psqlextra.query import ConflictAction
from .fake_model import get_fake_model
def test_insert():
"""Tests whether inserts works when the primary key is explicitly
specified."""
model = get_fake_model(
{"cookies": models.CharField(max_length=255, null=True)}
)
pk = model.objects.all().insert(cookies="some-cookies")
assert pk is not None
obj1 = model.objects.get()
assert obj1.pk == pk
assert obj1.cookies == "some-cookies"
def test_insert_explicit_pk():
"""Tests whether inserts works when the primary key is explicitly
specified."""
model = get_fake_model(
{
"name": models.CharField(max_length=255, primary_key=True),
"cookies": models.CharField(max_length=255, null=True),
}
)
pk = model.objects.all().insert(name="the-object", cookies="some-cookies")
assert pk == "the-object"
obj1 = model.objects.get()
assert obj1.pk == "the-object"
assert obj1.name == "the-object"
assert obj1.cookies == "some-cookies"
def test_insert_on_conflict():
"""Tests whether inserts works when a conflict is anticipated."""
model = get_fake_model(
{
"name": models.CharField(max_length=255, unique=True),
"cookies": models.CharField(max_length=255, null=True),
}
)
pk = model.objects.on_conflict([("pk")], ConflictAction.NOTHING).insert(
name="the-object", cookies="some-cookies"
)
assert pk is not None
obj1 = model.objects.get()
assert obj1.pk == pk
assert obj1.name == "the-object"
assert obj1.cookies == "some-cookies"
def test_insert_on_conflict_explicit_pk():
"""Tests whether inserts works when a conflict is anticipated and the
primary key is explicitly specified."""
model = get_fake_model(
{
"name": models.CharField(max_length=255, primary_key=True),
"cookies": models.CharField(max_length=255, null=True),
}
)
pk = model.objects.on_conflict([("name")], ConflictAction.NOTHING).insert(
name="the-object", cookies="some-cookies"
)
assert pk == "the-object"
obj1 = model.objects.get()
assert obj1.pk == "the-object"
assert obj1.name == "the-object"
assert obj1.cookies == "some-cookies"
def test_insert_with_different_column_name():
"""Tests whether inserts works when the primary key is explicitly
specified."""
model = get_fake_model(
{
"name": models.CharField(max_length=255, primary_key=True),
"cookies": models.CharField(
max_length=255, null=True, db_column="brownies"
),
}
)
cookie_string = "these-are-brownies"
results = model.objects.on_conflict(
["name"], ConflictAction.NOTHING
).insert_and_get(name="the-object", cookies=cookie_string)
assert results is not None
assert results.cookies == cookie_string
obj1 = model.objects.get()
assert obj1.cookies == cookie_string
def test_insert_many_to_many():
"""Tests whether adding a rows to a m2m works after using insert_and_get.
The model returned by `insert_and_get` must be configured in a
special way. Just creating an instance of the model is not enough to
be able to add m2m rows.
"""
model1 = get_fake_model({"name": models.TextField(primary_key=True)})
model2 = get_fake_model(
{
"name": models.TextField(primary_key=True),
"model1s": models.ManyToManyField(model1),
}
)
row2 = model2.objects.on_conflict(
["name"], ConflictAction.UPDATE
).insert_and_get(name="swen")
row1 = model1.objects.create(name="booh")
row2.model1s.add(row1)
row2.save()
django-postgres-extra-2.0.9/tests/test_introspect.py 0000664 0000000 0000000 00000034013 14634267343 0022733 0 ustar 00root root 0000000 0000000 import django
import pytest
from django.contrib.postgres.fields import ArrayField
from django.db import connection, models
from django.test.utils import CaptureQueriesContext
from django.utils import timezone
from psqlextra.introspect import model_from_cursor, models_from_cursor
from .fake_model import get_fake_model
django_31_skip_reason = "Django < 3.1 does not support JSON fields which are required for these tests"
@pytest.fixture
def mocked_model_varying_fields():
return get_fake_model(
{
"title": models.TextField(null=True),
"updated_at": models.DateTimeField(null=True),
"content": models.JSONField(null=True),
"items": ArrayField(models.TextField(), null=True),
}
)
@pytest.fixture
def mocked_model_single_field():
return get_fake_model(
{
"name": models.TextField(),
}
)
@pytest.fixture
def mocked_model_foreign_keys(
mocked_model_varying_fields, mocked_model_single_field
):
return get_fake_model(
{
"varying_fields": models.ForeignKey(
mocked_model_varying_fields, null=True, on_delete=models.CASCADE
),
"single_field": models.ForeignKey(
mocked_model_single_field, null=True, on_delete=models.CASCADE
),
}
)
@pytest.fixture
def mocked_model_varying_fields_instance(freezer, mocked_model_varying_fields):
return mocked_model_varying_fields.objects.create(
title="hello world",
updated_at=timezone.now(),
content={"a": 1},
items=["a", "b"],
)
@pytest.fixture
def models_from_cursor_wrapper_multiple():
def _wrapper(*args, **kwargs):
return list(models_from_cursor(*args, **kwargs))[0]
return _wrapper
@pytest.fixture
def models_from_cursor_wrapper_single():
return model_from_cursor
@pytest.mark.skipif(
django.VERSION < (3, 1),
reason=django_31_skip_reason,
)
@pytest.mark.parametrize(
"models_from_cursor_wrapper",
[
pytest.lazy_fixture("models_from_cursor_wrapper_multiple"),
pytest.lazy_fixture("models_from_cursor_wrapper_single"),
],
)
def test_models_from_cursor_applies_converters(
mocked_model_varying_fields,
mocked_model_varying_fields_instance,
models_from_cursor_wrapper,
):
with connection.cursor() as cursor:
cursor.execute(
*mocked_model_varying_fields.objects.all().query.sql_with_params()
)
queried_instance = models_from_cursor_wrapper(
mocked_model_varying_fields, cursor
)
assert queried_instance.id == mocked_model_varying_fields_instance.id
assert queried_instance.title == mocked_model_varying_fields_instance.title
assert (
queried_instance.updated_at
== mocked_model_varying_fields_instance.updated_at
)
assert (
queried_instance.content == mocked_model_varying_fields_instance.content
)
assert queried_instance.items == mocked_model_varying_fields_instance.items
@pytest.mark.skipif(
django.VERSION < (3, 1),
reason=django_31_skip_reason,
)
@pytest.mark.parametrize(
"models_from_cursor_wrapper",
[
pytest.lazy_fixture("models_from_cursor_wrapper_multiple"),
pytest.lazy_fixture("models_from_cursor_wrapper_single"),
],
)
def test_models_from_cursor_handles_field_order(
mocked_model_varying_fields,
mocked_model_varying_fields_instance,
models_from_cursor_wrapper,
):
with connection.cursor() as cursor:
cursor.execute(
f'SELECT content, items, id, title, updated_at FROM "{mocked_model_varying_fields._meta.db_table}"',
tuple(),
)
queried_instance = models_from_cursor_wrapper(
mocked_model_varying_fields, cursor
)
assert queried_instance.id == mocked_model_varying_fields_instance.id
assert queried_instance.title == mocked_model_varying_fields_instance.title
assert (
queried_instance.updated_at
== mocked_model_varying_fields_instance.updated_at
)
assert (
queried_instance.content == mocked_model_varying_fields_instance.content
)
assert queried_instance.items == mocked_model_varying_fields_instance.items
@pytest.mark.skipif(
django.VERSION < (3, 1),
reason=django_31_skip_reason,
)
@pytest.mark.parametrize(
"models_from_cursor_wrapper",
[
pytest.lazy_fixture("models_from_cursor_wrapper_multiple"),
pytest.lazy_fixture("models_from_cursor_wrapper_single"),
],
)
def test_models_from_cursor_handles_partial_fields(
mocked_model_varying_fields,
mocked_model_varying_fields_instance,
models_from_cursor_wrapper,
):
with connection.cursor() as cursor:
cursor.execute(
f'SELECT id FROM "{mocked_model_varying_fields._meta.db_table}"',
tuple(),
)
queried_instance = models_from_cursor_wrapper(
mocked_model_varying_fields, cursor
)
assert queried_instance.id == mocked_model_varying_fields_instance.id
assert queried_instance.title is None
assert queried_instance.updated_at is None
assert queried_instance.content is None
assert queried_instance.items is None
@pytest.mark.skipif(
django.VERSION < (3, 1),
reason=django_31_skip_reason,
)
@pytest.mark.parametrize(
"models_from_cursor_wrapper",
[
pytest.lazy_fixture("models_from_cursor_wrapper_multiple"),
pytest.lazy_fixture("models_from_cursor_wrapper_single"),
],
)
def test_models_from_cursor_handles_null(
mocked_model_varying_fields, models_from_cursor_wrapper
):
instance = mocked_model_varying_fields.objects.create()
with connection.cursor() as cursor:
cursor.execute(
*mocked_model_varying_fields.objects.all().query.sql_with_params()
)
queried_instance = models_from_cursor_wrapper(
mocked_model_varying_fields, cursor
)
assert queried_instance.id == instance.id
assert queried_instance.title is None
assert queried_instance.updated_at is None
assert queried_instance.content is None
assert queried_instance.items is None
@pytest.mark.skipif(
django.VERSION < (3, 1),
reason=django_31_skip_reason,
)
@pytest.mark.parametrize(
"models_from_cursor_wrapper",
[
pytest.lazy_fixture("models_from_cursor_wrapper_multiple"),
pytest.lazy_fixture("models_from_cursor_wrapper_single"),
],
)
def test_models_from_cursor_foreign_key(
mocked_model_single_field,
mocked_model_foreign_keys,
models_from_cursor_wrapper,
):
instance = mocked_model_foreign_keys.objects.create(
varying_fields=None,
single_field=mocked_model_single_field.objects.create(name="test"),
)
with connection.cursor() as cursor:
cursor.execute(
*mocked_model_foreign_keys.objects.all().query.sql_with_params()
)
queried_instance = models_from_cursor_wrapper(
mocked_model_foreign_keys, cursor
)
with CaptureQueriesContext(connection) as ctx:
assert queried_instance.id == instance.id
assert queried_instance.varying_fields_id is None
assert queried_instance.varying_fields is None
assert queried_instance.single_field_id == instance.single_field_id
assert queried_instance.single_field.id == instance.single_field.id
assert queried_instance.single_field.name == instance.single_field.name
assert len(ctx.captured_queries) == 1
@pytest.mark.skipif(
django.VERSION < (3, 1),
reason=django_31_skip_reason,
)
@pytest.mark.parametrize(
"models_from_cursor_wrapper",
[
pytest.lazy_fixture("models_from_cursor_wrapper_multiple"),
pytest.lazy_fixture("models_from_cursor_wrapper_single"),
],
)
def test_models_from_cursor_related_fields(
mocked_model_varying_fields,
mocked_model_single_field,
mocked_model_foreign_keys,
models_from_cursor_wrapper,
):
instance = mocked_model_foreign_keys.objects.create(
varying_fields=mocked_model_varying_fields.objects.create(
title="test", updated_at=timezone.now()
),
single_field=mocked_model_single_field.objects.create(name="test"),
)
with connection.cursor() as cursor:
cursor.execute(
*mocked_model_foreign_keys.objects.select_related(
"varying_fields", "single_field"
)
.all()
.query.sql_with_params()
)
queried_instance = models_from_cursor_wrapper(
mocked_model_foreign_keys,
cursor,
related_fields=["varying_fields", "single_field"],
)
with CaptureQueriesContext(connection) as ctx:
assert queried_instance.id == instance.id
assert queried_instance.varying_fields_id == instance.varying_fields_id
assert queried_instance.varying_fields.id == instance.varying_fields.id
assert (
queried_instance.varying_fields.title
== instance.varying_fields.title
)
assert (
queried_instance.varying_fields.updated_at
== instance.varying_fields.updated_at
)
assert (
queried_instance.varying_fields.content
== instance.varying_fields.content
)
assert (
queried_instance.varying_fields.items
== instance.varying_fields.items
)
assert queried_instance.single_field_id == instance.single_field_id
assert queried_instance.single_field.id == instance.single_field.id
assert queried_instance.single_field.name == instance.single_field.name
assert len(ctx.captured_queries) == 0
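# Illustrative sketch (not a test), mirroring the pattern asserted above:
# `related_fields` tells `models_from_cursor` which trailing columns belong
# to the joined relation, so the relation is hydrated from the same row and
# accessing it triggers no extra query. The field name "varying_fields" is
# a placeholder taken from the fixtures in this module.
def _example_models_from_cursor_related(model):
    with connection.cursor() as cursor:
        cursor.execute(
            *model.objects.select_related("varying_fields")
            .all()
            .query.sql_with_params()
        )
        # each instance comes back with `varying_fields` already populated
        return list(
            models_from_cursor(
                model, cursor, related_fields=["varying_fields"]
            )
        )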
@pytest.mark.skipif(
django.VERSION < (3, 1),
reason=django_31_skip_reason,
)
@pytest.mark.parametrize(
"models_from_cursor_wrapper",
[
pytest.lazy_fixture("models_from_cursor_wrapper_multiple"),
pytest.lazy_fixture("models_from_cursor_wrapper_single"),
],
)
@pytest.mark.parametrize(
"selected", [True, False], ids=["selected", "not_selected"]
)
def test_models_from_cursor_related_fields_optional(
mocked_model_varying_fields,
mocked_model_foreign_keys,
models_from_cursor_wrapper,
selected,
):
instance = mocked_model_foreign_keys.objects.create(
varying_fields=mocked_model_varying_fields.objects.create(
title="test", updated_at=timezone.now()
),
single_field=None,
)
with connection.cursor() as cursor:
select_related = ["varying_fields"]
if selected:
select_related.append("single_field")
cursor.execute(
*mocked_model_foreign_keys.objects.select_related(*select_related)
.all()
.query.sql_with_params()
)
queried_instance = models_from_cursor_wrapper(
mocked_model_foreign_keys,
cursor,
related_fields=["varying_fields", "single_field"],
)
assert queried_instance.id == instance.id
assert queried_instance.varying_fields_id == instance.varying_fields_id
assert queried_instance.single_field_id == instance.single_field_id
with CaptureQueriesContext(connection) as ctx:
assert queried_instance.varying_fields.id == instance.varying_fields.id
assert (
queried_instance.varying_fields.title
== instance.varying_fields.title
)
assert (
queried_instance.varying_fields.updated_at
== instance.varying_fields.updated_at
)
assert (
queried_instance.varying_fields.content
== instance.varying_fields.content
)
assert (
queried_instance.varying_fields.items
== instance.varying_fields.items
)
assert queried_instance.single_field is None
assert len(ctx.captured_queries) == 0
@pytest.mark.skipif(
django.VERSION < (3, 1),
reason=django_31_skip_reason,
)
def test_models_from_cursor_generator_efficiency(
mocked_model_varying_fields, mocked_model_single_field
):
mocked_model_single_field.objects.create(name="a")
mocked_model_single_field.objects.create(name="b")
with connection.cursor() as cursor:
cursor.execute(
*mocked_model_single_field.objects.all().query.sql_with_params()
)
instances_generator = models_from_cursor(
mocked_model_single_field, cursor
)
assert cursor.rownumber == 0
next(instances_generator)
assert cursor.rownumber == 1
next(instances_generator)
assert cursor.rownumber == 2
assert not next(instances_generator, None)
assert cursor.rownumber == 2
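# Illustrative sketch (not a test), restating what the rownumber assertions
# above demonstrate: `models_from_cursor` is a generator, so model instances
# are constructed lazily, one per consumed row, instead of being built up
# front as a list. The model argument is a placeholder.
def _example_stream_instances(model):
    with connection.cursor() as cursor:
        cursor.execute(*model.objects.all().query.sql_with_params())
        for instance in models_from_cursor(model, cursor):
            yield instance  # one row consumed per iteration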
@pytest.mark.skipif(
django.VERSION < (3, 1),
reason=django_31_skip_reason,
)
def test_models_from_cursor_tolerates_additional_columns(
mocked_model_foreign_keys, mocked_model_varying_fields
):
with connection.cursor() as cursor:
cursor.execute(
f"ALTER TABLE {mocked_model_foreign_keys._meta.db_table} ADD COLUMN new_col text DEFAULT NULL"
)
cursor.execute(
f"ALTER TABLE {mocked_model_varying_fields._meta.db_table} ADD COLUMN new_col text DEFAULT NULL"
)
instance = mocked_model_foreign_keys.objects.create(
varying_fields=mocked_model_varying_fields.objects.create(
title="test", updated_at=timezone.now()
),
single_field=None,
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT fk_t.*, vf_t.* FROM {mocked_model_foreign_keys._meta.db_table} fk_t
INNER JOIN {mocked_model_varying_fields._meta.db_table} vf_t ON vf_t.id = fk_t.varying_fields_id
"""
)
queried_instances = list(
models_from_cursor(
mocked_model_foreign_keys,
cursor,
related_fields=["varying_fields"],
)
)
assert len(queried_instances) == 1
assert queried_instances[0].id == instance.id
assert (
queried_instances[0].varying_fields.id == instance.varying_fields.id
)
django-postgres-extra-2.0.9/tests/test_locking.py 0000664 0000000 0000000 00000006113 14634267343 0022167 0 ustar 00root root 0000000 0000000 import uuid
import pytest
from django.db import connection, models, transaction
from psqlextra.locking import (
PostgresTableLockMode,
postgres_lock_model,
postgres_lock_table,
)
from .fake_model import get_fake_model
@pytest.fixture
def mocked_model():
return get_fake_model(
{
"name": models.TextField(),
}
)
def get_table_locks():
with connection.cursor() as cursor:
return connection.introspection.get_table_locks(cursor)
@pytest.mark.django_db(transaction=True)
def test_postgres_lock_table(mocked_model):
lock_signature = (
"public",
mocked_model._meta.db_table,
"AccessExclusiveLock",
)
with transaction.atomic():
postgres_lock_table(
mocked_model._meta.db_table, PostgresTableLockMode.ACCESS_EXCLUSIVE
)
assert lock_signature in get_table_locks()
assert lock_signature not in get_table_locks()
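# Illustrative sketch (not a test): a lock taken with postgres_lock_table
# is held until the enclosing transaction ends, which is exactly what the
# two assertions above verify. The table name here is a placeholder.
def _example_exclusive_maintenance(table_name):
    with transaction.atomic():
        postgres_lock_table(table_name, PostgresTableLockMode.EXCLUSIVE)
        # ... do work that must not run concurrently with other writers ...
    # the lock is released here, together with the transaction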
@pytest.mark.django_db(transaction=True)
def test_postgres_lock_table_in_schema():
schema_name = str(uuid.uuid4())[:8]
table_name = str(uuid.uuid4())[:8]
quoted_schema_name = connection.ops.quote_name(schema_name)
quoted_table_name = connection.ops.quote_name(table_name)
with connection.cursor() as cursor:
cursor.execute(f"CREATE SCHEMA {quoted_schema_name}")
cursor.execute(
f"CREATE TABLE {quoted_schema_name}.{quoted_table_name} AS SELECT 'hello world'"
)
lock_signature = (schema_name, table_name, "ExclusiveLock")
with transaction.atomic():
postgres_lock_table(
table_name, PostgresTableLockMode.EXCLUSIVE, schema_name=schema_name
)
assert lock_signature in get_table_locks()
assert lock_signature not in get_table_locks()
@pytest.mark.parametrize("lock_mode", list(PostgresTableLockMode))
@pytest.mark.django_db(transaction=True)
def test_postgres_lock_model(mocked_model, lock_mode):
lock_signature = (
"public",
mocked_model._meta.db_table,
lock_mode.alias,
)
with transaction.atomic():
postgres_lock_model(mocked_model, lock_mode)
assert lock_signature in get_table_locks()
assert lock_signature not in get_table_locks()
@pytest.mark.django_db(transaction=True)
def test_postgres_lock_model_in_schema(mocked_model):
schema_name = str(uuid.uuid4())[:8]
quoted_schema_name = connection.ops.quote_name(schema_name)
quoted_table_name = connection.ops.quote_name(mocked_model._meta.db_table)
with connection.cursor() as cursor:
cursor.execute(f"CREATE SCHEMA {quoted_schema_name}")
cursor.execute(
f"CREATE TABLE {quoted_schema_name}.{quoted_table_name} (LIKE public.{quoted_table_name} INCLUDING ALL)"
)
lock_signature = (schema_name, mocked_model._meta.db_table, "ExclusiveLock")
with transaction.atomic():
postgres_lock_model(
mocked_model,
PostgresTableLockMode.EXCLUSIVE,
schema_name=schema_name,
)
assert lock_signature in get_table_locks()
assert lock_signature not in get_table_locks()
django-postgres-extra-2.0.9/tests/test_lookups.py 0000664 0000000 0000000 00000005063 14634267343 0022240 0 ustar 00root root 0000000 0000000 from django.db import models
from .fake_model import get_fake_model
def test_invalues_lookup_text_field():
model = get_fake_model({"name": models.TextField()})
[a, b] = model.objects.bulk_create(
[
model(name="a"),
model(name="b"),
]
)
results = list(model.objects.filter(name__invalues=[a.name, b.name, "c"]))
assert results == [a, b]
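# Illustrative sketch (not a test): `invalues` is used as a drop-in
# replacement for the built-in `in` lookup. It is presumably compiled to a
# `= ANY (VALUES ...)` construct rather than `IN (...)`, which Postgres
# tends to plan better for long literal lists; the exact SQL is an
# implementation detail of psqlextra.
def _example_invalues(model, names):
    return list(model.objects.filter(name__invalues=names))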
def test_invalues_lookup_integer_field():
model = get_fake_model({"number": models.IntegerField()})
[a, b] = model.objects.bulk_create(
[
model(number=1),
model(number=2),
]
)
results = list(
model.objects.filter(number__invalues=[a.number, b.number, 3])
)
assert results == [a, b]
def test_invalues_lookup_uuid_field():
model = get_fake_model({"value": models.UUIDField()})
[a, b] = model.objects.bulk_create(
[
model(value="f8fe0431-29f8-4c4c-839c-8a6bf29f95d5"),
model(value="2fb0f45b-afaf-4e24-8637-2d81ded997bb"),
]
)
results = list(
model.objects.filter(
value__invalues=[
a.value,
b.value,
"d7a8df83-f3f8-487b-b982-547c8f22b0bb",
]
)
)
assert results == [a, b]
def test_invalues_lookup_related_field():
model_1 = get_fake_model({"name": models.TextField()})
model_2 = get_fake_model(
{"relation": models.ForeignKey(model_1, on_delete=models.CASCADE)}
)
[a_relation, b_relation] = model_1.objects.bulk_create(
[
model_1(name="a"),
model_1(name="b"),
]
)
[a, b] = model_2.objects.bulk_create(
[model_2(relation=a_relation), model_2(relation=b_relation)]
)
results = list(
model_2.objects.filter(relation__invalues=[a_relation, b_relation])
)
assert results == [a, b]
def test_invalues_lookup_related_field_subquery():
model_1 = get_fake_model({"name": models.TextField()})
model_2 = get_fake_model(
{"relation": models.ForeignKey(model_1, on_delete=models.CASCADE)}
)
[a_relation, b_relation] = model_1.objects.bulk_create(
[
model_1(name="a"),
model_1(name="b"),
]
)
[a, b] = model_2.objects.bulk_create(
[model_2(relation=a_relation), model_2(relation=b_relation)]
)
results = list(
model_2.objects.filter(
relation__invalues=model_1.objects.all().values_list(
"id", flat=True
)
)
)
assert results == [a, b]
django-postgres-extra-2.0.9/tests/test_make_migrations.py 0000664 0000000 0000000 00000020566 14634267343 0023722 0 ustar 00root root 0000000 0000000 import django
import pytest
from django.apps import apps
from django.db import models
from django.db.migrations import AddField, AlterField, RemoveField
from django.db.migrations.state import ProjectState
from psqlextra.backend.migrations import operations, postgres_patched_migrations
from psqlextra.models import (
PostgresMaterializedViewModel,
PostgresPartitionedModel,
PostgresViewModel,
)
from psqlextra.types import PostgresPartitioningMethod
from .fake_model import (
define_fake_materialized_view_model,
define_fake_model,
define_fake_partitioned_model,
define_fake_view_model,
get_fake_model,
)
from .migrations import apply_migration, make_migration
@pytest.mark.parametrize(
"model_config",
[
dict(
fields={"category": models.TextField()},
partitioning_options=dict(
method=PostgresPartitioningMethod.LIST, key="category"
),
),
dict(
fields={"timestamp": models.DateTimeField()},
partitioning_options=dict(
method=PostgresPartitioningMethod.RANGE, key="timestamp"
),
),
dict(
fields={"artist_id": models.IntegerField()},
partitioning_options=dict(
method=PostgresPartitioningMethod.HASH, key="artist_id"
),
),
],
)
@postgres_patched_migrations()
def test_make_migration_create_partitioned_model(fake_app, model_config):
"""Tests whether the right operations are generated when creating a new
partitioned model."""
model = define_fake_partitioned_model(
**model_config, meta_options=dict(app_label=fake_app.name)
)
migration = make_migration(fake_app.name)
ops = migration.operations
method = model_config["partitioning_options"]["method"]
if method == PostgresPartitioningMethod.HASH:
# should have one operation to create the partitioned model
# and no default partition
assert len(ops) == 1
assert isinstance(ops[0], operations.PostgresCreatePartitionedModel)
else:
# should have one operation to create the partitioned model
# and one more to add a default partition
assert len(ops) == 2
assert isinstance(ops[0], operations.PostgresCreatePartitionedModel)
assert isinstance(ops[1], operations.PostgresAddDefaultPartition)
# make sure the default partition is named "default"
assert ops[1].model_name == model.__name__
assert ops[1].name == "default"
# make sure the base is set correctly
assert len(ops[0].bases) == 1
assert issubclass(ops[0].bases[0], PostgresPartitionedModel)
# make sure the partitioning options got copied correctly
assert ops[0].partitioning_options == model_config["partitioning_options"]
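# Illustrative sketch (not a test): the autodetector output verified above
# corresponds to a generated migration file of roughly this shape (model
# and field names are placeholders):
#
#     class Migration(migrations.Migration):
#         operations = [
#             operations.PostgresCreatePartitionedModel(
#                 "mymodel",
#                 fields=[("timestamp", models.DateTimeField())],
#                 bases=(PostgresPartitionedModel,),
#                 partitioning_options={
#                     "method": PostgresPartitioningMethod.RANGE,
#                     "key": ["timestamp"],
#                 },
#             ),
#             operations.PostgresAddDefaultPartition(
#                 model_name="mymodel", name="default"
#             ),
#         ]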
@postgres_patched_migrations()
def test_make_migration_create_view_model(fake_app):
"""Tests whether the right operations are generated when creating a new
view model."""
underlying_model = get_fake_model({"name": models.TextField()})
model = define_fake_view_model(
fields={"name": models.TextField()},
view_options=dict(query=underlying_model.objects.all()),
meta_options=dict(app_label=fake_app.name),
)
migration = make_migration(model._meta.app_label)
ops = migration.operations
assert len(ops) == 1
assert isinstance(ops[0], operations.PostgresCreateViewModel)
# make sure the base is set correctly
assert len(ops[0].bases) == 1
assert issubclass(ops[0].bases[0], PostgresViewModel)
# make sure the view options got copied correctly
assert ops[0].view_options == model._view_meta.original_attrs
@postgres_patched_migrations()
def test_make_migration_create_materialized_view_model(fake_app):
"""Tests whether the right operations are generated when creating a new
materialized view model."""
underlying_model = get_fake_model({"name": models.TextField()})
model = define_fake_materialized_view_model(
fields={"name": models.TextField()},
view_options=dict(query=underlying_model.objects.all()),
meta_options=dict(app_label=fake_app.name),
)
migration = make_migration(model._meta.app_label)
ops = migration.operations
assert len(ops) == 1
assert isinstance(ops[0], operations.PostgresCreateMaterializedViewModel)
# make sure the base is set correctly
assert len(ops[0].bases) == 1
assert issubclass(ops[0].bases[0], PostgresMaterializedViewModel)
# make sure the view options got copied correctly
assert ops[0].view_options == model._view_meta.original_attrs
@pytest.mark.parametrize(
"define_view_model",
[define_fake_materialized_view_model, define_fake_view_model],
)
@postgres_patched_migrations()
def test_make_migration_field_operations_view_models(
fake_app, define_view_model
):
"""Tests whether field operations against a (materialized) view are always
wrapped in the :see:ApplyState operation so that they don't actually get
    applied to the database, yet Django still applies them to the project state.
This is important because you can't actually alter/add or delete
fields from a (materialized) view.
"""
underlying_model = get_fake_model(
{"first_name": models.TextField(), "last_name": models.TextField()},
meta_options=dict(app_label=fake_app.name),
)
model = define_view_model(
fields={"first_name": models.TextField()},
view_options=dict(query=underlying_model.objects.all()),
meta_options=dict(app_label=fake_app.name),
)
state_1 = ProjectState.from_apps(apps)
migration = make_migration(model._meta.app_label)
apply_migration(migration.operations, state_1)
# add a field to the materialized view
last_name_field = models.TextField(null=True)
last_name_field.contribute_to_class(model, "last_name")
migration = make_migration(model._meta.app_label, from_state=state_1)
assert len(migration.operations) == 1
assert isinstance(migration.operations[0], operations.ApplyState)
assert isinstance(migration.operations[0].state_operation, AddField)
# alter the field on the materialized view
state_2 = ProjectState.from_apps(apps)
last_name_field = models.TextField(null=True, blank=True)
last_name_field.contribute_to_class(model, "last_name")
migration = make_migration(model._meta.app_label, from_state=state_2)
assert len(migration.operations) == 1
assert isinstance(migration.operations[0], operations.ApplyState)
assert isinstance(migration.operations[0].state_operation, AlterField)
# remove the field from the materialized view
migration = make_migration(
model._meta.app_label,
from_state=ProjectState.from_apps(apps),
to_state=state_1,
)
assert isinstance(migration.operations[0], operations.ApplyState)
assert isinstance(migration.operations[0].state_operation, RemoveField)
@pytest.mark.skipif(
django.VERSION < (2, 2),
reason="Django < 2.2 doesn't implement left-to-right migration optimizations",
)
@pytest.mark.parametrize("method", PostgresPartitioningMethod.all())
@postgres_patched_migrations()
def test_autodetect_fk_issue(fake_app, method):
"""Test whether Django can perform ForeignKey optimization.
Fixes https://github.com/SectorLabs/django-postgres-extra/issues/123 for Django >= 2.2
"""
meta_options = {"app_label": fake_app.name}
partitioning_options = {"method": method, "key": "artist_id"}
artist_model_fields = {"name": models.TextField()}
Artist = define_fake_model(artist_model_fields, meta_options=meta_options)
from_state = ProjectState.from_apps(apps)
album_model_fields = {
"name": models.TextField(),
"artist": models.ForeignKey(
to=Artist.__name__, on_delete=models.CASCADE
),
}
define_fake_partitioned_model(
album_model_fields,
partitioning_options=partitioning_options,
meta_options=meta_options,
)
migration = make_migration(fake_app.name, from_state=from_state)
ops = migration.operations
if method == PostgresPartitioningMethod.HASH:
assert len(ops) == 1
assert isinstance(ops[0], operations.PostgresCreatePartitionedModel)
else:
assert len(ops) == 2
assert isinstance(ops[0], operations.PostgresCreatePartitionedModel)
assert isinstance(ops[1], operations.PostgresAddDefaultPartition)
django-postgres-extra-2.0.9/tests/test_management_command_partition.py 0000664 0000000 0000000 00000013464 14634267343 0026453 0 ustar 00root root 0000000 0000000 import argparse
from unittest.mock import MagicMock, create_autospec, patch
import pytest
from django.db import models
from django.test import override_settings
from psqlextra.backend.introspection import (
PostgresIntrospectedPartitionTable,
PostgresIntrospectedPartitonedTable,
)
from psqlextra.management.commands.pgpartition import Command
from psqlextra.partitioning import PostgresPartitioningManager
from psqlextra.partitioning.config import PostgresPartitioningConfig
from psqlextra.partitioning.partition import PostgresPartition
from psqlextra.partitioning.strategy import PostgresPartitioningStrategy
from .fake_model import define_fake_partitioned_model
@pytest.fixture
def fake_strategy():
strategy = create_autospec(PostgresPartitioningStrategy)
strategy.createable_partition = create_autospec(PostgresPartition)
strategy.createable_partition.name = MagicMock(return_value="tobecreated")
strategy.to_create = MagicMock(return_value=[strategy.createable_partition])
strategy.deleteable_partition = create_autospec(PostgresPartition)
strategy.deleteable_partition.name = MagicMock(return_value="tobedeleted")
strategy.to_delete = MagicMock(return_value=[strategy.deleteable_partition])
return strategy
@pytest.fixture
def fake_model(fake_strategy):
model = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
# consistent model name so snapshot tests work
model.__name__ = "test"
# we have to trick the system into thinking the model/table
# actually exists with one partition (so we can simulate deletions)
deleteable_partition_name = fake_strategy.deleteable_partition.name()
mocked_partitioned_table = PostgresIntrospectedPartitonedTable(
name=model._meta.db_table,
method=model._partitioning_meta.method,
key=model._partitioning_meta.key,
partitions=[
PostgresIntrospectedPartitionTable(
name=deleteable_partition_name,
full_name=f"{model._meta.db_table}_{deleteable_partition_name}",
comment="psqlextra_auto_partitioned",
)
],
)
introspection_package = "psqlextra.backend.introspection"
introspection_class = f"{introspection_package}.PostgresIntrospection"
get_partitioned_table_path = f"{introspection_class}.get_partitioned_table"
with patch(get_partitioned_table_path) as mock:
mock.return_value = mocked_partitioned_table
yield model
@pytest.fixture
def fake_partitioning_manager(fake_model, fake_strategy):
manager = PostgresPartitioningManager(
[PostgresPartitioningConfig(fake_model, fake_strategy)]
)
with override_settings(PSQLEXTRA_PARTITIONING_MANAGER=manager):
yield manager
@pytest.fixture
def run(capsys):
def _run(*args):
parser = argparse.ArgumentParser()
command = Command()
command.add_arguments(parser)
command.handle(**vars(parser.parse_args(args)))
return capsys.readouterr()
return _run
@pytest.mark.parametrize("args", ["-d", "--dry"])
def test_management_command_partition_dry_run(
args, snapshot, run, fake_model, fake_partitioning_manager
):
"""Tests whether the --dry option actually makes it a dry run and does not
create/delete partitions."""
config = fake_partitioning_manager.find_config_for_model(fake_model)
snapshot.assert_match(run(args))
config.strategy.createable_partition.create.assert_not_called()
config.strategy.createable_partition.delete.assert_not_called()
config.strategy.deleteable_partition.create.assert_not_called()
config.strategy.deleteable_partition.delete.assert_not_called()
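# Illustrative sketch (not a test): outside the test harness, the options
# exercised in this module map to these invocations (flag spellings taken
# from the parametrized tests below):
#
#     python manage.py pgpartition --dry   # print the plan, change nothing
#     python manage.py pgpartition --yes   # apply without confirmation
#     python manage.py pgpartition         # show the plan, then prompt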
@pytest.mark.parametrize("args", ["-y", "--yes"])
def test_management_command_partition_auto_confirm(
args, snapshot, run, fake_model, fake_partitioning_manager
):
"""Tests whether the --yes option makes it not ask for confirmation before
creating/deleting partitions."""
config = fake_partitioning_manager.find_config_for_model(fake_model)
snapshot.assert_match(run(args))
config.strategy.createable_partition.create.assert_called_once()
config.strategy.createable_partition.delete.assert_not_called()
config.strategy.deleteable_partition.create.assert_not_called()
config.strategy.deleteable_partition.delete.assert_called_once()
@pytest.mark.parametrize("answer", ["y", "Y", "yes", "YES"])
def test_management_command_partition_confirm_yes(
answer, monkeypatch, snapshot, run, fake_model, fake_partitioning_manager
):
"""Tests whether the --yes option makes it not ask for confirmation before
creating/deleting partitions."""
config = fake_partitioning_manager.find_config_for_model(fake_model)
monkeypatch.setattr("builtins.input", lambda _: answer)
snapshot.assert_match(run())
config.strategy.createable_partition.create.assert_called_once()
config.strategy.createable_partition.delete.assert_not_called()
config.strategy.deleteable_partition.create.assert_not_called()
config.strategy.deleteable_partition.delete.assert_called_once()
@pytest.mark.parametrize("answer", ["n", "N", "no", "No", "NO"])
def test_management_command_partition_confirm_no(
answer, monkeypatch, snapshot, run, fake_model, fake_partitioning_manager
):
"""Tests whether the --yes option makes it not ask for confirmation before
creating/deleting partitions."""
config = fake_partitioning_manager.find_config_for_model(fake_model)
monkeypatch.setattr("builtins.input", lambda _: answer)
snapshot.assert_match(run())
config.strategy.createable_partition.create.assert_not_called()
config.strategy.createable_partition.delete.assert_not_called()
config.strategy.deleteable_partition.create.assert_not_called()
config.strategy.deleteable_partition.delete.assert_not_called()
django-postgres-extra-2.0.9/tests/test_manager.py 0000664 0000000 0000000 00000006007 14634267343 0022155 0 ustar 00root root 0000000 0000000 import pytest
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.test import override_settings
from psqlextra.manager import PostgresManager
from psqlextra.models import PostgresModel
from .fake_model import get_fake_model
@pytest.mark.parametrize(
"databases",
[
{"default": {"ENGINE": "psqlextra.backend"}},
{
"default": {"ENGINE": "django.db.backends.postgresql"},
"other": {"ENGINE": "psqlextra.backend"},
},
{
"default": {"ENGINE": "psqlextra.backend"},
"other": {"ENGINE": "psqlextra.backend"},
},
],
)
def test_manager_backend_set(databases):
"""Tests that creating a new instance of :see:PostgresManager succeseeds
without any errors if one or more databases are configured with
`psqlextra.backend` as its ENGINE."""
with override_settings(DATABASES=databases):
assert PostgresManager()
def test_manager_backend_not_set():
"""Tests whether creating a new instance of
:see:PostgresManager fails if no database
has `psqlextra.backend` configured
as its ENGINE."""
with override_settings(
DATABASES={"default": {"ENGINE": "django.db.backends.postgresql"}}
):
with pytest.raises(ImproperlyConfigured):
PostgresManager()
def test_manager_truncate():
"""Tests whether truncating a table works."""
model = get_fake_model({"name": models.CharField(max_length=255)})
model.objects.create(name="henk1")
model.objects.create(name="henk2")
assert model.objects.count() == 2
model.objects.truncate()
assert model.objects.count() == 0
@pytest.mark.django_db(transaction=True)
def test_manager_truncate_cascade():
"""Tests whether truncating a table with cascade works."""
model_1 = get_fake_model({"name": models.CharField(max_length=255)})
model_2 = get_fake_model(
{
"name": models.CharField(max_length=255),
"model_1": models.ForeignKey(
model_1, on_delete=models.CASCADE, null=True
),
}
)
obj_1 = model_1.objects.create(name="henk1")
model_2.objects.create(name="henk1", model_1_id=obj_1.id)
assert model_1.objects.count() == 1
assert model_2.objects.count() == 1
model_1.objects.truncate(cascade=True)
assert model_1.objects.count() == 0
assert model_2.objects.count() == 0
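# Illustrative sketch (not a test): truncate() presumably issues a
# statement along these lines, with CASCADE appended when cascade=True:
#
#     TRUNCATE TABLE "mytable" CASCADE;
#
# CASCADE extends the truncation to tables holding foreign keys that
# reference the truncated table, which is why model_2's rows are gone too.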
def test_manager_truncate_quote_name():
"""Tests whether the truncate statement properly quotes the table name."""
model = get_fake_model(
{"name": models.CharField(max_length=255)},
PostgresModel,
{
# without quoting, table names are always
# lower-case, using a capital case table
# name requires quoting to work
"db_table": "MyTable"
},
)
model.objects.create(name="henk1")
model.objects.create(name="henk2")
assert model.objects.count() == 2
model.objects.truncate()
assert model.objects.count() == 0
django-postgres-extra-2.0.9/tests/test_manager_context.py 0000664 0000000 0000000 00000001177 14634267343 0023724 0 ustar 00root root 0000000 0000000 from django.db import models
from psqlextra.util import postgres_manager
from .fake_model import get_fake_model
def test_manager_context():
"""Tests whether the :see:postgres_manager context manager can be used to
get access to :see:PostgresManager on a model that does not use it directly
or inherits from :see:PostgresModel."""
model = get_fake_model(
{"myfield": models.CharField(max_length=255, unique=True)}, models.Model
)
with postgres_manager(model) as manager:
manager.upsert(conflict_target=["myfield"], fields=dict(myfield="beer"))
assert manager.first().myfield == "beer"
django-postgres-extra-2.0.9/tests/test_materialized_view_model.py 0000664 0000000 0000000 00000002062 14634267343 0025424 0 ustar 00root root 0000000 0000000 from django.db import connection, models
from psqlextra.backend.schema import PostgresSchemaEditor
from .fake_model import define_fake_materialized_view_model, get_fake_model
def test_materialized_view_model_refresh():
"""Tests whether a materialized view can be refreshed."""
underlying_model = get_fake_model({"name": models.TextField()})
model = define_fake_materialized_view_model(
{"name": models.TextField()},
{"query": underlying_model.objects.filter(name="test1")},
)
underlying_model.objects.create(name="test1")
underlying_model.objects.create(name="test2")
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_materialized_view_model(model)
# materialized view should only show records name="test"1
objs = list(model.objects.all())
assert len(objs) == 1
assert objs[0].name == "test1"
# create another record with "test1" and refresh
underlying_model.objects.create(name="test1")
model.refresh()
objs = list(model.objects.all())
assert len(objs) == 2
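# Illustrative sketch (not a test): refresh() presumably wraps Postgres'
# REFRESH MATERIALIZED VIEW. Assuming the method forwards a `concurrently`
# flag (which requires a unique index on the view), readers could keep
# querying the old data while the refresh runs:
#
#     model.refresh(concurrently=True)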
django-postgres-extra-2.0.9/tests/test_migration_operations.py 0000664 0000000 0000000 00000020326 14634267343 0024777 0 ustar 00root root 0000000 0000000 import pytest
from django.apps import apps
from django.db import connection, migrations, models
from psqlextra.backend.migrations import operations
from psqlextra.manager import PostgresManager
from psqlextra.models import PostgresPartitionedModel
from psqlextra.types import PostgresPartitioningMethod
from . import db_introspection
from .migrations import apply_migration
def _partitioned_table_exists(op: operations.PostgresCreatePartitionedModel):
"""Checks whether the specified partitioned model operation was succesfully
applied."""
model_table_name = f"tests_{op.name}"
table = db_introspection.get_partitioned_table(model_table_name)
if not table:
return False
part_options = op.partitioning_options
return (
table.method == part_options["method"]
and table.key == part_options["key"]
)
def _partition_exists(model_op, op):
"""Checks whether the parttitioned model and partition operations were
succesfully applied."""
model_table_name = f"tests_{model_op.name}"
table = db_introspection.get_partitioned_table(model_table_name)
if not table:
return False
partition = next(
(
partition
for partition in table.partitions
if partition.full_name == f"{model_table_name}_{op.name}"
),
None,
)
return bool(partition)
@pytest.fixture
def create_model():
"""Factory for creating a :see:PostgresCreatePartitionedModel operation."""
def _create_model(method):
fields = [("name", models.TextField())]
key = []
if method == PostgresPartitioningMethod.RANGE:
key.append("timestamp")
fields.append(("timestamp", models.DateTimeField()))
elif method == PostgresPartitioningMethod.LIST:
key.append("category")
fields.append(("category", models.TextField()))
elif method == PostgresPartitioningMethod.HASH:
key.append("artist_id")
fields.append(("artist_id", models.IntegerField()))
else:
raise NotImplementedError
return operations.PostgresCreatePartitionedModel(
"test",
fields=fields,
bases=(PostgresPartitionedModel,),
managers=[("objects", PostgresManager())],
partitioning_options={"method": method, "key": key},
)
return _create_model
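# Illustrative sketch (not a test): applying the operation built by the
# factory above presumably results in DDL of roughly this shape, depending
# on the chosen partitioning method:
#
#     CREATE TABLE "tests_test" (...) PARTITION BY RANGE ("timestamp");
#     CREATE TABLE "tests_test" (...) PARTITION BY LIST ("category");
#     CREATE TABLE "tests_test" (...) PARTITION BY HASH ("artist_id");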
@pytest.mark.postgres_version(lt=110000)
@pytest.mark.parametrize("method", PostgresPartitioningMethod.all())
def test_migration_operations_create_partitioned_table(method, create_model):
"""Tests whether the see :PostgresCreatePartitionedModel operation works as
expected in a migration."""
create_operation = create_model(method)
state = migrations.state.ProjectState.from_apps(apps)
# migrate forwards, is the table there?
apply_migration([create_operation], state)
assert _partitioned_table_exists(create_operation)
# migrate backwards, is the table there?
apply_migration([create_operation], state=state, backwards=True)
assert not _partitioned_table_exists(create_operation)
@pytest.mark.postgres_version(lt=110000)
@pytest.mark.parametrize("method", PostgresPartitioningMethod.all())
def test_migration_operations_delete_partitioned_table(method, create_model):
"""Tests whether the see :PostgresDeletePartitionedModel operation works as
expected in a migration."""
create_operation = create_model(method)
delete_operation = operations.PostgresDeletePartitionedModel(
create_operation.name
)
state = migrations.state.ProjectState.from_apps(apps)
# migrate forwards, create model
apply_migration([create_operation], state)
assert _partitioned_table_exists(create_operation)
# record intermediate state, the state we'll
# migrate backwards to
intm_state = state.clone()
# migrate forwards, delete model
apply_migration([delete_operation], state)
assert not _partitioned_table_exists(create_operation)
# migrate backwards, undelete model
delete_operation.database_backwards(
"tests", connection.schema_editor(), state, intm_state
)
assert _partitioned_table_exists(create_operation)
@pytest.mark.postgres_version(lt=110000)
@pytest.mark.parametrize(
"method,add_partition_operation",
[
(
PostgresPartitioningMethod.LIST,
operations.PostgresAddDefaultPartition(
model_name="test", name="pt1"
),
),
(
PostgresPartitioningMethod.RANGE,
operations.PostgresAddRangePartition(
model_name="test",
name="pt1",
from_values="2019-01-01",
to_values="2019-02-01",
),
),
(
PostgresPartitioningMethod.LIST,
operations.PostgresAddListPartition(
model_name="test", name="pt1", values=["car", "boat"]
),
),
(
PostgresPartitioningMethod.HASH,
operations.PostgresAddHashPartition(
model_name="test", name="pt1", modulus=3, remainder=0
),
),
],
)
def test_migration_operations_add_partition(
method, add_partition_operation, create_model
):
"""Tests whether adding partitions and then rolling them back works as
expected."""
create_operation = create_model(method)
state = migrations.state.ProjectState.from_apps(apps)
# migrate forwards
apply_migration([create_operation, add_partition_operation], state)
assert _partition_exists(create_operation, add_partition_operation)
# rollback
apply_migration(
[create_operation, add_partition_operation], state, backwards=True
)
assert not _partition_exists(create_operation, add_partition_operation)
@pytest.mark.postgres_version(lt=110000)
@pytest.mark.parametrize(
"method,add_partition_operation,delete_partition_operation",
[
(
PostgresPartitioningMethod.LIST,
operations.PostgresAddDefaultPartition(
model_name="test", name="pt1"
),
operations.PostgresDeleteDefaultPartition(
model_name="test", name="pt1"
),
),
(
PostgresPartitioningMethod.RANGE,
operations.PostgresAddRangePartition(
model_name="test",
name="pt1",
from_values="2019-01-01",
to_values="2019-02-01",
),
operations.PostgresDeleteRangePartition(
model_name="test", name="pt1"
),
),
(
PostgresPartitioningMethod.LIST,
operations.PostgresAddListPartition(
model_name="test", name="pt1", values=["car", "boat"]
),
operations.PostgresDeleteListPartition(
model_name="test", name="pt1"
),
),
(
PostgresPartitioningMethod.HASH,
operations.PostgresAddHashPartition(
model_name="test", name="pt1", modulus=3, remainder=0
),
operations.PostgresDeleteHashPartition(
model_name="test", name="pt1"
),
),
],
)
def test_migration_operations_add_delete_partition(
method, add_partition_operation, delete_partition_operation, create_model
):
"""Tests whether adding partitions and then removing them works as
expected."""
create_operation = create_model(method)
state = migrations.state.ProjectState.from_apps(apps)
# migrate forwards, create model and partition
apply_migration([create_operation, add_partition_operation], state)
assert _partition_exists(create_operation, add_partition_operation)
# record intermediate state, the state we'll
# migrate backwards to
intm_state = state.clone()
# migrate forwards, delete the partition
apply_migration([delete_partition_operation], state)
assert not _partition_exists(create_operation, add_partition_operation)
# migrate backwards, undelete the partition
delete_partition_operation.database_backwards(
"tests", connection.schema_editor(), state, intm_state
)
assert _partition_exists(create_operation, add_partition_operation)
django-postgres-extra-2.0.9/tests/test_on_conflict.py 0000664 0000000 0000000 00000032417 14634267343 0023044 0 ustar 00root root 0000000 0000000 import django
import pytest
from django.core.exceptions import SuspiciousOperation
from django.db import connection, models
from django.test.utils import CaptureQueriesContext, override_settings
from django.utils import timezone
from psqlextra.fields import HStoreField
from psqlextra.models import PostgresModel
from psqlextra.query import ConflictAction
from .fake_model import get_fake_model
@pytest.mark.parametrize("conflict_action", ConflictAction.all())
@override_settings(POSTGRES_EXTRA_ANNOTATE_SQL=True)
def test_on_conflict(conflict_action):
"""Tests whether simple inserts work correctly."""
model = get_fake_model(
{
"title": HStoreField(uniqueness=["key1"]),
"cookies": models.CharField(max_length=255, null=True),
}
)
with CaptureQueriesContext(connection) as queries:
obj = model.objects.on_conflict(
[("title", "key1")], conflict_action
).insert_and_get(title={"key1": "beer"}, cookies="cheers")
assert " test_on_conflict " in queries[0]["sql"]
model.objects.on_conflict(
[("title", "key1")], conflict_action
).insert_and_get(title={"key1": "beer"})
assert model.objects.count() == 1
# make sure the data is actually in the db
obj.refresh_from_db()
assert obj.title["key1"] == "beer"
assert obj.cookies == "cheers"
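# Illustrative sketch (not a test): insert_and_get above is presumably
# compiled into a single statement of roughly this shape, so the insert,
# the conflict resolution and the fetch all happen in one round trip:
#
#     INSERT INTO "..." ("title", "cookies") VALUES (%s, %s)
#     ON CONFLICT (("title" -> 'key1')) DO UPDATE
#         SET "cookies" = EXCLUDED."cookies"
#     RETURNING *;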
def test_on_conflict_auto_fields():
"""Asserts that fields that automatically add something to the model
automatically still work properly when upserting."""
model = get_fake_model(
{
"title": models.CharField(max_length=255, unique=True),
"date_added": models.DateTimeField(auto_now_add=True),
"date_updated": models.DateTimeField(auto_now=True),
}
)
obj1 = model.objects.on_conflict(
["title"], ConflictAction.UPDATE
).insert_and_get(title="beer")
obj2 = model.objects.on_conflict(
["title"], ConflictAction.UPDATE
).insert_and_get(title="beer")
obj2.refresh_from_db()
assert obj1.date_added
assert obj2.date_added
assert obj1.date_updated
assert obj2.date_updated
assert obj1.id == obj2.id
assert obj1.title == obj2.title
assert obj1.date_added == obj2.date_added
assert obj1.date_updated != obj2.date_updated
@pytest.mark.parametrize("conflict_action", ConflictAction.all())
def test_on_conflict_foreign_key(conflict_action):
"""Asserts that models with foreign key relationships can safely be
inserted."""
model1 = get_fake_model(
{"name": models.CharField(max_length=255, unique=True)}
)
model2 = get_fake_model(
{
"name": models.CharField(max_length=255, unique=True),
"model1": models.ForeignKey(model1, on_delete=models.CASCADE),
}
)
model1_row = model1.objects.on_conflict(
["name"], conflict_action
).insert_and_get(name="item1")
# insert by id, that should work
model2_row = model2.objects.on_conflict(
["name"], conflict_action
).insert_and_get(name="item1", model1_id=model1_row.id)
model2_row = model2.objects.get(name="item1")
assert model2_row.name == "item1"
assert model2_row.model1.id == model1_row.id
# insert by object, that should also work
model2_row = model2.objects.on_conflict(
["name"], conflict_action
).insert_and_get(name="item2", model1=model1_row)
model2_row.refresh_from_db()
assert model2_row.name == "item2"
assert model2_row.model1.id == model1_row.id
def test_on_conflict_partial_get():
"""Asserts that when doing a insert_and_get with only part of the columns
on the model, all fields are returned properly."""
model = get_fake_model(
{
"title": models.CharField(max_length=140, unique=True),
"purpose": models.CharField(max_length=10, null=True),
"created_at": models.DateTimeField(auto_now_add=True),
"updated_at": models.DateTimeField(auto_now=True),
}
)
obj1 = model.objects.on_conflict(
["title"], ConflictAction.UPDATE
).insert_and_get(title="beer", purpose="for-sale")
obj2 = model.objects.on_conflict(
["title"], ConflictAction.UPDATE
).insert_and_get(title="beer")
obj2.refresh_from_db()
assert obj2.title == obj1.title
assert obj2.purpose == obj1.purpose
assert obj2.created_at == obj2.created_at
assert obj1.updated_at != obj2.updated_at
@pytest.mark.parametrize("conflict_action", ConflictAction.all())
def test_on_conflict_invalid_target(conflict_action):
"""Tests whether specifying a invalid value for `conflict_target` raises an
error."""
model = get_fake_model(
{"title": models.CharField(max_length=140, unique=True)}
)
with pytest.raises(SuspiciousOperation):
(
model.objects.on_conflict(["cookie"], conflict_action).insert(
title="beer"
)
)
with pytest.raises(SuspiciousOperation):
(
model.objects.on_conflict([None], conflict_action).insert(
title="beer"
)
)
@pytest.mark.parametrize("conflict_action", ConflictAction.all())
def test_on_conflict_outdated_model(conflict_action):
"""Tests whether insert properly handles fields that are in the database
but not on the model.
This happens if somebody manually modified the database
to add a column that is not present in the model.
This should be handled properly by ignoring the column
returned by the database.
"""
model = get_fake_model(
{"title": models.CharField(max_length=140, unique=True)}
)
    # manually create the column that is not on the model
with connection.cursor() as cursor:
cursor.execute(
(
"ALTER TABLE {table} " "ADD COLUMN beer character varying(50);"
).format(table=model._meta.db_table)
)
# without proper handling, this would fail with a TypeError
(
model.objects.on_conflict(["title"], conflict_action).insert_and_get(
title="beer"
)
)
@pytest.mark.parametrize("conflict_action", ConflictAction.all())
def test_on_conflict_custom_column_names(conflict_action):
"""Asserts that models with custom column names (models where the column
and field name are different) work properly."""
model = get_fake_model(
{
"title": models.CharField(
max_length=140, unique=True, db_column="beer"
),
"description": models.CharField(max_length=255, db_column="desc"),
}
)
(
model.objects.on_conflict(["title"], conflict_action).insert(
title="yeey", description="great thing"
)
)
def test_on_conflict_unique_together():
"""Asserts that inserts on models with a unique_together works properly."""
model = get_fake_model(
{
"first_name": models.CharField(max_length=140),
"last_name": models.CharField(max_length=255),
},
PostgresModel,
{"unique_together": ("first_name", "last_name")},
)
id1 = model.objects.on_conflict(
["first_name", "last_name"], ConflictAction.UPDATE
).insert(first_name="swen", last_name="kooij")
id2 = model.objects.on_conflict(
["first_name", "last_name"], ConflictAction.UPDATE
).insert(first_name="swen", last_name="kooij")
assert id1 == id2
def test_on_conflict_unique_together_fk():
"""Asserts that inserts on models with a unique_together and a foreign key
relationship works properly."""
model = get_fake_model({"name": models.CharField(max_length=140)})
model2 = get_fake_model(
{
"model1": models.ForeignKey(model, on_delete=models.CASCADE),
"model2": models.ForeignKey(model, on_delete=models.CASCADE),
},
PostgresModel,
{"unique_together": ("model1", "model2")},
)
id1 = model.objects.create(name="one").id
id2 = model.objects.create(name="two").id
assert id1 != id2
id3 = model2.objects.on_conflict(
["model1_id", "model2_id"], ConflictAction.UPDATE
).insert(model1_id=id1, model2_id=id2)
id4 = model2.objects.on_conflict(
["model1_id", "model2_id"], ConflictAction.UPDATE
).insert(model1_id=id1, model2_id=id2)
assert id3 == id4
def test_on_conflict_pk_conflict_target():
"""Tests whether `on_conflict` properly accepts the 'pk' as a conflict
target, which should resolve into the primary key of a model."""
model = get_fake_model({"name": models.CharField(max_length=255)})
obj1 = model.objects.on_conflict(
["pk"], ConflictAction.UPDATE
).insert_and_get(pk=0, name="beer")
obj2 = model.objects.on_conflict(
["pk"], ConflictAction.UPDATE
).insert_and_get(pk=0, name="beer")
assert obj1.name == "beer"
assert obj2.name == "beer"
assert obj1.id == obj2.id
assert obj1.id == 0
assert obj2.id == 0
def test_on_conflict_default_value():
"""Tests whether setting a default for a field and not specifying it
explicitely when upserting properly causes the default value to be used."""
model = get_fake_model(
{"title": models.CharField(max_length=255, default="great")}
)
obj1 = model.objects.on_conflict(
["id"], ConflictAction.UPDATE
).insert_and_get(id=0)
assert obj1.title == "great"
obj2 = model.objects.on_conflict(
["id"], ConflictAction.UPDATE
).insert_and_get(id=0)
assert obj1.id == obj2.id
assert obj2.title == "great"
def test_on_conflict_default_value_no_overwrite():
"""Tests whether setting a default for a field, inserting a non-default
value and then trying to update it without specifying that field doesn't
result in it being overwritten."""
model = get_fake_model(
{"title": models.CharField(max_length=255, default="great")}
)
obj1 = model.objects.on_conflict(
["id"], ConflictAction.UPDATE
).insert_and_get(id=0, title="mytitle")
assert obj1.title == "mytitle"
obj2 = model.objects.on_conflict(
["id"], ConflictAction.UPDATE
).insert_and_get(id=0)
assert obj1.id == obj2.id
assert obj2.title == "mytitle"
def test_on_conflict_bulk():
"""Tests whether using `on_conflict` with `insert_bulk` properly works."""
model = get_fake_model(
{"title": models.CharField(max_length=255, unique=True)}
)
rows = [
dict(title="this is my title"),
dict(title="this is another title"),
dict(title="and another one"),
]
(
model.objects.on_conflict(["title"], ConflictAction.UPDATE).bulk_insert(
rows
)
)
assert model.objects.all().count() == len(rows)
for index, obj in enumerate(list(model.objects.all())):
assert obj.title == rows[index]["title"]
def test_bulk_return():
"""Tests if primary keys are properly returned from 'bulk_insert'."""
model = get_fake_model(
{
"id": models.BigAutoField(primary_key=True),
"name": models.CharField(max_length=255, unique=True),
}
)
rows = [dict(name="John Smith"), dict(name="Jane Doe")]
objs = model.objects.on_conflict(
["name"], ConflictAction.UPDATE
).bulk_insert(rows)
for index, obj in enumerate(objs, 1):
assert obj["id"] == index
# Add objects again, update should return the same ids
# as we're just updating.
objs = model.objects.on_conflict(
["name"], ConflictAction.UPDATE
).bulk_insert(rows)
for index, obj in enumerate(objs, 1):
assert obj["id"] == index
@pytest.mark.parametrize("conflict_action", ConflictAction.all())
def test_bulk_return_models(conflict_action):
"""Tests whether models are returned instead of dictionaries when
specifying the return_model=True argument."""
model = get_fake_model(
{
"id": models.BigAutoField(primary_key=True),
"name": models.CharField(max_length=255, unique=True),
}
)
rows = [dict(name="John Smith"), dict(name="Jane Doe")]
objs = model.objects.on_conflict(["name"], conflict_action).bulk_insert(
rows, return_model=True
)
for index, obj in enumerate(objs, 1):
assert isinstance(obj, model)
assert obj.id == index
@pytest.mark.skipif(
django.VERSION < (3, 1),
reason="Django < 3.1 doesn't implement JSONField",
)
@pytest.mark.parametrize("conflict_action", ConflictAction.all())
def test_bulk_return_models_converters(conflict_action):
"""Tests whether converters are properly applied when using
return_model=True."""
model = get_fake_model(
{
"name": models.TextField(unique=True),
"data": models.JSONField(unique=True),
"updated_at": models.DateTimeField(),
}
)
now = timezone.now()
rows = [
dict(name="John Smith", data={"a": 1}, updated_at=now.isoformat()),
dict(name="Jane Doe", data={"b": 2}, updated_at=now),
]
objs = model.objects.on_conflict(["name"], conflict_action).bulk_insert(
rows, return_model=True
)
for index, (obj, row) in enumerate(zip(objs, rows), 1):
assert isinstance(obj, model)
assert obj.id == index
assert obj.name == row["name"]
assert obj.data == row["data"]
assert obj.updated_at == now
django-postgres-extra-2.0.9/tests/test_on_conflict_nothing.py 0000664 0000000 0000000 00000012473 14634267343 0024572 0 ustar 00root root 0000000 0000000 import pytest
from django.db import models
from psqlextra.fields import HStoreField
from psqlextra.query import ConflictAction
from .fake_model import get_fake_model
def test_on_conflict_nothing():
"""Tests whether simple insert NOTHING works correctly."""
model = get_fake_model(
{
"title": HStoreField(uniqueness=["key1"]),
"cookies": models.CharField(max_length=255, null=True),
}
)
# row does not conflict, new row should be created
obj1 = model.objects.on_conflict(
[("title", "key1")], ConflictAction.NOTHING
).insert_and_get(title={"key1": "beer"}, cookies="cheers")
obj1.refresh_from_db()
assert obj1.title["key1"] == "beer"
assert obj1.cookies == "cheers"
# row conflicts, no new row should be created
obj2 = model.objects.on_conflict(
[("title", "key1")], ConflictAction.NOTHING
).insert_and_get(title={"key1": "beer"}, cookies="choco")
assert not obj2
# assert that the 'cookies' field didn't change
obj1.refresh_from_db()
assert obj1.title["key1"] == "beer"
assert obj1.cookies == "cheers"
assert model.objects.count() == 1
def test_on_conflict_nothing_foreign_primary_key():
"""Tests whether simple insert NOTHING works correctly when the primary key
of a field is a foreign key with a custom name."""
referenced_model = get_fake_model({})
model = get_fake_model(
{
"parent": models.OneToOneField(
referenced_model, primary_key=True, on_delete=models.CASCADE
),
"cookies": models.CharField(max_length=255),
}
)
referenced_obj = referenced_model.objects.create()
# row does not conflict, new row should be created
obj1 = model.objects.on_conflict(
["parent_id"], ConflictAction.NOTHING
).insert_and_get(parent_id=referenced_obj.pk, cookies="cheers")
obj1.refresh_from_db()
assert obj1.parent == referenced_obj
assert obj1.cookies == "cheers"
# row conflicts, no new row should be created
obj2 = model.objects.on_conflict(
["parent_id"], ConflictAction.NOTHING
).insert_and_get(parent_id=referenced_obj.pk, cookies="choco")
assert not obj2
obj1.refresh_from_db()
assert obj1.cookies == "cheers"
assert model.objects.count() == 1
def test_on_conflict_nothing_foreign_key_by_object():
"""Tests whether simple insert NOTHING works correctly when the potentially
conflicting field is a foreign key specified as an object."""
other_model = get_fake_model({})
model = get_fake_model(
{
"other": models.OneToOneField(
other_model, on_delete=models.CASCADE
),
"data": models.CharField(max_length=255),
}
)
other_obj = other_model.objects.create()
# row does not conflict, new row should be created
obj1 = model.objects.on_conflict(
["other"], ConflictAction.NOTHING
).insert_and_get(other=other_obj, data="some data")
assert obj1.other == other_obj
assert obj1.data == "some data"
obj1.refresh_from_db()
assert obj1.other == other_obj
assert obj1.data == "some data"
with pytest.raises(ValueError):
(
model.objects.on_conflict(
["other"], ConflictAction.NOTHING
).insert_and_get(other=obj1)
)
# row conflicts, no new row should be created
obj2 = model.objects.on_conflict(
["other"], ConflictAction.NOTHING
).insert_and_get(other=other_obj, data="different data")
assert not obj2
obj1.refresh_from_db()
assert model.objects.count() == 1
assert obj1.other == other_obj
assert obj1.data == "some data"
def test_on_conflict_nothing_foreign_key_by_id():
"""Tests whether simple insert NOTHING works correctly when the potentially
conflicting field is a foreign key specified as an id."""
other_model = get_fake_model({})
model = get_fake_model(
{
"other": models.OneToOneField(
other_model, on_delete=models.CASCADE
),
"data": models.CharField(max_length=255),
}
)
other_obj = other_model.objects.create()
# row does not conflict, new row should be created
obj1 = model.objects.on_conflict(
["other_id"], ConflictAction.NOTHING
).insert_and_get(other_id=other_obj.pk, data="some data")
assert obj1.other == other_obj
assert obj1.data == "some data"
obj1.refresh_from_db()
assert obj1.other == other_obj
assert obj1.data == "some data"
# row conflicts, no new row should be created
obj2 = model.objects.on_conflict(
["other_id"], ConflictAction.NOTHING
).insert_and_get(other_id=other_obj.pk, data="different data")
assert not obj2
assert model.objects.count() == 1
obj1.refresh_from_db()
assert obj1.other == other_obj
assert obj1.data == "some data"
def test_on_conflict_nothing_duplicate_rows():
"""Tests whether duplicate rows are filtered out when doing a insert
NOTHING and no error is raised when the list of rows contains
duplicates."""
model = get_fake_model({"amount": models.IntegerField(unique=True)})
rows = [dict(amount=1), dict(amount=1)]
(
model.objects.on_conflict(
["amount"], ConflictAction.NOTHING
).bulk_insert(rows)
)
django-postgres-extra-2.0.9/tests/test_on_conflict_update.py 0000664 0000000 0000000 00000011052 14634267343 0024376 0 ustar 00root root 0000000 0000000 import django
import pytest
from django.db import models
from psqlextra.fields import HStoreField
from psqlextra.query import ConflictAction
from .fake_model import get_fake_model
def test_on_conflict_update():
"""Tests whether simple upserts works correctly."""
model = get_fake_model(
{
"title": HStoreField(uniqueness=["key1"]),
"cookies": models.CharField(max_length=255, null=True),
}
)
obj1 = model.objects.on_conflict(
[("title", "key1")], ConflictAction.UPDATE
).insert_and_get(title={"key1": "beer"}, cookies="cheers")
obj1.refresh_from_db()
assert obj1.title["key1"] == "beer"
assert obj1.cookies == "cheers"
obj2 = model.objects.on_conflict(
[("title", "key1")], ConflictAction.UPDATE
).insert_and_get(title={"key1": "beer"}, cookies="choco")
obj1.refresh_from_db()
obj2.refresh_from_db()
# assert both objects are the same
assert obj1.id == obj2.id
assert obj1.title["key1"] == "beer"
assert obj1.cookies == "choco"
assert obj2.title["key1"] == "beer"
assert obj2.cookies == "choco"
@pytest.mark.skipif(
django.VERSION < (2, 2),
reason="Django < 2.2 doesn't implement constraints",
)
def test_on_conflict_update_by_unique_constraint():
model = get_fake_model(
{
"title": models.CharField(max_length=255, null=True),
},
meta_options={
"constraints": [
models.UniqueConstraint(name="test_uniq", fields=["title"]),
],
},
)
constraint = next(
(
constraint
for constraint in model._meta.constraints
if constraint.name == "test_uniq"
)
)
model.objects.on_conflict(constraint, ConflictAction.UPDATE).insert_and_get(
title="title"
)
def test_on_conflict_update_foreign_key_by_object():
"""Tests whether simple upsert works correctly when the conflicting field
is a foreign key specified as an object."""
other_model = get_fake_model({})
model = get_fake_model(
{
"other": models.OneToOneField(
other_model, on_delete=models.CASCADE
),
"data": models.CharField(max_length=255),
}
)
other_obj = other_model.objects.create()
obj1 = model.objects.on_conflict(
["other"], ConflictAction.UPDATE
).insert_and_get(other=other_obj, data="some data")
assert obj1.other == other_obj
assert obj1.data == "some data"
obj1.refresh_from_db()
assert obj1.other == other_obj
assert obj1.data == "some data"
with pytest.raises(ValueError):
(
model.objects.on_conflict(
["other"], ConflictAction.UPDATE
).insert_and_get(other=obj1)
)
obj2 = model.objects.on_conflict(
["other"], ConflictAction.UPDATE
).insert_and_get(other=other_obj, data="different data")
assert obj2.other == other_obj
assert obj2.data == "different data"
obj1.refresh_from_db()
obj2.refresh_from_db()
# assert that the 'other' field didn't change
assert obj1.id == obj2.id
assert obj1.other == other_obj
assert obj2.other == other_obj
assert obj1.data == "different data"
assert obj2.data == "different data"
def test_on_conflict_update_foreign_key_by_id():
"""Tests whether simple upsert works correctly when the conflicting field
is a foreign key specified as an id."""
other_model = get_fake_model({})
model = get_fake_model(
{
"other": models.OneToOneField(
other_model, on_delete=models.CASCADE
),
"data": models.CharField(max_length=255),
}
)
other_obj = other_model.objects.create()
obj1 = model.objects.on_conflict(
["other_id"], ConflictAction.UPDATE
).insert_and_get(other_id=other_obj.pk, data="some data")
assert obj1.other == other_obj
assert obj1.data == "some data"
obj1.refresh_from_db()
assert obj1.other == other_obj
assert obj1.data == "some data"
obj2 = model.objects.on_conflict(
["other_id"], ConflictAction.UPDATE
).insert_and_get(other_id=other_obj.pk, data="different data")
assert obj2.other == other_obj
assert obj2.data == "different data"
obj1.refresh_from_db()
obj2.refresh_from_db()
# assert that the 'other' field didn't change
assert obj1.id == obj2.id
assert obj1.other == other_obj
assert obj2.other == other_obj
assert obj1.data == "different data"
assert obj2.data == "different data"
django-postgres-extra-2.0.9/tests/test_partitioned_model.py
from psqlextra.models import PostgresPartitionedModel
from psqlextra.types import PostgresPartitioningMethod
from .fake_model import define_fake_partitioned_model
def test_partitioned_model_abstract():
"""Tests whether :see:PostgresPartitionedModel is abstract."""
assert PostgresPartitionedModel._meta.abstract
def test_partitioning_model_options_meta():
"""Tests whether the `_partitioning_meta` attribute is available on the
class (created by the meta class) and not just creating when the model is
instantiated."""
assert PostgresPartitionedModel._partitioning_meta
def test_partitioned_model_default_options():
"""Tests whether the default partitioning options are set as expected on.
:see:PostgresPartitionedModel.
"""
model = define_fake_partitioned_model()
assert model._partitioning_meta.method == PostgresPartitioningMethod.RANGE
assert model._partitioning_meta.key == []
def test_partitioned_model_method_option():
"""Tests whether the `method` partitioning option is properly copied onto
the options object."""
model = define_fake_partitioned_model(
partitioning_options=dict(method=PostgresPartitioningMethod.LIST)
)
assert model._partitioning_meta.method == PostgresPartitioningMethod.LIST
def test_partitioned_model_method_option_none():
"""Tests whether setting the `method` partitioning option results in the
default being set."""
model = define_fake_partitioned_model(
partitioning_options=dict(method=None)
)
assert model._partitioning_meta.method == PostgresPartitioningMethod.RANGE
def test_partitioned_model_key_option():
"""Tests whether the `key` partitioning option is properly copied onto the
options object."""
model = define_fake_partitioned_model(
partitioning_options=dict(key=["timestamp"])
)
assert model._partitioning_meta.key == ["timestamp"]
def test_partitioned_model_key_option_none():
"""Tests whether setting the `key` partitioning option results in the
default being set."""
model = define_fake_partitioned_model(partitioning_options=dict(key=None))
assert model._partitioning_meta.key == []
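# For context: outside of tests, a partitioned model is declared with a
# PartitioningMeta inner class; a minimal sketch based on the documented
# API, with a hypothetical Measurement model:
#
#   class Measurement(PostgresPartitionedModel):
#       class PartitioningMeta:
#           method = PostgresPartitioningMethod.RANGE
#           key = ["timestamp"]
#
#       timestamp = models.DateTimeField()
#
# define_fake_partitioned_model() builds an equivalent model on the fly.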
django-postgres-extra-2.0.9/tests/test_partitioned_model_state.py
import uuid
import pytest
from django.apps import apps
from django.db import models
from psqlextra.backend.migrations.state import (
PostgresPartitionedModelState,
PostgresPartitionState,
)
from psqlextra.manager import PostgresManager
from psqlextra.models import PostgresPartitionedModel
from psqlextra.types import PostgresPartitioningMethod
from .fake_model import define_fake_partitioned_model
@pytest.fixture
def model():
fields = {"name": models.TextField(), "category": models.TextField()}
partitioning_options = {
"method": PostgresPartitioningMethod.LIST,
"key": ["category"],
}
model = define_fake_partitioned_model(fields, partitioning_options)
return model
def test_partitioned_model_state_copies():
"""Tests whether cloning the model state properly copies all the options.
If it does not copy them, bad things can happen as the state is
mutated to build up migration state.
"""
options = dict(method=PostgresPartitioningMethod.RANGE, key=["timestamp"])
state = PostgresPartitionedModelState(
app_label="tests",
name=str(uuid.uuid4()),
fields=[],
options=None,
partitioning_options=options,
bases=(PostgresPartitionedModel,),
)
assert options is not state.partitioning_options
def test_partitioned_model_state_from_model(model):
"""Tests whether creating state from an existing model works as
expected."""
state = PostgresPartitionedModelState.from_model(model)
assert state.partitions == {}
assert (
state.partitioning_options["method"] == model._partitioning_meta.method
)
assert state.partitioning_options["key"] == model._partitioning_meta.key
def test_partitioned_model_clone(model):
"""Tests whether cloning the state actually clones the partitioning
options.
If it's not a copy but a reference instead, bad things can happen as
the options are mutated to build up migration state.
"""
state = PostgresPartitionedModelState.from_model(model)
state.partitions = {
"pt1": PostgresPartitionState(
app_label="tests", model_name="tests", name="pt1"
)
}
state_copy = state.clone()
assert state.partitions is not state_copy.partitions
assert state.partitioning_options is not state_copy.partitioning_options
def test_partitioned_model_render(model):
"""Tests whether the state can be rendered into a valid model class."""
options = dict(method=PostgresPartitioningMethod.RANGE, key=["timestamp"])
state = PostgresPartitionedModelState(
app_label="tests",
name=str(uuid.uuid4()),
fields=[("name", models.TextField())],
options=None,
partitioning_options=options,
bases=(PostgresPartitionedModel,),
managers=[("cookie", PostgresManager())],
)
rendered_model = state.render(apps)
assert issubclass(rendered_model, PostgresPartitionedModel)
assert rendered_model.name
assert isinstance(rendered_model.objects, PostgresManager)
assert isinstance(rendered_model.cookie, PostgresManager)
assert rendered_model.__name__ == state.name
assert rendered_model._meta.apps == apps
assert rendered_model._meta.app_label == "tests"
assert rendered_model._partitioning_meta.method == options["method"]
assert rendered_model._partitioning_meta.key == options["key"]
django-postgres-extra-2.0.9/tests/test_partitioning_manager.py
import pytest
from django.db import models
from psqlextra.partitioning import (
PostgresPartitioningError,
PostgresPartitioningManager,
partition_by_current_time,
)
from .fake_model import define_fake_partitioned_model, get_fake_model
def test_partitioning_manager_duplicate_model():
"""Tests whether it is not possible to have more than one partitioning
config per model."""
model = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
with pytest.raises(PostgresPartitioningError):
PostgresPartitioningManager(
[
partition_by_current_time(model, years=1, count=3),
partition_by_current_time(model, years=1, count=3),
]
)
def test_partitioning_manager_find_config_for_model():
"""Tests that finding a partitioning config by the model works as
expected."""
model1 = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
config1 = partition_by_current_time(model1, years=1, count=3)
model2 = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
config2 = partition_by_current_time(model2, months=1, count=2)
manager = PostgresPartitioningManager([config1, config2])
assert manager.find_config_for_model(model1) == config1
assert manager.find_config_for_model(model2) == config2
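# In a real project the manager is usually not called directly; a sketch
# of the documented setup, with hypothetical module paths: the manager is
# assigned to the PSQLEXTRA_PARTITIONING_MANAGER setting and driven by
# the `pgpartition` management command, e.g.
#
#   # settings.py
#   PSQLEXTRA_PARTITIONING_MANAGER = "myapp.partitioning.manager"
#
#   $ python manage.py pgpartition --yes
#
# The tests here call manager.plan() directly instead.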
def test_partitioning_manager_plan_not_partitioned_model():
"""Tests that the auto partitioner does not try to auto partition for non-
partitioned models/tables."""
model = get_fake_model({"timestamp": models.DateTimeField()})
with pytest.raises(PostgresPartitioningError):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, months=1, count=2)]
)
manager.plan()
def test_partitioning_manager_plan_non_existent_model():
"""Tests that the auto partitioner does not try to partition for non-
existent partitioned tables."""
model = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
with pytest.raises(PostgresPartitioningError):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, months=1, count=2)]
)
manager.plan()
django-postgres-extra-2.0.9/tests/test_partitioning_time.py
import datetime
import freezegun
import pytest
from dateutil.relativedelta import relativedelta
from django.db import connection, models, transaction
from django.db.utils import IntegrityError
from psqlextra.partitioning import (
PostgresPartitioningError,
PostgresPartitioningManager,
partition_by_current_time,
)
from . import db_introspection
from .fake_model import define_fake_partitioned_model
def _get_partitioned_table(model):
return db_introspection.get_partitioned_table(model._meta.db_table)
@pytest.mark.postgres_version(lt=110000)
def test_partitioning_time_yearly_apply():
"""Tests whether automatically creating new partitions ahead yearly works
as expected."""
model = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
schema_editor = connection.schema_editor()
schema_editor.create_partitioned_model(model)
with freezegun.freeze_time("2019-1-1"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, years=1, count=2)]
)
manager.plan().apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == 2
assert table.partitions[0].name == "2019"
assert table.partitions[1].name == "2020"
with freezegun.freeze_time("2019-12-30"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, years=1, count=3)]
)
manager.plan().apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == 3
assert table.partitions[0].name == "2019"
assert table.partitions[1].name == "2020"
assert table.partitions[2].name == "2021"
@pytest.mark.postgres_version(lt=110000)
def test_partitioning_time_monthly_apply():
"""Tests whether automatically creating new partitions ahead monthly works
as expected."""
model = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
schema_editor = connection.schema_editor()
schema_editor.create_partitioned_model(model)
# create partitions for the next 12 months (including the current)
with freezegun.freeze_time("2019-1-30"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, months=1, count=12)]
)
manager.plan().apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == 12
assert table.partitions[0].name == "2019_jan"
assert table.partitions[1].name == "2019_feb"
assert table.partitions[2].name == "2019_mar"
assert table.partitions[3].name == "2019_apr"
assert table.partitions[4].name == "2019_may"
assert table.partitions[5].name == "2019_jun"
assert table.partitions[6].name == "2019_jul"
assert table.partitions[7].name == "2019_aug"
assert table.partitions[8].name == "2019_sep"
assert table.partitions[9].name == "2019_oct"
assert table.partitions[10].name == "2019_nov"
assert table.partitions[11].name == "2019_dec"
# re-running it with 13, should just create one additional partition
with freezegun.freeze_time("2019-1-30"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, months=1, count=13)]
)
manager.plan().apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == 13
assert table.partitions[12].name == "2020_jan"
# it's november now, we only want to create 4 partitions ahead,
# so only one new partition should be created, for February 2020
with freezegun.freeze_time("2019-11-1"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, months=1, count=4)]
)
manager.plan().apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == 14
assert table.partitions[13].name == "2020_feb"
@pytest.mark.postgres_version(lt=110000)
def test_partitioning_time_monthly_with_custom_naming_apply():
"""Tests whether automatically created new partitions are named according
to the specified name_format."""
model = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
schema_editor = connection.schema_editor()
schema_editor.create_partitioned_model(model)
# create partitions for the next 12 months (including the current)
with freezegun.freeze_time("2019-1-30"):
manager = PostgresPartitioningManager(
[
partition_by_current_time(
model, months=1, count=12, name_format="%Y_%m"
)
]
)
manager.plan().apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == 12
assert table.partitions[0].name == "2019_01"
assert table.partitions[1].name == "2019_02"
assert table.partitions[2].name == "2019_03"
assert table.partitions[3].name == "2019_04"
assert table.partitions[4].name == "2019_05"
assert table.partitions[5].name == "2019_06"
assert table.partitions[6].name == "2019_07"
assert table.partitions[7].name == "2019_08"
assert table.partitions[8].name == "2019_09"
assert table.partitions[9].name == "2019_10"
assert table.partitions[10].name == "2019_11"
assert table.partitions[11].name == "2019_12"
@pytest.mark.postgres_version(lt=110000)
def test_partitioning_time_weekly_apply():
"""Tests whether automatically creating new partitions ahead weekly works
as expected."""
model = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
schema_editor = connection.schema_editor()
schema_editor.create_partitioned_model(model)
# create partitions for the next 4 weeks (including the current)
with freezegun.freeze_time("2019-1-23"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, weeks=1, count=4)]
)
manager.plan().apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == 4
assert table.partitions[0].name == "2019_week_03"
assert table.partitions[1].name == "2019_week_04"
assert table.partitions[2].name == "2019_week_05"
assert table.partitions[3].name == "2019_week_06"
# re-running it with 5, should just create one additional partition
with freezegun.freeze_time("2019-1-23"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, weeks=1, count=5)]
)
manager.plan().apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == 5
assert table.partitions[4].name == "2019_week_07"
# it's june now, we want to partition two weeks ahead
with freezegun.freeze_time("2019-06-03"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, weeks=1, count=2)]
)
manager.plan().apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == 7
assert table.partitions[5].name == "2019_week_22"
assert table.partitions[6].name == "2019_week_23"
@pytest.mark.postgres_version(lt=110000)
def test_partitioning_time_daily_apply():
"""Tests whether automatically creating new partitions ahead daily works as
expected."""
model = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
schema_editor = connection.schema_editor()
schema_editor.create_partitioned_model(model)
# create partitions for the next 4 days (including the current)
with freezegun.freeze_time("2019-1-23"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, days=1, count=4)]
)
manager.plan().apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == 4
assert table.partitions[0].name == "2019_jan_23"
assert table.partitions[1].name == "2019_jan_24"
assert table.partitions[2].name == "2019_jan_25"
assert table.partitions[3].name == "2019_jan_26"
# re-running it with 5, should just create one additional partition
with freezegun.freeze_time("2019-1-23"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, days=1, count=5)]
)
manager.plan().apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == 5
assert table.partitions[4].name == "2019_jan_27"
# it's june now, we want to partition two days ahead
with freezegun.freeze_time("2019-06-03"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, days=1, count=2)]
)
manager.plan().apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == 7
assert table.partitions[5].name == "2019_jun_03"
assert table.partitions[6].name == "2019_jun_04"
@pytest.mark.postgres_version(lt=110000)
def test_partitioning_time_monthly_apply_insert():
"""Tests whether automatically created monthly partitions line up
perfectly."""
model = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
schema_editor = connection.schema_editor()
schema_editor.create_partitioned_model(model)
with freezegun.freeze_time("2019-1-1"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, months=1, count=2)]
)
manager.plan().apply()
model.objects.create(timestamp=datetime.date(2019, 1, 1))
model.objects.create(timestamp=datetime.date(2019, 1, 31))
model.objects.create(timestamp=datetime.date(2019, 2, 28))
with transaction.atomic():
with pytest.raises(IntegrityError):
model.objects.create(timestamp=datetime.date(2019, 3, 1))
model.objects.create(timestamp=datetime.date(2019, 3, 2))
with freezegun.freeze_time("2019-1-1"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, months=1, count=3)]
)
manager.plan().apply()
model.objects.create(timestamp=datetime.date(2019, 3, 1))
model.objects.create(timestamp=datetime.date(2019, 3, 2))
@pytest.mark.postgres_version(lt=110000)
def test_partitioning_time_weekly_apply_insert():
"""Tests whether automatically created weekly partitions line up
perfectly."""
model = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
schema_editor = connection.schema_editor()
schema_editor.create_partitioned_model(model)
# 2019-01-08 falls in the week starting Monday 2019-01-07
with freezegun.freeze_time("2019-1-08"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, weeks=1, count=2)]
)
manager.plan().apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == 2
model.objects.create(timestamp=datetime.date(2019, 1, 7))
model.objects.create(timestamp=datetime.date(2019, 1, 14))
model.objects.create(timestamp=datetime.date(2019, 1, 20))
with transaction.atomic():
with pytest.raises(IntegrityError):
model.objects.create(timestamp=datetime.date(2019, 1, 21))
model.objects.create(timestamp=datetime.date(2019, 1, 22))
with freezegun.freeze_time("2019-1-07"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, weeks=1, count=3)]
)
manager.plan().apply()
model.objects.create(timestamp=datetime.date(2019, 1, 21))
model.objects.create(timestamp=datetime.date(2019, 1, 22))
@pytest.mark.postgres_version(lt=110000)
def test_partitioning_time_daily_apply_insert():
"""Tests whether automatically created daily partitions line up
perfectly."""
model = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
schema_editor = connection.schema_editor()
schema_editor.create_partitioned_model(model)
# that's a monday
with freezegun.freeze_time("2019-1-07"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, days=1, count=2)]
)
manager.plan().apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == 2
model.objects.create(timestamp=datetime.date(2019, 1, 7))
model.objects.create(timestamp=datetime.date(2019, 1, 8))
with transaction.atomic():
with pytest.raises(IntegrityError):
model.objects.create(timestamp=datetime.date(2019, 1, 9))
model.objects.create(timestamp=datetime.date(2019, 1, 10))
with freezegun.freeze_time("2019-1-07"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, days=1, count=4)]
)
manager.plan().apply()
model.objects.create(timestamp=datetime.date(2019, 1, 9))
model.objects.create(timestamp=datetime.date(2019, 1, 10))
@pytest.mark.postgres_version(lt=110000)
@pytest.mark.parametrize(
"kwargs,partition_names",
[
(dict(days=2), ["2019_jan_01", "2019_jan_03"]),
(dict(weeks=2), ["2018_week_53", "2019_week_02"]),
(dict(months=2), ["2019_jan", "2019_mar"]),
(dict(years=2), ["2019", "2021"]),
],
)
def test_partitioning_time_multiple(kwargs, partition_names):
model = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
schema_editor = connection.schema_editor()
schema_editor.create_partitioned_model(model)
with freezegun.freeze_time("2019-1-1"):
manager = PostgresPartitioningManager(
[partition_by_current_time(model, **kwargs, count=2)]
)
manager.plan().apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == 2
assert partition_names == [par.name for par in table.partitions]
@pytest.mark.postgres_version(lt=110000)
@pytest.mark.parametrize(
"kwargs,timepoints",
[
(
dict(years=1, max_age=relativedelta(years=2)),
[("2019-1-1", 6), ("2020-1-1", 6), ("2021-1-1", 5)],
),
(
dict(months=1, max_age=relativedelta(months=1)),
[
("2019-1-1", 6),
("2019-2-1", 5),
("2019-2-28", 5),
("2019-3-1", 4),
],
),
(
dict(days=7, max_age=relativedelta(weeks=1)),
[
("2019-1-1", 6),
("2019-1-4", 6),
("2019-1-8", 5),
("2019-1-15", 4),
("2019-1-16", 4),
],
),
],
)
def test_partitioning_time_delete(kwargs, timepoints):
"""Tests whether partitions older than the specified max_age are
automatically deleted."""
model = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
schema_editor = connection.schema_editor()
schema_editor.create_partitioned_model(model)
partition_kwargs = {"model": model, "count": 6, **kwargs}
manager = PostgresPartitioningManager(
[partition_by_current_time(**partition_kwargs)]
)
with freezegun.freeze_time(timepoints[0][0]):
manager.plan().apply()
for index, (dt, partition_count) in enumerate(timepoints):
with freezegun.freeze_time(dt):
manager.plan(skip_create=True).apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == partition_count
@pytest.mark.postgres_version(lt=110000)
def test_partitioning_time_delete_ignore_manual():
"""Tests whether partitions that were created manually are ignored.
Partitions created automatically have a special comment attached to
them. Only partitions with this special comment are deleted.
"""
model = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
schema_editor = connection.schema_editor()
schema_editor.create_partitioned_model(model)
manager = PostgresPartitioningManager(
[partition_by_current_time(model, count=2, months=1)]
)
schema_editor.add_range_partition(
model, "2019_jan", from_values="2019-1-1", to_values="2019-2-1"
)
with freezegun.freeze_time("2020-1-1"):
manager.plan(skip_create=True).apply()
table = _get_partitioned_table(model)
assert len(table.partitions) == 1
def test_partitioning_time_no_size():
"""Tests whether an error is raised when size for the partitions is
specified."""
model = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
with pytest.raises(PostgresPartitioningError):
partition_by_current_time(model, count=1)
def test_partitioning_time_multiple_sizes():
"""Tests whether an error is raised when multiple sizes for a partition are
specified."""
model = define_fake_partitioned_model(
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
)
with pytest.raises(PostgresPartitioningError):
partition_by_current_time(model, weeks=1, months=2, count=1)
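# Summary of the partition_by_current_time() contract exercised in this
# module: exactly one size argument (years/months/weeks/days) must be
# given together with a `count` of partitions to create ahead; `max_age`
# (a relativedelta) optionally enables deleting old partitions, and
# `name_format` overrides the default partition naming.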
django-postgres-extra-2.0.9/tests/test_query.py
from django.db import connection, models
from django.db.models import Case, F, Q, Value, When
from django.test.utils import CaptureQueriesContext, override_settings
from psqlextra.expressions import HStoreRef
from psqlextra.fields import HStoreField
from .fake_model import get_fake_model
def test_query_annotate_hstore_key_ref():
"""Tests whether annotating using a :see:HStoreRef expression works
correctly.
This allows you to select an individual hstore key.
"""
model_fk = get_fake_model({"title": HStoreField()})
model = get_fake_model(
{"fk": models.ForeignKey(model_fk, on_delete=models.CASCADE)}
)
fk = model_fk.objects.create(title={"en": "english", "ar": "arabic"})
model.objects.create(fk=fk)
queryset = (
model.objects.annotate(english_title=HStoreRef("fk__title", "en"))
.values("english_title")
.first()
)
assert queryset["english_title"] == "english"
def test_query_annotate_rename():
"""Tests whether field names can be overwritten with a annotated field."""
model = get_fake_model({"title": models.CharField(max_length=12)})
model.objects.create(title="swen")
obj = model.objects.annotate(title=F("title")).first()
assert obj.title == "swen"
def test_query_annotate_rename_chain():
"""Tests whether annotations are behaving correctly after a QuerySet
chain."""
model = get_fake_model(
{
"name": models.CharField(max_length=10),
"value": models.IntegerField(),
}
)
model.objects.create(name="test", value=23)
obj = model.objects.values("name").annotate(value=F("value"))[:1]
assert "value" in obj[0]
assert obj[0]["value"] == 23
def test_query_annotate_rename_order():
"""Tests whether annotation order is preserved after a rename."""
model = get_fake_model(
{
"name": models.CharField(max_length=10),
"value": models.IntegerField(),
}
)
qs = model.objects.annotate(value=F("value"), value_2=F("value"))
assert list(qs.query.annotations.keys()) == ["value", "value_2"]
def test_query_annotate_in_expression():
"""Tests whether annotations can be used in expressions."""
model = get_fake_model({"name": models.CharField(max_length=10)})
model.objects.create(name="henk")
result = model.objects.annotate(
real_name=F("name"),
is_he_henk=Case(
When(Q(real_name="henk"), then=Value("really henk")),
default=Value("definitely not henk"),
output_field=models.CharField(),
),
).first()
assert result.real_name == "henk"
assert result.is_he_henk == "really henk"
def test_query_hstore_value_update_f_ref():
"""Tests whether F(..) expressions can be used in hstore values when
performing update queries."""
model = get_fake_model(
{"name": models.CharField(max_length=255), "name_new": HStoreField()}
)
model.objects.create(name="waqas", name_new=dict(en="swen"))
model.objects.update(name_new=dict(en=models.F("name")))
inst = model.objects.all().first()
assert inst.name_new.get("en") == "waqas"
def test_query_hstore_value_update_cast():
"""Tests whether values in a HStore field are automatically cast to strings
when doing updates."""
model = get_fake_model({"title": HStoreField()})
model.objects.create(title=dict(en="test"))
model.objects.update(title=dict(en=2))
inst = model.objects.all().first()
assert inst.title.get("en") == "2"
def test_query_hstore_value_update_escape():
"""Tests whether values in a HStore field are properly escaped using
prepared statement values."""
model = get_fake_model({"title": HStoreField()})
model.objects.create(title=dict(en="test"))
model.objects.update(title=dict(en="console.log('test')"))
inst = model.objects.all().first()
assert inst.title.get("en") == "console.log('test')"
@override_settings(POSTGRES_EXTRA_ANNOTATE_SQL=True)
def test_query_comment():
"""Tests whether the query is commented."""
model = get_fake_model(
{
"name": models.CharField(max_length=10),
"value": models.IntegerField(),
}
)
with CaptureQueriesContext(connection) as queries:
qs = model.objects.all()
assert " test_query_comment " in str(qs.query)
list(qs)
assert " test_query_comment " in queries[0]["sql"]
django-postgres-extra-2.0.9/tests/test_query_values.py
import django
import pytest
from django.db import models
from psqlextra.fields import HStoreField
from .fake_model import get_fake_model
@pytest.fixture
def model():
"""Test models, where the first model has a foreign key relationship to the
second."""
return get_fake_model({"title": HStoreField()})
@pytest.fixture
def modelobj(model):
"""Data for the test models, one row per model."""
return model.objects.create(title={"en": "english", "ar": "arabic"})
def test_query_values_hstore(model, modelobj):
"""Tests that selecting all the keys properly works and returns a.
:see:LocalizedValue instance.
"""
result = list(model.objects.values("title"))[0]
assert result["title"] == modelobj.title
def test_query_values_hstore_key(model, modelobj):
"""Tests whether selecting a single key from a :see:HStoreField using the
query set's .values() works properly."""
result = list(model.objects.values("title__en", "title__ar"))[0]
assert result["title__en"] == modelobj.title["en"]
assert result["title__ar"] == modelobj.title["ar"]
def test_query_values_list_hstore_key(model, modelobj):
"""Tests that selecting a single key from a :see:HStoreField using the
query set's .values_list() works properly."""
result = list(model.objects.values_list("title__en", "title__ar"))[0]
assert result[0] == modelobj.title["en"]
assert result[1] == modelobj.title["ar"]
@pytest.mark.skipif(
django.VERSION < (2, 1), reason="requires django 2.1 or newer"
)
def test_query_values_hstore_key_through_fk():
"""Tests whether selecting a single key from a :see:HStoreField using the
query set's .values() works properly when there's a foreign key
relationship involved."""
# this started working in django 2.1
# see: https://github.com/django/django/commit/20bab2cf9d02a5c6477d8aac066a635986e0d3f3
fmodel = get_fake_model({"name": HStoreField()})
model = get_fake_model(
{"fk": models.ForeignKey(fmodel, on_delete=models.CASCADE)}
)
fobj = fmodel.objects.create(name={"en": "swen", "ar": "arabic swen"})
model.objects.create(fk=fobj)
result = list(model.objects.values("fk__name__ar"))[0]
assert result["fk__name__ar"] == fobj.name["ar"]
django-postgres-extra-2.0.9/tests/test_schema.py
import freezegun
import pytest
from django.core.exceptions import SuspiciousOperation, ValidationError
from django.db import InternalError, ProgrammingError, connection
from psycopg2 import errorcodes
from psqlextra.error import extract_postgres_error_code
from psqlextra.schema import PostgresSchema, postgres_temporary_schema
def _does_schema_exist(name: str) -> bool:
with connection.cursor() as cursor:
return name in connection.introspection.get_schema_list(cursor)
def test_postgres_schema_create():
schema = PostgresSchema.create("myschema")
assert schema.name == "myschema"
assert _does_schema_exist(schema.name)
def test_postgres_schema_does_not_overwrite():
schema = PostgresSchema.create("myschema")
with pytest.raises(ProgrammingError):
PostgresSchema.create(schema.name)
def test_postgres_schema_create_max_name_length():
with pytest.raises(ValidationError) as exc_info:
PostgresSchema.create(
"stringthatislongerhtan63charactersforsureabsolutelysurethisislongerthanthat"
)
assert "is longer than Postgres's limit" in str(exc_info.value)
def test_postgres_schema_create_name_that_requires_escaping():
# 'table' needs escaping because it conflicts with
# the SQL keyword TABLE
schema = PostgresSchema.create("table")
assert schema.name == "table"
assert _does_schema_exist("table")
def test_postgres_schema_create_time_based():
with freezegun.freeze_time("2023-04-07 13:37:23.4"):
schema = PostgresSchema.create_time_based("myprefix")
assert schema.name == "myprefix_20230407130423"
assert _does_schema_exist(schema.name)
def test_postgres_schema_create_time_based_long_prefix():
with pytest.raises(ValidationError) as exc_info:
with freezegun.freeze_time("2023-04-07 13:37:23.4"):
PostgresSchema.create_time_based("a" * 49)
assert "is longer than 48 characters" in str(exc_info.value)
def test_postgres_schema_create_random():
schema = PostgresSchema.create_random("myprefix")
prefix, suffix = schema.name.split("_")
assert prefix == "myprefix"
assert len(suffix) == 8
assert _does_schema_exist(schema.name)
def test_postgres_schema_create_random_long_prefix():
with pytest.raises(ValidationError) as exc_info:
PostgresSchema.create_random("a" * 55)
assert "is longer than 54 characters" in str(exc_info.value)
def test_postgres_schema_delete_and_create():
schema = PostgresSchema.create("test")
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE test.bla AS SELECT 'hello'")
cursor.execute("SELECT * FROM test.bla")
assert cursor.fetchone() == ("hello",)
# Should refuse to delete since we added a table to the schema
with pytest.raises(InternalError) as exc_info:
schema = PostgresSchema.delete_and_create(schema.name)
pg_error = extract_postgres_error_code(exc_info.value)
assert pg_error == errorcodes.DEPENDENT_OBJECTS_STILL_EXIST
# Verify that the schema and table still exist
assert _does_schema_exist(schema.name)
with connection.cursor() as cursor:
cursor.execute("SELECT * FROM test.bla")
assert cursor.fetchone() == ("hello",)
# Dropping the schema should work with cascade=True
schema = PostgresSchema.delete_and_create(schema.name, cascade=True)
assert _does_schema_exist(schema.name)
# Since the schema was deleted and re-created, the `bla`
# table should not exist anymore.
with pytest.raises(ProgrammingError) as exc_info:
with connection.cursor() as cursor:
cursor.execute("SELECT * FROM test.bla")
assert cursor.fetchone() == ("hello",)
pg_error = extract_postgres_error_code(exc_info.value)
assert pg_error == errorcodes.UNDEFINED_TABLE
def test_postgres_schema_delete():
schema = PostgresSchema.create("test")
assert _does_schema_exist(schema.name)
schema.delete()
assert not _does_schema_exist(schema.name)
def test_postgres_schema_delete_not_empty():
schema = PostgresSchema.create("test")
assert _does_schema_exist(schema.name)
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE test.bla AS SELECT 'hello'")
with pytest.raises(InternalError) as exc_info:
schema.delete()
pg_error = extract_postgres_error_code(exc_info.value)
assert pg_error == errorcodes.DEPENDENT_OBJECTS_STILL_EXIST
def test_postgres_schema_delete_cascade_not_empty():
schema = PostgresSchema.create("test")
assert _does_schema_exist(schema.name)
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE test.bla AS SELECT 'hello'")
schema.delete(cascade=True)
assert not _does_schema_exist(schema.name)
def test_postgres_schema_no_delete_default():
with pytest.raises(SuspiciousOperation):
PostgresSchema.default.delete()
with pytest.raises(SuspiciousOperation):
PostgresSchema("public").delete()
def test_postgres_temporary_schema():
with postgres_temporary_schema("temp") as schema:
name_prefix, name_suffix = schema.name.split("_")
assert name_prefix == "temp"
assert len(name_suffix) == 8
assert _does_schema_exist(schema.name)
assert not _does_schema_exist(schema.name)
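# Usage sketch of the context manager exercised above: it creates a
# schema named "<prefix>_<8 random hex chars>" on entry and drops it on
# exit, e.g.
#
#   with postgres_temporary_schema("temp") as schema:
#       ...  # schema.name is something like "temp_1a2b3c4d"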
def test_postgres_temporary_schema_not_empty():
with pytest.raises(InternalError) as exc_info:
with postgres_temporary_schema("temp") as schema:
with connection.cursor() as cursor:
cursor.execute(
f"CREATE TABLE {schema.name}.mytable AS SELECT 'hello world'"
)
pg_error = extract_postgres_error_code(exc_info.value)
assert pg_error == errorcodes.DEPENDENT_OBJECTS_STILL_EXIST
def test_postgres_temporary_schema_not_empty_cascade():
with postgres_temporary_schema("temp", cascade=True) as schema:
with connection.cursor() as cursor:
cursor.execute(
f"CREATE TABLE {schema.name}.mytable AS SELECT 'hello world'"
)
assert not _does_schema_exist(schema.name)
@pytest.mark.parametrize("delete_on_throw", [True, False])
def test_postgres_temporary_schema_no_delete_on_throw(delete_on_throw):
with pytest.raises(ValueError):
with postgres_temporary_schema(
"temp", delete_on_throw=delete_on_throw
) as schema:
raise ValueError("test")
assert _does_schema_exist(schema.name) != delete_on_throw
django-postgres-extra-2.0.9/tests/test_schema_editor_alter_schema.py
import pytest
from django.db import connection, models
from psqlextra.backend.schema import PostgresSchemaEditor
from .fake_model import get_fake_model
@pytest.fixture
def fake_model():
return get_fake_model(
{
"text": models.TextField(),
}
)
def test_schema_editor_alter_table_schema(fake_model):
obj = fake_model.objects.create(text="hello")
with connection.cursor() as cursor:
cursor.execute("CREATE SCHEMA target")
schema_editor = PostgresSchemaEditor(connection)
schema_editor.alter_table_schema(fake_model._meta.db_table, "target")
with connection.cursor() as cursor:
cursor.execute(f"SELECT * FROM target.{fake_model._meta.db_table}")
assert cursor.fetchall() == [(obj.id, obj.text)]
def test_schema_editor_alter_model_schema(fake_model):
obj = fake_model.objects.create(text="hello")
with connection.cursor() as cursor:
cursor.execute("CREATE SCHEMA target")
schema_editor = PostgresSchemaEditor(connection)
schema_editor.alter_model_schema(fake_model, "target")
with connection.cursor() as cursor:
cursor.execute(f"SELECT * FROM target.{fake_model._meta.db_table}")
assert cursor.fetchall() == [(obj.id, obj.text)]
django-postgres-extra-2.0.9/tests/test_schema_editor_clone_model_to_schema.py
import os
from typing import Set, Tuple
import django
import pytest
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.indexes import GinIndex
from django.db import connection, models, transaction
from django.db.models import Q
from psqlextra.backend.schema import PostgresSchemaEditor
from . import db_introspection
from .fake_model import delete_fake_model, get_fake_model
django_32_skip_reason = "Django < 3.2 can't support cloning models because it has hard coded references to the public schema"
def _create_schema() -> str:
name = os.urandom(4).hex()
with connection.cursor() as cursor:
cursor.execute(
"DROP SCHEMA IF EXISTS %s CASCADE"
% connection.ops.quote_name(name),
tuple(),
)
cursor.execute(
"CREATE SCHEMA %s" % connection.ops.quote_name(name), tuple()
)
return name
@transaction.atomic
def _assert_cloned_table_is_same(
source_table_fqn: Tuple[str, str],
target_table_fqn: Tuple[str, str],
excluding_constraints_and_indexes: bool = False,
):
source_schema_name, source_table_name = source_table_fqn
target_schema_name, target_table_name = target_table_fqn
source_columns = db_introspection.get_columns(
source_table_name, schema_name=source_schema_name
)
target_columns = db_introspection.get_columns(
target_table_name, schema_name=target_schema_name
)
assert source_columns == target_columns
source_relations = db_introspection.get_relations(
source_table_name, schema_name=source_schema_name
)
target_relations = db_introspection.get_relations(
target_table_name, schema_name=target_schema_name
)
if excluding_constraints_and_indexes:
assert target_relations == {}
else:
assert source_relations == target_relations
source_constraints = db_introspection.get_constraints(
source_table_name, schema_name=source_schema_name
)
target_constraints = db_introspection.get_constraints(
target_table_name, schema_name=target_schema_name
)
if excluding_constraints_and_indexes:
assert target_constraints == {}
else:
assert source_constraints == target_constraints
source_sequences = db_introspection.get_sequences(
source_table_name, schema_name=source_schema_name
)
target_sequences = db_introspection.get_sequences(
target_table_name, schema_name=target_schema_name
)
assert source_sequences == target_sequences
source_storage_settings = db_introspection.get_storage_settings(
source_table_name,
schema_name=source_schema_name,
)
target_storage_settings = db_introspection.get_storage_settings(
target_table_name, schema_name=target_schema_name
)
assert source_storage_settings == target_storage_settings
def _list_lock_modes_in_schema(schema_name: str) -> Set[str]:
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT
l.mode
FROM pg_locks l
INNER JOIN pg_class t ON t.oid = l.relation
INNER JOIN pg_namespace n ON n.oid = t.relnamespace
WHERE
t.relnamespace >= 2200
AND n.nspname = %s
ORDER BY n.nspname, t.relname, l.mode
""",
(schema_name,),
)
return {lock_mode for lock_mode, in cursor.fetchall()}
def _clone_model_into_schema(model):
schema_name = _create_schema()
with PostgresSchemaEditor(connection) as schema_editor:
schema_editor.clone_model_structure_to_schema(
model, schema_name=schema_name
)
schema_editor.clone_model_constraints_and_indexes_to_schema(
model, schema_name=schema_name
)
schema_editor.clone_model_foreign_keys_to_schema(
model, schema_name=schema_name
)
return schema_name
@pytest.fixture
def fake_model_fk_target_1():
model = get_fake_model(
{
"name": models.TextField(),
},
)
yield model
delete_fake_model(model)
@pytest.fixture
def fake_model_fk_target_2():
model = get_fake_model(
{
"name": models.TextField(),
},
)
yield model
delete_fake_model(model)
@pytest.fixture
def fake_model(fake_model_fk_target_1, fake_model_fk_target_2):
model = get_fake_model(
{
"first_name": models.TextField(null=True),
"last_name": models.TextField(),
"age": models.PositiveIntegerField(),
"height": models.FloatField(),
"nicknames": ArrayField(base_field=models.TextField()),
"blob": models.JSONField(),
"family": models.ForeignKey(
fake_model_fk_target_1, on_delete=models.CASCADE
),
"alternative_family": models.ForeignKey(
fake_model_fk_target_2, null=True, on_delete=models.SET_NULL
),
},
meta_options={
"indexes": [
models.Index(fields=["age", "height"]),
models.Index(fields=["age"], name="age_index"),
GinIndex(fields=["nicknames"], name="nickname_index"),
],
"constraints": [
models.UniqueConstraint(
fields=["first_name", "last_name"],
name="first_last_name_uniq",
),
models.CheckConstraint(
check=Q(age__gt=0, height__gt=0), name="age_height_check"
),
],
"unique_together": (
"first_name",
"nicknames",
),
"index_together": (
"blob",
"age",
),
},
)
yield model
delete_fake_model(model)
@pytest.mark.skipif(
django.VERSION < (3, 2),
reason=django_32_skip_reason,
)
@pytest.mark.django_db(transaction=True)
def test_schema_editor_clone_model_to_schema(
fake_model, fake_model_fk_target_1, fake_model_fk_target_2
):
"""Tests that cloning a model into a separate schema without obtaining
AccessExclusiveLock on the source table works as expected."""
schema_editor = PostgresSchemaEditor(connection)
with schema_editor:
schema_editor.alter_table_storage_setting(
fake_model._meta.db_table, "autovacuum_enabled", "false"
)
table_name = fake_model._meta.db_table
source_schema_name = "public"
target_schema_name = _create_schema()
with schema_editor:
schema_editor.clone_model_structure_to_schema(
fake_model, schema_name=target_schema_name
)
assert _list_lock_modes_in_schema(source_schema_name) == {
"AccessShareLock"
}
_assert_cloned_table_is_same(
(source_schema_name, table_name),
(target_schema_name, table_name),
excluding_constraints_and_indexes=True,
)
with schema_editor:
schema_editor.clone_model_constraints_and_indexes_to_schema(
fake_model, schema_name=target_schema_name
)
assert _list_lock_modes_in_schema(source_schema_name) == {
"AccessShareLock",
"ShareRowExclusiveLock",
}
_assert_cloned_table_is_same(
(source_schema_name, table_name),
(target_schema_name, table_name),
)
with schema_editor:
schema_editor.clone_model_foreign_keys_to_schema(
fake_model, schema_name=target_schema_name
)
assert _list_lock_modes_in_schema(source_schema_name) == {
"AccessShareLock",
"RowShareLock",
}
_assert_cloned_table_is_same(
(source_schema_name, table_name),
(target_schema_name, table_name),
)
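# Note the three-phase design verified above: structure first (only
# AccessShareLock on the source), then constraints and indexes
# (ShareRowExclusiveLock), then foreign keys (RowShareLock). None of the
# phases takes AccessExclusiveLock, so plain SELECTs against the source
# table keep working throughout the clone.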
@pytest.mark.skipif(
django.VERSION < (3, 2),
reason=django_32_skip_reason,
)
def test_schema_editor_clone_model_to_schema_custom_constraint_names(
fake_model, fake_model_fk_target_1
):
"""Tests that even if constraints were given custom names, the cloned table
has those same custom names."""
table_name = fake_model._meta.db_table
source_schema_name = "public"
constraints = db_introspection.get_constraints(table_name)
primary_key_constraint = next(
(
name
for name, constraint in constraints.items()
if constraint["primary_key"]
),
None,
)
foreign_key_constraint = next(
(
name
for name, constraint in constraints.items()
if constraint["foreign_key"]
== (fake_model_fk_target_1._meta.db_table, "id")
),
None,
)
check_constraint = next(
(
name
for name, constraint in constraints.items()
if constraint["check"] and constraint["columns"] == ["age"]
),
None,
)
with connection.cursor() as cursor:
cursor.execute(
f"ALTER TABLE {table_name} RENAME CONSTRAINT {primary_key_constraint} TO custompkname"
)
cursor.execute(
f"ALTER TABLE {table_name} RENAME CONSTRAINT {foreign_key_constraint} TO customfkname"
)
cursor.execute(
f"ALTER TABLE {table_name} RENAME CONSTRAINT {check_constraint} TO customcheckname"
)
target_schema_name = _clone_model_into_schema(fake_model)
_assert_cloned_table_is_same(
(source_schema_name, table_name),
(target_schema_name, table_name),
)
django-postgres-extra-2.0.9/tests/test_schema_editor_partitioning.py 0000664 0000000 0000000 00000022404 14634267343 0026137 0 ustar 00root root 0000000 0000000 import pytest
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, models
from psqlextra.backend.schema import PostgresSchemaEditor
from psqlextra.types import PostgresPartitioningMethod
from . import db_introspection
from .fake_model import define_fake_partitioned_model
@pytest.mark.postgres_version(lt=110000)
def test_schema_editor_create_delete_partitioned_model_range():
"""Tests whether creating a partitioned model and adding a list partition
to it using the :see:PostgresSchemaEditor works."""
method = PostgresPartitioningMethod.RANGE
key = ["timestamp"]
model = define_fake_partitioned_model(
{"name": models.TextField(), "timestamp": models.DateTimeField()},
{"method": method, "key": key},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_range_partition(model, "pt1", "2019-01-01", "2019-02-01")
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert table.name == model._meta.db_table
assert table.method == method
assert table.key == key
assert table.partitions[0].full_name == model._meta.db_table + "_pt1"
schema_editor.delete_partitioned_model(model)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert not table
partitions = db_introspection.get_partitions(model._meta.db_table)
assert len(partitions) == 0
@pytest.mark.postgres_version(lt=110000)
def test_schema_editor_create_delete_partitioned_model_list():
"""Tests whether creating a partitioned model and adding a range partition
to it using the :see:PostgresSchemaEditor works."""
method = PostgresPartitioningMethod.LIST
key = ["category"]
model = define_fake_partitioned_model(
{"name": models.TextField(), "category": models.TextField()},
{"method": method, "key": key},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_list_partition(model, "pt1", ["car", "boat"])
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert table.name == model._meta.db_table
assert table.method == method
assert table.key == key
assert table.partitions[0].full_name == model._meta.db_table + "_pt1"
schema_editor.delete_partitioned_model(model)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert not table
partitions = db_introspection.get_partitions(model._meta.db_table)
assert len(partitions) == 0
@pytest.mark.postgres_version(lt=110000)
@pytest.mark.parametrize("key", [["name"], ["id", "name"]])
def test_schema_editor_create_delete_partitioned_model_hash(key):
"""Tests whether creating a partitioned model and adding a hash partition
to it using the :see:PostgresSchemaEditor works."""
method = PostgresPartitioningMethod.HASH
model = define_fake_partitioned_model(
{"name": models.TextField()},
{"method": method, "key": key},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_hash_partition(model, "pt1", modulus=1, remainder=0)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert table.name == model._meta.db_table
assert table.method == method
assert table.key == key
assert table.partitions[0].full_name == model._meta.db_table + "_pt1"
schema_editor.delete_partitioned_model(model)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert not table
partitions = db_introspection.get_partitions(model._meta.db_table)
assert len(partitions) == 0
@pytest.mark.postgres_version(lt=110000)
def test_schema_editor_create_delete_partitioned_model_default():
"""Tests whether creating a partitioned model and adding a default
partition to it using the :see:PostgresSchemaEditor works."""
method = PostgresPartitioningMethod.LIST
key = ["category"]
model = define_fake_partitioned_model(
{"name": models.TextField(), "category": models.TextField()},
{"method": method, "key": key},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_default_partition(model, "default")
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert table.name == model._meta.db_table
assert table.method == method
assert table.key == key
assert table.partitions[0].full_name == model._meta.db_table + "_default"
schema_editor.delete_partitioned_model(model)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert not table
partitions = db_introspection.get_partitions(model._meta.db_table)
assert len(partitions) == 0
@pytest.mark.postgres_version(lt=110000)
def test_schema_editor_create_partitioned_model_no_method():
"""Tests whether its possible to create a partitioned model without
explicitly setting a partitioning method.
The default is "range" so setting one explicitely should not be
needed.
"""
model = define_fake_partitioned_model(
{"name": models.TextField(), "timestamp": models.DateTimeField()},
{"key": ["timestamp"]},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
pt = db_introspection.get_partitioned_table(model._meta.db_table)
assert pt.method == PostgresPartitioningMethod.RANGE
assert len(pt.partitions) == 0
def test_schema_editor_create_partitioned_model_no_key():
"""Tests whether trying to create a partitioned model without a
partitioning key raises :see:ImproperlyConfigured, as it's not possible to
create a partitioned model without one and we cannot have a sane
default."""
model = define_fake_partitioned_model(
{"name": models.TextField(), "timestamp": models.DateTimeField()},
{"method": PostgresPartitioningMethod.RANGE},
)
schema_editor = PostgresSchemaEditor(connection)
with pytest.raises(ImproperlyConfigured):
schema_editor.create_partitioned_model(model)
@pytest.mark.postgres_version(lt=110000)
def test_schema_editor_add_range_partition():
"""Tests whether adding a range partition works."""
model = define_fake_partitioned_model(
{"name": models.TextField(), "timestamp": models.DateTimeField()},
{"key": ["timestamp"]},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_range_partition(
model,
name="mypartition",
from_values="2019-1-1",
to_values="2019-2-1",
comment="test",
)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 1
assert table.partitions[0].name == "mypartition"
assert (
table.partitions[0].full_name == f"{model._meta.db_table}_mypartition"
)
assert table.partitions[0].comment == "test"
schema_editor.delete_partition(model, "mypartition")
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 0
@pytest.mark.postgres_version(lt=110000)
def test_schema_editor_add_list_partition():
"""Tests whether adding a list partition works."""
model = define_fake_partitioned_model(
{"name": models.TextField()},
{"method": PostgresPartitioningMethod.LIST, "key": ["name"]},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_list_partition(
model, name="mypartition", values=["1"], comment="test"
)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 1
assert table.partitions[0].name == "mypartition"
assert (
table.partitions[0].full_name == f"{model._meta.db_table}_mypartition"
)
assert table.partitions[0].comment == "test"
schema_editor.delete_partition(model, "mypartition")
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 0
@pytest.mark.postgres_version(lt=110000)
@pytest.mark.parametrize(
"method,key",
[
(PostgresPartitioningMethod.RANGE, ["timestamp"]),
(PostgresPartitioningMethod.LIST, ["name"]),
],
)
def test_schema_editor_add_default_partition(method, key):
model = define_fake_partitioned_model(
{"name": models.TextField(), "timestamp": models.DateTimeField()},
{"method": method, "key": key},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_default_partition(
model, name="mypartition", comment="test"
)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 1
assert table.partitions[0].name == "mypartition"
assert (
table.partitions[0].full_name == f"{model._meta.db_table}_mypartition"
)
assert table.partitions[0].comment == "test"
schema_editor.delete_partition(model, "mypartition")
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 0
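# A default partition catches rows that match no other partition; the
# underlying DDL is approximately (PostgreSQL 11+):
#
#   CREATE TABLE "table_mypartition" PARTITION OF "table" DEFAULT;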
django-postgres-extra-2.0.9/tests/test_schema_editor_storage_settings.py
import pytest
from django.db import connection, models
from psqlextra.backend.schema import PostgresSchemaEditor
from . import db_introspection
from .fake_model import get_fake_model
@pytest.fixture
def fake_model():
return get_fake_model(
{
"text": models.TextField(),
}
)
def test_schema_editor_storage_settings_table_alter_and_reset(fake_model):
table_name = fake_model._meta.db_table
schema_editor = PostgresSchemaEditor(connection)
schema_editor.alter_table_storage_setting(
table_name, "autovacuum_enabled", "false"
)
assert db_introspection.get_storage_settings(table_name) == {
"autovacuum_enabled": "false"
}
schema_editor.reset_table_storage_setting(table_name, "autovacuum_enabled")
assert db_introspection.get_storage_settings(table_name) == {}
def test_schema_editor_storage_settings_model_alter_and_reset(fake_model):
table_name = fake_model._meta.db_table
schema_editor = PostgresSchemaEditor(connection)
schema_editor.alter_model_storage_setting(
fake_model, "autovacuum_enabled", "false"
)
assert db_introspection.get_storage_settings(table_name) == {
"autovacuum_enabled": "false"
}
schema_editor.reset_model_storage_setting(fake_model, "autovacuum_enabled")
assert db_introspection.get_storage_settings(table_name) == {}
django-postgres-extra-2.0.9/tests/test_schema_editor_vacuum.py
import pytest
from django.core.exceptions import SuspiciousOperation
from django.db import connection, models
from django.test.utils import CaptureQueriesContext
from psqlextra.backend.schema import PostgresSchemaEditor
from .fake_model import delete_fake_model, get_fake_model
@pytest.fixture
def fake_model():
model = get_fake_model(
{
"name": models.TextField(),
}
)
yield model
delete_fake_model(model)
@pytest.fixture
def fake_model_non_concrete_field(fake_model):
model = get_fake_model(
{
"fk": models.ForeignKey(
fake_model, on_delete=models.CASCADE, related_name="fakes"
),
}
)
yield model
delete_fake_model(model)
def test_schema_editor_vacuum_not_in_transaction(fake_model):
schema_editor = PostgresSchemaEditor(connection)
with pytest.raises(SuspiciousOperation):
schema_editor.vacuum_table(fake_model._meta.db_table)
@pytest.mark.parametrize(
"kwargs,query",
[
(dict(), "VACUUM %s"),
(dict(full=True), "VACUUM (FULL) %s"),
(dict(analyze=True), "VACUUM (ANALYZE) %s"),
(dict(parallel=8), "VACUUM (PARALLEL 8) %s"),
(dict(analyze=True, verbose=True), "VACUUM (VERBOSE, ANALYZE) %s"),
(
dict(analyze=True, parallel=8, verbose=True),
"VACUUM (VERBOSE, ANALYZE, PARALLEL 8) %s",
),
(dict(freeze=True), "VACUUM (FREEZE) %s"),
(dict(verbose=True), "VACUUM (VERBOSE) %s"),
(dict(disable_page_skipping=True), "VACUUM (DISABLE_PAGE_SKIPPING) %s"),
(dict(skip_locked=True), "VACUUM (SKIP_LOCKED) %s"),
(dict(index_cleanup=True), "VACUUM (INDEX_CLEANUP) %s"),
(dict(truncate=True), "VACUUM (TRUNCATE) %s"),
],
)
@pytest.mark.django_db(transaction=True)
def test_schema_editor_vacuum_table(fake_model, kwargs, query):
schema_editor = PostgresSchemaEditor(connection)
with CaptureQueriesContext(connection) as ctx:
schema_editor.vacuum_table(fake_model._meta.db_table, **kwargs)
    queries = [q["sql"] for q in ctx.captured_queries]
assert queries == [
query % connection.ops.quote_name(fake_model._meta.db_table)
]
@pytest.mark.django_db(transaction=True)
def test_schema_editor_vacuum_table_columns(fake_model):
schema_editor = PostgresSchemaEditor(connection)
with CaptureQueriesContext(connection) as ctx:
schema_editor.vacuum_table(
fake_model._meta.db_table, ["id", "name"], analyze=True
)
queries = [query["sql"] for query in ctx.captured_queries]
assert queries == [
'VACUUM (ANALYZE) %s ("id", "name")'
% connection.ops.quote_name(fake_model._meta.db_table)
]
@pytest.mark.django_db(transaction=True)
def test_schema_editor_vacuum_model(fake_model):
schema_editor = PostgresSchemaEditor(connection)
with CaptureQueriesContext(connection) as ctx:
schema_editor.vacuum_model(fake_model, analyze=True, parallel=8)
queries = [query["sql"] for query in ctx.captured_queries]
assert queries == [
"VACUUM (ANALYZE, PARALLEL 8) %s"
% connection.ops.quote_name(fake_model._meta.db_table)
]
@pytest.mark.django_db(transaction=True)
def test_schema_editor_vacuum_model_fields(fake_model):
schema_editor = PostgresSchemaEditor(connection)
with CaptureQueriesContext(connection) as ctx:
schema_editor.vacuum_model(
fake_model,
[fake_model._meta.get_field("name")],
analyze=True,
parallel=8,
)
queries = [query["sql"] for query in ctx.captured_queries]
assert queries == [
'VACUUM (ANALYZE, PARALLEL 8) %s ("name")'
% connection.ops.quote_name(fake_model._meta.db_table)
]
@pytest.mark.django_db(transaction=True)
def test_schema_editor_vacuum_model_non_concrete_fields(
fake_model, fake_model_non_concrete_field
):
schema_editor = PostgresSchemaEditor(connection)
with CaptureQueriesContext(connection) as ctx:
schema_editor.vacuum_model(
fake_model,
[fake_model._meta.get_field("fakes")],
analyze=True,
parallel=8,
)
queries = [query["sql"] for query in ctx.captured_queries]
assert queries == [
"VACUUM (ANALYZE, PARALLEL 8) %s"
% connection.ops.quote_name(fake_model._meta.db_table)
]
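def _example_vacuum_usage(model):
    # Hypothetical usage sketch, not a test: VACUUM cannot run inside a
    # transaction block, so this assumes the connection is in autocommit
    # mode (the reason the tests above use transaction=True).
    editor = PostgresSchemaEditor(connection)
    editor.vacuum_model(model, analyze=True)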
django-postgres-extra-2.0.9/tests/test_schema_editor_view.py 0000664 0000000 0000000 00000011146 14634267343 0024403 0 ustar 00root root 0000000 0000000 from django.db import connection, models
from psqlextra.backend.schema import PostgresSchemaEditor
from . import db_introspection
from .fake_model import (
define_fake_materialized_view_model,
define_fake_view_model,
get_fake_model,
)
def test_schema_editor_create_delete_view():
"""Tests whether creating and then deleting a view using the schema editor
works as expected."""
underlying_model = get_fake_model({"name": models.TextField()})
model = define_fake_view_model(
{"name": models.TextField()},
{"query": underlying_model.objects.filter(name="test1")},
)
underlying_model.objects.create(name="test1")
underlying_model.objects.create(name="test2")
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_view_model(model)
    # view should only show records with name="test1"
objs = list(model.objects.all())
assert len(objs) == 1
assert objs[0].name == "test1"
# create another record, view should have it right away
underlying_model.objects.create(name="test1")
assert model.objects.count() == 2
# delete the view
schema_editor.delete_view_model(model)
# make sure it was actually deleted
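    # (True presumably maps to include_views in the introspection helper)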
assert model._meta.db_table not in db_introspection.table_names(True)
def test_schema_editor_replace_view():
"""Tests whether creating a view and then replacing it with another one
(thus changing the backing query) works as expected."""
underlying_model = get_fake_model({"name": models.TextField()})
model = define_fake_view_model(
{"name": models.TextField()},
{"query": underlying_model.objects.filter(name="test1")},
)
underlying_model.objects.create(name="test1")
underlying_model.objects.create(name="test2")
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_view_model(model)
objs = list(model.objects.all())
assert len(objs) == 1
assert objs[0].name == "test1"
model._view_meta.query = underlying_model.objects.filter(
name="test2"
).query.sql_with_params()
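    # sql_with_params() returns a (sql, params) tuple, one of the accepted
    # formats for a view's backing query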
schema_editor.replace_view_model(model)
objs = list(model.objects.all())
assert len(objs) == 1
assert objs[0].name == "test2"
def test_schema_editor_create_delete_materialized_view():
"""Tests whether creating and then deleting a materialized view using the
schema editor works as expected."""
underlying_model = get_fake_model({"name": models.TextField()})
model = define_fake_materialized_view_model(
{"name": models.TextField()},
{"query": underlying_model.objects.filter(name="test1")},
)
underlying_model.objects.create(name="test1")
underlying_model.objects.create(name="test2")
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_materialized_view_model(model)
    # materialized view should only show records with name="test1"
objs = list(model.objects.all())
assert len(objs) == 1
assert objs[0].name == "test1"
# delete the materialized view
schema_editor.delete_materialized_view_model(model)
# make sure it was actually deleted
assert model._meta.db_table not in db_introspection.table_names(True)
def test_schema_editor_replace_materialized_view():
"""Tests whether creating a materialized view and then replacing it with
another one (thus changing the backing query) works as expected."""
underlying_model = get_fake_model({"name": models.TextField()})
model = define_fake_materialized_view_model(
{"name": models.TextField()},
{"query": underlying_model.objects.filter(name="test1")},
{"indexes": [models.Index(fields=["name"])]},
)
underlying_model.objects.create(name="test1")
underlying_model.objects.create(name="test2")
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_materialized_view_model(model)
for index in model._meta.indexes:
schema_editor.add_index(model, index)
constraints_before = db_introspection.get_constraints(model._meta.db_table)
objs = list(model.objects.all())
assert len(objs) == 1
assert objs[0].name == "test1"
model._view_meta.query = underlying_model.objects.filter(
name="test2"
).query.sql_with_params()
schema_editor.replace_materialized_view_model(model)
objs = list(model.objects.all())
assert len(objs) == 1
assert objs[0].name == "test2"
# make sure all indexes/constraints still exists because
# replacing a materialized view involves re-creating it
constraints_after = db_introspection.get_constraints(model._meta.db_table)
assert constraints_after == constraints_before
django-postgres-extra-2.0.9/tests/test_settings.py 0000664 0000000 0000000 00000005765 14634267343 0022415 0 ustar 00root root 0000000 0000000 import pytest
from django.core.exceptions import SuspiciousOperation
from django.db import connection
from psqlextra.settings import (
postgres_prepend_local_search_path,
postgres_reset_local_search_path,
postgres_set_local,
postgres_set_local_search_path,
)
def _get_current_setting(name: str) -> str:
with connection.cursor() as cursor:
cursor.execute(f"SHOW {name}")
return cursor.fetchone()[0]
@postgres_set_local(statement_timeout="2s", lock_timeout="3s")
def test_postgres_set_local_function_decorator():
assert _get_current_setting("statement_timeout") == "2s"
assert _get_current_setting("lock_timeout") == "3s"
def test_postgres_set_local_context_manager():
with postgres_set_local(statement_timeout="2s"):
assert _get_current_setting("statement_timeout") == "2s"
assert _get_current_setting("statement_timeout") == "0"
def test_postgres_set_local_iterable():
with postgres_set_local(search_path=["a", "public"]):
assert _get_current_setting("search_path") == "a, public"
assert _get_current_setting("search_path") == '"$user", public'
def test_postgres_set_local_nested():
with postgres_set_local(statement_timeout="2s"):
assert _get_current_setting("statement_timeout") == "2s"
with postgres_set_local(statement_timeout="3s"):
assert _get_current_setting("statement_timeout") == "3s"
assert _get_current_setting("statement_timeout") == "2s"
assert _get_current_setting("statement_timeout") == "0"
@pytest.mark.django_db(transaction=True)
def test_postgres_set_local_no_transaction():
with pytest.raises(SuspiciousOperation):
with postgres_set_local(statement_timeout="2s"):
pass
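def _example_set_local_usage():
    # Hypothetical usage sketch, not a test: SET LOCAL only takes effect
    # inside a transaction, so outside the test runner the block should be
    # wrapped in transaction.atomic().
    from django.db import transaction
    with transaction.atomic():
        with postgres_set_local(statement_timeout="2s"):
            assert _get_current_setting("statement_timeout") == "2s"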
def test_postgres_set_local_search_path():
with postgres_set_local_search_path(["a", "public"]):
assert _get_current_setting("search_path") == "a, public"
assert _get_current_setting("search_path") == '"$user", public'
def test_postgres_reset_local_search_path():
with postgres_set_local_search_path(["a", "public"]):
with postgres_reset_local_search_path():
assert _get_current_setting("search_path") == '"$user", public'
assert _get_current_setting("search_path") == "a, public"
assert _get_current_setting("search_path") == '"$user", public'
def test_postgres_prepend_local_search_path():
with postgres_prepend_local_search_path(["a", "b"]):
assert _get_current_setting("search_path") == 'a, b, "$user", public'
assert _get_current_setting("search_path") == '"$user", public'
def test_postgres_prepend_local_search_path_nested():
with postgres_prepend_local_search_path(["a", "b"]):
with postgres_prepend_local_search_path(["c"]):
assert (
_get_current_setting("search_path")
== 'c, a, b, "$user", public'
)
assert _get_current_setting("search_path") == 'a, b, "$user", public'
assert _get_current_setting("search_path") == '"$user", public'
django-postgres-extra-2.0.9/tests/test_unique_index.py 0000664 0000000 0000000 00000001704 14634267343 0023237 0 ustar 00root root 0000000 0000000 from django.db import models
from django.db.migrations import AddIndex, CreateModel
from psqlextra.indexes import UniqueIndex
from .migrations import apply_migration, filtered_schema_editor
def test_unique_index_migrations():
index = UniqueIndex(fields=["name", "other_name"], name="index1")
ops = [
CreateModel(
name="mymodel",
fields=[
("name", models.TextField()),
("other_name", models.TextField()),
],
options={
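            # the index is added via the explicit AddIndex operation below,
            # rather than through the model's Meta options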
# "indexes": [index],
},
),
AddIndex(model_name="mymodel", index=index),
]
with filtered_schema_editor("CREATE UNIQUE INDEX") as calls:
apply_migration(ops)
calls = [call[0] for _, call, _ in calls["CREATE UNIQUE INDEX"]]
db_table = "tests_mymodel"
query = 'CREATE UNIQUE INDEX "index1" ON "{0}" ("name", "other_name")'
assert str(calls[0]) == query.format(db_table)
django-postgres-extra-2.0.9/tests/test_upsert.py 0000664 0000000 0000000 00000033031 14634267343 0022062 0 ustar 00root root 0000000 0000000 import django
import pytest
from django.db import connection, models
from django.db.models import F, Q
from django.db.models.expressions import CombinedExpression, Value
from django.test.utils import CaptureQueriesContext
from psqlextra.expressions import ExcludedCol
from psqlextra.fields import HStoreField
from psqlextra.query import ConflictAction
from .fake_model import get_fake_model
def test_upsert():
"""Tests whether simple upserts works correctly."""
model = get_fake_model(
{
"title": HStoreField(uniqueness=["key1"]),
"cookies": models.CharField(max_length=255, null=True),
}
)
obj1 = model.objects.upsert_and_get(
conflict_target=[("title", "key1")],
fields=dict(title={"key1": "beer"}, cookies="cheers"),
)
obj1.refresh_from_db()
assert obj1.title["key1"] == "beer"
assert obj1.cookies == "cheers"
obj2 = model.objects.upsert_and_get(
conflict_target=[("title", "key1")],
fields=dict(title={"key1": "beer"}, cookies="choco"),
)
obj1.refresh_from_db()
obj2.refresh_from_db()
# assert both objects are the same
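    # both upserts compile to INSERT ... ON CONFLICT DO UPDATE ... RETURNING,
    # so the second call updated the existing row in place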
assert obj1.id == obj2.id
assert obj1.title["key1"] == "beer"
assert obj1.cookies == "choco"
assert obj2.title["key1"] == "beer"
assert obj2.cookies == "choco"
def test_upsert_explicit_pk():
"""Tests whether upserts works when the primary key is explicitly
specified."""
model = get_fake_model(
{
"name": models.CharField(max_length=255, primary_key=True),
"cookies": models.CharField(max_length=255, null=True),
}
)
obj1 = model.objects.upsert_and_get(
conflict_target=[("name")],
fields=dict(name="the-object", cookies="first-cheers"),
)
obj1.refresh_from_db()
assert obj1.name == "the-object"
assert obj1.cookies == "first-cheers"
obj2 = model.objects.upsert_and_get(
conflict_target=[("name")],
fields=dict(name="the-object", cookies="second-boo"),
)
obj1.refresh_from_db()
obj2.refresh_from_db()
# assert both objects are the same
assert obj1.pk == obj2.pk
assert obj1.name == "the-object"
assert obj1.cookies == "second-boo"
assert obj2.name == "the-object"
assert obj2.cookies == "second-boo"
def test_upsert_one_to_one_field():
model1 = get_fake_model({"title": models.TextField(unique=True)})
model2 = get_fake_model(
{"model1": models.OneToOneField(model1, on_delete=models.CASCADE)}
)
obj1 = model1.objects.create(title="hello world")
obj2_id = model2.objects.upsert(
conflict_target=["model1"], fields=dict(model1=obj1)
)
obj2 = model2.objects.get(id=obj2_id)
assert obj2.model1 == obj1
def test_upsert_with_update_condition():
"""Tests that an expression can be used as an upsert update condition."""
model = get_fake_model(
{
"name": models.TextField(unique=True),
"priority": models.IntegerField(),
"active": models.BooleanField(),
}
)
obj1 = model.objects.create(name="joe", priority=1, active=False)
# should not return anything because no rows were affected
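    # ExcludedCol("active") renders as EXCLUDED."active", the row proposed
    # for insertion in PostgreSQL's ON CONFLICT DO UPDATE clause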
assert not model.objects.upsert(
conflict_target=["name"],
update_condition=CombinedExpression(
model._meta.get_field("active").get_col(model._meta.db_table),
"=",
ExcludedCol("active"),
),
fields=dict(name="joe", priority=2, active=True),
)
obj1.refresh_from_db()
assert obj1.priority == 1
assert not obj1.active
# should return something because one row was affected
obj1_pk = model.objects.upsert(
conflict_target=["name"],
update_condition=CombinedExpression(
model._meta.get_field("active").get_col(model._meta.db_table),
"=",
Value(False),
),
fields=dict(name="joe", priority=2, active=True),
)
obj1.refresh_from_db()
assert obj1.pk == obj1_pk
assert obj1.priority == 2
assert obj1.active
@pytest.mark.parametrize("update_condition_value", [0, False])
def test_upsert_with_update_condition_false(update_condition_value):
"""Tests that an expression can be used as an upsert update condition."""
model = get_fake_model(
{
"name": models.TextField(unique=True),
"priority": models.IntegerField(),
"active": models.BooleanField(),
}
)
obj1 = model.objects.create(name="joe", priority=1, active=False)
with CaptureQueriesContext(connection) as ctx:
upsert_result = model.objects.upsert(
conflict_target=["name"],
update_condition=update_condition_value,
fields=dict(name="joe", priority=2, active=True),
)
assert upsert_result is None
assert len(ctx) == 1
assert 'ON CONFLICT ("name") DO NOTHING' in ctx[0]["sql"]
obj1.refresh_from_db()
assert obj1.priority == 1
assert not obj1.active
def test_upsert_with_update_values():
"""Tests that the default update values can be overriden with custom
expressions."""
model = get_fake_model(
{
"name": models.TextField(unique=True),
"count": models.IntegerField(default=0),
}
)
obj1 = model.objects.create(name="joe")
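    # update_values replaces the default SET clause; the F() expression is
    # evaluated against the existing row inside ON CONFLICT DO UPDATE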
model.objects.upsert(
conflict_target=["name"],
fields=dict(name="joe"),
update_values=dict(
count=F("count") + 1,
),
)
obj1.refresh_from_db()
assert obj1.count == 1
def test_upsert_with_update_values_empty():
"""Tests that an upsert with an empty dict turns into ON CONFLICT DO
NOTHING."""
model = get_fake_model(
{
"name": models.TextField(unique=True),
"count": models.IntegerField(default=0),
}
)
obj1 = model.objects.create(name="joe")
model.objects.upsert(
conflict_target=["name"],
fields=dict(name="joe"),
update_values={},
)
obj1.refresh_from_db()
assert obj1.count == 0
@pytest.mark.skipif(
django.VERSION < (3, 1), reason="requires django 3.1 or newer"
)
def test_upsert_with_update_condition_with_q_object():
"""Tests that :see:Q objects can be used as an upsert update condition."""
model = get_fake_model(
{
"name": models.TextField(unique=True),
"priority": models.IntegerField(),
"active": models.BooleanField(),
}
)
obj1 = model.objects.create(name="joe", priority=1, active=False)
# should not return anything because no rows were affected
assert not model.objects.upsert(
conflict_target=["name"],
update_condition=Q(active=ExcludedCol("active")),
fields=dict(name="joe", priority=2, active=True),
)
obj1.refresh_from_db()
assert obj1.priority == 1
assert not obj1.active
# should return something because one row was affected
obj1_pk = model.objects.upsert(
conflict_target=["name"],
update_condition=Q(active=Value(False)),
fields=dict(name="joe", priority=2, active=True),
)
obj1.refresh_from_db()
assert obj1.pk == obj1_pk
assert obj1.priority == 2
assert obj1.active
def test_upsert_and_get_applies_converters():
"""Tests that converters are properly applied when using upsert_and_get."""
class MyCustomField(models.TextField):
def from_db_value(self, value, expression, connection):
return value.replace("hello", "bye")
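    # from_db_value is Django's converter hook for values read back from the
    # database; upsert_and_get reads the row back, so it must be applied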
model = get_fake_model({"title": MyCustomField(unique=True)})
obj = model.objects.upsert_and_get(
conflict_target=["title"], fields=dict(title="hello")
)
assert obj.title == "bye"
def test_bulk_upsert():
"""Tests whether bulk_upsert works properly."""
model = get_fake_model(
{
"first_name": models.CharField(
max_length=255, null=True, unique=True
),
"last_name": models.CharField(max_length=255, null=True),
}
)
model.objects.bulk_upsert(
conflict_target=["first_name"],
rows=[
dict(first_name="Swen", last_name="Kooij"),
dict(first_name="Henk", last_name="Test"),
],
)
row_a = model.objects.get(first_name="Swen")
row_b = model.objects.get(first_name="Henk")
model.objects.bulk_upsert(
conflict_target=["first_name"],
rows=[
dict(first_name="Swen", last_name="Test"),
dict(first_name="Henk", last_name="Kooij"),
],
)
row_a.refresh_from_db()
assert row_a.last_name == "Test"
row_b.refresh_from_db()
assert row_b.last_name == "Kooij"
def test_upsert_bulk_no_rows():
"""Tests whether bulk_upsert doesn't crash when specifying no rows or a
falsy value."""
model = get_fake_model(
{"name": models.CharField(max_length=255, null=True, unique=True)}
)
model.objects.on_conflict(ConflictAction.UPDATE, ["name"]).bulk_insert(
rows=[]
)
model.objects.bulk_upsert(conflict_target=["name"], rows=[])
model.objects.bulk_upsert(conflict_target=["name"], rows=None)
model.objects.on_conflict(ConflictAction.UPDATE, ["name"]).bulk_insert(
rows=None
)
def test_bulk_upsert_return_models():
"""Tests whether models are returned instead of dictionaries when
specifying the return_model=True argument."""
model = get_fake_model(
{
"id": models.BigAutoField(primary_key=True),
"name": models.CharField(max_length=255, unique=True),
}
)
rows = [dict(name="John Smith"), dict(name="Jane Doe")]
objs = model.objects.bulk_upsert(
conflict_target=["name"], rows=rows, return_model=True
)
for index, obj in enumerate(objs, 1):
assert isinstance(obj, model)
assert obj.id == index
def test_bulk_upsert_accepts_getitem_iterable():
"""Tests whether an iterable only implementing the __getitem__ method works
correctly."""
class GetItemIterable:
def __init__(self, items):
self.items = items
def __getitem__(self, key):
return self.items[key]
model = get_fake_model(
{
"id": models.BigAutoField(primary_key=True),
"name": models.CharField(max_length=255, unique=True),
}
)
rows = GetItemIterable([dict(name="John Smith"), dict(name="Jane Doe")])
objs = model.objects.bulk_upsert(
conflict_target=["name"], rows=rows, return_model=True
)
for index, obj in enumerate(objs, 1):
assert isinstance(obj, model)
assert obj.id == index
def test_bulk_upsert_accepts_iter_iterable():
"""Tests whether an iterable only implementing the __iter__ method works
correctly."""
class IterIterable:
def __init__(self, items):
self.items = items
def __iter__(self):
return iter(self.items)
model = get_fake_model(
{
"id": models.BigAutoField(primary_key=True),
"name": models.CharField(max_length=255, unique=True),
}
)
rows = IterIterable([dict(name="John Smith"), dict(name="Jane Doe")])
objs = model.objects.bulk_upsert(
conflict_target=["name"], rows=rows, return_model=True
)
for index, obj in enumerate(objs, 1):
assert isinstance(obj, model)
assert obj.id == index
def test_bulk_upsert_update_values():
model = get_fake_model(
{
"name": models.CharField(max_length=255, unique=True),
"count": models.IntegerField(default=0),
}
)
model.objects.bulk_create(
[
model(name="joe"),
model(name="john"),
]
)
    objs = model.objects.bulk_upsert(
        conflict_target=["name"],
        rows=[dict(name="joe"), dict(name="john")],
        return_model=True,
        update_values=dict(count=F("count") + 1),
    )
    assert objs
    assert all(obj.count == 1 for obj in objs)
@pytest.mark.parametrize("return_model", [True])
def test_bulk_upsert_extra_columns_in_schema(return_model):
"""Tests that extra columns being returned by the database that aren't
known by Django don't make the bulk upsert crash."""
model = get_fake_model(
{
"name": models.CharField(max_length=255, unique=True),
}
)
with connection.cursor() as cursor:
cursor.execute(
f"ALTER TABLE {model._meta.db_table} ADD COLUMN new_name text NOT NULL DEFAULT %s",
("newjoe",),
)
objs = model.objects.bulk_upsert(
conflict_target=["name"],
rows=[
dict(name="joe"),
],
return_model=return_model,
)
assert len(objs) == 1
if return_model:
assert objs[0].name == "joe"
else:
assert objs[0]["name"] == "joe"
assert sorted(list(objs[0].keys())) == ["id", "name"]
def test_upsert_extra_columns_in_schema():
"""Tests that extra columns being returned by the database that aren't
known by Django don't make the upsert crash."""
model = get_fake_model(
{
"name": models.CharField(max_length=255, unique=True),
}
)
with connection.cursor() as cursor:
cursor.execute(
f"ALTER TABLE {model._meta.db_table} ADD COLUMN new_name text NOT NULL DEFAULT %s",
("newjoe",),
)
obj_id = model.objects.upsert(
conflict_target=["name"],
fields=dict(name="joe"),
)
assert obj_id == 1
obj = model.objects.upsert_and_get(
conflict_target=["name"],
fields=dict(name="joe"),
)
assert obj.name == "joe"
django-postgres-extra-2.0.9/tests/test_view_models.py 0000664 0000000 0000000 00000007031 14634267343 0023056 0 ustar 00root root 0000000 0000000 import pytest
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.test.utils import override_settings
from psqlextra.models import PostgresMaterializedViewModel, PostgresViewModel
from .fake_model import define_fake_model, define_fake_view_model
@pytest.mark.parametrize(
"model_base", [PostgresViewModel, PostgresMaterializedViewModel]
)
@override_settings(POSTGRES_EXTRA_ANNOTATE_SQL=True)
def test_view_model_meta_query_set(model_base):
"""Tests whether you can set a :see:QuerySet to be used as the underlying
query for a view."""
model = define_fake_model({"name": models.TextField()})
view_model = define_fake_view_model(
{"name": models.TextField()},
model_base=model_base,
view_options={"query": model.objects.all()},
)
expected_sql = 'SELECT "{0}"."id", "{0}"."name" FROM "{0}"'.format(
model._meta.db_table
)
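    # with POSTGRES_EXTRA_ANNOTATE_SQL enabled, the generated SQL carries a
    # trailing /* ... */ comment, hence startswith() instead of an exact match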
assert view_model._view_meta.query[0].startswith(expected_sql + " /* ")
assert view_model._view_meta.query[1] == tuple()
@pytest.mark.parametrize(
"model_base", [PostgresViewModel, PostgresMaterializedViewModel]
)
@pytest.mark.parametrize("bind_params", [("test",), ["test"]])
def test_view_model_meta_sql_with_params(model_base, bind_params):
"""Tests whether you can set a raw SQL query with a tuple of bind params as
the underlying query for a view."""
model = define_fake_model({"name": models.TextField()})
sql = "select * from %s where name = %s" % (model._meta.db_table, "%s")
sql_with_params = (sql, bind_params)
view_model = define_fake_view_model(
{"name": models.TextField()},
model_base=model_base,
view_options={"query": sql_with_params},
)
assert view_model._view_meta.query == sql_with_params
@pytest.mark.parametrize(
"model_base", [PostgresViewModel, PostgresMaterializedViewModel]
)
def test_view_model_meta_sql_with_named_params(model_base):
"""Tests whether you can set a raw SQL query with a tuple of bind params as
the underlying query for a view."""
model = define_fake_model({"name": models.TextField()})
sql = "select * from " + model._meta.db_table + " where name = %(name)s"
sql_with_params = (sql, dict(name="test"))
view_model = define_fake_view_model(
{"name": models.TextField()},
model_base=model_base,
view_options={"query": sql_with_params},
)
assert view_model._view_meta.query == sql_with_params
@pytest.mark.parametrize(
"model_base", [PostgresViewModel, PostgresMaterializedViewModel]
)
def test_view_model_meta_sql(model_base):
"""Tests whether you can set a raw SQL query without any params."""
sql = "select 1"
view_model = define_fake_view_model(
{"name": models.TextField()},
model_base=model_base,
view_options={"query": sql},
)
assert view_model._view_meta.query == (sql, tuple())
@pytest.mark.parametrize(
"model_base", [PostgresViewModel, PostgresMaterializedViewModel]
)
@pytest.mark.parametrize(
"view_query",
[
dict(a=1),
tuple("test"),
("test", None),
(None, None),
(1, 2),
("select 1", ("a", "b"), "onetoomay"),
],
)
def test_view_model_meta_bad_query(model_base, view_query):
"""Tests whether a bad view query configuration raises and error."""
with pytest.raises(ImproperlyConfigured):
define_fake_view_model(
{"name": models.TextField()},
model_base=model_base,
view_options={"query": view_query},
)
django-postgres-extra-2.0.9/tox.ini 0000664 0000000 0000000 00000001300 14634267343 0017272 0 ustar 00root root 0000000 0000000 [tox]
envlist =
{py36,py37}-dj{20,21,22,30,31,32}-psycopg{28,29}
{py38,py39,py310}-dj{21,22,30,31,32,40}-psycopg{28,29}
{py38,py39,py310,py311}-dj{41}-psycopg{28,29}
{py310,py311}-dj{42,50}-psycopg{28,29,31}
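# each env name combines factors, e.g. py310-dj42-psycopg31 runs the suite on
# Python 3.10 with Django~=4.2.0 and psycopg[binary]~=3.1 (see deps below)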
[testenv]
deps =
dj20: Django~=2.0.0
dj21: Django~=2.1.0
dj22: Django~=2.2.0
dj30: Django~=3.0.0
dj31: Django~=3.1.0
dj32: Django~=3.2.0
dj40: Django~=4.0.0
dj41: Django~=4.1.0
dj42: Django~=4.2.0
dj50: Django~=5.0.1
psycopg28: psycopg2[binary]~=2.8
psycopg29: psycopg2[binary]~=2.9
psycopg31: psycopg[binary]~=3.1
.[test]
setenv =
DJANGO_SETTINGS_MODULE=settings
passenv = DATABASE_URL
commands = python setup.py test