peewee-3.17.7/.github/workflows/tests.yaml

name: Tests
on: [push]
jobs:
  tests:
    name: ${{ matrix.peewee-backend }} - ${{ matrix.python-version }}
    runs-on: ubuntu-latest
    timeout-minutes: 15
    services:
      mysql:
        image: mariadb:latest
        env:
          MYSQL_ROOT_PASSWORD: peewee
          MYSQL_DATABASE: peewee_test
        ports:
          - 3306:3306
      postgres:
        image: postgres
        env:
          POSTGRES_USER: postgres
          POSTGRES_PASSWORD: peewee
          POSTGRES_DB: peewee_test
        ports:
          - 5432:5432
    strategy:
      fail-fast: false
      matrix:
        python-version: [3.8, 3.9, "3.11"]
        peewee-backend:
          - "sqlite"
          - "postgresql"
          - "mysql"
        exclude:
          - python-version: 2.7
            peewee-backend: postgresql
        include:
          - python-version: 3.8
            peewee-backend: cockroachdb
          - python-version: "3.11"
            peewee-backend: cockroachdb
          - python-version: "3.11"
            peewee-backend: psycopg3
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: deps
        env:
          PGUSER: postgres
          PGHOST: 127.0.0.1
          PGPASSWORD: peewee
        run: |
          pip install psycopg2-binary cython pymysql 'apsw' mysql-connector
          python setup.py build_ext -i
          psql peewee_test -c 'CREATE EXTENSION hstore;'
      - name: sqlcipher
        if: ${{ matrix.python-version != 2.7 }}
        run: pip install sqlcipher3-binary
      - name: psycopg3
        if: ${{ matrix.python-version != 2.7 }}
        run: pip install 'psycopg[binary]'
      - name: crdb
        if: ${{ matrix.peewee-backend == 'cockroachdb' }}
        run: |
          wget -qO- https://binaries.cockroachdb.com/cockroach-v22.2.6.linux-amd64.tgz | tar xz
          ./cockroach-v22.2.6.linux-amd64/cockroach start-single-node --insecure --background
          ./cockroach-v22.2.6.linux-amd64/cockroach sql --insecure -e 'create database peewee_test;'
      - name: runtests ${{ matrix.peewee-backend }} - ${{ matrix.python-version }}
        env:
          PEEWEE_TEST_BACKEND: ${{ matrix.peewee-backend }}
          PGUSER: postgres
          PGHOST: 127.0.0.1
          PGPASSWORD: peewee
        run: python runtests.py --mysql-user=root --mysql-password=peewee -s

peewee-3.17.7/.gitignore

*.pyc
build
prof/
docs/_build/
playhouse/*.c
playhouse/*.h
playhouse/*.so
playhouse/tests/peewee_test.db
.idea/
MANIFEST
peewee_test.db
closure.so
lsm.so
regexp.so

peewee-3.17.7/.readthedocs.yaml

version: 2
python:
  install:
    - requirements: docs/requirements.txt
build:
  os: ubuntu-22.04
  tools:
    python: "3.11"
sphinx:
  configuration: docs/conf.py

peewee-3.17.7/.travis.yml

language: python
python:
  - 2.7
  - 3.4
  - 3.5
  - 3.6
env:
  - PEEWEE_TEST_BACKEND=sqlite
  - PEEWEE_TEST_BACKEND=postgresql
  - PEEWEE_TEST_BACKEND=mysql
matrix:
  include:
    - python: 3.7
      dist: xenial
      env: PEEWEE_TEST_BACKEND=sqlite
    - python: 3.7
      dist: xenial
      env: PEEWEE_TEST_BACKEND=postgresql
    - python: 3.7
      dist: xenial
      env: PEEWEE_TEST_BACKEND=mysql
    - python: 3.8
      dist: xenial
    - python: 3.7
      dist: xenial
      env:
        - PEEWEE_TEST_BUILD_SQLITE=1
        - PEEWEE_CLOSURE_EXTENSION=/usr/local/lib/closure.so
        - LSM_EXTENSION=/usr/local/lib/lsm.so
      before_install:
        - sudo apt-get install -y tcl-dev
        - ./.travis_deps.sh
        - sudo ldconfig
      script: "python runtests.py -v2"
    - python: 3.7
      dist: xenial
      env:
        - PEEWEE_TEST_BACKEND=cockroachdb
      before_install:
        - wget -qO- https://binaries.cockroachdb.com/cockroach-v20.1.1.linux-amd64.tgz | tar xvz
        - ./cockroach-v20.1.1.linux-amd64/cockroach start --insecure --background
        - ./cockroach-v20.1.1.linux-amd64/cockroach sql --insecure -e 'create database peewee_test;'
  allow_failures:
addons:
  postgresql: "9.6"
  mariadb: "10.3"
services:
  - postgresql
  - mariadb
install: "pip install psycopg2-binary Cython pymysql apsw mysql-connector"
before_script:
  - python setup.py build_ext -i
  - psql -c 'drop database if exists peewee_test;' -U postgres
  - psql -c 'create database peewee_test;' -U postgres
  - psql peewee_test -c 'create extension hstore;' -U postgres
  - mysql -e 'drop user if exists travis@localhost;'
  - mysql -e 'create user travis@localhost;'
  - mysql -e 'drop database if exists peewee_test;'
  - mysql -e 'create database peewee_test;'
  - mysql -e 'grant all on *.* to travis@localhost;' || true
script: "python runtests.py"

peewee-3.17.7/.travis_deps.sh

#!/bin/bash

setup_sqlite_deps() {
  wget https://www.sqlite.org/src/tarball/sqlite.tar.gz
  tar xzf sqlite.tar.gz
  cd sqlite/
  export CFLAGS="-DSQLITE_ENABLE_FTS3 \
    -DSQLITE_ENABLE_FTS3_PARENTHESIS \
    -DSQLITE_ENABLE_FTS4 \
    -DSQLITE_ENABLE_FTS5 \
    -DSQLITE_ENABLE_JSON1 \
    -DSQLITE_ENABLE_LOAD_EXTENSION \
    -DSQLITE_ENABLE_UPDATE_DELETE_LIMIT \
    -DSQLITE_TEMP_STORE=3 \
    -DSQLITE_USE_URI \
    -O2 \
    -fPIC"
  export PREFIX="/usr/local"
  LIBS="-lm" ./configure \
    --disable-tcl \
    --enable-shared \
    --enable-tempstore=always \
    --prefix="$PREFIX"
  make && sudo make install

  cd ext/misc/
  # Build the transitive closure extension and copy shared library.
  gcc -fPIC -O2 -lsqlite3 -shared closure.c -o closure.so
  sudo cp closure.so /usr/local/lib

  # Build the lsm1 extension and copy shared library.
  cd ../lsm1
  export CFLAGS="-fPIC -O2"
  TCCX="gcc -fPIC -O2" make lsm.so
  sudo cp lsm.so /usr/local/lib
}

if [ -n "$PEEWEE_TEST_BUILD_SQLITE" ]; then
  setup_sqlite_deps
fi

peewee-3.17.7/CHANGELOG.md

# Changelog

Tracking changes in peewee between versions. For a complete view of all the releases, visit GitHub:

https://github.com/coleifer/peewee/releases

## master

[View commits](https://github.com/coleifer/peewee/compare/3.17.7...master)

## 3.17.7

* Add db_url support for psycopg3 via `psycopg3://`.
* Ensure double-quotes are escaped properly when introspecting constraints.
* A few documentation-related fixes.

[View commits](https://github.com/coleifer/peewee/compare/3.17.6...3.17.7)

## 3.17.6

* Fix bug in recursive `model.delete_instance()` when a table contains foreign-keys at multiple depths of the graph, #2893.
* Fix regression in pool behavior on systems where `time.time()` returns identical values for two connections. This adds a no-op comparable sentinel to the heap to prevent any recurrence of this problem, #2901.
* Ensure that subqueries inside `CASE` statements generate correct SQL.
* Fix regression that broke server-side cursors with Postgres (introduced in 3.16.0).
* Fix to ensure compatibility with psycopg3 - the libpq TransactionStatus constants are no longer available on the `Connection` instance.
* Fix quoting issue in pwiz that could generate invalid python code for double-quoted string literals used as column defaults.

[View commits](https://github.com/coleifer/peewee/compare/3.17.5...3.17.6)

## 3.17.5

This release fixes a build system problem in Python 3.12, #2891.

[View commits](https://github.com/coleifer/peewee/compare/3.17.4...3.17.5)

## 3.17.4

* Fix bug that could occur when using CASE inside a function, and one or more of the CASE clauses consisted of a subquery. Refs #2873.
* Fix bug in the conversion of TIMESTAMP type in Sqlite on Python 3.12+.
* Fix for hybrid properties on subclasses when aliased (#2888).
* Many fixes for SqliteQueueDatabase (#2874, #2876, #2877).

[View commits](https://github.com/coleifer/peewee/compare/3.17.3...3.17.4)

## 3.17.3

* Better fix for #2871 (extraneous queries when coercing query to list), and new fix for #2872 (regression in truthiness of cursor).

[View commits](https://github.com/coleifer/peewee/compare/3.17.2...3.17.3)

## 3.17.2

* Full support for `psycopg3`.
* Basic support for Sqlite `jsonb`.
* Fix bug where calling `list(query)` resulted in extra queries, #2871.

[View commits](https://github.com/coleifer/peewee/compare/3.17.1...3.17.2)

## 3.17.1

* Add bitwise and other helper methods to `BigBitField`, #2802.
* Add `add_column_default` and `drop_column_default` migrator methods for specifying a server-side default value, #2803.
* The new `star` attribute was causing issues for users who had a field named star on their models. This attribute is now renamed to `__star__`. #2796.
* Fix compatibility issues with 3.12 related to utcnow() deprecation.
* Add stricter locking on connection pool to prevent race conditions.
* Add adapters and converters to Sqlite to replace ones deprecated in 3.12.
* Fix bug in `model_to_dict()` when only aliases are present.
* Fix version check for Sqlite native drop column support.
* Do not specify a `reconnect=` argument to `ping()` if using MySQL 8.x.

[View commits](https://github.com/coleifer/peewee/compare/3.17.0...3.17.1)

## 3.17.0

* Only roll-back in the outermost `@db.transaction` decorator/ctx manager if an unhandled exception occurs. Previously, an unhandled exception that occurred in a nested `transaction` context would trigger a rollback. The use of nested `transaction` has long been discouraged in the documentation: the recommendation is to always use `db.atomic`, which will use savepoints to properly handle nested blocks. However, the new behavior should make it easier to reason about transaction boundaries - see #2767 for discussion.
* Cover transaction `BEGIN` in the reconnect-mixin. Given that no transaction has been started, reconnecting when beginning a new transaction ensures that a reconnect will occur if it is safe to do so.
* Add support for setting `isolation_level` in `db.atomic()` and `db.transaction()` when using Postgres and MySQL/MariaDB, which will apply to the wrapped transaction. Note: Sqlite has supported a similar `lock_type` parameter for some time.
* Add support for the Sqlite `SQLITE_DETERMINISTIC` function flag. This allows user-defined Sqlite functions to be used in indexes and may be used by the query planner.
* Fix unreported bug in dataset import when inferred field name differs from column name.

[View commits](https://github.com/coleifer/peewee/compare/3.16.3...3.17.0)

## 3.16.3

* Support for Cython 3.0.
* Add flag to `ManyToManyField` to prevent setting/getting values on unsaved instances. This is worthwhile, since reading or writing a many-to-many has no meaning when the instance is unsaved.
* Adds a `star()` helper to `Source` base-class for selecting all columns.
* Fix missing `binary` types for mysql-connector and mariadb-connector.
* Add `extract()` method to MySQL `JSONField` for extracting a jsonpath (see the sketch below).
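
A minimal sketch of the new `JSONField.extract()` helper; the `Event` model and its data are hypothetical, and the query is only constructed, not executed:

```python
# A minimal sketch of JSONField.extract(); the Event model is hypothetical.
from peewee import Model, CharField, MySQLDatabase
from playhouse.mysql_ext import JSONField

db = MySQLDatabase('peewee_test')

class Event(Model):
    name = CharField()
    metadata = JSONField()

    class Meta:
        database = db

# Select each event's name along with the value at the given jsonpath.
query = Event.select(
    Event.name,
    Event.metadata.extract('$.tags[0]').alias('first_tag'))
```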

[View commits](https://github.com/coleifer/peewee/compare/3.16.2...3.16.3)

## 3.16.2

Fixes a longstanding issue with thread-safety of various decorators, including `atomic()`, `transaction()` and `savepoint()`. The context-managers are unaffected. See #2709 for details.

[View commits](https://github.com/coleifer/peewee/compare/3.16.1...3.16.2)

## 3.16.1

* Add changes required for building against Cython 3.0 and set Cython language-level to 3.
* Ensure indexes aren't added to unindexed fields during introspection, #2691.
* Ensure we don't redundantly select the same PK in prefetch when using PREFETCH_TYPE.JOIN.
* In Sqlite migrator, use Sqlite's builtin DROP and RENAME column facilities when possible. This can be overridden by passing the `legacy=True` flag.

[View commits](https://github.com/coleifer/peewee/compare/3.16.0...3.16.1)

## 3.16.0

This release contains backwards-incompatible changes in the way Peewee initializes connections to the underlying database driver. Previously, peewee implemented autocommit semantics *on-top* of the existing DB-API transactional workflow. Going forward, Peewee instead places the DB-API driver into autocommit mode directly.

Why this change? Previously, Peewee emulated autocommit behavior for top-level queries issued outside of a transaction. This necessitated a number of checks which had to be performed each time a query was executed, so as to ensure that we didn't end up with uncommitted writes or, conversely, idle read transactions. By running the underlying driver in autocommit mode, we can eliminate all these checks, since we are already managing transactions ourselves. Behaviorally, there should be no change -- Peewee will still treat top-level queries outside of transactions as being autocommitted, while queries inside of `atomic()` / `with db:` blocks are implicitly committed at the end of the block, or rolled-back if an exception occurs.

**How might this affect me?**

* If you are using the underlying database connection or cursors, e.g. via `Database.connection()` or `Database.cursor()`, your queries will now be executed in autocommit mode.
* The `commit=` argument is deprecated for the `cursor()`, `execute()` and `execute_sql()` methods.
* If you have a custom `Database` implementation (whether for a database that is not officially supported, or for the purpose of overriding default behaviors), you will want to ensure that your connections are opened in autocommit mode.

Other changes:

* Some fixes to help with packaging in Python 3.11.
* MySQL `get_columns()` implementation now returns columns in their declared order.

[View commits](https://github.com/coleifer/peewee/compare/3.15.4...3.16.0)

## 3.15.4

* Raise an exception in `ReconnectMixin` if the connection is lost while inside a transaction (if the transaction was interrupted, presumably some changes were lost and explicit intervention is needed).
* Add `db.Model` property to reduce boilerplate.
* Add support for running `prefetch()` queries with joins instead of subqueries (this helps overcome a MySQL limitation about applying LIMITs to a subquery).
* Add SQL `AVG` to whitelist to avoid coercing by default.
* Allow arbitrary keywords in metaclass constructor, #2627.
* Add a `pyproject.toml` to silence warnings from newer pips when the `wheel` package is not available.

This release has a small helper for reducing boilerplate in some cases by exposing a base model class as an attribute of the database instance.

```python
# old:
db = SqliteDatabase('...')

class BaseModel(Model):
    class Meta:
        database = db

class MyModel(BaseModel):
    pass

# new:
db = SqliteDatabase('...')

class MyModel(db.Model):
    pass
```

[View commits](https://github.com/coleifer/peewee/compare/3.15.3...3.15.4)

## 3.15.3

* Add `scalars()` query method (complements `scalar()`), roughly equivalent to writing `[t[0] for t in query.tuples()]`.
* Small doc improvements.
* Fix and remove some flaky test assertions with Sqlite INSERT + RETURNING.
* Fix innocuous failing Sqlite test on big-endian machines.

[View commits](https://github.com/coleifer/peewee/compare/3.15.2...3.15.3)

## 3.15.2

* Fix bug where field-specific conversions were being applied to the pattern used for LIKE / ILIKE operations. Refs #2609.
* Fix possible infinite loop when accidentally invoking the `__iter__` method on certain `Column` subclasses. Refs #2606.
* Add new helper for specifying which Model a particular selected column-like should be bound to, in queries with joins that select from multiple sources.

[View commits](https://github.com/coleifer/peewee/compare/3.15.1...3.15.2)

## 3.15.1

* Fix issue introduced in Sqlite 3.39.0 regarding the propagation of column subtypes in subqueries.
* Fix bug where cockroachdb server version was not set when beginning a transaction on an unopened database.

[View commits](https://github.com/coleifer/peewee/compare/3.15.0...3.15.1)

## 3.15.0

Rollback behavior change in commit ab43376697 (GH #2026). Peewee will no longer automatically return the cursor `rowcount` for certain bulk-inserts. This should mainly affect users of MySQL and Sqlite who relied on a bulk INSERT returning the `rowcount` (as opposed to the cursor's `lastrowid`). The `rowcount` behavior is still available by chaining the `as_rowcount()` method:

```python
# NOTE: this change only affects MySQL or Sqlite.
db = MySQLDatabase(...)

# Previously, bulk inserts of the following forms would return the rowcount.
query = User.insert_many(...)  # Bulk insert.
query = User.insert_from(...)  # Bulk insert (INSERT INTO .. SELECT FROM).

# Previous behavior (peewee 3.12 - 3.14.10):
# rows_inserted = query.execute()

# New behavior:
last_id = query.execute()

# To get the old behavior back:
rows_inserted = query.as_rowcount().execute()
```

Additionally, in previous versions specifying an empty `.returning()` with Postgres would cause the rowcount to be returned. For Postgres users who wish to receive the rowcount:

```python
# NOTE: this change only affects Postgresql.
db = PostgresqlDatabase(...)

# Previously, an empty returning() would return the rowcount.
query = User.insert_many(...)  # Bulk insert.
query = User.insert_from(...)  # Bulk insert (INSERT INTO .. SELECT FROM).

# Old behavior:
# rows_inserted = query.returning().execute()

# To get the rows inserted in 3.15 and newer:
rows_inserted = query.as_rowcount().execute()
```

This release contains a fix for a long-standing request to allow data-modifying queries to support CTEs. CTEs are now supported for use with INSERT, DELETE and UPDATE queries - see #2152.

Additionally, this release adds better support for using the new `RETURNING` syntax with Sqlite automatically. Specify `returning_clause=True` when initializing your `SqliteDatabase` and all bulk inserts will automatically specify a `RETURNING` clause, returning the newly-inserted primary keys. This functionality requires Sqlite 3.35 or newer.

Smaller changes:

* Add `shortcuts.insert_where()` helper for generating conditional INSERT with a bit less boilerplate.
* Fix bug in `test_utils.count_queries()` which could erroneously include pool events such as connect/disconnect, etc.

[View commits](https://github.com/coleifer/peewee/compare/3.14.10...3.15.0)

## 3.14.10

* Add shortcut for conditional insert using sub-select, see #2528.
* Add convenience `left_outer_join()` method to query.
* Add `selected_columns` property to Select queries.
* Add `name` property to Alias instances.
* Fix regression in tests introduced by change to DataSet in 3.14.9.

[View commits](https://github.com/coleifer/peewee/compare/3.14.9...3.14.10)

## 3.14.9

* Allow calling `table_exists()` with a model-class, refs
* Improve `is_connection_usable()` method of `MySQLDatabase` class.
* Better support for VIEWs with `playhouse.dataset.DataSet` and sqlite-web.
* Support INSERT / ON CONFLICT in `playhouse.kv` for newer Sqlite.
* Add `ArrayField.contained_by()` method, a corollary to `contains()` and the `contains_any()` methods.
* Support cyclical foreign-key relationships in reflection/introspection, and also for sqlite-web.
* Add magic methods for FTS5 field to optimize, rebuild and integrity check the full-text index.
* Add fallbacks in `setup.py` in the event distutils is not available.

[View commits](https://github.com/coleifer/peewee/compare/3.14.8...3.14.9)

## 3.14.8

Back-out all changes to automatically use RETURNING for `SqliteExtDatabase`, `CSqliteExtDatabase` and `APSWDatabase`. The issue I found is that when a RETURNING cursor is not fully-consumed, any parent SAVEPOINT (and possibly transaction) would not be able to be released. Since this is a backwards-incompatible change, I am going to back it out for now.

A RETURNING clause can still be specified for Sqlite; it just needs to be done manually rather than being applied automatically.

[View commits](https://github.com/coleifer/peewee/compare/3.14.7...3.14.8)

## 3.14.7

Fix bug in APSW extension with Sqlite 3.35 and newer, due to handling of last insert rowid with RETURNING. Refs #2479.

[View commits](https://github.com/coleifer/peewee/compare/3.14.6...3.14.7)

## 3.14.6

Fix pesky bug in new `last_insert_id()` on the `SqliteExtDatabase`.

[View commits](https://github.com/coleifer/peewee/compare/3.14.5...3.14.6)

## 3.14.5

This release contains a number of bug-fixes and small improvements.

* Only raise `DoesNotExist` when `lazy_load` is enabled on ForeignKeyField, fixes issue #2377.
* Add missing convenience method `ModelSelect.get_or_none()`.
* Allow `ForeignKeyField` to specify a custom `BackrefAccessorClass`, references issue #2391.
* Ensure foreign-key-specific conversions are applied on INSERT and UPDATE, fixes #2408.
* Add handling of MySQL error 4031 (inactivity timeout) to the `ReconnectMixin` helper class. Fixes #2419.
* Support specification of conflict target for ON CONFLICT / DO NOTHING.
* Add `encoding` parameter to the DataSet `freeze()` and `thaw()` methods, fixes #2425.
* Fix bug which prevented `DeferredForeignKey` from being used as a model's primary key, fixes #2427.
* Ensure a foreign key's related object cache is cleared when the foreign-key is set to `None`. Fixes #2428.
* Allow specification of `(schema, table)` to be used with CREATE TABLE AS..., fixes #2423.
* Allow reusing open connections with DataSet, refs #2441.
* Add `highlight()` and `snippet()` helpers to Sqlite `SearchField`, for use with the full-text search extension.
* Preserve user-provided aliases in column names. Fixes #2453.
* Add support for Sqlite 3.37 strict tables.
* Ensure database is inherited when using `ThreadSafeDatabaseMetadata`, and also add an implementation in `playhouse.shortcuts` along with basic unit tests.
* Better handling of Model's dirty fields when saving, fixes #2466.
* Add basic support for the MariaDB connector driver in `playhouse.mysql_ext`, refs issue #2471.
* Begin a basic implementation for a psycopg3-compatible pg database, refs issue #2473.
* Add provisional support for RETURNING when using the appropriate versions of Sqlite or MariaDB.

[View commits](https://github.com/coleifer/peewee/compare/3.14.4...3.14.5)

## 3.14.4

This release contains an important fix for a regression introduced by commit ebe3ad5, which affected the way model instances are converted to parameters for use in expressions within a query. The bug could manifest when code uses model instances as parameters in expressions against fields that are not foreign-keys. The issue is described in #2376.

[View commits](https://github.com/coleifer/peewee/compare/3.14.3...3.14.4)

## 3.14.3

This release contains a single fix for ensuring NULL values are inserted when issuing a bulk-insert of heterogeneous dictionaries which may be missing explicit NULL values. Fixes issue #2638.

[View commits](https://github.com/coleifer/peewee/compare/3.14.2...3.14.3)

## 3.14.2

This is a small release mainly to get some fixes out.

* Support for named `Check` and foreign-key constraints.
* Better foreign-key introspection for CockroachDB (and Postgres).
* Register UUID adapter for Postgres.
* Add `fn.array_agg()` to blacklist for automatic value coercion.

[View commits](https://github.com/coleifer/peewee/compare/3.14.1...3.14.2)

## 3.14.1

This release contains primarily bugfixes.

* Properly delegate to a foreign-key field's `db_value()` function when converting model instances. #2304.
* Strip quote marks and parentheses from column names returned by sqlite cursor when a function-call is projected without an alias. #2305.
* Fix `DataSet.create_index()` method, #2319.
* Fix column-to-model mapping in model-select from subquery with joins, #2320.
* Improvements to foreign-key lazy-loading thanks @conqp, #2328.
* Preserve and handle `CHECK()` constraints in Sqlite migrator, #2343.
* Add `stddev` aggregate function to collection of sqlite user-defined funcs.

[View commits](https://github.com/coleifer/peewee/compare/3.14.0...3.14.1)

## 3.14.0

This release has been a bit overdue and there are numerous small improvements and bug-fixes. The bugfix that prompted this release is #2293, which is a regression in the Django-inspired `.filter()` APIs that could cause some filter expressions to be discarded from the generated SQL. Many thanks for the excellent bug report, Jakub.

* Add an experimental helper, `shortcuts.resolve_multimodel_query()`, for resolving multiple models used in a compound select query.
* Add a `lateral()` method to select query for use with lateral joins, refs issue #2205.
* Added support for nested transactions (savepoints) in cockroach-db (requires 20.1 or newer).
* Automatically escape wildcards passed to string-matching methods, refs #2224.
* Allow index-type to be specified on MySQL, refs #2242.
* Added a new API, `converter()`, to be used for specifying a function to use to convert a row-value pulled off the cursor, refs #2248.
* Add `set()` and `clear()` methods to the bitfield flag descriptor, refs #2257 (see the sketch after this list).
* Add support for `range` types with `IN` and other expressions.
* Support CTEs bound to compound select queries, refs #2289.
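
A minimal sketch of the flag-descriptor helpers mentioned above; the `Post` model is hypothetical:

```python
# A minimal sketch of the BitField flag descriptor's set()/clear() helpers.
from peewee import Model, BitField, SqliteDatabase

db = SqliteDatabase(':memory:')

class Post(Model):
    flags = BitField()
    is_sticky = flags.flag(1)
    is_favorite = flags.flag(2)

    class Meta:
        database = db

db.create_tables([Post])

# set() and clear() produce expressions suitable for an UPDATE query.
Post.update(flags=Post.is_sticky.set()).execute()
Post.update(flags=Post.is_favorite.clear()).execute()

# The descriptor can also be used directly in a WHERE clause.
sticky = Post.select().where(Post.is_sticky)
```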

### Bug-fixes

* Fix to return related object id when accessing via the object-id descriptor, when the related object is not populated, refs #2162.
* Fix to ensure we do not insert a NULL value for a primary key.
* Fix to conditionally set the field/column on an added column in a migration, refs #2171.
* Apply field conversion logic to model-class values. Relocates the logic from issue #2131 and fixes #2185.
* Clone node before modifying it to be flat in an enclosed nodelist expr, fixes issue #2200.
* Fix an invalid item assignment in nodelist, refs #2220.
* Fix an incorrect truthiness check used with `save()` and `only=`, refs #2269.
* Fix regression in `filter()` where using both `*args` and `**kwargs` caused the expressions passed as `args` to be discarded. See #2293.

[View commits](https://github.com/coleifer/peewee/compare/3.13.3...3.14.0)

## 3.13.3

* Allow arbitrary keyword arguments to be passed to the `DataSet` constructor, which are then passed to the introspector.
* Allow scalar subqueries to be compared using numeric operands.
* Fix `bulk_create()` when the model being inserted uses FK identifiers.
* Fix `bulk_update()` so that PK values are properly coerced to the right data-type (e.g. UUIDs to strings for Sqlite).
* Allow array indices to be used as dict keys, e.g. for the purposes of updating a single array index value.

[View commits](https://github.com/coleifer/peewee/compare/3.13.2...3.13.3)

## 3.13.2

* Allow aggregate functions to support an `ORDER BY` clause, via the addition of an `order_by()` method to the function (`fn`) instance. Refs #2094.
* Fix `prefetch()` bug, where related "backref" instances were marked as dirty, even though they had no changes. Fixes #2091.
* Support `LIMIT 0`. Previously a limit of 0 would be translated into effectively an unlimited query on MySQL. References #2084.
* Support indexing into arrays using expressions with Postgres array fields. References #2085.
* Ensure postgres introspection methods return the columns for multi-column indexes in the correct order. Fixes #2104.
* Add support for arrays of UUIDs to postgres introspection.
* Fix introspection of columns w/capitalized table names in postgres (#2110).
* Fix to ensure correct exception is raised in SqliteQueueDatabase when iterating over cursor/result-set.
* Fix bug comparing subquery against a scalar value. Fixes #2118.
* Fix issue resolving composite primary-keys that include foreign-keys when building the model-graph. Fixes #2115.
* Allow model-classes to be passed as arguments, e.g., to a table function. Refs #2131.
* Ensure postgres `JSONField.concat()` accepts expressions as arguments.

[View commits](https://github.com/coleifer/peewee/compare/3.13.1...3.13.2)

## 3.13.1

Fix a regression when specifying keyword arguments to the `atomic()` or `transaction()` helper methods. Note: this only occurs if you were using Sqlite and were explicitly setting the `lock_type=` parameter.

[View commits](https://github.com/coleifer/peewee/compare/3.13.0...3.13.1)

## 3.13.0

### CockroachDB support added

This is a notable release as it adds support for [CockroachDB](https://cockroachlabs.com/), a distributed, horizontally-scalable SQL database.

* [CockroachDB usage overview](http://docs.peewee-orm.com/en/latest/peewee/database.html#using-crdb)
* [CockroachDB API documentation](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#crdb)

### Other features and fixes

* Allow `FOR UPDATE` clause to specify one or more tables (`FOR UPDATE OF...`).
* Support for Postgres `LATERAL` join.
* Properly wrap exceptions raised during explicit commit/rollback in the appropriate peewee-specific exception class.
* Capture the original exception object and expose it as `exc.orig` on the wrapped exception.
* Properly introspect `SMALLINT` columns in Postgres schema reflection.
* More flexible handling of passing database-specific arguments to the `atomic()` and `transaction()` context-manager/decorator.
* Fix non-deterministic join ordering issue when using the `filter()` API across several tables (#2063).

[View commits](https://github.com/coleifer/peewee/compare/3.12.0...3.13.0)

## 3.12.0

* Bulk insert (`insert_many()` and `insert_from()`) will now return the row count instead of the last insert ID. If you are using Postgres, peewee will continue to return a cursor that provides an iterator over the newly-inserted primary-key values. This behavior is retained by default for compatibility. Postgres users can simply specify an empty `returning()` call to disable the cursor and retrieve the rowcount instead.
* Migration extension now supports altering a column's data-type, via the new `alter_column_type()` method.
* Added `Database.is_connection_usable()` method, which attempts to look at the status of the underlying DB-API connection to determine whether the connection is usable.
* Common table expressions include a `materialized` parameter, which can be used to control Postgres' optimization fencing around CTEs.
* Added `BloomFilter.from_buffer()` method for populating a bloom-filter from the output of a previous call to the `to_buffer()` method.
* Fixed APSW extension's `commit()` and `rollback()` methods to no-op if the database is in auto-commit mode.
* Added `generate_always=` option to the `IdentityField` (defaults to False).

[View commits](https://github.com/coleifer/peewee/compare/3.11.2...3.12.0)

## 3.11.2

* Implement `hash` interface for `Alias` instances, allowing them to be used in multi-source queries.

[View commits](https://github.com/coleifer/peewee/compare/3.11.1...3.11.2)

## 3.11.1

* Fix bug in new `_pk` / `get_id()` implementation for models that have explicitly disabled a primary-key.

[View commits](https://github.com/coleifer/peewee/compare/3.11.0...3.11.1)

## 3.11.0

* Fixes #1991. This particular issue involves joining 3 models together in a chain, where the outer two models are empty. Previously peewee would make the middle model an empty model instance (since a link might be needed from the source model to the outermost model). But since both were empty, it is more correct to make the intervening model a NULL value on the foreign-key field rather than an empty instance.
* An unrelated fix came out of the work on #1991 where hashing a model whose primary-key happened to be a foreign-key could trigger the FK resolution query. This patch fixes the `Model._pk` and `get_id()` interfaces so they no longer introduce the possibility of accidentally resolving the FK.
* Allow `Field.contains()`, `startswith()` and `endswith()` to compare against another column-like object or expression.
* Workaround for MySQL prior to 8 and MariaDB handling of union queries inside of parenthesized expressions (like IN).
* Be more permissive in letting invalid values be stored in a field whose type is INTEGER or REAL, since Sqlite allows this.
* `TimestampField` resolution cleanup. Now values 0 *and* 1 will resolve to a timestamp resolution of 1 second.
  Values 2-6 specify the number of decimal places (hundredths to microsecond), or alternatively the resolution can still be provided as a power of 10, e.g. 10, 1000 (millisecond), 1e6 (microsecond).
* When self-referential foreign-keys are inherited, the foreign-key on the subclass will also be self-referential (rather than pointing to the parent model).
* Add TSV import/export option to the `dataset` extension.
* Add item interface to the `dataset.Table` class for doing primary-key lookup, assignment, or deletion.
* Extend the mysql `ReconnectMixin` helper to work with mysql-connector.
* Fix mapping of double-precision float in postgres schema reflection. Previously it mapped to single-precision; now it correctly uses a double.
* Fix issue where `PostgresqlExtDatabase` and `MySQLConnectorDatabase` did not respect the `autoconnect` setting.

[View commits](https://github.com/coleifer/peewee/compare/3.10.0...3.11.0)

## 3.10.0

* Add a helper to `playhouse.mysql_ext` for creating `Match` full-text search expressions.
* Added date-part properties to `TimestampField` for accessing the year, month, day, etc., within a SQL expression.
* Added `to_timestamp()` helper for `DateField` and `DateTimeField` that produces an expression returning a unix timestamp.
* Add `autoconnect` parameter to `Database` classes. This parameter defaults to `True` and is compatible with previous versions of Peewee, in which executing a query on a closed database would open a connection automatically. To make it easier to catch inconsistent use of the database connection, this behavior can now be disabled by specifying `autoconnect=False`, making an explicit call to `Database.connect()` needed before executing a query.
* Added database-agnostic interface for obtaining a random value.
* Allow `isolation_level` to be specified when initializing a Postgres db.
* Allow hybrid properties to be used on model aliases. Refs #1969.
* Support aggregates with FILTER predicates on the latest Sqlite.

#### Changes

* More aggressively slot row values into the appropriate field when building objects from the database cursor (rather than using whatever `cursor.description` tells us, which is buggy in older Sqlite).
* Be more permissive in what we accept in the `insert_many()` and `insert()` methods.
* When implicitly joining a model with multiple foreign-keys, choose the foreign-key whose name matches that of the related model. Previously, this would have raised a `ValueError` stating that multiple FKs existed.
* Improved date truncation logic for Sqlite and MySQL to make it more compatible with Postgres' `date_trunc()` behavior. Previously, truncating a datetime to month resolution would return `'2019-08'`, for example. As of 3.10.0, the Sqlite and MySQL `date_trunc` implementation returns a full datetime, e.g. `'2019-08-01 00:00:00'`.
* Apply slightly different logic for casting JSON values with Postgres. Previously, Peewee just wrapped the value in the psycopg2 `Json()` helper. In this version, Peewee now dumps the json to a string and applies an explicit cast to the underlying JSON data-type (e.g. json or jsonb).

#### Bug fixes

* Save hooks can now be called for models without a primary key.
* Fixed bug in the conversion of Python values to JSON when using Postgres.
* Fix for differentiating empty values from NULL values in `model_to_dict`.
* Fixed a bug referencing primary-key values that required some kind of conversion (e.g., a UUID). See #1979 for details.
* Add small jitter to the pool connection timestamp to avoid issues when multiple connections are checked-out at the same exact time.

[View commits](https://github.com/coleifer/peewee/compare/3.9.6...3.10.0)

## 3.9.6

* Support nesting the `Database` instance as a context-manager. The outermost block will handle opening and closing the connection along with wrapping everything in a transaction. Nested blocks will use savepoints.
* Add new `session_start()`, `session_commit()` and `session_rollback()` interfaces to the Database object to support using transactional controls in situations where a context-manager or decorator is awkward.
* Fix error that would arise when attempting to do an empty bulk-insert.
* Set `isolation_level=None` in the SQLite connection constructor rather than afterwards using the setter.
* Add `create_table()` method to `Select` query to implement `CREATE TABLE AS`.
* Clean up some declarations in the Sqlite C extension.
* Add new example showing how to implement Reddit's ranking algorithm in SQL.

[View commits](https://github.com/coleifer/peewee/compare/3.9.5...3.9.6)

## 3.9.5

* Added small helper for setting the timezone when using Postgres.
* Improved SQL generation for the `VALUES` clause.
* Support passing resolution to `TimestampField` as a power-of-10.
* Small improvements to `INSERT` queries when the primary-key is not an auto-incrementing integer, but is generated by the database server (e.g. uuid).
* Cleanups to virtual table implementation and python-to-sqlite value conversions.
* Fixed bug related to binding previously-unbound models to a database using a context manager, #1913.

[View commits](https://github.com/coleifer/peewee/compare/3.9.4...3.9.5)

## 3.9.4

* Add `Model.bulk_update()` method for bulk-updating fields across multiple model instances (see the sketch below). [Docs](http://docs.peewee-orm.com/en/latest/peewee/api.html#Model.bulk_update).
* Add `lazy_load` parameter to `ForeignKeyField`. When initialized with `lazy_load=False`, the foreign-key will not use an additional query to resolve the related model instance. Instead, if the related model instance is not available, the underlying FK column value is returned (behaving like the "_id" descriptor).
* Added `Model.truncate_table()` method.
* The `reflection` and `pwiz` extensions now attempt to be smarter about converting database table and column names into snake-case. To disable this, you can set `snake_case=False` when calling the `Introspector.introspect()` method or use the `-L` (legacy naming) option with the `pwiz` script.
* Bulk inserts via `insert_many()` no longer require specification of the fields argument when the inserted rows are lists/tuples. In that case, the fields will be inferred to be all model fields except any auto-increment id.
* Add `DatabaseProxy`, which implements several of the `Database` class context managers. This allows you to reference some of the special features of the database object without needing to directly initialize the proxy first.
* Add support for window function frame exclusion and added built-in support for the GROUPS frame type.
* Add support for chaining window functions by extending a previously-declared window function.
* The playhouse Postgresql extension `TSVectorField.match()` method supports an additional argument `plain`, which can be used to control the parsing of the TS query.
* Added very minimal `JSONField` to the playhouse MySQL extension.
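
A minimal sketch of `Model.bulk_update()` as described above; the `User` model and data are hypothetical:

```python
# A minimal sketch of Model.bulk_update(); the User model is hypothetical.
from peewee import Model, CharField, SqliteDatabase

db = SqliteDatabase(':memory:')

class User(Model):
    username = CharField()

    class Meta:
        database = db

db.create_tables([User])
users = [User.create(username='u%d' % i) for i in range(3)]

# Modify the instances in memory, then persist all changes in bulk.
for user in users:
    user.username += '-updated'

User.bulk_update(users, fields=[User.username], batch_size=50)
```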

[View commits](https://github.com/coleifer/peewee/compare/3.9.3...3.9.4)

## 3.9.3

* Added cross-database support for `NULLS FIRST/LAST` when specifying the ordering for a query. Previously this was only supported for Postgres. Peewee will now generate an equivalent `CASE` statement for Sqlite and MySQL.
* Added [EXCLUDED](http://docs.peewee-orm.com/en/latest/peewee/api.html#EXCLUDED) helper for referring to the `EXCLUDED` namespace used with `INSERT...ON CONFLICT` queries, when referencing values in the conflicting row data.
* Added helper method to the model `Metadata` class for setting the table name at run-time. Setting the `Model._meta.table_name` attribute directly may have appeared to work in some situations, but could lead to subtle bugs. The new API is `Model._meta.set_table_name()`.
* Enhanced helpers for working with Peewee interactively, [see doc](http://docs.peewee-orm.com/en/latest/peewee/interactive.html).
* Fix cache invalidation bug in `DataSet` that was originally reported on the sqlite-web project.
* New example script implementing a [hexastore](https://github.com/coleifer/peewee/blob/master/examples/hexastore.py).

[View commits](https://github.com/coleifer/peewee/compare/3.9.2...3.9.3)

## 3.9.1 and 3.9.2

Includes a bugfix for an `AttributeError` that occurs when using MySQL with the `MySQLdb` client. The 3.9.2 release includes fixes for a test failure.

[View commits](https://github.com/coleifer/peewee/compare/3.9.0...3.9.2)

## 3.9.0

* Added new document describing how to [use peewee interactively](http://docs.peewee-orm.com/en/latest/peewee/interactive.html).
* Added convenience functions for generating model classes from a pre-existing database, printing model definitions and printing CREATE TABLE sql for a model. See the "use peewee interactively" section for details.
* Added a `__str__` implementation to all `Query` subclasses which converts the query to a string and interpolates the parameters.
* Improvements to `sqlite_ext.JSONField` regarding the serialization of data, as well as the addition of options to override the JSON serialization and de-serialization functions.
* Added `index_type` parameter to `Field`.
* Added `DatabaseProxy`, which allows one to use database-specific decorators with an uninitialized `Proxy` object. See #1842 for discussion. It is recommended that you update any usage of `Proxy` for deferring database initialization to use the new `DatabaseProxy` class instead (see the sketch after this list).
* Added support for `INSERT ... ON CONFLICT` when the conflict target is a partial index (e.g., contains a `WHERE` clause). The `OnConflict` and `on_conflict()` APIs now take an additional `conflict_where` parameter to represent the `WHERE` clause of the partial index in question. See #1860.
* Enhanced the `playhouse.kv` extension to use efficient upsert for *all* database engines. Previously upsert was only supported for sqlite and mysql.
* Re-added the `orwhere()` query filtering method, which will append the given expressions using `OR` instead of `AND`. See #391 for old discussion.
* Added some new examples to the `examples/` directory.
* Added `select_from()` API for wrapping a query and selecting one or more columns from the wrapped subquery. [Docs](http://docs.peewee-orm.com/en/latest/peewee/api.html#SelectQuery.select_from).
* Added documentation on using [row values](http://docs.peewee-orm.com/en/latest/peewee/query_operators.html#row-values).
* Removed the (defunct) "speedups" C extension, which as of 3.8.2 only contained a barely-faster function for quoting entities.
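
A minimal sketch of deferring database initialization with `DatabaseProxy`; the models and filename are hypothetical:

```python
# A minimal sketch of DatabaseProxy; the models are hypothetical.
from peewee import DatabaseProxy, Model, CharField, SqliteDatabase

db_proxy = DatabaseProxy()  # Placeholder until the real database is known.

class BaseModel(Model):
    class Meta:
        database = db_proxy

class User(BaseModel):
    username = CharField()

# Later, e.g. after reading app configuration, point the proxy at the
# concrete database; attribute access is passed through from then on.
db_proxy.initialize(SqliteDatabase('app.db'))
db_proxy.create_tables([User])
```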

**Bugfixes**

* Fix bug in SQL generation when there was a subquery that used a common table expression.
* Enhanced `prefetch()` and fixed bug that could occur when mixing self-referential foreign-keys and model aliases.
* MariaDB 10.3.3 introduces backwards-incompatible changes to the SQL used for upsert. Peewee now introspects the MySQL server version at connection time to ensure proper handling of version-specific features. See #1834 for details.
* Fixed bug where `TimestampField` would treat zero values as `None` when reading from the database.

[View commits](https://github.com/coleifer/peewee/compare/3.8.2...3.9.0)

## 3.8.2

**Backwards-incompatible changes**

* The default row-type for `INSERT` queries executed with a non-default `RETURNING` clause has changed from `tuple` to `Model` instances. This makes `INSERT` behavior consistent with `UPDATE` and `DELETE` queries that specify a `RETURNING` clause. To revert back to the old behavior, just append a call to `.tuples()` to your `INSERT ... RETURNING` query.
* Removed support for the `table_alias` model `Meta` option. Previously, this attribute could be used to specify a "vanity" alias for a model class in the generated SQL. As a result of some changes to support more robust UPDATE and DELETE queries, supporting this feature would require some re-working. As of the 3.8.0 release it was broken and resulted in incorrect SQL for UPDATE queries, so now it is removed.

**New features**

* Added `playhouse.shortcuts.ReconnectMixin`, which can be used to implement automatic reconnect under certain error conditions (notably the MySQL error 2006 - server has gone away).

**Bugfixes**

* Fix SQL generation bug when using an inline window function in the `ORDER BY` clause of a query.
* Fix possible zero-division in user-defined implementation of the BM25 ranking algorithm for SQLite full-text search.

[View commits](https://github.com/coleifer/peewee/compare/3.8.1...3.8.2)

## 3.8.1

**New features**

* Sqlite `SearchField` now supports the `match()` operator, allowing full-text search to be performed on a single column (as opposed to the whole table).

**Changes**

* Remove minimum passphrase restrictions in SQLCipher integration.

**Bugfixes**

* Support inheritance of `ManyToManyField` instances.
* Ensure operator overloads are invoked when generating filter expressions.
* Fix incorrect scoring in Sqlite BM25, BM25f and Lucene ranking algorithms.
* Support string field-names in the data dictionary when performing an ON CONFLICT ... UPDATE query, which allows field-specific conversions to be applied. References #1815.

[View commits](https://github.com/coleifer/peewee/compare/3.8.0...3.8.1)

## 3.8.0

**New features**

* Postgres `BinaryJSONField` now supports `has_key()`, `concat()` and `remove()` methods (though remove may require pg10+).
* Add `python_value()` method to the SQL-function helper `fn`, to allow specifying a custom function for mapping database values to Python values.

**Changes**

* Better support for UPDATE ... FROM queries, and more generally, more robust support for UPDATE and RETURNING clauses. This means that the `QualifiedNames` helper is no longer needed for certain types of queries.
* The `SqlCipherDatabase` no longer accepts a `kdf_iter` parameter. To configure the various SQLCipher encryption settings, specify the setting values as `pragmas` when initializing the database.
* Introspection will now, by default, only strip "_id" from introspected column names if those columns are foreign-keys. See #1799 for discussion.
* Allow `UUIDField` and `BinaryUUIDField` to accept hexadecimal UUID strings as well as raw binary UUID bytestrings (in addition to `UUID` instances, which are already supported).
* Allow `ForeignKeyField` to be created without an index.
* Allow multiple calls to `cast()` to be chained (#1795).
* Add logic to ensure foreign-key constraint names that exceed 64 characters are truncated using the same logic as is currently in place for long indexes.
* `ManyToManyField` supports foreign-keys to fields other than primary-keys.
* When linked against SQLite 3.26 or newer, support `SQLITE_CONSTRAINT` to designate invalid queries against virtual tables.
* SQL-generation changes to aid in supporting using queries within expressions following the SELECT statement.

**Bugfixes**

* Fixed bug in `order_by_extend()`, thanks @nhatHero.
* Fixed bug where the `DataSet` CSV import/export did not support non-ASCII characters in Python 3.x.
* Fixed bug where `model_to_dict` would attempt to traverse explicitly disabled foreign-key backrefs (#1785).
* Fixed bug when attempting to migrate SQLite tables that have a field whose column-name begins with "primary_".
* Fixed bug with inheriting deferred foreign-keys.

[View commits](https://github.com/coleifer/peewee/compare/3.7.1...3.8.0)

## 3.7.1

**New features**

* Added `table_settings` model `Meta` option, which should be a list of strings specifying additional options for `CREATE TABLE`, which are placed *after* the closing parentheses.
* Allow specification of `on_update` and `on_delete` behavior for many-to-many relationships when using `ManyToManyField`.

**Bugfixes**

* Fixed incorrect SQL generation for the Postgresql ON CONFLICT clause when the conflict_target is a named constraint (rather than an index expression). This introduces a new keyword-argument to the `on_conflict()` method: `conflict_constraint`, which is currently only supported by Postgresql. Refs issue #1737.
* Fixed incorrect SQL for sub-selects used on the right side of `IN` expressions. Previously the query would be assigned an alias, even though an alias was not needed.
* Fixed incorrect SQL generation for Model indexes which contain SQL functions as indexed columns.
* Fixed bug in the generation of special queries used to perform operations on SQLite FTS5 virtual tables.
* Allow `frozenset` to be correctly parameterized as a list of values.
* Allow multi-value INSERT queries to specify `columns` as a list of strings.
* Support `CROSS JOIN` for model select queries.

[View commits](https://github.com/coleifer/peewee/compare/3.7.0...3.7.1)

## 3.7.0

**Backwards-incompatible changes**

* Pool database `close_all()` method renamed to `close_idle()` to better reflect the actual behavior.
* Databases will now raise `InterfaceError` when `connect()` or `close()` are called on an uninitialized, deferred database object.

**New features**

* Add methods to the migrations extension to support adding and dropping table constraints.
* Add [Model.bulk_create()](http://docs.peewee-orm.com/en/latest/peewee/api.html#Model.bulk_create) method for bulk-inserting unsaved model instances.
* Add `close_stale()` method to the connection pool to support closing stale connections.
* The `FlaskDB` class in `playhouse.flask_utils` now accepts a `model_class` parameter, which can be used to specify a custom base-class for models.

**Bugfixes**

* Parentheses were not added to subqueries used in function calls with more than one argument.
* Fixed bug when attempting to serialize many-to-many fields which were created initially with a `DeferredThroughModel`, see #1708.
* Fixed bug when using the Postgres `ArrayField` with an array of `BlobField`.
* Allow `Proxy` databases to be used as a context-manager.
* Fixed bug where the APSW driver was referring to the SQLite version from the standard library `sqlite3` driver, rather than from `apsw`.
* Reflection library attempts to wrap server-side column defaults in quotation marks if the column data-type is text/varchar.
* Fixed missing import in the migrations library, which would cause errors when attempting to add indexes whose name exceeded 64 chars.
* When using the Postgres connection pool, ensure any open/pending transactions are rolled-back when the connection is recycled.
* Even *more* changes to the `setup.py` script. In this case I've added a helper function which will reliably determine if the SQLite3 extensions can be built. This follows the approach taken by the Python YAML package.

[View commits](https://github.com/coleifer/peewee/compare/3.6.4...3.7.0)

## 3.6.4

Take a whole new approach, following what `simplejson` does. Allow the `build_ext` command class to fail, and retry without extensions in the event we run into issues building extensions. References #1676.

[View commits](https://github.com/coleifer/peewee/compare/3.6.3...3.6.4)

## 3.6.3

Add check in `setup.py` to determine if a C compiler is available before building C extensions. References #1676.

[View commits](https://github.com/coleifer/peewee/compare/3.6.2...3.6.3)

## 3.6.2

Use `ctypes.util.find_library` to determine if `libsqlite3` is installed. Should fix problems people are encountering installing when SQLite3 is not available.

[View commits](https://github.com/coleifer/peewee/compare/3.6.1...3.6.2)

## 3.6.1

Fixed issue with setup script.

[View commits](https://github.com/coleifer/peewee/compare/3.6.0...3.6.1)

## 3.6.0

* Support for Python 3.7, including bugfixes related to new StopIteration handling inside of generators.
* Support for specifying `ROWS` or `RANGE` window frame types. For more information, see the new [frame type documentation](http://docs.peewee-orm.com/en/latest/peewee/querying.html#frame-types-range-vs-rows).
* Add APIs for user-defined window functions if using [pysqlite3](https://github.com/coleifer/pysqlite3) and sqlite 3.25.0 or newer.
* `TimestampField` now uses a 64-bit integer data-type for storage.
* Added support to `pwiz` and `playhouse.reflection` to enable generating models from VIEWs.
* Added lower-level database API for introspecting VIEWs.
* Revamped continuous integration setup for better coverage, including 3.7 and 3.8-dev.
* Allow building C extensions even if Cython is not installed, by distributing pre-generated C source files.
* Switch to using `setuptools` for packaging.

[View commits](https://github.com/coleifer/peewee/compare/3.5.2...3.6.0)

## 3.5.2

* New guide to using [window functions in Peewee](http://docs.peewee-orm.com/en/latest/peewee/querying.html#window-functions).
* New and improved table name auto-generation. This feature is not backwards compatible, so it is **disabled by default**. To enable, set `legacy_table_names=False` in your model's `Meta` options. For more details, see the [table names](http://docs.peewee-orm.com/en/latest/peewee/models.html#table_names) documentation.
* Allow passing single fields/columns to window function `order_by` and `partition_by` arguments.
* Support for `FILTER (WHERE...)` clauses with window functions and aggregates.
* Added `IdentityField` class suitable for use with Postgres 10's new identity column type. It can be used anywhere `AutoField` or `BigAutoField` was being used previously.
* Fixed bug creating indexes on tables that are in attached databases (SQLite).
* Fixed obscure bug when using `prefetch()` and `ModelAlias` to populate a back-reference related model.

[View commits](https://github.com/coleifer/peewee/compare/3.5.1...3.5.2)

## 3.5.1

**New features**

* New documentation for working with [relationships](http://docs.peewee-orm.com/en/latest/peewee/relationships.html) in Peewee.
* Improved tests and documentation for MySQL upsert functionality.
* Allow `database` parameter to be specified with the `ModelSelect.get()` method. For discussion, see #1620.
* Add `QualifiedNames` helper to peewee module exports.
* Add `temporary=` meta option to support temporary tables.
* Allow a `Database` object to be passed to the constructor of the `DataSet` helper.

**Bug fixes**

* Fixed edge-case where, when attempting to alias a field to its underlying column-name (when different), Peewee would not respect the alias and would use the field name instead. See #1625 for details and discussion.
* Raise a `ValueError` when joining and aliasing the join to a foreign-key's `object_id_name` descriptor. Should prevent accidentally introducing O(n) queries or silently ignoring data from a joined-instance.
* Fixed bug for MySQL when creating a foreign-key to a model which used the `BigAutoField` for its primary-key.
* Fixed bugs in the implementation of user-defined aggregates and extensions with the APSW SQLite driver.
* Fixed regression introduced in 3.5.0 which ignored custom Model `__repr__()`.
* Fixed regression from 2.x in which inserting from a query using a `SQL()` was no longer working. Refs #1645.

[View commits](https://github.com/coleifer/peewee/compare/3.5.0...3.5.1)

## 3.5.0

**Backwards-incompatible changes**

* Custom Model `repr` no longer uses the convention of overriding `__unicode__`, and now uses `__str__`.
* Redesigned the [sqlite json1 integration](http://docs.peewee-orm.com/en/latest/peewee/sqlite_ext.html#sqlite-json1), and changed some of the APIs and semantics of various `JSONField` methods. The documentation has been expanded to include more examples and the API has been simplified to make it easier to work with. These changes **do not** have any effect on the [Postgresql JSON fields](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#pgjson).

**New features**

* Better default `repr` for model classes and fields.
* `ForeignKeyField()` accepts a new initialization parameter, `deferrable`, for specifying when constraints should be enforced.
* `BitField.flag()` can be called without a value parameter for the common use-case of using flags that are powers-of-2.
* `SqliteDatabase` pragmas can be specified as a `dict` (previously they required a list of 2-tuples).
* SQLite `TableFunction` ([docs](http://docs.peewee-orm.com/en/latest/peewee/sqlite_ext.html#sqlite-vtfunc)) will print Python exception tracebacks raised in the `initialize` and `iterate` callbacks, making debugging significantly easier.

**Bug fixes**

* Fixed bug in `migrator.add_column()` where, if the field being added declared a non-standard index type (e.g., binary json field with GIN index), this index type was not being respected.
* Fixed bug in `database.table_exists()` where the implementation did not match the documentation. The implementation has been updated to match the documentation.
* Fixed bug in the SQLite `TableFunction` implementation which raised errors if the return value of the `iterate()` method was not a `tuple`.

[View commits](https://github.com/coleifer/peewee/compare/3.4.0...3.5.0)

## 3.4.0

**Backwards-incompatible changes**

* The `regexp()` operation is now case-sensitive for MySQL and Postgres. To perform case-insensitive regexp operations, use `iregexp()`.
* The SQLite `BareField()` field-type now supports all column constraints *except* specifying the data-type. Previously it silently ignored any column constraints.
* LIMIT and OFFSET parameters are now treated as parameterized values instead of literals.
* The `schema` parameter for SQLite database introspection methods is no longer ignored by default. The schema corresponds to the name given to an attached database.
* `ArrayField` now accepts a new parameter `field_kwargs`, which is used to pass information to the array field's `field_class` initializer.

**New features and other changes**

* SQLite backup interface supports specifying page-counts and a user-defined progress handler.
* GIL is released when doing backups or during SQLite busy timeouts (when using the peewee SQLite busy-handler).
* Add NATURAL join-type to the `JOIN` helper.
* Improved identifier quoting to allow specifying distinct open/close-quote characters. Enables adding support for MSSQL, for instance, which uses square brackets, e.g. `[table].[column]`.
* Unify timeout interfaces for SQLite databases (use seconds everywhere rather than mixing seconds and milliseconds, which was confusing).
* Added `attach()` and `detach()` methods to the SQLite database, making it possible to attach additional databases (e.g. an in-memory cache db).

[View commits](https://github.com/coleifer/peewee/compare/3.3.4...3.4.0)

## 3.3.4

* Added a `BinaryUUIDField` class for efficiently storing UUIDs in 16 bytes.
* Fix dataset's `update_cache()` logic so that when updating a single table that was newly-added, we also ensure that all dependent tables are updated at the same time. Refs coleifer/sqlite-web#42.

[View commits](https://github.com/coleifer/peewee/compare/3.3.3...3.3.4)

## 3.3.3

* More efficient implementation of model dependency-graph generation. Improves performance of recursively deleting related objects by omitting unnecessary subqueries.
* Added `union()`, `union_all()`, `intersect()` and `except_()` to the `Model`-specific query implementations. This was an oversight that should have been patched in 3.3.2, but is fixed in 3.3.3.
* Major cleanup to test runner and standardized test skipping logic to integrate with standard-library `unittest` conventions.

[View commits](https://github.com/coleifer/peewee/compare/3.3.2...3.3.3)

## 3.3.2

* Add methods for `union()`, `union_all()`, `intersect()` and `except_()`. Previously, these methods were only available as operator overloads.
* Removed some Python 2.6-specific support code, as 2.6 is no longer officially supported.
* Fixed model-graph resolution logic for deferred foreign-keys.
* Better support for UPDATE...FROM queries (Postgresql).

[View commits](https://github.com/coleifer/peewee/compare/3.3.1...3.3.2)

## 3.3.1

* Fixed long-standing bug in 3.x regarding using column aliases with queries that utilize the ModelCursorWrapper (typically queries with one or more joins).
* Fix typo in model metadata code, thanks @klen.
* Add examples of using recursive CTEs to the docs (a minimal sketch follows this list).
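
A minimal sketch of a recursive CTE, adapted from the pattern now shown in the docs; the `Category` model is hypothetical:

```python
# A minimal sketch of a recursive CTE; the Category model is hypothetical.
from peewee import Model, CharField, ForeignKeyField, Value, SqliteDatabase

db = SqliteDatabase(':memory:')

class Category(Model):
    name = CharField()
    parent = ForeignKeyField('self', backref='children', null=True)

    class Meta:
        database = db

# Base case: top-level categories (no parent) at depth 1.
Base = Category.alias()
base_case = (Base
             .select(Base.id, Base.name, Value(1).alias('level'))
             .where(Base.parent.is_null())
             .cte('category_tree', recursive=True))

# Recursive term: children of rows already in the CTE, one level deeper.
RTerm = Category.alias()
recursive = (RTerm
             .select(RTerm.id, RTerm.name, (base_case.c.level + 1).alias('level'))
             .join(base_case, on=(RTerm.parent == base_case.c.id)))

# UNION ALL the base case with the recursive term, then select from the CTE.
cte = base_case.union_all(recursive)
query = cte.select_from(cte.c.name, cte.c.level).order_by(cte.c.name)
```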
[View commits](https://github.com/coleifer/peewee/compare/3.3.0...3.3.1)

## 3.3.0

* Added support for SQLite's new `ON CONFLICT` clause, which is modelled on the syntax used by Postgresql and will be available in SQLite 3.24.0 and onward.
* Added better support for using common table expressions and a cleaner way of implementing recursive CTEs, both of which are also tested with integration tests (as opposed to just checking the generated SQL).
* Modernized the CI environment to utilize the latest MariaDB features, so we can test window functions and CTEs with MySQL (when available).
* Reorganized and unified the feature-flags in the test suite.

[View commits](https://github.com/coleifer/peewee/compare/3.2.5...3.3.0)

## 3.2.5

* Added `ValuesList` for representing values lists. [Docs](http://docs.peewee-orm.com/en/latest/peewee/api.html#ValuesList).
* `DateTimeField`, `DateField` and `TimeField` will parse formatted-strings before sending to the database. Previously this only occurred when reading values from the database.

[View commits](https://github.com/coleifer/peewee/compare/3.2.4...3.2.5)

## 3.2.4

* Smarter handling of model-graph when dealing with compound queries (union, intersect, etc.). #1579.
* If the same column-name is selected multiple times, first value wins. #1579.
* If `ModelSelect.switch()` is called without any arguments, default to the query's model. Refs #1573.
* Fix issue where cloning a ModelSelect query did not result in the joins being cloned. #1576.

[View commits](https://github.com/coleifer/peewee/compare/3.2.3...3.2.4)

## 3.2.3

* `pwiz` tool will capture column defaults defined as part of the table schema.
* Fixed a misleading error message - #1563.
* Ensure `reuse_if_open` parameter has effect on pooled databases.
* Added support for ON UPDATE/ON DELETE actions when migrating a foreign-key.
* Fixed bug in SQL generation for subqueries in aliased functions, #1572.

[View commits](https://github.com/coleifer/peewee/compare/3.2.2...3.2.3)

## 3.2.2

* Added support for passing `Model` classes to the `returning()` method when you intend to return all columns for the given model.
* Fixed a bug when using user-defined sequences where the underlying sequence already exists.
* Added `drop_sequences` parameter to `drop_table()` method which allows you to conditionally drop any user-defined sequences when dropping the table.

[View commits](https://github.com/coleifer/peewee/compare/3.2.1...3.2.2)

## 3.2.1

**Notice:** the default mysql driver for Peewee has changed to [pymysql](https://github.com/PyMySQL/PyMySQL) in version 3.2.1. In previous versions, if both *mysql-python* and *pymysql* were installed, Peewee would use *mysql-python*. As of 3.2.1, if both libraries are installed Peewee will use *pymysql*.

* Added new module `playhouse.mysql_ext` which includes `MySQLConnectorDatabase`, a database implementation that works with the [mysql-connector](https://dev.mysql.com/doc/connector-python/en/) driver.
* Added new field to `ColumnMetadata` class which captures a database column's default value. `ColumnMetadata` is returned by `Database.get_columns()`.
* Added [documentation on making Peewee async](http://docs.peewee-orm.com/en/latest/peewee/database.html#async-with-gevent).

[View commits](https://github.com/coleifer/peewee/compare/3.2.0...3.2.1)

## 3.2.0

The 3.2.0 release introduces a potentially backwards-incompatible change. The only users affected will be those that have implemented custom `Field` types with a user-defined `coerce` method.
tl;dr: rename the `coerce` attribute to `adapt` and you should be set.

#### Field.coerce renamed to Field.adapt

The `Field.coerce` method has been renamed to `Field.adapt`. The purpose of this method is to convert a value from the application/database into the appropriate Python data-type. For instance, `IntegerField.adapt` is simply the `int` built-in function.

The motivation for this change is to support adding metadata to any AST node instructing Peewee to not coerce the associated value. As an example, consider this code:

```python
from peewee import *

class Note(Model):
    id = AutoField()  # autoincrementing integer primary key.
    content = TextField()

# Query notes table and cast the "id" to a string and store as "id_text" attr.
query = Note.select(Note.id.cast('TEXT').alias('id_text'), Note.content)

a_note = query.get()
print((a_note.id_text, a_note.content))

# Prior to 3.2.0 the CAST is "un-done" because the value gets converted
# back to an integer, since the value is associated with the Note.id field:
(1, u'some note')  # 3.1.7, e.g. -- "id_text" is an integer!

# As of 3.2.0, CAST will automatically prevent the conversion of field values,
# which is an extension of a more general metadata API that can instruct Peewee
# not to convert certain values.
(u'1', u'some note')  # 3.2.0 -- "id_text" is a string as expected.
```

If you have implemented custom `Field` classes and are using `coerce` to enforce a particular data-type, you can simply rename the attribute to `adapt`.

#### Other changes

Old versions of SQLite do not strip quotation marks from aliased column names in compound queries (e.g. UNION). Fixed in 3.2.0.

[View commits](https://github.com/coleifer/peewee/compare/3.1.7...3.2.0)

## 3.1.7

For all the winblows lusers out there, added an option to skip compilation of the SQLite C extensions during installation. Set the env var `NO_SQLITE=1`, run `setup.py install`, and you should be able to build without requiring SQLite.

[View commits](https://github.com/coleifer/peewee/compare/3.1.6...3.1.7)

## 3.1.6

* Added `rekey()` method to SqlCipher database for changing the encryption key, and documentation for the `set_passphrase()` method.
* Added `convert_values` parameter to `ArrayField` constructor, which will cause the array values to be processed using the underlying data-type's conversion logic.
* Fixed unreported bug using `TimestampField` with sub-second resolutions.
* Fixed bug where options were not being processed when calling `drop_table()`.
* Some fixes and improvements to the `signals` extension.

[View commits](https://github.com/coleifer/peewee/compare/3.1.5...3.1.6)

## 3.1.5

Fixed Python 2/3 incompatibility with `itertools.izip_longest()`.

[View commits](https://github.com/coleifer/peewee/compare/3.1.4...3.1.5)

## 3.1.4

* Added `BigAutoField` to support 64-bit auto-incrementing primary keys.
* Use Peewee-compatible datetime serialization when exporting JSON from a `DataSet`. Previously the JSON export used ISO-8601 by default. See #1536.
* Added `Database.batch_commit` helper to wrap iterators in chunked transactions. See #1539 for discussion.

[View commits](https://github.com/coleifer/peewee/compare/3.1.3...3.1.4)

## 3.1.3

* Fixed issue where scope-specific settings were being updated in-place instead of copied. #1534.
* Fixed bug where setting a `ForeignKeyField` did not add it to the model's "dirty" fields list. #1530.
* Use pre-fetched data when using `prefetch()` with `ManyToManyField`. Thanks to @iBelieve for the patch. #1531.
* Use `JSON` data-type for SQLite `JSONField` instances (see the sketch below).
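To illustrate, a minimal sketch using the `sqlite_ext` `JSONField`; the `KV` model and data are hypothetical:

```python
from peewee import Model, TextField
from playhouse.sqlite_ext import JSONField, SqliteExtDatabase

db = SqliteExtDatabase(':memory:')

class KV(Model):
    key = TextField()
    value = JSONField()  # values are serialized to/from JSON text.

    class Meta:
        database = db

db.connect()
db.create_tables([KV])

KV.create(key='config', value={'debug': True, 'retries': [1, 2, 3]})
row = KV.get(KV.key == 'config')
print(row.value['retries'])  # [1, 2, 3]
```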
* Add a `json_contains` function for use with SQLite `json1` extension.
* Various documentation updates and additions.

[View commits](https://github.com/coleifer/peewee/compare/3.1.2...3.1.3)

## 3.1.2

#### New behavior for INSERT queries with RETURNING clause

Investigating #1522, it occurred to me that INSERT queries with non-default *RETURNING* clauses (a postgres-only feature) should always return a cursor object. Previously, if executing a single-row INSERT query, the last-inserted row ID would be returned, regardless of what was specified by the RETURNING clause.

This change only affects INSERT queries with non-default RETURNING clauses and will cause a cursor to be returned, as opposed to the last-inserted row ID.

[View commits](https://github.com/coleifer/peewee/compare/3.1.1...3.1.2)

## 3.1.1

* Fixed bug when using `Model.alias()` when the model defined a particular database schema.
* Added `SchemaManager.create_foreign_key` API to simplify adding constraints when dealing with circular foreign-key relationships. Updated docs accordingly.
* Improved implementation of `Migrator.add_foreign_key_constraint` so that it can be used with Postgresql (in addition to MySQL).
* Added `PickleField` to the `playhouse.fields` module. [Docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#PickleField).
* Fixed bug in implementation of `CompressedField` when using Python 3.
* Added `KeyValue` API in `playhouse.kv` module. [Docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#key-value-store).
* More test cases for joining on sub-selects or common table expressions.

[View commits](https://github.com/coleifer/peewee/compare/3.1.0...3.1.1)

## 3.1.0

#### Backwards-incompatible changes

`Database.bind()` has been renamed to `Database.bind_ctx()`, to more closely match the semantics of the corresponding model methods, `Model.bind()` and `Model.bind_ctx()`. The new `Database.bind()` method is a one-time operation that binds the given models to the database. See documentation:

* [Database.bind()](http://docs.peewee-orm.com/en/latest/peewee/api.html#Database.bind)
* [Database.bind_ctx()](http://docs.peewee-orm.com/en/latest/peewee/api.html#Database.bind_ctx)

#### Other changes

* Removed Python 2.6 support code from a few places.
* Fixed example analytics app code to ensure the hstore extension is registered.
* Small efficiency improvement to bloom filter.
* Removed "attention!" from *README*.

[View commits](https://github.com/coleifer/peewee/compare/3.0.20...3.1.0)

## 3.0.20

* Include `schema` (if specified) when checking for table-existence.
* Correct placement of ORDER BY / LIMIT clauses in compound select queries.
* Fix bug in back-reference lookups when using `filter()` API.
* Fix bug in SQL generation for ON CONFLICT queries with Postgres, #1512.

[View commits](https://github.com/coleifer/peewee/compare/3.0.19...3.0.20)

## 3.0.19

* Support for more types of mappings in `insert_many()`, refs #1495.
* Lots of documentation improvements.
* Fix bug when calling `tuples()` on a `ModelRaw` query. This was reported originally as a bug with *sqlite-web* CSV export. See coleifer/sqlite-web#38.

[View commits](https://github.com/coleifer/peewee/compare/3.0.18...3.0.19)

## 3.0.18

* Improved error messages when attempting to use a database class for which the corresponding driver is not installed.
* Added tests showing the use of a custom operator, à la the docs (see the sketch below).
* Fixed indentation issue in docs, #1493.
* Fixed issue with the SQLite `date_part` function, #1494.
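For reference, the custom-operator pattern from the docs looks roughly like this; the `modulo` helper and the commented usage are illustrative:

```python
from peewee import Expression

def modulo(lhs, rhs):
    # Expression(lhs, op, rhs) renders as "lhs % rhs" in the generated SQL.
    return Expression(lhs, '%', rhs)

# Hypothetical usage: select rows with an even id.
# query = Note.select().where(modulo(Note.id, 2) == 0)
```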
[View commits](https://github.com/coleifer/peewee/compare/3.0.17...3.0.18)

## 3.0.17

* Fix `schema` inheritance regression, #1485.
* Add helper method to postgres migrator for setting search_path, #1353.

[View commits](https://github.com/coleifer/peewee/compare/3.0.16...3.0.17)

## 3.0.16

* Improve model graph resolution when iterating results of a query. Refs #1482.
* Allow Model._meta.schema to be changed at run-time. #1483.

[View commits](https://github.com/coleifer/peewee/compare/3.0.15...3.0.16)

## 3.0.15

* Use same `schema` used for reflection in generated models.
* Preserve `pragmas` set on deferred Sqlite database if database is re-initialized without re-specifying pragmas.

[View commits](https://github.com/coleifer/peewee/compare/3.0.14...3.0.15)

## 3.0.14

* Fix bug creating model instances on Postgres when model does not have a primary key column.
* Extend postgresql reflection to support array types.

[View commits](https://github.com/coleifer/peewee/compare/3.0.13...3.0.14)

## 3.0.13

* Fix bug where simple field aliases were being ignored. Fixes #1473.
* More strict about column type inference for postgres + pwiz.

[View commits](https://github.com/coleifer/peewee/compare/3.0.12...3.0.13)

## 3.0.12

* Fix queries of the form INSERT ... VALUES (SELECT...) so that the sub-select is wrapped in parentheses.
* Improve model-graph resolution when selecting from multiple tables that are joined by foreign-keys, and an intermediate table is omitted from selection.
* Docs update to reflect deletion of post_init signal.

[View commits](https://github.com/coleifer/peewee/compare/3.0.11...3.0.12)

## 3.0.11

* Add note to changelog about `cursor()` method.
* Add hash method to postgres indexed-field subclasses.
* Add TableFunction to sqlite_ext module namespace.
* Fix bug regarding NOT IN queries where the right-hand-side is an empty set.
* Fallback implementations of bm25f and lucene search ranking algorithms.
* Fixed DecimalField issue.
* Fixed issue with BlobField when database is a Proxy object.

[View commits](https://github.com/coleifer/peewee/compare/3.0.10...3.0.11)

## 3.0.10

* Fix `Database.drop_tables()` signature to support `cascade` argument - #1453.
* Fix querying documentation for custom functions - #1454.
* Added len() method to `ModelBase` for convenient counting.
* Fix bug related to unsaved relation population (thanks @conqp) - #1459.
* Fix count() on compound select - #1460.
* Support `coerce` keyword argument with `fn.XXX()` - #1463.
* Support updating existing model instance with dict_to_model-like API - #1456.
* Fix equality tests with ArrayField - #1461.

[View commits](https://github.com/coleifer/peewee/compare/3.0.9...3.0.10)

## 3.0.9

* Add deprecation notice if passing `autocommit` as keyword argument to the `Database` initializer. Refs #1452.
* Add `JSONPath` and "J" helpers to sqlite extension.

[View commits](https://github.com/coleifer/peewee/compare/3.0.8...3.0.9)

## 3.0.8

* Add support for passing `cascade=True` when dropping tables. Fixes #1449.
* Fix issues with backrefs and inherited foreign-keys. Fixes #1448.

[View commits](https://github.com/coleifer/peewee/compare/3.0.7...3.0.8)

## 3.0.7

* Add `select_extend()` method to extend existing SELECT-ion. [Doc](http://docs.peewee-orm.com/en/latest/peewee/api.html#Select.select_extend).
* Accept `set()` as iterable value type, fixes #1445.
* Add test for model/field inheritance and fix bug relating to recursion error when inheriting foreign-key field. Fixes #1448.
* Fix regression where consecutive calls to `ModelSelect.select()` with no parameters resulted in an empty selection. Fixes #1438. [View commits](https://github.com/coleifer/peewee/compare/3.0.6...3.0.7) ## 3.0.6 Add constraints for ON UPDATE/ON DELETE to foreign-key constraint - #1443. [View commits](https://github.com/coleifer/peewee/compare/3.0.5...3.0.6) ## 3.0.5 Adds Model.index(), a short-hand method for declaring ModelIndex instances. * [Model.index docs](http://docs.peewee-orm.com/en/latest/peewee/api.html#Model.index) * [Model.add_index docs](http://docs.peewee-orm.com/en/latest/peewee/api.html#Model.add_index) * [ModelIndex docs](http://docs.peewee-orm.com/en/latest/peewee/api.html#ModelIndex) [View commits](https://github.com/coleifer/peewee/compare/3.0.4...3.0.5) ## 3.0.4 Re-add a shim for `PrimaryKeyField` (renamed to `AutoField`) and log a deprecation warning if you try to use it. [View commits](https://github.com/coleifer/peewee/compare/3.0.3...3.0.4) ## 3.0.3 Includes fix for bug where column-name to field-name translation was not being done when running select queries on models whose field name differed from the underlying column name (#1437). [View commits](https://github.com/coleifer/peewee/compare/3.0.2...3.0.3) ## 3.0.2 Ensures that the pysqlite headers are included in the source distribution so that certain C extensions can be compiled. [View commits](https://github.com/coleifer/peewee/compare/3.0.0...3.0.2) ## 3.0.0 * Complete rewrite of SQL AST and code-generation. * Inclusion of new, low-level query builder APIs. * List of [backwards-incompatible changes](http://docs.peewee-orm.com/en/latest/peewee/changes.html). [View commits](https://github.com/coleifer/peewee/compare/2.10.2...3.0.0) ## 2.10.2 * Update travis-ci build scripts to use Postgres 9.6 and test against Python 3.6. * Added support for returning `namedtuple` objects when iterating over a cursor. * Added support for specifying the "object id" attribute used when declaring a foreign key. By default, it is `foreign-key-name_id`, but it can now be customized. * Fixed small bug in the calculation of search scores when using the SQLite C extension or the `sqlite_ext` module. * Support literal column names with the `dataset` module. [View commits](https://github.com/coleifer/peewee/compare/2.10.1...2.10.2) ## 2.10.1 Removed `AESEncryptedField`. [View commits](https://github.com/coleifer/peewee/compare/2.10.0...2.10.1) ## 2.10.0 The main change in this release is the removal of the `AESEncryptedField`, which was included as part of the `playhouse.fields` extension. It was brought to my attention that there was some serious potential for security vulnerabilities. Rather than give users a false sense of security, I've decided the best course of action is to remove the field. * Remove the `playhouse.fields.AESEncryptedField` over security concerns described in ticket #1264. * Correctly resolve explicit table dependencies when creating tables, refs #1076. Thanks @maaaks. * Implement not equals comparison for `CompositeKey`. [View commits](https://github.com/coleifer/peewee/compare/2.9.2...2.10.0) ## 2.9.2 * Fixed significant bug in the `savepoint` commit/rollback implementation. Many thanks to @Syeberman for raising the issue. See #1225 for details. * Added support for postgresql `INTERVAL` columns. The new `IntervalField` in the `postgres_ext` module is suitable for storing `datetime.timedelta`. 
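Regarding the new `IntervalField` just mentioned, a minimal sketch; the `Job` model and database name are hypothetical, and a running Postgres server is assumed:

```python
from datetime import timedelta

from peewee import Model, TextField
from playhouse.postgres_ext import IntervalField, PostgresqlExtDatabase

db = PostgresqlExtDatabase('peewee_test')

class Job(Model):
    name = TextField()
    duration = IntervalField()  # maps to the Postgres INTERVAL column type.

    class Meta:
        database = db

# Stores a datetime.timedelta and reads it back as one:
# Job.create(name='nightly-sync', duration=timedelta(hours=1, minutes=30))
```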
* Fixed bug where missing `sqlite3` library was causing other, unrelated libraries to throw errors when attempting to import. * Added a `case_sensitive` parameter to the SQLite `REGEXP` function implementation. The default is `False`, to preserve backwards-compatibility. * Fixed bug that caused tables not to be created when using the `dataset` extension. See #1213 for details. * Modified `drop_table` to raise an exception if the user attempts to drop tables with `CASCADE` when the database backend does not support it. * Fixed Python3 issue in the `AESEncryptedField`. * Modified the behavior of string-typed fields to treat the addition operator as concatenation. See #1241 for details. [View commits](https://github.com/coleifer/peewee/compare/2.9.1...2.9.2) ## 2.9.1 * Fixed #1218, where the use of `playhouse.flask_utils` was requiring the `sqlite3` module to be installed. * Fixed #1219 regarding the SQL generation for composite key sub-selects, joins, etc. [View commits](https://github.com/coleifer/peewee/compare/2.9.0...2.9.1) ## 2.9.0 In this release there are two notable changes: * The ``Model.create_or_get()`` method was removed. See the [documentation](http://docs.peewee-orm.com/en/latest/peewee/querying.html#create-or-get) for an example of the code one would write to replicate this functionality. * The SQLite closure table extension gained support for many-to-many relationships thanks to a nice PR by @necoro. [Docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#ClosureTable). [View commits](https://github.com/coleifer/peewee/compare/2.8.8...2.9.0) ## 2.8.8 This release contains a single important bugfix for a regression in specifying the type of lock to use when opening a SQLite transaction. [View commits](https://github.com/coleifer/peewee/compare/2.8.7...2.8.8) ## 2.8.7 This release contains numerous cleanups. ### Bugs fixed * #1087 - Fixed a misuse of the iteration protocol in the `sqliteq` extension. * Ensure that driver exceptions are wrapped when calling `commit` and `rollback`. * #1096 - Fix representation of recursive foreign key relations when using the `model_to_dict` helper. * #1126 - Allow `pskel` to be installed into `bin` directory. * #1105 - Added a `Tuple()` type to Peewee to enable expressing arbitrary tuple expressions in SQL. * #1133 - Fixed bug in the conversion of objects to `Decimal` instances in the `DecimalField`. * Fixed an issue renaming a unique foreign key in MySQL. * Remove the join predicate from CROSS JOINs. * #1148 - Ensure indexes are created when a column is added using a schema migration. * #1165 - Fix bug where the primary key was being overwritten in queries using the closure-table extension. ### New stuff * Added properties to the `SqliteExtDatabase` to expose common `PRAGMA` settings. For example, to set the cache size to 4MB, `db.cache_size = 1000`. * Clarified documentation on calling `commit()` or `rollback()` from within the scope of an atomic block. [See docs](http://docs.peewee-orm.com/en/latest/peewee/transactions.html#transactions). * Allow table creation dependencies to be specified using new `depends_on` meta option. Refs #1076. * Allow specification of the lock type used in SQLite transactions. Previously this behavior was only present in `playhouse.sqlite_ext.SqliteExtDatabase`, but it now exists in `peewee.SqliteDatabase`. * Added support for `CROSS JOIN` expressions in select queries. * Docs on how to implement [optimistic locking](http://docs.peewee-orm.com/en/latest/peewee/hacks.html#optimistic-locking). 
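One way to express the optimistic-locking pattern those docs describe; the `Document` model and the `save_optimistic` helper are hypothetical:

```python
from peewee import IntegerField, Model, SqliteDatabase, TextField

db = SqliteDatabase(':memory:')

class Document(Model):
    content = TextField()
    version = IntegerField(default=1)

    class Meta:
        database = db

def save_optimistic(doc):
    # Conditional UPDATE: succeeds only if nobody bumped the version
    # since we read the row.
    nrows = (Document
             .update(content=doc.content, version=doc.version + 1)
             .where((Document.id == doc.id) &
                    (Document.version == doc.version))
             .execute())
    if nrows == 0:
        raise ValueError('Concurrent modification detected.')
```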
* Documented optional dependencies.
* Generic support for specifying select queries as locking the selected rows, e.g. `FOR UPDATE` or `FOR SHARE`.
* Support for specifying the frame-of-reference in window queries, e.g. specifying `UNBOUNDED PRECEDING`, etc. [See docs](http://docs.peewee-orm.com/en/latest/peewee/api.html#Window).

### Backwards-incompatible changes

* As of 9e76c99, an `OperationalError` is raised if the user calls `connect()` on an already-open Database object. Previously, the existing connection would remain open and a new connection would overwrite it, making it impossible to close the previous connection. If you find this is causing breakage in your application, you can switch the `connect()` call to `get_conn()`, which will only open a connection if necessary. The error **is** indicative of a real issue, though, so audit your code for places where you may be opening a connection without closing it (e.g., module-scope operations).

[View commits](https://github.com/coleifer/peewee/compare/2.8.5...2.8.7)

## 2.8.6

This release was later removed due to containing a bug. See notes on 2.8.7.

## 2.8.5

This release contains two small bugfixes.

* #1081 - fixed the use of parentheses in compound queries on MySQL.
* Fixed some grossness in a helper function used by `prefetch` that was clearing out the `GROUP BY` and `HAVING` clauses of sub-queries.

[View commits](https://github.com/coleifer/peewee/compare/2.8.4...2.8.5)

## 2.8.4

This release contains bugfixes as well as a new playhouse extension module for working with [SQLite in multi-threaded / concurrent environments](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#sqliteq). The new module is called `playhouse.sqliteq` and it works by serializing queries using a dedicated worker thread (or greenlet). The performance is quite good; hopefully this proves useful to someone besides myself! You can learn more by reading the [sqliteq documentation](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#sqliteq).

As a miscellaneous note, I did some major refactoring and cleanup in `ExtQueryResultsWrapper` and its counterpart in the `speedups` module. The code is much easier to read than before.

### Bugs fixed

* #1061 - @akrs patched a bug in `TimestampField` which affected the accuracy of sub-second timestamps (for resolution > 1).
* #1071, small python 3 fix.
* #1072, allow `DeferredRelation` to be used multiple times if there are multiple references to a given deferred model.
* #1073, fixed regression in the speedups module that caused SQL functions to always coerce return values, regardless of the `coerce` flag.
* #1083, another Python 3 issue - this time regarding the use of `exc.message`.

[View commits](https://github.com/coleifer/peewee/compare/2.8.3...2.8.4)

## 2.8.3

This release contains bugfixes and a small backwards-incompatible change to the way foreign key `ObjectIdDescriptor` is named (issue #1050).

### Bugs fixed and general changes

* #1028 - allow the `ensure_join` method to accept `on` and `join_type` parameters. Thanks @paulbooth.
* #1032 - fix bug related to coercing model instances to database parameters when the model's primary key is a foreign key.
* #1035 - fix bug introduced in 2.8.2, where I had added some logic to try and restrict the base `Model` class from being treated as a "real" Model.
* #1039 - update documentation to clarify that lists *or tuples* are acceptable values when specifying SQLite `PRAGMA` statements (see the sketch below).
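A minimal sketch of what that looks like; the database filename and pragma values are illustrative:

```python
from playhouse.sqlite_ext import SqliteExtDatabase

# Either a list or a tuple of 2-tuples is accepted for pragmas,
# applied on each new connection.
db = SqliteExtDatabase('app.db', pragmas=(
    ('journal_mode', 'wal'),
    ('cache_size', 10000)))
```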
* #1041 - PyPy user was unable to install Peewee. (Who in their right mind would *ever* use PyPy?!) Bug was fixed by removing the pre-generated C files from the distribution.
* #1043 - fix bug where the `speedups` C extension was not calling the correct model initialization method, resulting in model instances returned as results of a query having their `dirty` flag incorrectly set.
* #1048 - similar to #1043, add logic to ensure that fields with default values are considered dirty when instantiating the model.
* #1049 - update URL to [APSW](https://rogerbinns.github.io/apsw).
* Fixed unreported bug regarding `TimestampField` with zero values reporting the incorrect datetime.

### New stuff

* [djpeewee](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#djpeewee) extension module now works with Django 1.9.
* [TimestampField](http://docs.peewee-orm.com/en/latest/peewee/api.html#TimestampField) is now an officially documented field.
* #1050 - use the `db_column` of a `ForeignKeyField` for the name of the `ObjectIdDescriptor`, except when the `db_column` and field `name` are the same, in which case the ID descriptor will be named `<field name>_id`.

[View commits](https://github.com/coleifer/peewee/compare/2.8.2...2.8.3)

## 2.8.2

This release contains mostly bug-fixes, clean-ups, and API enhancements.

### Bugs fixed and general cleanups

* #820 - fixed some bugs related to the Cython extension build process.
* #858 - allow blanks and perform type conversion when using the `db_url` extension.
* #922 - ensure that `peewee.OperationalError` is raised consistently when using the `RetryOperationalError` mixin.
* #929 - ensure that `pwiz` will import the appropriate extensions when vendor-specific fields are used.
* #930 - ensure that `pwiz`-generated models containing `UnknownField` placeholders do not blow up when you instantiate them.
* #932 - correctly limit the length of automatically-generated index names.
* #933 - fixed bug where `BlobField` could not be used if its parent model pointed to an uninitialized database `Proxy`.
* #935 - greater consistency with the conversion to Python data-types when performing aggregations, annotations, or calling `scalar()`.
* #939 - ensure the correct data-types are used when initializing a connection pool.
* #947 - fix bug where `Signal` subclasses were not returning rows affected on save.
* #951 - better warnings regarding C extension compilation, thanks @dhaase-de.
* #968 - fix bug where table names starting with numbers generated invalid table names when using `pwiz`.
* #971 - fix bug where parameter was not being used. Thanks @jberkel.
* #974 - fixed the way `SqliteExtDatabase` handles the automatic `rowid` (and `docid`) columns. Thanks for alerting me to the issue and providing a failing test case, @jberkel.
* #976 - fix obscure bug relating to cloning foreign key fields twice.
* #981 - allow `set` instances to be used on the right-hand side of `IN` exprs.
* #983 - fix behavior where the default `id` primary key was inherited regardless. When users would inadvertently include it in their queries, it would use the table alias of its parent class.
* #992 - add support for `db_column` in `djpeewee`.
* #995 - fix the behavior of `truncate_date` with Postgresql. Thanks @Zverik.
* #1011 - correctly handle the `bytes` wrapper used by `PasswordField`.
* #1012 - when selecting and joining on multiple models, do not create model instances when the foreign key is NULL.
* #1017 - do not coerce the return value of function calls to `COUNT` or `SUM`, since the python driver will already give us the right Python value.
* #1018 - use global state to resolve `DeferredRelations`, allowing for a nicer API. Thanks @brenguyen711.
* #1022 - attempt to avoid creating invalid Python when using `pwiz` with MySQL database columns containing spaces. Yes, fucking spaces.
* #1024 - fix bug in SQLite migrator which had a naive approach to fixing indexes.
* #1025 - explicitly check for `None` when determining if the database has been set on `ModelOptions`. Thanks @joeyespo.

### New stuff

* Added `TimestampField` for storing datetimes using integers. Sub-second resolution is possible via a power-of-10 resolution multiplier.
* Added `Database.drop_index()` method.
* Added a `max_depth` parameter to the `model_to_dict` function in the `playhouse.shortcuts` extension module.
* `SelectQuery.first()` function accepts a parameter `n` which applies a limit to the query and returns the first row. Previously the limit was not applied out of consideration for subsequent iterations, but I believe usage has shown that a limit is more desirable than reserving the option to iterate without a second query. The old behavior is preserved in the new `SelectQuery.peek()` method.
* `group_by()`, `order_by()`, `window()` now accept a keyword argument `extend`, which, when set to `True`, will append to the existing values rather than overwriting them.
* Query results support negative indexing.
* C sources are included now as part of the package. I *think* they should be able to compile for python 2 or 3, on linux or windows...but not positive.
* #895 - added the ability to query using the `_id` attribute.
* #948 - added documentation about SQLite limits and how they affect `insert_many`.
* #1009 - allow `DATABASE_URL` as a recognized parameter to the Flask config.

[View commits](https://github.com/coleifer/peewee/compare/2.8.1...2.8.2)

## 2.8.1

This release is long overdue, so apologies if you've been waiting on it and running off master. There are numerous bugfixes contained in this release, so I'll list those first this time.

### Bugs fixed

* #821 - issue warning if Cython is old.
* #822 - better handling of MySQL connections for advanced use-cases.
* #313 - support equality/inequality with generic foreign key queries, and ensure `get_or_create` works with GFKs.
* #834 - fixed Python3 incompatibilities in the `PasswordField`, thanks @mosquito.
* #836 - fix handling of `last_insert_id()` when using `APSWDatabase`.
* #845 - add connection hooks to `APSWDatabase`.
* #852 - check SQLite library version to avoid calls to missing APIs.
* #857 - allow database definition to be deferred when using the connection pool.
* #878 - formerly `.limit(0)` had no effect. Now adds `LIMIT 0`.
* #879 - implement a `__hash__` method for `Model`.
* #886 - fix `count()` for compound select queries.
* #895 - allow writing to the `foreign_key_id` descriptor to set the foreign key value.
* #893 - fix boolean logic bug in `model_to_dict()`.
* #904 - fix side-effect in `clean_prefetch_query`, thanks to @p.kamayev.
* #907 - package includes `pskel` now.
* #852 - fix sqlite version check in BerkeleyDB backend.
* #919 - add runtime check for `sqlite3` library to match MySQL and Postgres. Thanks @M157q.

### New features

* Added a number of [SQLite user-defined functions and aggregates](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#sqlite-udf).
* Use the DB-API2 `Binary` type for `BlobField` (see the sketch below).
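From the caller's perspective nothing changes; `BlobField` still accepts plain `bytes`. A minimal sketch with a hypothetical `Attachment` model:

```python
from peewee import BlobField, Model, SqliteDatabase

db = SqliteDatabase(':memory:')

class Attachment(Model):
    data = BlobField()  # bound via the driver's DB-API2 Binary wrapper.

    class Meta:
        database = db

db.connect()
db.create_tables([Attachment])
Attachment.create(data=b'\x89PNG\r\n\x1a\n')
```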
* Implemented the lucene scoring algorithm in the `sqlite_ext` Cython library.
* #825 - allow a custom base class for `ModelOptions`, providing an extension mechanism.
* #830 - added `SmallIntegerField` type.
* #838 - allow using a custom descriptor class with `ManyToManyField`.
* #855 - merged change from @lez which included docs on using peewee with Pyramid.
* #858 - allow arguments to be passed on query-string when using the `db_url` module. Thanks @RealSalmon.
* #862 - add support for `truncate table`, thanks @dev-zero for the sample code.
* Allow the `related_name` model `Meta` option to be a callable that accepts the foreign key field instance.

[View commits](https://github.com/coleifer/peewee/compare/2.8.0...2.8.1)

## 2.8.0

This release includes a couple new field types and greatly improved C extension support for both speedups and SQLite enhancements. Also includes some work, suggested by @foxx, to remove some places where `Proxy` was used in favor of more obvious APIs.

### New features

* [travis-ci builds](http://travis-ci.org/coleifer/peewee/builds/) now include MySQL and Python 3.5. Dropped support for Python 3.2 and 3.3. Builds also will run the C-extension code.
* C extension speedups now enabled by default, including faster implementations for `dict` and `tuple` `QueryResultWrapper` classes, faster date formatting, and faster field and model sorting.
* C implementations of SQLite functions are now enabled by default. The SQLite extension is now compatible with APSW and can be used in standalone form directly from Python. See [SqliteExtDatabase](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#SqliteExtDatabase) for more details.
* SQLite C extension now supports `murmurhash2`.
* `UUIDField` is now supported for SQLite and MySQL, using `text` and `varchar` respectively, thanks @foxx!
* Added `BinaryField`, thanks again, @foxx!
* Added `PickledField` to `playhouse.fields`.
* `ManyToManyField` now accepts a list of primary keys when adding or removing values from the through relationship.
* Added support for SQLite [table-valued functions](http://sqlite.org/vtab.html#tabfunc2) using the [sqlite-vtfunc library](https://github.com/coleifer/sqlite-vtfunc).
* Significantly simplified the build process for compiling the C extensions.

### Backwards-incompatible changes

* Instead of using a `Proxy` for defining circular foreign key relationships, you now need to use [DeferredRelation](http://docs.peewee-orm.com/en/latest/peewee/api.html#DeferredRelation).
* Instead of using a `Proxy` for defining many-to-many through tables, you now need to use [DeferredThroughModel](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#DeferredThroughModel).
* SQLite Virtual Models must now use `Meta.extension_module` and `Meta.extension_options` to declare the extension and any options. For more details, see [VirtualModel](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#VirtualModel).
* MySQL database will now issue `COMMIT` statements for `SELECT` queries. This was not necessary, but was added due to an influx of confused users creating GitHub tickets. Hint: learn to use your damn database, it's not magic!

### Bugs fixed

Some of these may have been included in a previous release, but since I did not list them I'm listing them here.

* #766, fixed bug with PasswordField and Python3. Fuck Python 3.
* #768, fixed SortedFieldList and `remove_field()`. Thanks @klen!
* #771, clarified docs for APSW.
* #773, added docs for request hooks in Pyramid (who uses Pyramid, by the way?).
* #774, fixed bug where `prefetch()` only loaded the first `ForeignKeyField` for a given relation.
* #782, fixed typo in docs.
* #791, foreign keys were not correctly coercing values to the appropriate Python type.
* #792, cleaned up some CSV utils code.
* #798, cleaned up iteration protocol in QueryResultWrappers.
* #806, not really a bug, but MySQL users were clowning around and needed help.

[View commits](https://github.com/coleifer/peewee/compare/2.7.4...2.8.0)

## 2.7.4

This is another small release which adds code to automatically build the SQLite C extension if `libsqlite` is available. The release also includes:

* Support for `UUIDField` with SQLite.
* Support for registering additional database classes with the `db_url` module via `register_database`.
* `prefetch()` supports fetching multiple foreign-keys to the same model class.
* Added method to validate FTS5 search queries.

[View commits](https://github.com/coleifer/peewee/compare/2.7.3...2.7.4)

## 2.7.3

Small release which includes some changes to the BM25 sorting algorithm and the addition of a [`JSONField`](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#JSONField) for use with the new [JSON1 extension](http://sqlite.org/json1.html).

## 2.7.2

People were having trouble building the sqlite extension. I figured enough people were having trouble that I made it a separate command: `python setup.py build_sqlite_ext`.

## 2.7.1

Jacked up the setup.py.

## 2.7.0

New APIs, features, and performance improvements.

### Notable changes and new features

* [`PasswordField`](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#PasswordField) that uses the `bcrypt` module.
* Added new Model [`Meta.only_save_dirty`](http://docs.peewee-orm.com/en/latest/peewee/models.html#model-options-and-table-metadata) flag to, by default, only save fields that have been modified.
* Added support for [`upsert()`](http://docs.peewee-orm.com/en/latest/peewee/api.html#InsertQuery.upsert) on MySQL (in addition to SQLite).
* Implemented SQLite ranking functions (`rank` and `bm25`) in Cython, and changed both the Cython and Python APIs to accept weight values for every column in the search index. This more closely aligns with the APIs provided by FTS5. In fact, made the APIs for FTS4 and FTS5 result ranking compatible.
* Major changes to the `sqlite_ext` module. Function callbacks implemented in Python were implemented in Cython (e.g. date manipulation and regex processing) and will be used if Cython is available when Peewee is installed.
* Support for the experimental new [FTS5](http://sqlite.org/fts5.html) SQLite search extension.
* Added `SearchField` for use with the SQLite FTS extensions.
* Added `RowIDField` for working with the special `rowid` column in SQLite.
* Added a model class validation hook to allow model subclasses to perform any validation after class construction. This is currently used to ensure that `FTS5Model` subclasses do not violate any rules required by the FTS5 virtual table.

### Bugs fixed

* **#751**, fixed some very broken behavior in the MySQL migrator code. Added more tests.
* **#718**, added a `RetryOperationalError` mixin that will automatically try to reconnect after a failed query. There was a bug in the previous error handler implementation that made this impossible, which is also fixed.

#### Small bugs

* #713, fix column name regular expression in SQLite migrator.
* #724, fixed `NULL` handling with the Postgresql `JSONField`.
* #725, added `__module__` attribute to `DoesNotExist` classes.
* #727, removed the `commit_select` logic for MySQL databases.
* #730, added documentation for `Meta.order_by` API.
* #745, added `cast()` method for casting JSON field values.
* #748, added docs and method override to indicate that SQLite does not support adding foreign key constraints after table creation.
* Check whether pysqlite or libsqlite were compiled with BerkeleyDB support when using the `BerkeleyDatabase`.
* Clean up the options passed to SQLite virtual tables on creation.

### Small features

* #700, use sensible default if field's declared data-type is not present in the field type map.
* #707, allow model to be specified explicitly in `prefetch()`.
* #734, automatic testing against python 3.5.
* #753, added support for `upsert()` with MySQL via the `REPLACE INTO ...` statement.
* #757, `pwiz`, the schema introspection tool, will now generate multi-column index declarations.
* #756, `pwiz` will capture passwords using the `getpass()` function rather than via the command-line.
* Removed `Database.sql_error_handler()`, replaced with the `RetryOperationalError` mixin class.
* Documentation for `Meta.order_by` and `Meta.primary_key`.
* Better documentation around column and table constraints.
* Improved performance for some methods that are called frequently.
* Added `coerce` parameter to `BareField` and added documentation.

[View commits](https://github.com/coleifer/peewee/compare/2.6.4...2.7.0)

## 2.6.4

Updating so some of the new APIs are available on pypi.

### Bugs fixed

* #646, fixed a bug with the Cython speedups not being included in the package.
* #654, documented how to create models with no primary key.
* #659, allow bare `INSERT` statements.
* #674, regarding foreign key / one-to-one relationships.
* #676, allow `ArrayField` to accept tuples in addition to lists.
* #679, fix regarding unsaved relations.
* #682, refactored QueryResultWrapper to allow multiple independent iterations over the same underlying result cache.
* #692, fix bug with multiple joins to same table + eager loading.
* #695, fix bug when connection fails while using an execution context.
* #698, use correct column names with non-standard django foreign keys.
* #706, return `datetime.time` instead of `timedelta` for MySQL time fields.
* #712, fixed SQLite migrator regular expressions. Thanks @sroebert.

### New features

* #647, #649, #650, added support for `RETURNING` clauses. Update, Insert and Delete queries can now be called with `RETURNING` to retrieve the rows that were affected. [See docs](http://docs.peewee-orm.com/en/latest/peewee/querying.html#returning-clause).
* #685, added web request hook docs.
* #691, allowed arbitrary model attributes and methods to be serialized by `model_to_dict()`. [Docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#model_to_dict).
* #696, allow `model_to_dict()` to introspect query for which fields to serialize.
* Added backend-agnostic [truncate_date()](http://docs.peewee-orm.com/en/latest/peewee/api.html#Database.truncate_date) implementation.
* Added a `FixedCharField` which uses column type `CHAR`.
* Added support for arbitrary `PRAGMA` statements to be run on new SQLite connections. [Docs](http://docs.peewee-orm.com/en/latest/peewee/databases.html#sqlite-pragma).
* Removed `berkeley_build.sh` script. See instructions [on my blog instead](http://charlesleifer.com/blog/building-the-python-sqlite-driver-for-use-with-berkeleydb/).

[View commits](https://github.com/coleifer/peewee/compare/2.6.2...2.6.4)

## 2.6.2

Just a regular old release.
### Bugs fixed

* #641, fixed bug with exception wrapping and Python 2.6.
* #634, fixed bug where the correct query result wrapper was not being used for certain composite queries.
* #625, cleaned up some example code.
* #614, fixed bug with `aggregate_rows()` when there are multiple joins to the same table.

### New features

* Added [create_or_get()](http://docs.peewee-orm.com/en/latest/peewee/querying.html#create-or-get) as a companion to `get_or_create()`.
* Added support for `ON CONFLICT` clauses for `UPDATE` and `INSERT` queries. [Docs](http://docs.peewee-orm.com/en/latest/peewee/api.html#UpdateQuery.on_conflict).
* Added a [JSONKeyStore](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#JSONKeyStore) to `playhouse.kv`.
* Added Cythonized version of `strip_parens()`, with plans to perhaps move more performance-critical code to Cython in the future.
* Added docs on specifying [vendor-specific database parameters](http://docs.peewee-orm.com/en/latest/peewee/database.html#vendor-specific-parameters).
* Added docs on specifying [field default values](http://docs.peewee-orm.com/en/latest/peewee/models.html#default-field-values) (both client and server-side).
* Added docs on [foreign key field back-references](http://docs.peewee-orm.com/en/latest/peewee/models.html#foreignkeyfield).
* Added docs for [models without a primary key](http://docs.peewee-orm.com/en/latest/peewee/models.html#models-without-a-primary-key).
* Cleaned up docs on `prefetch()` and `aggregate_rows()`.

[View commits](https://github.com/coleifer/peewee/compare/2.6.1...2.6.2)

## 2.6.1

This release contains a number of small fixes and enhancements.

### Bugs fixed

* #606, support self-referential joins with `prefetch` and `aggregate_rows()` methods.
* #588, accommodate changes in SQLite's `PRAGMA index_list()` return value.
* #607, fixed bug where `pwiz` was not passing table names to introspector.
* #591, fixed bug with handling of named cursors in older psycopg2 versions.
* Removed some cruft from the `APSWDatabase` implementation.

### New features

* Added [CompressedField](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#CompressedField) and [AESEncryptedField](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#AESEncryptedField).
* #609, #610, added Django-style foreign key ID lookup. [Docs](http://docs.peewee-orm.com/en/latest/peewee/models.html#foreignkeyfield).
* Added support for [Hybrid Attributes](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#hybrid-attributes) (cool idea courtesy of SQLAlchemy).
* Added `upsert` keyword argument to the `Model.save()` function (SQLite only).
* #587, added support for `ON CONFLICT` SQLite clause for `INSERT` and `UPDATE` queries. [Docs](http://docs.peewee-orm.com/en/latest/peewee/api.html#UpdateQuery.on_conflict).
* #601, added hook for programmatically defining table names. [Model options docs](http://docs.peewee-orm.com/en/latest/peewee/models.html#model-options-and-table-metadata).
* #581, #611, support connection pools with `playhouse.db_url.connect()`. [Docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#connect).
* Added [Contributing](http://docs.peewee-orm.com/en/latest/peewee/contributing.html) section to the docs.

[View commits](https://github.com/coleifer/peewee/compare/2.6.0...2.6.1)

## 2.6.0

This is a tiny update, mainly consisting of a new-and-improved implementation of `get_or_create()` ([docs](http://docs.peewee-orm.com/en/latest/peewee/api.html#Model.get_or_create)).
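A minimal sketch of the new semantics; the `User` model and its fields are hypothetical:

```python
from peewee import BooleanField, CharField, Model, SqliteDatabase

db = SqliteDatabase(':memory:')

class User(Model):
    username = CharField(unique=True)
    is_admin = BooleanField(default=False)

    class Meta:
        database = db

db.connect()
db.create_tables([User])

# get_or_create() now returns an (instance, created) 2-tuple; the
# "defaults" dict is only applied when a new row is inserted.
user, created = User.get_or_create(
    username='charlie',
    defaults={'is_admin': False})
print(created)  # True on the first call, False on subsequent calls.
```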
### Backwards-incompatible changes

* `get_or_create()` now returns a 2-tuple consisting of the model instance and a boolean indicating whether the instance was created. The function now behaves just like the Django equivalent.

### New features

* #574, better support for setting the character encoding on Postgresql database connections. Thanks @klen!
* Improved implementation of [get_or_create()](http://docs.peewee-orm.com/en/latest/peewee/api.html#Model.get_or_create).

[View commits](https://github.com/coleifer/peewee/compare/2.5.1...2.6.0)

## 2.5.1

This is a relatively small release with a few important bugfixes.

### Bugs fixed

* #566, fixed a bug regarding parentheses around compound `SELECT` queries (e.g. `UNION`, `INTERSECT`, etc.).
* Fixed unreported bug where table aliases were not generated correctly for compound `SELECT` queries.
* #559, add option to preserve original column order with `pwiz`. Thanks @elgow!
* Fixed unreported bug where selecting all columns from a `ModelAlias` did not use the appropriate `FieldAlias` objects.

### New features

* #561, added an option for bulk insert queries to return the list of auto-generated primary keys. See [docs for InsertQuery.return_id_list](http://docs.peewee-orm.com/en/latest/peewee/api.html#InsertQuery.return_id_list).
* #569, added `parse` function to the `playhouse.db_url` module. Thanks @stt!
* Added [hacks](http://docs.peewee-orm.com/en/latest/peewee/hacks.html) section to the docs. Please contribute your hacks!

### Backwards-incompatible changes

* Calls to `Node.in_()` and `Node.not_in()` do not take `*args` anymore and instead take a single argument.

[View commits](https://github.com/coleifer/peewee/compare/2.5.0...2.5.1)

## 2.5.0

There are a couple new features, so I thought I'd bump to 2.5.x. One change Postgres users may be happy to see is the use of `INSERT ... RETURNING` to perform inserts. This should definitely speed up inserts for Postgres, since an extra query is no longer needed to get the new auto-generated primary key.

I also added a [new context manager/decorator](http://docs.peewee-orm.com/en/latest/peewee/database.html#using-multiple-databases) that allows you to use a different database for the duration of the wrapped block.

### Bugs fixed

* #534, CSV utils was erroneously stripping the primary key from CSV data.
* #537, fix upserts when using `insert_many`.
* #541, respect `autorollback` with `PostgresqlExtDatabase`. Thanks @davidmcclure.
* #551, fix for QueryResultWrapper's implementation of the iterator protocol.
* #554, allow SQLite journal_mode to be set at run-time.
* Fixed case-sensitivity issue with `DataSet`.

### New features

* Added support for [CAST expressions](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cast).
* Added a hook for [extending Node](http://docs.peewee-orm.com/en/latest/peewee/api.html#Node.extend) with custom methods.
* `JOIN_` became `JOIN.`, e.g. `.join(JOIN.LEFT_OUTER)`.
* `OP_` became `OP.`.
* #556, allowed using `+` and `-` prefixes to indicate ascending/descending ordering.
* #550, added [Database.initialize_connection()](http://docs.peewee-orm.com/en/latest/peewee/database.html#additional-connection-initialization) hook.
* #549, bind selected columns to a particular model. Thanks @jhorman, nice PR!
* #531, support for swapping databases at run-time via [Using](http://docs.peewee-orm.com/en/latest/peewee/database.html#using-multiple-databases).
* #530, support for SQLCipher and Python3.
* New `RowIDField` for `sqlite_ext` playhouse module.
This field can be used to interact with SQLite `rowid` fields. * Added `LateralJoin` helper to the `postgres_ext` playhouse module. * New [example blog app](https://github.com/coleifer/peewee/tree/master/examples/blog). [View commits](https://github.com/coleifer/peewee/compare/2.4.7...2.5.0) ## 2.4.7 ### Bugs fixed * #504, Docs updates. * #506, Fixed regression in `aggregate_rows()` * #510, Fixes bug in pwiz overwriting columns. * #514, Correctly cast foreign keys in `prefetch()`. * #515, Simplifies queries issued when doing recursive deletes. * #516, Fix cloning of Field objects. * #519, Aggregate rows now correctly preserves ordering of joined instances. * Unreported, fixed bug to not leave expired connections sitting around in the pool. ### New features * Added support for Postgresql's ``jsonb`` type with [BinaryJSONField](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#BinaryJSONField). * Add some basic [Flask helpers](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#flask-utils). * Add support for `UNION ALL` queries in #512 * Add `SqlCipherExtDatabase`, which combines the sqlcipher database with the sqlite extensions. * Add option to print metadata when generating code with ``pwiz``. [View commits](https://github.com/coleifer/peewee/compare/2.4.6...2.4.7) ## 2.4.6 This is a relatively small release with mostly bug fixes and updates to the documentation. The one new feature I'd like to highlight is the ``ManyToManyField`` ([docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#ManyToManyField)). ### Bugs fixed * #503, fixes behavior of `aggregate_rows()` when used with a `CompositeKey`. * #498, fixes value coercion for field aliases. * #492, fixes bug with pwiz and composite primary keys. * #486, correctly handle schemas with reflection module. ### New features * Peewee has a new [ManyToManyField](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#ManyToManyField) available in the ``playhouse.shortcuts`` module. * Peewee now has proper support for *NOT IN* queries through the ``Node.not_in()`` method. * Models now support iteration. This is equivalent to ``Model.select()``. [View commits](https://github.com/coleifer/peewee/compare/2.4.5...2.4.6) ## 2.4.5 I'm excited about this release, as in addition to a number of new features and bugfixes, it also is a step towards cleaner code. I refactored the tests into a number of modules, using a standard set of base test-cases and helpers. I also introduced the `mock` library into the test suite and plan to use it for cleaner tests going forward. There's a lot of work to do to continue cleaning up the tests, but I'm feeling good about the changes. Curiously, the test suite runs faster now. ### Bugs fixed * #471, #482 and #484, all of which had to do with how joins were handled by the `aggregate_rows()` query result wrapper. * #472 removed some needless special-casing in `Model.save()`. * #466 fixed case-sensitive issues with the SQLite migrator. * #474 fixed a handful of bugs that cropped up migrating foreign keys with SQLite. * #475 fixed the behavior of the SQLite migrator regarding auto-generated indexes. * #479 fixed a bug in the code that stripped extra parentheses in the SQL generator. * Fixed a handful of bugs in the APSW extension. ### New features * Added connection abstraction called `ExecutionContext` ([see docs](http://docs.peewee-orm.com/en/latest/peewee/database.html#advanced-connection-management)). 
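A minimal sketch of the `ExecutionContext` API mentioned in that last item; the database file and `User` model are hypothetical stand-ins:

```python
from peewee import CharField, Model, SqliteDatabase

db = SqliteDatabase('app.db')

class User(Model):
    username = CharField()

    class Meta:
        database = db

db.create_tables([User], safe=True)

# Queries inside the block run on a dedicated connection, wrapped in a
# transaction; execution_context() also works as a decorator.
with db.execution_context():
    User.create(username='charlie')
```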
* Made all context managers work as decorators (`atomic`, `transaction`, `savepoint`, `execution_context`). * Added explicit methods for `IS NULL` and `IS NOT NULL` queries. The latter was actually necessary since the behavior is different from `NOT IS NULL (...)`. * Allow disabling backref validation (#465) * Made quite a few improvements to the documentation, particularly sections on transactions. * Added caching to the [DataSet](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#dataset) extension, which should improve performance. * Made the SQLite migrator smarter with regards to preserving indexes when a table copy is necessary. [View commits](https://github.com/coleifer/peewee/compare/2.4.4...2.4.5) ## 2.4.4 Biggest news: peewee has a new logo! ![](https://media.charlesleifer.com/blog/photos/peewee-logo-bold.png) * Small documentation updates here and there. ### Backwards-incompatible changes * The argument signature for the `SqliteExtDatabase.aggregate()` decorator changed so that the aggregate name is the first parameter, and the number of parameters is the second parameter. If no values are specified, peewee will choose the name of the class and an un-specified number of arguments (`-1`). * The logic for saving a model with a composite key changed slightly. Previously, if a model had a composite primary key and you called `save()`, only the dirty fields would be saved. ### Bugs fixed * #462 * #465, add hook for disabling backref validation. * #466, fix case-sensitive table names with migration module. * #469, save only dirty fields. ### New features * Lots of enhancements and cleanup to the `playhouse.apsw_ext` module. * The `playhouse.reflection` module now supports introspecting indexes. * Added a model option for disabling backref validation. * Added support for the SQLite [closure table extension](http://charlesleifer.com/blog/querying-tree-structures-in-sqlite-using-python-and-the-transitive-closure-extension/). * Added support for *virtual fields*, which act on dynamically-created virtual table fields. * Added a new example: a virtual table implementation that exposes Redis as a relational database table. * Added a module `playhouse.sqlite_aggregates` that contains a handful of aggregates you may find useful when developing with SQLite. [View commits](https://github.com/coleifer/peewee/compare/2.4.3...2.4.4) ## 2.4.3 This release contains numerous improvements, particularly around the built-in database introspection utilities. Peewee should now also be compatible with PyPy. ### Bugs fixed * #466, table names are case sensitive in the SQLite migrations module. * #465, added option to disable backref validation. * #462, use the schema name consistently with postgres reflection. ### New features * New model *Meta* option to disable backref validation. [See validate_backrefs](http://docs.peewee-orm.com/en/latest/peewee/models.html#model-options-and-table-metadata). * Added documentation on ordering by calculated values. * Added basic PyPy compatibility. * Added logic to close cursors after they have been exhausted. * Structured and consolidated database metadata introspection, including improvements for introspecting indexes. * Added support to [prefetch](http://docs.peewee-orm.com/en/latest/peewee/api.html?highlight=prefetch#prefetch) for traversing *up* the query tree. * Added introspection option to skip invalid models while introspecting. * Added option to limit the tables introspected. * Added closed connection detection to the MySQL connection pool. 
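For context, the pooled MySQL database lives in `playhouse.pool`; a minimal sketch with hypothetical connection parameters:

```python
from playhouse.pool import PooledMySQLDatabase

# stale_timeout (seconds) controls when idle connections are recycled;
# with this release the pool can also detect connections that were
# closed on the server side.
db = PooledMySQLDatabase(
    'peewee_test',
    max_connections=8,
    stale_timeout=300,
    user='root')
```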
* Enhancements to passing options when creating virtual tables with SQLite.
* Added factory method for generating Closure tables for use with the `transitive_closure` SQLite extension.
* Added support for loading SQLite extensions.
* Numerous test-suite enhancements and new test-cases.

[View commits](https://github.com/coleifer/peewee/compare/2.4.2...2.4.3)

## 2.4.2

This release contains a number of improvements to the `reflection` and `migrate` extension modules. I also added an encrypted *diary* app to the [examples](https://github.com/coleifer/peewee/tree/master/examples) directory.

### Bugs fixed

* #449, typo in the db_url extension, thanks to @malea for the fix.
* #457 and #458, fixed documentation deficiencies.

### New features

* Added support for [importing data](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#importing-data) when using the [DataSet extension](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#dataset).
* Added an encrypted diary app to the examples.
* Better index reconstruction when altering columns on SQLite databases with the [migrate](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#migrate) module.
* Support for multi-column primary keys in the [reflection](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#reflection) module.
* Close cursors more aggressively when executing SELECT queries.

[View commits](https://github.com/coleifer/peewee/compare/2.4.1...2.4.2)

## 2.4.1

This release contains a few small bugfixes.

### Bugs fixed

* #448, add hook to the connection pool for detecting closed connections.
* #229, fix join attribute detection.
* #447, fixed documentation typo.

[View commits](https://github.com/coleifer/peewee/compare/2.4.0...2.4.1)

## 2.4.0

This release contains a number of enhancements to the `playhouse` collection of extensions.

### Backwards-incompatible changes

As of 2.4.0, most of the introspection logic was moved out of the `pwiz` module and into `playhouse.reflection`.

### New features

* Created a new [reflection](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#reflection) extension for introspecting databases. The *reflection* module additionally can generate actual peewee Model classes dynamically.
* Created a [dataset](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#dataset) library (based on the [SQLAlchemy project](https://dataset.readthedocs.io/) of the same name). For more info, check out the blog post [announcing playhouse.dataset](http://charlesleifer.com/blog/saturday-morning-hacks-dataset-for-peewee/).
* Added a [db_url](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#database-url) module which creates `Database` objects from a connection string.
* Added [csv dump](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#dumping-csv) functionality to the [CSV utils](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#csv-utils) extension.
* Added an [atomic](http://docs.peewee-orm.com/en/latest/peewee/transactions.html#nesting-transactions) context manager to support nested transactions.
* Added support for HStore, JSON and TSVector to the `reflection` module.
* More documentation updates.

### Bugs fixed

* Fixed #440, which fixes a bug where `Model.dirty_fields` did not return an empty set for some subclasses of `QueryResultWrapper`.

[View commits](https://github.com/coleifer/peewee/compare/2.3.3...2.4.0)

## 2.3.3

This release contains a lot of improvements to the documentation and a mixed bag of other new features and bugfixes.
### Backwards-incompatible changes

As of 2.3.3, all peewee `Database` instances have a default of `True` for the `threadlocals` parameter. This means that a connection is opened for each thread. It seemed to me that sharing connections across threads caused a lot of confusion for users who weren't aware of (or familiar with) the `threadlocals` parameter. For single-threaded apps the behavior will not be affected, but for multi-threaded applications, if you wish to share your connection across threads you must now specify `threadlocals=False`. For more information, see the [documentation](http://docs.peewee-orm.com/en/latest/peewee/api.html#Database).

I also renamed the `Model.get_id()` and `Model.set_id()` convenience methods so as not to conflict with Flask-Login. These methods probably should have been private anyway, and the new methods are named `_get_pk_value()` and `_set_pk_value()`.

### New features

* Basic support for [Postgresql full-text search](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#pg-fts).
* Helper functions for converting models to dictionaries and unpacking dictionaries into model instances. See [docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#model_to_dict).

### Bugs fixed

* Fixed #428, documentation formatting error.
* Fixed #429, correcting the way default values are initialized for bulk inserts.
* Fixed #432, making the HStore extension optional when using `PostgresqlExtDatabase`.
* Fixed #435, allowing peewee to be used with Flask-Login.
* Fixed #436, allowing the SQLite date_part and date_trunc functions to correctly handle NULL values.
* Fixed #438, in which the ordering of clauses in a Join expression was causing unpredictable behavior when selecting related instances.
* Updated the `berkeley_build.sh` script, which was incompatible with the newest version of `bsddb3`.

[View commits](https://github.com/coleifer/peewee/compare/2.3.2...2.3.3)

## 2.3.2

This release contains mostly bugfixes.

### Changes in 2.3.2

* Fixed #421, allowing division operations to work correctly in py3k.
* Added support for a custom json.dumps command, thanks to @alexlatchford.
* Fixed some foreign key generation bugs with pwiz in #426.
* Fixed a parentheses bug with UNION queries, #422.
* Added support for returning partial JSON data-structures from postgresql.

[View commits](https://github.com/coleifer/peewee/compare/2.3.1...2.3.2)

## 2.3.1

This release contains a fix for a bug introduced in 2.3.0: table names were being included, unquoted, in update queries, which caused problems when the table name was a keyword.

### Changes in 2.3.1

* [Quote table name / alias](https://github.com/coleifer/peewee/issues/414)

[View commits](https://github.com/coleifer/peewee/compare/2.3.0...2.3.1)

## 2.3.0

This release contains a number of bugfixes, enhancements and a rewrite of much of the documentation.

### Changes in 2.3.0

* [New and improved documentation](http://docs.peewee-orm.com/)
* Added [aggregate_rows()](http://docs.peewee-orm.com/en/latest/peewee/querying.html#list-users-and-all-their-tweets) method for mitigating N+1 queries.
* Query compiler performance improvements and rewrite of table alias internals (51d82fcd and d8d55df04).
* Added context-managers and decorators for [counting queries](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#count_queries) and [asserting query counts](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#assert_query_count).
* Allow `UPDATE` queries to contain subqueries for values ([example](http://docs.peewee-orm.com/en/latest/peewee/querying.html#atomic-updates)).
* Support for `INSERT INTO / SELECT FROM` queries ([docs](http://docs.peewee-orm.com/en/latest/peewee/api.html?highlight=insert_from#Model.insert_from)).
* Allow `SqliteDatabase` to set the database's journal mode.
* Added a method for concatenation.
* Moved ``UUIDField`` out of the playhouse and into peewee.
* Added [pskel](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#pskel) script.
* Documentation for [BerkeleyDB](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#berkeleydb).

### Bugs fixed

* #340, allow inner query values to be used in outer query joins.
* #380, fixed foreign key handling in SQLite migrations.
* #389, mark foreign keys as dirty on assignment.
* #391, added an ``orwhere()`` method.
* #392, fixed ``order_by`` meta option inheritance bug.
* #394, fixed UUID and conversion of foreign key values (thanks @alexlatchford).
* #395, allow selecting all columns using ``SQL('*')``.
* #396, fixed query compiler bug that was adding unnecessary parentheses around expressions.
* #405, fixed behavior of ``count()`` when query has a limit or offset.

[View commits](https://github.com/coleifer/peewee/compare/2.2.5...2.3.0)

## 2.2.5

This is a small release and contains a handful of fixes.

### Changes in 2.2.5

* Added a `Window` object for creating reusable window definitions.
* Added support for `DISTINCT ON (...)`.
* Added a BerkeleyDB-backed sqlite `Database` and build script.
* Fixed how the `UUIDField` handles `None` values (thanks @alexlatchford).
* Fixed various things in the example app.
* Added 3.4 to the travis build (thanks @frewsxcv).

[View commits](https://github.com/coleifer/peewee/compare/2.2.4...2.2.5)

## 2.2.4

This release contains a complete rewrite of `pwiz` as well as some improvements to the SQLite extension, including support for the BM25 ranking algorithm for full-text searches. I also merged support for sqlcipher, an encrypted SQLite database (many thanks to @thedod!).

### Changes in 2.2.4

* Rewrite of `pwiz`, the schema introspection utility.
* `Model.save()` returns a value indicating the number of modified rows.
* Fixed bug with `PostgresqlDatabase.last_insert_id()` leaving a transaction open in autocommit mode (#353).
* Added BM25 ranking algorithm for full-text searches with SQLite.

[View commits](https://github.com/coleifer/peewee/compare/2.2.3...2.2.4)

## 2.2.3

This release contains a new migrations module in addition to a number of small features and bug fixes.

### Changes in 2.2.3

* New migrations module.
* Added a return value to `Model.save()` indicating the number of rows affected.
* Added a `date_trunc()` method that works for Sqlite.
* Added a `Model.sqlall()` class-method to return all the SQL to generate the model / indices.

### Bugs fixed

* #342, allow functions to not coerce parameters automatically.
* #338, fixed unaliased columns when using Array and Json fields with postgres, thanks @mtwesley.
* #331, corrected issue with the way unicode arrays were adapted with psycopg2.
* #328, pwiz / mysql bug.
* #326, fixed calculation of the alias_map when using subqueries.
* #324, bug with `prefetch()` not selecting the correct primary key.

[View commits](https://github.com/coleifer/peewee/compare/2.2.2...2.2.3)

## 2.2.1

I've been looking forward to this release, as it contains a couple of new features that I've been wanting to add for some time now. Hope you find them useful.
### Changes in 2.2.1

* Window queries using ``OVER`` syntax.
* Compound query operations ``UNION``, ``INTERSECT``, ``EXCEPT`` as well as symmetric difference.

### Bugs fixed

* #300, pwiz was not correctly interpreting some foreign key constraints in SQLite.
* #298, drop table with cascade API was missing.
* #294, typo.

[View commits](https://github.com/coleifer/peewee/compare/2.2.0...2.2.1)

## 2.2.0

This release contains a large refactoring of the way SQL was generated for both the standard query classes (`Select`, `Insert`, `Update`, `Delete`) as well as for the DDL methods (`create_table`, `create_index`, etc). Instead of joining strings of SQL and manually quoting things, I've created `Clause` objects containing multiple `Node` objects to represent all parts of the query.

I also changed the way peewee determines the SQL to represent a field. Now a field implements ``__ddl__`` and ``__ddl_column__`` methods. The former creates the entire field definition, e.g.:

    "quoted_column_name" [NOT NULL/PRIMARY KEY/DEFAULT NEXTVAL(...)/CONSTRAINTS...]

The latter method is responsible just for the column type definition. This might return ``VARCHAR(255)`` or simply ``TEXT``.

I've also added support for arbitrary constraints on each field, so you might have:

    price = DecimalField(decimal_places=2, constraints=[Check('price > 0')])

### Changes in 2.2.0

* Refactored query generation for both SQL queries and DDL queries.
* Support for arbitrary column constraints.
* `autorollback` option to the `Database` class that will roll back the transaction before raising an exception.
* Added `JSONField` type to the `postgresql_ext` module.
* Track fields that are explicitly set, allowing faster saves (thanks @soasme).
* Allow the `FROM` clause to be an arbitrary `Node` object (#290).
* `schema` is a new `Model.Meta` option and is used throughout the code.
* Allow indexing operation on HStore fields (thanks @zdxerr, #293).

### Bugs fixed

* #277 (where calls not chainable with update query)
* #278, use `wraps()`, thanks @lucasmarshall
* #284, call `prepared()` after `create()`, thanks @soasme.
* #286, cursor description issue with pwiz + postgres

[View commits](https://github.com/coleifer/peewee/compare/2.1.7...2.2.0)

## 2.1.7

### Changes in 2.1.7

* Support for savepoints (Sqlite, Postgresql and MySQL) using an API similar to that of transactions.
* Common set of exceptions to wrap DB-API 2 driver-specific exception classes, e.g. ``peewee.IntegrityError``.
* When pwiz cannot determine the underlying column type, display it in a comment in the generated code.
* Support for circular foreign-keys.
* Moved ``Proxy`` into peewee (previously in ``playhouse.proxy``).
* Renamed ``R()`` to ``SQL()``.
* General code cleanup, some new comments and docstrings.

### Bugs fixed

* Fixed a small bug in the way errors were handled in the transaction context manager.
* #257
* #265, nest multiple calls to functions decorated with `@database.commit_on_success`.
* #266
* #267

Commits: https://github.com/coleifer/peewee/compare/2.1.6...2.1.7

Released 2013-12-25

## 2.1.6

Changes included in 2.1.6:

* [Lightweight Django integration](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#django-integration).
* Added a [csv loader](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#csv-loader) to playhouse.
* Register unicode converters per-connection instead of globally when using `psycopg2`.
* Fix for how the related object cache is invalidated (#243).
Commits: https://github.com/coleifer/peewee/compare/2.1.5...2.1.6

Released 2013-11-19

## 2.1.5

### Summary of new features

* Rewrote the ``playhouse.postgres_ext.ServerSideCursor`` helper to work with a single query. [Docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#server-side-cursors).
* Added error handler hook to the database class, allowing your code to choose how to handle errors executing SQL. [Docs](http://docs.peewee-orm.com/en/latest/peewee/api.html#Database.sql_error_handler).
* Allow arbitrary attributes to be stored in ``Model.Meta`` (a5e13bb26d6196dbd24ff228f99ff63d9c046f79).
* Support for composite primary keys (!!). [How-to](http://docs.peewee-orm.com/en/latest/peewee/cookbook.html#composite-primary-keys) and [API docs](http://docs.peewee-orm.com/en/latest/peewee/api.html#CompositeKey).
* Added helper for generating ``CASE`` expressions. [Docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#case).
* Allow the table alias to be specified as a model ``Meta`` option.
* Added ability to specify ``NOWAIT`` when issuing ``SELECT FOR UPDATE`` queries.

### Bug fixes

* #147, SQLite auto-increment behavior.
* #222
* #223, missing call to ``execute()`` in docs.
* #224, python 3 compatibility fix.
* #227, was using wrong column type for boolean with MySQL.

Commits: https://github.com/coleifer/peewee/compare/2.1.4...2.1.5

Released 2013-10-19

## 2.1.4

* Small refactor of some components used to represent expressions (mostly better names).
* Support for [Array fields](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#ArrayField) in postgresql.
* Added notes on [Proxy](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#proxy).
* Support for [Server side cursors](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#server-side-cursors) with postgresql.
* Code cleanups for more consistency.

Commits: https://github.com/coleifer/peewee/compare/2.1.3...2.1.4

Released 2013-08-05

## 2.1.3

* Added the ``sqlite_ext`` module, including support for virtual tables, full-text search, user-defined functions, collations and aggregates, as well as more granular locking.
* Manually convert data-types when doing simple aggregations - fixes issue #208.
* Profiled code and dramatically increased performance of benchmarks.
* Added a proxy object for lazy database initialization - fixes issue #210.

Commits: https://github.com/coleifer/peewee/compare/2.1.2...2.1.3

Released 2013-06-28

-------------------------------------

## 2.0.0

Major rewrite, see notes here: http://docs.peewee-orm.com/en/latest/peewee/upgrading.html#upgrading

peewee-3.17.7/LICENSE000066400000000000000000000020421470346076600141030ustar00rootroot00000000000000Copyright (c) 2010 Charles Leifer

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

peewee-3.17.7/MANIFEST.in000066400000000000000000000005441470346076600146410ustar00rootroot00000000000000include CHANGELOG.md
include LICENSE
include README.rst
include TODO.rst
include pyproject.toml
include runtests.py
include tests.py
include playhouse/*.pyx
include playhouse/*.c
include playhouse/pskel
include playhouse/README.md
include playhouse/tests/README
recursive-include examples *
recursive-include docs *
recursive-include playhouse/_pysqlite *

peewee-3.17.7/README.rst000066400000000000000000000136731470346076600146010ustar00rootroot00000000000000.. image:: https://media.charlesleifer.com/blog/photos/peewee3-logo.png

peewee
======

Peewee is a simple and small ORM. It has few (but expressive) concepts, making it easy to learn and intuitive to use.

* a small, expressive ORM
* python 2.7+ and 3.4+
* supports sqlite, mysql, mariadb, postgresql and cockroachdb
* tons of `extensions `_

New to peewee? These may help:

* `Quickstart `_
* `Example twitter app `_
* `Using peewee interactively `_
* `Models and fields `_
* `Querying `_
* `Relationships and joins `_

Examples
--------

Defining models is similar to Django or SQLAlchemy:

.. code-block:: python

    from peewee import *
    import datetime


    db = SqliteDatabase('my_database.db')

    class BaseModel(Model):
        class Meta:
            database = db

    class User(BaseModel):
        username = CharField(unique=True)

    class Tweet(BaseModel):
        user = ForeignKeyField(User, backref='tweets')
        message = TextField()
        created_date = DateTimeField(default=datetime.datetime.now)
        is_published = BooleanField(default=True)

Connect to the database and create tables:

.. code-block:: python

    db.connect()
    db.create_tables([User, Tweet])

Create a few rows:

.. code-block:: python

    charlie = User.create(username='charlie')
    huey = User(username='huey')
    huey.save()

    # No need to set `is_published` or `created_date` since they
    # will just use the default values we specified.
    Tweet.create(user=charlie, message='My first tweet')

Queries are expressive and composable:

.. code-block:: python

    # A simple query selecting a user.
    User.get(User.username == 'charlie')

    # Get tweets created by one of several users.
    usernames = ['charlie', 'huey', 'mickey']
    users = User.select().where(User.username.in_(usernames))
    tweets = Tweet.select().where(Tweet.user.in_(users))

    # We could accomplish the same using a JOIN:
    tweets = (Tweet
              .select()
              .join(User)
              .where(User.username.in_(usernames)))

    # How many tweets were published today?
    tweets_today = (Tweet
                    .select()
                    .where(
                        (Tweet.created_date >= datetime.date.today()) &
                        (Tweet.is_published == True))
                    .count())

    # Paginate the user table and show me page 3 (users 41-60).
    User.select().order_by(User.username).paginate(3, 20)

    # Order users by the number of tweets they've created:
    tweet_ct = fn.Count(Tweet.id)
    users = (User
             .select(User, tweet_ct.alias('ct'))
             .join(Tweet, JOIN.LEFT_OUTER)
             .group_by(User)
             .order_by(tweet_ct.desc()))

    # Do an atomic update (for illustrative purposes only, imagine a simple
    # table for tracking a "count" associated with each URL). We don't want to
    # naively read the count and then save the incremented value in two
    # separate steps, since that is prone to race conditions.
    Counter.update(count=Counter.count + 1).where(Counter.url == request.url)

Check out the `example twitter app `_.
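Transactions are managed with ``atomic()``, which can be used as a context
manager or as a decorator. Here is a minimal sketch using the ``db`` and
models defined above (the ``create_tweet`` helper is purely illustrative):

.. code-block:: python

    # Run several writes in a single transaction; the transaction is
    # committed on success and rolled back if an exception is raised.
    with db.atomic():
        mickey = User.create(username='mickey')
        Tweet.create(user=mickey, message='Hello from inside a transaction.')

    # The same helper works as a decorator.
    @db.atomic()
    def create_tweet(user, message):
        return Tweet.create(user=user, message=message)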
Learning more
-------------

Check the `documentation `_ for more examples.

Specific question? Come hang out in the #peewee channel on irc.libera.chat, or post to the mailing list, http://groups.google.com/group/peewee-orm . If you would like to report a bug, `create a new issue `_ on GitHub.

Still want more info?
---------------------

.. image:: https://media.charlesleifer.com/blog/photos/wat.jpg

I've written a number of blog posts about building applications and web-services with peewee (and usually Flask). If you'd like to see some real-life applications that use peewee, the following resources may be useful:

* `Building a note-taking app with Flask and Peewee `_ as well as `Part 2 `_ and `Part 3 `_.
* `Analytics web service built with Flask and Peewee `_.
* `Personalized news digest (with a boolean query parser!) `_.
* `Structuring Flask apps with Peewee `_.
* `Creating a lastpass clone with Flask and Peewee `_.
* `Creating a bookmarking web-service that takes screenshots of your bookmarks `_.
* `Building a pastebin, wiki and a bookmarking service using Flask and Peewee `_.
* `Encrypted databases with Python and SQLCipher `_.
* `Dear Diary: An Encrypted, Command-Line Diary with Peewee `_.
* `Query Tree Structures in SQLite using Peewee and the Transitive Closure Extension `_.

peewee-3.17.7/TODO.rst000066400000000000000000000000121470346076600143700ustar00rootroot00000000000000todo
====

peewee-3.17.7/bench.py000066400000000000000000000071471470346076600145400ustar00rootroot00000000000000from peewee import *


db = SqliteDatabase(':memory:')
#db = PostgresqlDatabase('peewee_test', host='127.0.0.1', port=26257, user='root')
#db = PostgresqlDatabase('peewee_test', host='127.0.0.1', user='postgres')

class Base(Model):
    class Meta:
        database = db

class Register(Base):
    value = IntegerField()

class Collection(Base):
    name = TextField()

class Item(Base):
    collection = ForeignKeyField(Collection, backref='items')
    name = TextField()


import functools
import time

def timed(fn):
    @functools.wraps(fn)
    def inner(*args, **kwargs):
        times = []
        N = 10
        for i in range(N):
            start = time.perf_counter()
            fn(i, *args, **kwargs)
            times.append(time.perf_counter() - start)
        print('%0.3f ... %s' % (round(sum(times) / N, 3), fn.__name__))
    return inner


def populate_register(s, n):
    for i in range(s, n):
        Register.create(value=i)

def populate_collections(n, n_i):
    for i in range(n):
        c = Collection.create(name=str(i))
        for j in range(n_i):
            Item.create(collection=c, name=str(j))


@timed
def insert(i):
    with db.atomic():
        populate_register((i * 1000), (i + 1) * 1000)

@timed
def batch_insert(i):
    it = range(i * 1000, (i + 1) * 1000)
    for i in db.batch_commit(it, 100):
        Register.insert(value=i).execute()

@timed
def bulk_insert(i):
    with db.atomic():
        for i in range(i * 1000, (i + 1) * 1000, 100):
            data = [(j,) for j in range(i, i + 100)]
            Register.insert_many(data, fields=[Register.value]).execute()

@timed
def bulk_create(i):
    with db.atomic():
        data = [Register(value=i) for i in range(i * 1000, (i + 1) * 1000)]
        Register.bulk_create(data, batch_size=100)

@timed
def select(i):
    query = Register.select()
    for row in query:
        pass

@timed
def select_related_dbapi_raw(i):
    query = Item.select(Item, Collection).join(Collection)
    cursor = db.execute(query)
    for row in cursor:
        pass

@timed
def insert_related(i):
    with db.atomic():
        populate_collections(30, 60)

@timed
def select_related(i):
    query = Item.select(Item, Collection).join(Collection)
    for item in query:
        pass

@timed
def select_related_left(i):
    query = Collection.select(Collection, Item).join(Item, JOIN.LEFT_OUTER)
    for collection in query:
        pass

@timed
def select_related_dicts(i):
    query = Item.select(Item, Collection).join(Collection).dicts()
    for row in query:
        pass

@timed
def select_related_objects(i):
    query = Item.select(Item, Collection).join(Collection).objects()
    for item in query:
        pass

@timed
def select_prefetch(i):
    query = prefetch(Collection.select(), Item)
    for c in query:
        for i in c.items:
            pass

@timed
def select_prefetch_join(i):
    query = prefetch(Collection.select(), Item, prefetch_type=PREFETCH_TYPE.JOIN)
    for c in query:
        for i in c.items:
            pass


if __name__ == '__main__':
    db.create_tables([Register, Collection, Item])
    insert()
    insert_related()
    Register.delete().execute()
    batch_insert()
    assert Register.select().count() == 10000
    Register.delete().execute()
    bulk_insert()
    assert Register.select().count() == 10000
    Register.delete().execute()
    bulk_create()
    assert Register.select().count() == 10000
    select()
    select_related()
    select_related_left()
    select_related_objects()
    select_related_dicts()
    select_related_dbapi_raw()
    select_prefetch()
    select_prefetch_join()
    db.drop_tables([Register, Collection, Item])

peewee-3.17.7/docs/000077500000000000000000000000001470346076600140305ustar00rootroot00000000000000peewee-3.17.7/docs/Makefile000066400000000000000000000107561470346076600155010ustar00rootroot00000000000000# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build

# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest help: @echo "Please use \`make <target>' where <target> is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/peewee.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/peewee.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/peewee" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/peewee" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." make -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." peewee-3.17.7/docs/_static/000077500000000000000000000000001470346076600154565ustar00rootroot00000000000000peewee-3.17.7/docs/_static/peewee-white.png000066400000000000000000000402311470346076600205540ustar00rootroot00000000000000
[binary image data omitted: docs/_static/peewee-white.png]
á3½µ¥H’$É|<©D‚`0@±\ÆqlTM{âFpC×Étw362Âaö˜½=6wwèJ%ñû}µÃ0ˆÇb˜žÎŒÍ±á!ú{z0=žû‚\%“îââ¹³f³f³ìîÐh6èJ&˜šdxpÛ¶ÑuŸ×‹×4iµZøL/‘H¯ÏÇ+/]`jlô3WƒQ…h$Âw_y™F³‰ßçûJ«Æ´Úms‡äòÇDBa2]Ýò¼õþÛä 9^¹ð2ç¦f …B¨Š BI’žÏíÚªª¢û‡‡aÛÑH”‘~¢‘ðW®H>iMPU•ƒÃκ~¿Ÿ‘ü'í ôÄjª¢R( ‡B¼zñ"Ãx<W†A0ımjõñX”3S“DÂa ÃÀçõv ×4@ kzçØªŠe[ôtwñÚ¥Ktwu}Á5 ZíùRžf«„3{Öq¶÷wøï?ÿ~ýÞo±,‹h8ʵ[ñ³ßüœ»Kód³DÃQ’±Ã#W¥‘$I†à³ôI€(ŠÂþÁ!¹B¥3q$™|¢E°UM#x²ÕQ©TêìK84D8øè뉪ŠBÀ璘§‡3S“Œð?¢ŠÒÖMÄcÄc1&FFNÁVNú®Öª,®-±³¿‹ÏëÇkziµ›lïoQ®–èJ&‰EbŸ{Ÿ¯R«ðÁGWøÿö?ØÜ¹Çpÿ ‘ð£ÍžBPª–yïÚûüú½·¨ÔªÌŒOá —_¿ÿ+›kTkU ŵzî®4Éx]“÷%Iúæ{®?©¼¦Éôø8ÅR™ö;ïP(¹q÷.Ý]IfÆÇðÜ7ôøX!¨(„C!^yé%zº»Ñux4ò¥?ç A£Ù Ýnã5M|^/çÇü¼ÐÑuL:M4yh‹z³ÁÅ»üÛÿD«ÝâoÿòßòÒìy²‡|pýwììï’/c£ƒ#x Ïï“Ëq!ϵ›²¼¾LÐ@yŒ*Ùqv÷w¹òñ5r…<#ã  ±w¸ÏÑqÛ±Aj½Æ¥»ôfzèI÷éê–Õ $I2Ÿu5DxõÒE ]çýk‘ËfÙØÚf¨¿ï+‡ t†[#‘0^¯I¹Z¦\-ƒ~¯ïs‡«µ*7ço±»¿ÇØÐ3Ó|G+É5`àÁ?kYÛ»ÛüêÝ·¸q÷~¿½£}¦›“Ôur…cö÷i[m"¡á`ˆL:óÀ=¹f«ÅúÖKë+(( õv‚ðªV«1¿ºÈÆÖšª292Î@Ï~_€±¡1 Å"åjG8KEnÍßæ•ó—IDã_¸1°$I’ Á§„¿éÉqB¡ ;»ûôeºhøÊA¨(4[ ÞÿèwlîÜãìô,—Ï^$zx(Ñqööxóí_±º¹Æ¹™³D#1Fú‡¾ÒlU!¥J‰+7®qkþ6Ív“xüÓ­¡|^áP§é>W8æêkL ‹Äðûü§Ç(–‹Ü]š£T.‘îJ39<~úø£T¶ùR»‹s”ªºSif'ÎŒ'ˆÇâüùJ¥ZáöÂÍŽã’˳¾µÉôø´ AI’¾ñžëi|BêsË \¹qT‡—/gjl ïSúv‡£\–+_ãçoÿ’ŵeÚÖÃK©ÕêuVYZ[bç`—…µ%¶v·hµ¿Ú® Žë°tÀõ;uPCÁScSL ôéËôòK¯ÑÛ݃¢(d¹³4G¥Z9=FgŸÄ}æWÀÔè$ý½ý ™~žf«ÉÚ½uÖî­£* cCcŒ ãõz øüœŸâ{—_'O iŸ,åÖ"_,ÈõI%I’!ø¬Y–Åêæ*ÿó—ÿÄßÿìð¿ù)KË4­æS[Åïó320Œ¡é,­.síÖGä Ç,¬í —|1Ï¥9 å"Šª k:ÆÉ,Ó¯¢Ùj±¹³ÅÞá>Á@ßo¼ò=†ûñx<ÄÂQ¾{ùu^¿ôÑp„z³ÁæÎåjWt¶€ªÖª,¬,rxtH0äÌÄ Ñpô‘ΩSE–¸»4Çq!O8æìÔâѪ¢ž,ÒáÌÄ =éžÓI9®+°¬6B®J#I’ ÁgÇ.GÇYÞ¾ò×ïÜ`÷hŸ›ó·ùÙÛorgé.Ívó©ü¯×ÇèÐ0#C´Û-æWØÚÛ~ l¶Z¬ßÛ`im™F³‰Ïëcd`ˆþž~LÏW«H[­ÙNÏ`(âÂÌYfƧ:3T5M£'á•ó—éËô¢* •Z…F³‰p;;Mdó9î,tž‹‘!&†Çð{}ööÙ;Ücau‰¶Õ¦/ÓÇÔèûî[jšF4!“Jcz<'_ ‰*'ÅH’$CðÙi¶Z,®-ññ›TëU4]ò-ªÕ*ívá>JDUÑøÉ$?‡ÙC–6V¨Ô«!NïÝu*¦ã“¥Ê"œŸ!}å’ŽãP«×Âí,ì=<ñнHC7èÍô0Ô7ˆé1q]÷´ ¬7ê,o¬°±½‰×ôrvj–d<ùÈ•é'UäþÑAgîØ$édšúàýMMÓ1MïédŸé%•H>Ѥ$I’$‚ð!½¸ºD®pŒáñ`z½Ä"1.Î^`zlê+­ŠòyþScSô¤3Ôê5×–8:Îâ ˶ÙÝßc~u‘Z£Žišôeú˜'à|õFUNšÎUL‡h8úнŽãtî“O“~ÂsÐuƒH(Œ®i4MJ•ÒC»Y¸®K©Ræ wˆ+\¢‘(~¯˶¹·»ÅÒê2ª¢0=>Eoºç‘Øë:Ëë+lîÜCÕT¦F'ê|h¶§mÛå²ììÑjµˆEb\8sž¾§2;W’$I†àç„`³Õ¤Pîlëñz‰„ÃLŒŒÓÿ >€E! 
sfr†X$J¾gne|±@®cnyžrµŒªª¤ª) =Q³¸ÇðN¥ñ™>J•7çn‘Ígq]÷ô9¨Ô*Ü]šgec!™®n¼^/•Z…¹åy²ù,±hŒ³“gˆ>Æ 1ÅR§­¢X*Æ87u–X$ö@ÿ¡+\ ¥·nŸ™ÎNžáõ‹¯‹Äd£¼$I2Ÿmòé6JÁ@î“ÐxÀ^¯ÉÈÀ0#ã!XÝXeic™¹å6·ïÑj·ƒŒÑŸé{ä6„ÏAƒîdšT2E­QçÚÍøèöu ¥®piµ[,­­ðî‡ïs\Ì …ìÀkšäòÇ̯,à8£C#Œ Œà5mx¸mµÙÞßayc˜ŸbzbŠÀ}½…BªÕ*7æoñ»ëWi4›LŒNð£ïÿ1cƒ#_i/GI’$‚^š¡ë:~oçƒÙu]tCÇkš¨ê³©@TE%sfb†` H6Ÿåw_åÊÇW)” (ŠB"çÜä,±Hô‰ƒXÓ4R‰$Sc“ýö÷ùÕ»¿eae‘V«Åq!Ïo?x›ù¥yEa|xŒñ¡14MãèøˆìqŽP(Äùé³Äc‚ŽeYÊ%¼/3ãSüðõIu?0¡Æqvvyûwï°w´ÏèÐýÇÉ¥³/d(IÒsã¹\1FU‚ }™žN_`£ŽmÙ4ZMœ“áÂgÁï÷35:Á`ïs+óܸ{“V«E­QÇçó120Âèà>ï“O Q…H8Ê+.±º¹Ê͹[lïm³¾³ÉôøÕZ•Í-­&ý½ý¼zá2=]TUESU{ˆ†#ÌNžy¬ :‡©Ñ ü^/@ñ¡1¼¿7ÉHQ:}á`ˆ™ñ)þèõðÊ…—å0¨$I2¿.á`ˆÙÉ3ܘ¿ÅQ>G±Tbçp—J­JÈ|&›»jªvºRK¡TธÇ0 ªçùä Ã`rd‚ÿí¯L†û¹9U(8¶ólËç“åÂÂÁ­VÛ¶PÕNö´†:só7YZ™gxhŒ³g^"ºÑnµÌõëï³´<ÏÌÔY^{í¤’é§Z­påê;\ýð=LÓäOôcνˆ¡{Ø;ØáW¿ùgÖÖ–p\!–eÑß7H¦»X싇u+•W®½ÃÕkïrtt(X–éñòê«oÆPC£’$É|ve¬¢tv}!•HqéìK(Šzº³B­V¥Þ¨á5½øü§¾Ûy£Qgyežýý|>?“3tueÐõ'o¨7j,-Ïñáõ÷ÙØXFS5νŒßß™[®Y\¼ÃâòÇÇGxL“×_û!ÑHPÂ¥Z­°¼2ÇÆæ ‘p”F£Žp­V“{¬­-rp¸wZyªªJ½žúÒ0B`;6µj•V«…¢*Ø–ÍÞþ6W?|®t†33¾òº©’$I2§"Ô â‘ÑP@UTjµ wçn²´Ÿït†ç¾0ªŽ¦iä 9r¹CÊ•2Á@ÞÞ¼^†áAÓu޳äó9~ŸŸþþa|'íÕZ……Å;d³„B.œ»LOO?8ʰ°x›z½†a!ðx< Žrîì%"_Òë¨i¡PÓô²°K¹\ÄqlÛFQF‡ÇO†T5ù·L’$‚_×u9<Úçíy“¹ù›Ô!;Wz{úéêÊ`Og‡UU ‡#tueðxLJ¥<–Õ"΋v _Èñ›ßü”yç—í‘Ju G¿4TU%vBf—lîUU›"Žb¡``w›r¹„¢¨ŒŽLÆÑ4f«ÁöÎ&ÕZ…ÁÁ.]|èÉ}ÅZ­ÂÞþ†Ç¤·w€`0D&ÓÏ«/Ñ‘ Ì/™]ûI›†×ë£\)±°C³Ùì,\  ŽÑî•«ÇH’ô¦¿hÔj5Y__fiyŽF³³«C d°”ÞžÁ§Ú:Ñ =™>üßýF†Ç±¬6Éd7êÉph£Qg{gƒ­í Ê•"½½ý¤RÝD±/=¶Ïçgtx’3388Ü#—;"›;"Ó݇iz ‡£LOcqù.…BžBñ˜ã|–Át]'rþüeÒé ==ý¤’é“6 …¾¾AþâÏþ˶ ‡ÂX–…Çc’N÷ð:ŸO&ß_*ŠJ(fld‚7¯R.—pÇq¨×k8Ž-ÿ†I’$CðëT«UYßX¦X* ëûj}½\¸ð étÏ3žÓ4x™Yžà({@»Ý¤P8¦Ýn¾dö¤¢¨ƒaú‡ðû´Z *å"¶ccžT‹ÑHŒ¾aæçocµÛÔjU§sÐçõ33uŽñ±i<¦Ç{zÜp(ÊÌÌMÓOÝ¿7Ðu]jõ*•JUQ…"øýŸ.fb±$@N{¹³¼$I2¿VB¸Ôë5÷p ŸÏ$32<Î@ßÐ3ÝèUQÔÏfýd‰eµ)–ŽžÀçõÓ™ÁÙiK¨Õ*§³‡à'TUÁôxÑ4 Û²°‹û[;;+ÖÄN'³ÜOUU|>?Ÿµˆ›ªª_:{Óu޳ܼý‹‹wÐ ƒK/½Îì™ øO–bSè,¡¦k•e,×=ý½ò~ $I2¿ÖËjS©–Ïç%ŽÐ“é?YØùëoඬ6¥rMÓš`tdïÉÂß¶m³³{7¯Òl59{æ%&&fØÖvjõêé„MÓè¿ÂŲ-@7 BÁÈSY$@A©8,nŒèIDAT\äê‡ïñλ¿âàp¯×‡®é ŽœNüq‡jµB½Q¢EH$»žÚ½WI’$‚ñáí !NªÓ$=µ¡y6‹y®^}—¥å»t¥3t§{H&RèºN½Qc~þ&ïÿîmõÕj™d2M¦»÷´)¾^«²³{V«A(Â !(—KìîÞ£mµI¥ÒÄãɧҧèº.Çùwî~ÌÁÑ.–eá÷0M/šúéìÖf³ÁÖÎ¥R!\‚Á£#“¤]ÏlÅI’¤§å…ZÛJQtÝ x2±ã“ÊD—?ÄêpB€@Ðj7(ólo¯³°t“J¥Ü¹GgsÚV³A±tÌÎÎ&ùBÛ¶N«Èݽ-VVhµZDc â±ÔiKC½^c}c™å•9‚ÁÁ¢‘ØS[²ÌР¡±hœžL/¼ÊåK¯‡OªT›ƒƒ]—îvîª*Ýé^fÏ\8…*I’$+Á¯1Á ÝÝ}¬®-Ñh4i4KyÚVÓô}mç"Dge–b±€ªt&›´[- …cZ­&>ŸŸ±ÑiÖÆ–¹·å!ObšæÉ0£ÍÁá߸ÂÞÞ6^¯¡ÁQâñ$š¦Ÿ6¦_ûè}ŽŽI$SŒÍ†žJø¨ªJ2ÙÅoüÃÃcøýF‡'I´˜8®ÃÑÑ}ü«k‹Ø¶E*ÕÍ¥‹¯322yÒ°/I’$Cðk „dnþ&¹ãC …cv÷î16:ƒß|`%—gÉqvw·øõoþ™ù…[X–E !I`žìϧëCC£üùŸþkööwèJ¥Ét÷¡ë:år‰Ûw>ææíh¶ŒŽL23}žp¨ÓÄ^©–¹qó*++ xL“33:÷ŸRÐ+Š‚ßçgb|†¡Q4MÅëõNvi5›¬­-róö‡T«ºRÝ\¾ô^¹ü]b1YJ’$CðÂ4½ŒM3{æ%®^{‡|¾Èææ*ÝéÛa"‘¯ïÚ²-êõ®p‰D¢ 39q–€ÿÓj­S N180Š®ë§Kž¹Â¥Õn"„KWW†K_gxh¼³£„Ø–EµZÁcš ŽòÊåï’L¤žêî ŠÒ™AúY³HEÅ4}$â]Ä¢ ÎξĹ³/“éî}êk´J’$=+/ÜŠ1Š¢`š&~¿ŸJµBµZ¡Ùj⺂žLÿ#­ÖòtÎ ݃iz ‚ŒMóÊåï1>>C < AMÓñx<läªÚéf¸ãcÓ\¾ôÓí>YçSUU¢Ñ/¼ÊØèÔS]õKß8ªF8a``˜³³—˜Ÿ9ð#I’ôÜdÆó¼ŸàçéÜk°µ½ÉÊê‡GûÄc ^}å ÒéÌC;¥?+®ëÒhÔi4ꨚŠßxä úäžb¹RÂcx…ÂÌpBÐhÔiµ›ø|~LÏW@!â“…´£’ììCèv¢ü¾ {%I’d~‚жíNP´hšN0ÆÐõ/ü w]—F½L¯Mpầ³NÕ“ÜïÿÙv» B Æ«¯!°ÚmÚí6š¦az½þ¬ãP¯×±Úm¼>^¯EUNgµ:ŽCµ\îl¬àóù+°>ïZëµǹµJŸßO:“Áç÷Ë@“$I†à ˆ®KµR%œÃu]¼^`_ÀÿÀ¤ã8î°pç6íf‹¡ÑQú† †B§áÇÙ,»[[¨ªJWw†D*‰áñ „ X(²º¸@½^§§·¾ÁÓ°j5›¬-/³¾ºJ$ajv–Dª3¼)„ ˜ÏsýêUŽ›œäÌùó‚ÁûÎkŸ÷ß~›R±ÈÅ—_aæÜY¼¾'›Ө׹uý:¿þùÏÙÞܤw`€÷wÇäÌ šÖ”$é[à[ñIgYk«üúg?ãø(KWW£ããÌ\¸@ïà¦×‹ëºä³YÞùͯyëoÒªÕ™œžæÏÿÍ¿æÌ… x}>*å2ï¿ý6oý⸎åW_å/þæoÈôõÑnµX¼{—¿ÿ¿ÿ¹£#fÏãßüûÿÀðøš¢pœÍñÖ/ÞäÊ{ï‘H&øÛ¿ûO\~ý5ü¶e±±¶ÆÏþáØßÙá;?øÃç!Øj6Y¸{—7ÿ韱­6ÉTбɉ'Áz­Æü»||õG‡4êuJ¥Žë"<“$I†à BÓu‘þ@€Û;7X[\dynžƒ½}~ôã¿¢x˜V³É›7yûÍ7Y[ZBSUÁ ÕJ¥³ ã=:âúÕ«,ÍÏ£* ÉTF£³rK©Ä7X^X Q«‘Nwc ›¶[-¶67¹yý:»[[xN¶úd8´Z©0wëë++Ø'»9|2ÁDA©XdþÖ-Žöéêî&•Nãñ>ùn®ƒÕnã:.• _K’$Cð…£ª*=}}üÅL4gîæMÊÅ"GìoïJ§ÉqåÝwÙÚÜ!ˆ'“œ½ô#xL“z½Îêâ"++4ëu’]] ŽŽ‰Dq‡ƒ½}îÜ¡R.‰D'‘J¢¨*•r™ù;·Ébš†ÇÇèÄ4ÍÓp½{ëÕJ…Þþ¦fÏœV¶e±»µÅâü<®ë269ÉÀÐÓY \×uüÁ †ÇÀ4M"ÑÎy?P’$‚/EQð˜&ýÃÃDâq^ýþ÷( 
×%É`;k++,ÌÍѬ7‚ŒONòúoîéAQŠ…s·nQ,Ð ƒt&Ã̹sÃ!ªÕ*‹ssìlÝ!Hu¥9sá<áH!‡Ìß¹CµR!™J2{þ‰T EU©–Ë,/,po}Ã0˜ž=ÃðØØiC}¥\fáî]öö †BÌž¿@4ÿÒ~@!®ëÒjµ®ûéMÃ4MTUÅcšôõ÷31=M£ÑàâË/“Îd¾¶$I’d~4M#‰Fq®‹ªióy6×Ö(‹èšF<gæìYú‡†0 ƒV³ÉÖÆKsóÔ«5B‘“3g:{<ìíì0wë&åR‰@ Èøô4㣘^/ÕJå4ä\Ç¡¯€ñé)Á®ëRÌç¹{ó¥|h,Æ™sç‰%¨ªŠë8ä²YænߦQ¯3þÒ&¦§ð_z­V»Íö½{lmlÐl¶@tZ B‘33$ ¼^/ç.^¤»·Wâ±Øéï–$I’!ø‚RUõzÛ¶Éçr¸ŽƒÇðFéÄï÷ŸVcó·ostx€¢’çÏÅh5›¬,.²¾²B»Õ¢«+Í™óçˆÆãä¹{ó&¥BP8ÌÔÙ³¤3TM£Ùhpo}ƒå…Çapd„‰™éÓ«Õj,/,°¹¶†išÌœ=G²«ë¡Jí³v}/óæ?ý×Þÿ€z­Öù3 ôôõñÿó&ôòËx½^ÂÑ(¡HäôçåP¨$I2¿eÇ¡Õlâ8.]Ãï÷ EÐtÇq8ØßçîÍ[TJe¼>ÃccŒNL`z½0û6ÇÙ,¦i282ÂøT§Zk5›l¬¬°²°€Õn32:Êìùó§¡Ó¹Wx‡ìá`é³³¤Òi4M;© ܽ٩0û‡˜:;K0:=ïOúüŠù<®Dc1‚Á ŠªbYÕr™V³ë:Ÿ»‹† >I’d~Ë©ŠrÒ'NVA8¶‚ZµÊÒÜ<;[[8¶M¬»›™óçH¤R8¶Íöæ&KóÞÀT*Åô¹³¤ÒiTU¥X(p÷Ö-ò¹>¿Ÿñéi†;“ZÇ9¹Wx›F½ÁèÄ8S³³NB®Õjqoc¥ùyEaêÌú0î›Ól4¸ýñÇü˯~E»mñúßçõ7Þ ‰ÅxãG?btr×qJgCÞ®®.Ƨ¦0=rÃ[I’$‚€áñO$Ðuv£I½V#ŸÍÒj¶ÈçrÌß¾E¹XÄ0 2}½LÎÌ(—JI+û{(ŠJ:ÓÃä™3C!lËbw—…;w©×jdz{™9wŽH4Šz2cty¾®¦ÇÃô™YúOf} !N‡`³Y¢±g_º@4{ j«”Ë||í×Þÿ˶ðx Μ?O0" rþÒ%fÎ{`¸Ô0:3AyßO’$I† €Ïïght”P8ÂA¥J±P`se…á‰qîmn°º¸D«Ù$–H093C¦·EU9:8`îÖ-ÊÅÁ`‰éiúúûÑ ƒb¡ÀÂ;ìî¢k:CccŒMv†I…ërœÍž±v¥ÓÌœ?G$Ú 9Û²8ÜÛcñîŽí061ÉÈø8¾ßkŽWCÓñ˜&ÓC0þt©6EÁç÷ã“/¯$I’ Á/ AŸñ©)&f¦Ée9γ²°H4‘`{g›r©„išdzz8srO¯Ùh°±ºÊÞö®ëÐÕföÂy"±¥Bå…êõ±d‚ÙóçI¦»P5F½ÎÖú:kËK(ªÂØäc““§!W¯×Y[Yá`o—p$̹‹I$SUoáH„W¿ÿ}tOgÒ‹¯¾z:!G’$I’!øHTM£«»›×ßxƒíÍM6VWÙØXÇ_8ÈÄÌ4ºn0}æ £““˜¦ÙY[×%Ù•bjv–ñéi|>îI^$aljŠþÎ]ºDð¤Þu,Ç!ÕÕE²«‹K¯½ÖYGT넜p]„ëÒÛ?@²«‹Ù ç †‚·éõ2uv–þ¡A‚áðSi¢—$Iú¶øV, ý(„ë’=<â­7Á[o¾I³Vgxt”7þôG ŒŒ`ü?±DÃ0°m›r±H±PÀ¶,Bá0‰T ÙÙô¶Q¯“=:¢^«H¥Óx}¾Ó…¸‹ù<¹£#E!•NŽFO[,Ëâ8›¥X(à÷ûIuwãõzå,NI’$‚ÏŽã8䎎X[Z¢V«‘J§%‰tvÉåÁ^¼Ó§NøŒV÷d¥Êâ¾u:?ïgOŽ+›×%I’d~ma«ÝÆut]Ç0 ÄI@ ºþà‚BÇÁC{>ÒïûŒF÷Çy\’$I’!øÌØ–M.—#{˜C× ººSDc4½ÓÐ^­TÉå°m—D2A4FÓ¿|íÍÎÎñ-JÅ2A8Æëó>° o»Õ¦\.£i:ápÝ·p%I’ž&ù©ú%Êå ï½ý>‹sKD#1fÏÏrþâ,‘x„f³ÉÝÛw¹öÁG TΞ›åÂËçˆ%¢_:„iÛ6[›Û\yï ŽíòÒå—˜˜ÃçïÜ7´,‹õM>ºò¡P˜Ë¯]¢»'-·–$I’!ø5V‚¶M±Pdowìá1…P(Èô¹IlÛ¦P(²»³K­RDZA?³/!ôá¦eYä²9VWÖ(J4›-|~/ÃcÃx<í¶Åîö. 
s‹¸NçÞâw𩸕$IzJ´Ÿüä'?‘OÃçSTÇqØß; ŸÏãZIª+I(B×5޳ÇR¯ÕÑTƒ®t’P8tÚòðYT¥óØqö˜½=Š…¦ÇKw&?àCQÀ¶-ØÝÙ¥T(OÄI¤’C¾0’$I2¿†RY×ñù¼T«Uvwvi6[˜¦x"N2Ý B€Ý=ÊÅ2Š¢EIw§ð˜Ÿß³§ª*>¿Ãcp°ÀáÁ¶e“îNOÆñx<øý~4Meow\îMÕèì#’ßJ’$=•ÏøoûP¬ÖÙ<ÈQ¬Õ>÷ÏØ–EÛëC÷y©åKçYZÙ$=ØC  mxñ„BˆÃùBž…åMŒd‚™ñNûg?0˜NM¦ˆõô°½½ËþÁWnÜæÈqðü G!êâà0ÇÜÂfª‹ž‘A’±(CÝI¢Aÿ_ßg‰ u'¿ðüŸäñGýýòúêã?êõSÏ_¾þÏÇõ}|ëg‡Þ\Ýâ¿þò]n®nѽ~ø :-¸¨Ê.½F4Ç4»8Ìzh´ºÞð³~û.ÑJ…ßG4ÒÇaÎG½ÙùáîE?ÿø¸Ä_r8^\Äk;øÝÔê Úmãäq…2ж‹ª8˜ž~šÍ¶ûé™ÿç¿ÿ—¯|}â‹Îÿi<þeäõ½û=þ—]ß7ýüåûû›{}ßßú.ìb­ÆÍÕ-Þ¾µð™òA "Ðp]ãÚP pgs—…ý â ۱ؿï ÂòQ‘\½+\«ã~ú{=(BGAàº-\ܧv}úEá‹~þI–¯Ÿ¼¾'?þ“üþoÂùË÷÷7÷úd>Eé,ÜÂ}MëlÛÁuA|ZÒ•^Ú¶ý1Õ BË„ÒÙûO÷èŠ ŠŠ8 ey7P’$I†à×¢SÐ а¸¸Z-ëž#ÏÉŸq]e9§K¦=RÐ"8ÙÒ!Ô“*ò¾¨.Žë"„‚êƒI’$I_™ì|¤jК@ UUB£ÕVqœN©ŽKXp¶­„Õ£ j·1¢¨¸Â@í:ÅBUìN¥)4d-(I’$CðëªAX¨JU³QUÛ6h· „«¢àâ±[$tUU@ñв<8ΣÝÂlµÐUðà ÝÀªRCU]¡ã8®¼$I’ Á¯……¢QÕêIÈùh¶ýØ®~R¥5ñÕ«èšØvËñ Ä—UkpP¨ámÕqAÇñáºÊɱT¥Ž¦VGq… BV‚’$I2Ÿ±Îd˜&ªZFQ„kb[!lÇëª(8¨J‰kÑÒ5„Âv¸®Áý÷í.Œ tþcóþc+ 4QÉ£ØL'‚Ÿü¬‹JU9¥®‰aàëß4÷³Îÿqÿ¦{ѯO’ïoùþ–!ø•@Q À‹p \‚¸N!ô“ˆìLŒÉdº ÄÂŒLÏìíF7 bÁàͦóÝ›MºAio›Ÿÿô—ìåZlÕ,j€O†B¥Seºn½óÿD!Bˆ?À\¦ÿýϾÿÐùÿ~3íç=~síÿçý‡oôëü¢_Ÿ$ßßòýý- A!®ëâØªª>´ÑÛ~î1<8Nú$uÚ}Už†#büÇÿôçÄã1’©$ÓóÐ’f§ß´îãØÛ>ñq–ëKìVyÉsßù ¥¾„P„ëÆxøCÌ ý¬óœÇŸ›oÊ/èõIòý-ßßß²B`Y¥R…b¡L½V' ÓÓ“Æczp—z½N­Z'ŸÍ£ ª(÷Õ'÷ëNª0K§£ÏAQµ“vEx™œž@×õÇÚ^ÕTzúz;?Ë?®ìRw•N ‚ðƒÒÝi›À@¶FH’$ÉüR–e±¶¶Éõo²µ¹‡ë™³“D"!TMãè(ËÝ; ¬,op˜/h·1N7³u€:¨u ]Bض×m€Ò@×UtÕ‡e{AtØ=žÇ¿O§( ‡@(„«ê¸<\‘v‚Ð'ߥ’$I2½ ,Ë\ùà#~÷þ5ªÕ:‰Dœ §Q5jµÊµ+óÛ·Þ¥P( G°…zÒ¨îà¸%,çC·pô Š0q\Ë>@ÕxA £`*ÏváUYùI’$=K/\Ùm;ìí0?¿ÈQ6‡m[Dca††ú1MÙ£·nÝewwf³‰?䣥騮@ÐÆrЏ¢‚å´°Ú–­âºm,»L۪вêØvûdi3I’$I†à7H­Zcyy•ìQ×uñûýŒÐIcY««loïbY6¡P€®Þ ަ¡( D!ja£*àG9mIˆ“¥Ðú#ôJ’$Ißt/Ôp¨ã¸äŽó,/­R­ÖPU•x"ÆÄä(þ€ŸüqžÅ…eÊ¥2š¦‘L%H÷f`ópÝ ‚Š¢ ª&ªBAGQ TÕƒ¢¸(˜arÿPåÍÕ­'Ú¯ë›îI®/|ãû^ôë“äû[¾¿¿%!ب×Y_Ûdg{«m b`°lnl±ººA»Õ&066L4GÐiFpD„ƒ®yP• Šb‚ÐP] ?ˆ  ¢Ÿ‡>é~l }³Ÿ×'½¾ÿãÿúñC}H_æëÜ”óE¿>I¾¿åûû[‚BJ¥2ós‹ä U!‹25=N$¦Z­±´´J±XFU5b±“c´ü>WV\l§ÖÙ-Bñ£DP†–¥Ó+¨ =p?ð“ý¶þCäÕÏý&öE?ß”Ÿäú¾é}H/úõIòý-ßߟ'ØjµÙÚÚauuƒF½išôöõ04<€¡ëdY[Û¤Ùhàõzéèeh¨M7À¶‰*@UÕ; âCUôÓ§IQMÅ‹¢tV‹‘$I’d~cªÀjµÊ ǹ<ÊÿjïÞ~›Hï0Žçd{|Œ8äà`;„@mX@‚VmµUÕ­ÚJ•ú_ô«´Û½èM»• ZuÅE[±] Á9r‚;>{fzv»H@º ”8û|¤\YÍoÞQÍÌû{_à •Jrzr‚þþ4f“ryõ-<Ï'™J099A_:…ïy˜íI|<,Ëyö-0tØb2""¢|÷<ÏgkóñÁS`£IÈ ‘Í01Q$âºìl?áÞÝûT*b††²”Jy¢Q—V£Ý¬ãš†eb #Ž¥»CDD!xôíï×(ߟgkó†a’ÉÌÄóº,.,³ººF»Ý¡/•äÔéÙÁ ಻‹Ñ¬¶ l3„iÆ1y~ö§ˆˆO=?1&êõkë›X¦I¡0ƹé3\¹z‰T_нÝ=ææ©×d2i¦ÎNráÂ9ÉõzƒÍµM¬NÃ41­¦‘ À>ØêHDD‚G]8ìPÏÓ—J’Ï1>Q`` ŸPÈ¡ëyØŽMi¢@©Tàò•òùŽãP­V©Uª`<õ F~Œ Æ·{úºûu÷>œõ)¦—¦Xõúë;êÇ×ø«>…à+|5 æÚõ«xžGÄຑ¯·3ÊdÒüüÃèvº$S ’É$Žcc®ë2ZÈÓº·ÌÜúcAßþ’¼Î~]/ûý8Ýd‡õ)æ÷ÿNõ½Íó+íãëþV} ÁW‚€n·‹i›DÜ0¡Ð÷ó ‚€PÈattÃ0¾þûŠëFÈÆè¦úØyøp¾Ó9¼î~]ǽ§×û${½¾·}~Ç}|uïúz6ƒ  Óíòtw—Õµ5*•*¹‘ c eû¾O½Ñ`wo €t_ ×u±¾‚¦iŠ„Á²ð4FDä{¦'C0š­‹Ë+ܾs‡Å•X¶†ÁÈЖeòxg‡Ù¹9fç°‡k—ßçÔxË45ê""Ò»!èû>ëüåÆ îÏ/Ðõ}F‡‡I§R8ŽÍΓ'ÜüÛçܾs‡Z½IntË4¿±{¼ˆˆHö Ö æ™_\äéî.fdè¾ï3·¸È?nßfeõ­V“áì™t–mkÄED¤wC0v÷ö(Ïϳ[©`ñXŒR¡@"§ZÝg¶\fûÉR‰¥b‘D<®O~""òœž{4j¶Z¬—#¿ðxÇ}¿¬7QßQîƒT}ï¾>]Õ×ËŒ èe¢ƒ àñö6üÓŸùìÖ-ê™~~ö“ó£ë× ‚€Ooü•Ooܤº¿Ïð‰!>üé\»ü>ñxü…ÇÜݯ¿pSÉÿåæì…>½×­ßø»üþ¦®¯ê{7õéú¨¾ãðÿ±çB°Óép·\æ£ÿÀÂò2¶ípª4Îoý+J…"××ùè“Oø÷ì,¶ípîÌ~óË_0žÏcYZ[DDž×Sßkõ:ó ‹ßûˆÅ¢”ŠENd³tº–—XßܤëyDcQŠ…<™ ¦Ú"DD¤—CÐ÷}¶wv(/,P«×1M“¾TЉñ"±XŒ½J…ûóóT* JQ*ˆE£Ï­#""Òs!Xo4XZ]e}c“N·K8æd.ÇØÈ××YY}@£ÙÄu]Šù“Œ á8ŽFYDDz7ƒ `o¯ÂìÜ<{Ï6Æ=‘ÍröôiRÉ$ûµåùùƒžAÓ¤?“arb‚D"®§@y©žh‘h·ÛÏžôV1Lƒbî$W/]âÜÔÛfië KË´;²ýýLOM1Q,¾´-BDD¤gB°ëy´ÚmR©$#ÃÃ\™™áÌ©S¤Ó}4êu6¶¶¨Õë  0sñ"×®^!Û߯ 1""òJG¾E"ºžG¥Ze{g‡p(L¶¿ŸhÔÅ0 ê÷ÊeÊsódÒi¦ÏN1˜Íâh‰4éÕ hµZTªUÚÝ6ñXœhÄŶìçöô}ŸF³I³Ù"rp£Qí!""½‚žçQ­UYXYâ‹{_²ßØgzò—Ïψ%4j""òF¹w†Ýn—Ç›|q÷_üý˲±½…cÛ¤Si.LN“ˆiÐDD䘆`¥Záæç7¹yë3v÷+8á17†˜†^sŠˆÈ1Áf«ÉÊÚ*›¶À40M;n“ˆ%´þ§ˆˆ¼QGîÑÊ÷}<ÏÃ|LÓ$æF)Sߟˆˆó'AÓ²ˆº1±±xŒ™é÷øá•0~² %ÐDDäx‡`<ãâÔyÜp„ìÀ —οGnd”Òh""òF¹ Ïó¨Õk4šM¡ñX[ï""ò}A‘ÿõˆˆˆBPDDD!(""¢QŠˆˆ(EDD‚""" A… ˆˆˆBPDDD!(""¢QŠˆˆ(EDD‚""" A… 
peewee-3.17.7/docs/_themes/000077500000000000000000000000001470346076600154545ustar00rootroot00000000000000peewee-3.17.7/docs/_themes/flask/000077500000000000000000000000001470346076600165545ustar00rootroot00000000000000peewee-3.17.7/docs/_themes/flask/layout.html000066400000000000000000000013561470346076600207640ustar00rootroot00000000000000{%- extends "basic/layout.html" %} {%- block extrahead %} {{ super() }} {% if theme_touch_icon %} {% endif %} {% endblock %} {%- block relbar2 %}{% endblock %} {% block header %} {{ super() }} {% if pagename == 'index' %}
{% endif %} {% endblock %} {%- block footer %} {% if pagename == 'index' %}
{% endif %} {%- endblock %} peewee-3.17.7/docs/_themes/flask/relations.html000066400000000000000000000011161470346076600214410ustar00rootroot00000000000000

Related Topics

peewee-3.17.7/docs/_themes/flask/static/000077500000000000000000000000001470346076600200435ustar00rootroot00000000000000peewee-3.17.7/docs/_themes/flask/static/flasky.css_t000066400000000000000000000144441470346076600224000ustar00rootroot00000000000000/* * flasky.css_t * ~~~~~~~~~~~~ * * :copyright: Copyright 2010 by Armin Ronacher. * :license: Flask Design License, see LICENSE for details. */ {% set page_width = '940px' %} {% set sidebar_width = '220px' %} @import url("basic.css"); /* -- page layout ----------------------------------------------------------- */ body { font-family: 'Georgia', serif; font-size: 17px; background-color: white; color: #000; margin: 0; padding: 0; } div.document { width: {{ page_width }}; margin: 30px auto 0 auto; } div.documentwrapper { float: left; width: 100%; } div.bodywrapper { margin: 0 0 0 {{ sidebar_width }}; } div.sphinxsidebar { width: {{ sidebar_width }}; } hr { border: 1px solid #B1B4B6; } div.body { background-color: #ffffff; color: #3E4349; padding: 0 30px 0 30px; } img.floatingflask { padding: 0 0 10px 10px; float: right; } div.footer { width: {{ page_width }}; margin: 20px auto 30px auto; font-size: 14px; color: #888; text-align: right; } div.footer a { color: #888; } div.related { display: none; } div.sphinxsidebar a { color: #444; text-decoration: none; border-bottom: 1px dotted #999; } div.sphinxsidebar a:hover { border-bottom: 1px solid #999; } div.sphinxsidebar { font-size: 14px; line-height: 1.5; } div.sphinxsidebarwrapper { padding: 18px 10px; } div.sphinxsidebarwrapper p.logo { padding: 0 0 20px 0; margin: 0; text-align: center; } div.sphinxsidebar h3, div.sphinxsidebar h4 { font-family: 'Garamond', 'Georgia', serif; color: #444; font-size: 24px; font-weight: normal; margin: 0 0 5px 0; padding: 0; } div.sphinxsidebar h4 { font-size: 20px; } div.sphinxsidebar h3 a { color: #444; } div.sphinxsidebar p.logo a, div.sphinxsidebar h3 a, div.sphinxsidebar p.logo a:hover, div.sphinxsidebar h3 a:hover { border: none; } div.sphinxsidebar p { color: #555; margin: 10px 0; } div.sphinxsidebar ul { margin: 10px 0; padding: 0; color: #000; } div.sphinxsidebar input { border: 1px solid #ccc; font-family: 'Georgia', serif; font-size: 1em; } /* -- body styles ----------------------------------------------------------- */ a { color: #004B6B; text-decoration: underline; } a:hover { color: #6D4100; text-decoration: underline; } div.body h1, div.body h2, div.body h3, div.body h4, div.body h5, div.body h6 { font-family: 'Garamond', 'Georgia', serif; font-weight: normal; margin: 30px 0px 10px 0px; padding: 0; } {% if theme_index_logo %} div.indexwrapper h1 { text-indent: -999999px; background: url({{ theme_index_logo }}) no-repeat center center; height: {{ theme_index_logo_height }}; } {% endif %} div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; } div.body h2 { font-size: 180%; } div.body h3 { font-size: 150%; } div.body h4 { font-size: 130%; } div.body h5 { font-size: 100%; } div.body h6 { font-size: 100%; } a.headerlink { color: #ddd; padding: 0 4px; text-decoration: none; } a.headerlink:hover { color: #444; background: #eaeaea; } div.body p, div.body dd, div.body li { line-height: 1.4em; } div.admonition { background: #fafafa; margin: 20px -30px; padding: 10px 30px; border-top: 1px solid #ccc; border-bottom: 1px solid #ccc; } div.admonition tt.xref, div.admonition a tt { border-bottom: 1px solid #fafafa; } dd div.admonition { margin-left: -60px; padding-left: 60px; } div.admonition p.admonition-title { font-family: 'Garamond', 'Georgia', serif; 
font-weight: normal; font-size: 24px; margin: 0 0 10px 0; padding: 0; line-height: 1; } div.admonition p.last { margin-bottom: 0; } div.highlight { background-color: white; } dt:target, .highlight { background: #FAF3E8; } div.note { background-color: #eee; border: 1px solid #ccc; } div.seealso { background-color: #ffc; border: 1px solid #ff6; } div.topic { background-color: #eee; } p.admonition-title { display: inline; } p.admonition-title:after { content: ":"; } pre, tt { font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.9em; } img.screenshot { } tt.descname, tt.descclassname { font-size: 0.95em; } tt.descname { padding-right: 0.08em; } img.screenshot { -moz-box-shadow: 2px 2px 4px #eee; -webkit-box-shadow: 2px 2px 4px #eee; box-shadow: 2px 2px 4px #eee; } table.docutils { border: 1px solid #888; -moz-box-shadow: 2px 2px 4px #eee; -webkit-box-shadow: 2px 2px 4px #eee; box-shadow: 2px 2px 4px #eee; } table.docutils td, table.docutils th { border: 1px solid #888; padding: 0.25em 0.7em; } table.field-list, table.footnote { border: none; -moz-box-shadow: none; -webkit-box-shadow: none; box-shadow: none; } table.footnote { margin: 15px 0; width: 100%; border: 1px solid #eee; background: #fdfdfd; font-size: 0.9em; } table.footnote + table.footnote { margin-top: -15px; border-top: none; } table.field-list th { padding: 0 0.8em 0 0; } table.field-list td { padding: 0; } table.footnote td.label { width: 0px; padding: 0.3em 0 0.3em 0.5em; } table.footnote td { padding: 0.3em 0.5em; } dl { margin: 0; padding: 0; } dl dd { margin-left: 30px; } blockquote { margin: 0 0 0 30px; padding: 0; } ul, ol { margin: 10px 0 10px 30px; padding: 0; } pre { background: #eee; padding: 7px 30px; margin: 15px -30px; line-height: 1.3em; } dl pre, blockquote pre, li pre { margin-left: -60px; padding-left: 60px; } dl dl pre { margin-left: -90px; padding-left: 90px; } tt { background-color: #ecf0f3; color: #222; /* padding: 1px 2px; */ } tt.xref, a tt { background-color: #FBFBFB; border-bottom: 1px solid white; } a.reference { text-decoration: none; border-bottom: 1px dotted #004B6B; } a.reference:hover { border-bottom: 1px solid #6D4100; } a.footnote-reference { text-decoration: none; font-size: 0.7em; vertical-align: top; border-bottom: 1px dotted #004B6B; } a.footnote-reference:hover { border-bottom: 1px solid #6D4100; } a:hover tt { background: #EEE; } peewee-3.17.7/docs/_themes/flask/static/small_flask.css000066400000000000000000000017201470346076600230450ustar00rootroot00000000000000/* * small_flask.css_t * ~~~~~~~~~~~~~~~~~ * * :copyright: Copyright 2010 by Armin Ronacher. * :license: Flask Design License, see LICENSE for details. 
*/ body { margin: 0; padding: 20px 30px; } div.documentwrapper { float: none; background: white; } div.sphinxsidebar { display: block; float: none; width: 102.5%; margin: 50px -30px -20px -30px; padding: 10px 20px; background: #333; color: white; } div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, div.sphinxsidebar h3 a { color: white; } div.sphinxsidebar a { color: #aaa; } div.sphinxsidebar p.logo { display: none; } div.document { width: 100%; margin: 0; } div.related { display: block; margin: 0; padding: 10px 0 20px 0; } div.related ul, div.related ul li { margin: 0; padding: 0; } div.footer { display: none; } div.bodywrapper { margin: 0; } div.body { min-height: 0; padding: 0; } peewee-3.17.7/docs/_themes/flask/theme.conf000066400000000000000000000002441470346076600205250ustar00rootroot00000000000000[theme] inherit = basic stylesheet = flasky.css pygments_style = flask_theme_support.FlaskyStyle [options] index_logo = '' index_logo_height = 120px touch_icon = peewee-3.17.7/docs/conf.py000066400000000000000000000162311470346076600153320ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # peewee documentation build configuration file, created by # sphinx-quickstart on Fri Nov 26 11:05:15 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. #RTD_NEW_THEME = True import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx_rtd_theme'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'peewee' copyright = u'charles leifer' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. src_dir = os.path.realpath(os.path.dirname(os.path.dirname(__file__))) sys.path.insert(0, src_dir) from peewee import __version__ version = __version__ # The full version, including alpha/beta/rc tags. release = __version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. #pygments_style = 'pastie' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = { # 'index_logo': 'peewee-white.png' #} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = ['_themes'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'peeweedoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4').
#latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'peewee.tex', u'peewee Documentation', u'charles leifer', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True autodoc_default_flags = ['members', 'show-inheritance'] autodoc_member_order = 'bysource' # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'peewee', u'peewee Documentation', [u'charles leifer'], 1) ] peewee-3.17.7/docs/crdb.png000066400000000000000000000051331470346076600154520ustar00rootroot00000000000000
[binary image data omitted: docs/crdb.png]
J…Ì«™=ùK-À/°láä¾iXÖ³¦$æQó†iX%y¬°V÷ßÓ0hjª+ …‚“P5{:µsf¡¬ÇvWŒ™€pÄ#\ê [Œœhޱö#øýQt6Œ6š•Kz‹5†q„V·,FKþ“z:JÇÌB ,ºhçv‰Wt•¯‡†ô¡J° FVµ,:O¡—¾ãbDÀr ¯<1—@øŒZ`tq¤•cÎVdÏ%L/ŸJscݸ¤·  ”bÅ’jªf¢,¡ýÅ${†kgz£ÓÏ”  ,Cöè t6 µ45ŒïÄz\)eSÃ\š›æB>`ïÓÕXÎhO¤}k¤Dbì1£Ÿ9X‰h Û¶yçª%Ä¢¥çÀqlnøÀ‚Á¦ 8¸½’ä©P16ÒÞ멦 0ùчnWù®2¦&¢ÜvÃ{Ïÿéôû._ÃEõs1"ôtÄhûç좣ñr6ùœSLu& JŠ÷ɽµh7ˆ›÷¸õCW1½¢ì¯ÿþÏàyJ /ÐPWËçï¼™¼çQð-þüà ´Vd“A²C#&¥@gC7ˆ×¥ï¹&° Zk6ï¾ _àøÜ§odMËb´øôwFØúÀ N‰œØÒ!rÇË9õär”e0b¸ëöë¹xqý9LHl_Ûn¼ã+t½ÖX”D=r©ÿr­J°ƒñmŒV-oâ×?ú*SãÑ·G‰iÑÂy|ó w Wø•à¦ÏØDaüáð»|Zœ?øù éü„¼ãZ¾ý¥O Ôc29#DKKxò·©â¿-Dà£7¼—¿z'ñX¿Pu¹`^=ò µs*™P‘I§Ÿ{I–\v³$æoØÜ÷Ȧ[î•öc''C•Lx¡û´ôôp÷}±´¹{n¿`À™ 5üVc`ç`?€‰IEND®B`‚peewee-3.17.7/docs/index.rst000066400000000000000000000040401470346076600156670ustar00rootroot00000000000000.. peewee documentation master file, created by sphinx-quickstart on Thu Nov 25 21:20:29 2010. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. peewee ====== .. image:: peewee3-logo.png Peewee is a simple and small ORM. It has few (but expressive) concepts, making it easy to learn and intuitive to use. * a small, expressive ORM * python 2.7+ and 3.4+ * supports sqlite, mysql, mariadb, postgresql and cockroachdb * :ref:`tons of extensions ` .. image:: postgresql.png :target: peewee/database.html#using-postgresql :alt: postgresql .. image:: mysql.png :target: peewee/database.html#using-mysql :alt: mysql .. image:: mariadb.png :target: peewee/database.html#using-mariadb :alt: mariadb .. image:: sqlite.png :target: peewee/database.html#using-sqlite :alt: sqlite .. image:: crdb.png :target: peewee/database.html#using-crdb :alt: cockroachdb Peewee's source code hosted on `GitHub `_. New to peewee? These may help: * :ref:`Quickstart ` * :ref:`Example twitter app ` * :ref:`Using peewee interactively ` * :ref:`Models and fields ` * :ref:`Querying ` * :ref:`Relationships and joins ` Contents: --------- .. toctree:: :maxdepth: 2 :glob: peewee/installation peewee/quickstart peewee/example peewee/interactive peewee/contributing peewee/database peewee/models peewee/querying peewee/query_operators peewee/relationships peewee/api peewee/sqlite_ext peewee/playhouse peewee/query_examples peewee/query_builder peewee/hacks peewee/changes Note ---- If you find any bugs, odd behavior, or have an idea for a new feature please don't hesitate to `open an issue `_ on GitHub or `contact me `_. Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` peewee-3.17.7/docs/make.bat000066400000000000000000000100121470346076600154270ustar00rootroot00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. 
	echo.  changes     to make an overview over all changed/added/deprecated items
	echo.  linkcheck   to check all external links for integrity
	echo.  doctest     to run all doctests embedded in the documentation if enabled
	goto end
)

if "%1" == "clean" (
	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
	del /q /s %BUILDDIR%\*
	goto end
)

if "%1" == "html" (
	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
	goto end
)

if "%1" == "dirhtml" (
	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
	goto end
)

if "%1" == "singlehtml" (
	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
	goto end
)

if "%1" == "pickle" (
	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
	echo.
	echo.Build finished; now you can process the pickle files.
	goto end
)

if "%1" == "json" (
	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
	echo.
	echo.Build finished; now you can process the JSON files.
	goto end
)

if "%1" == "htmlhelp" (
	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
	echo.
	echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
	goto end
)

if "%1" == "qthelp" (
	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
	echo.
	echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\peewee.qhcp
	echo.To view the help file:
	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\peewee.ghc
	goto end
)

if "%1" == "devhelp" (
	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
	echo.
	echo.Build finished.
	goto end
)

if "%1" == "epub" (
	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
	echo.
	echo.Build finished. The epub file is in %BUILDDIR%/epub.
	goto end
)

if "%1" == "latex" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	echo.
	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "text" (
	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
	echo.
	echo.Build finished. The text files are in %BUILDDIR%/text.
	goto end
)

if "%1" == "man" (
	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
	echo.
	echo.Build finished. The manual pages are in %BUILDDIR%/man.
	goto end
)

if "%1" == "changes" (
	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
	echo.
	echo.The overview file is in %BUILDDIR%/changes.
	goto end
)

if "%1" == "linkcheck" (
	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
	echo.
	echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
	goto end
)

if "%1" == "doctest" (
	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
	echo.
	echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
	goto end
)

:end

peewee-3.17.7/docs/mariadb.png
[binary PNG image data omitted]

peewee-3.17.7/docs/mysql.png
[binary PNG image data omitted]

peewee-3.17.7/docs/peewee/
peewee-3.17.7/docs/peewee/api.rst

.. _api:

API Documentation
=================

This document specifies Peewee's APIs.

Database
--------

.. py:class:: Database(database[, thread_safe=True[, field_types=None[, operations=None[, autoconnect=True[, **kwargs]]]]])

    :param str database: Database name or filename for SQLite (or ``None`` to
        :ref:`defer initialization `, in which case you must call
        :py:meth:`Database.init`, specifying the database name).
    :param bool thread_safe: Whether to store connection state in a
        thread-local.
    :param dict field_types: A mapping of additional field types to support.
    :param dict operations: A mapping of additional operations to support.
    :param bool autoconnect: Automatically connect to database if attempting
        to execute a query on a closed database.
    :param kwargs: Arbitrary keyword arguments that will be passed to the
        database driver when a connection is created, for example
        ``password``, ``host``, etc.

    The :py:class:`Database` is responsible for:

    * Executing queries
    * Managing connections
    * Transactions
    * Introspection

    .. note::
        The database can be instantiated with ``None`` as the database name
        if the database is not known until run-time. In this way you can
        create a database instance and then configure it elsewhere when the
        settings are known. This is called :ref:`deferred initialization `.

    Examples:

    .. code-block:: python

        # Sqlite database using WAL-mode and 32MB page-cache.
        db = SqliteDatabase('app.db', pragmas={
            'journal_mode': 'wal',
            'cache_size': -32 * 1000})

        # Postgresql database on remote host.
        db = PostgresqlDatabase('my_app', user='postgres', host='10.1.0.3',
                                password='secret')

    Deferred initialization example:

    .. code-block:: python

        db = PostgresqlDatabase(None)

        class BaseModel(Model):
            class Meta:
                database = db

        # Read database connection info from env, for example:
        db_name = os.environ['DATABASE']
        db_host = os.environ['PGHOST']

        # Initialize database.
        db.init(db_name, host=db_host, user='postgres')

    .. py:attribute:: param = '?'

        String used as parameter placeholder in SQL queries.

    .. py:attribute:: quote = '"'

        Type of quotation-mark to use to denote entities such as tables or
        columns.

    .. py:method:: init(database[, **kwargs])

        :param str database: Database name or filename for SQLite.
        :param kwargs: Arbitrary keyword arguments that will be passed to the
            database driver when a connection is created, for example
            ``password``, ``host``, etc.

        Initialize a *deferred* database. See :ref:`deferring_initialization`
        for more info.

    .. py:method:: __enter__()

        The :py:class:`Database` instance can be used as a context-manager, in
        which case a connection will be held open for the duration of the
        wrapped block. Additionally, any SQL executed within the wrapped block
        will be executed in a transaction.
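        A minimal sketch (assuming a ``User`` model bound to ``db``):

        .. code-block:: python

            with db:  # Connection is opened and a transaction begun.
                User.create(username='huey')
            # Leaving the block commits the transaction (or rolls it back on
            # error) and closes the connection.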
    .. py:method:: connection_context()

        Create a context-manager that will hold open a connection for the
        duration of the wrapped block.

        Example::

            def on_app_startup():
                # When app starts up, create the database tables, being sure
                # the connection is closed upon completion.
                with database.connection_context():
                    database.create_tables(APP_MODELS)

    .. py:method:: connect([reuse_if_open=False])

        :param bool reuse_if_open: Do not raise an exception if a connection
            is already opened.
        :returns: whether a new connection was opened.
        :rtype: bool
        :raises: ``OperationalError`` if connection already open and
            ``reuse_if_open`` is not set to ``True``.

        Open a connection to the database.

    .. py:method:: close()

        :returns: Whether a connection was closed. If the database was already
            closed, this returns ``False``.
        :rtype: bool

        Close the connection to the database.

    .. py:method:: is_closed()

        :returns: return ``True`` if database is closed, ``False`` if open.
        :rtype: bool

    .. py:method:: connection()

        Return the open connection. If a connection is not open, one will be
        opened. The connection will be whatever the underlying database-driver
        uses to encapsulate a database connection.

    .. py:method:: cursor([named_cursor=None])

        :param named_cursor: For internal use.

        Return a ``cursor`` object on the current connection. If a connection
        is not open, one will be opened. The cursor will be whatever the
        underlying database-driver uses to encapsulate a database cursor.

    .. py:method:: execute_sql(sql[, params=None])

        :param str sql: SQL string to execute.
        :param tuple params: Parameters for query.
        :returns: cursor object.

        Execute a SQL query and return a cursor over the results.

    .. py:method:: execute(query[, **context_options])

        :param query: A :py:class:`Query` instance.
        :param context_options: Arbitrary options to pass to the SQL
            generator.
        :returns: cursor object.

        Execute a SQL query by compiling a ``Query`` instance and executing
        the resulting SQL.

    .. py:method:: last_insert_id(cursor[, query_type=None])

        :param cursor: cursor object.
        :returns: primary key of last-inserted row.

    .. py:method:: rows_affected(cursor)

        :param cursor: cursor object.
        :returns: number of rows modified by query.

    .. py:method:: in_transaction()

        :returns: whether or not a transaction is currently open.
        :rtype: bool

    .. py:method:: atomic([...])

        Create a context-manager which runs any queries in the wrapped block
        in a transaction (or save-point if blocks are nested).

        Calls to :py:meth:`~Database.atomic` can be nested.

        :py:meth:`~Database.atomic` can also be used as a decorator (see the
        sketch below).

        Database-specific parameters: :py:class:`PostgresqlDatabase` and
        :py:class:`MySQLDatabase` accept an ``isolation_level`` parameter.
        :py:class:`SqliteDatabase` accepts a ``lock_type`` parameter.

        :param str isolation_level: Isolation strategy: SERIALIZABLE,
            READ COMMITTED, REPEATABLE READ, READ UNCOMMITTED
        :param str lock_type: Locking strategy: DEFERRED, IMMEDIATE,
            EXCLUSIVE.

        Example code::

            with db.atomic() as txn:
                perform_operation()

                with db.atomic() as nested_txn:
                    perform_another_operation()

        Transactions and save-points can be explicitly committed or
        rolled-back within the wrapped block. If this occurs, a new
        transaction or savepoint is begun after the commit/rollback.

        Example::

            with db.atomic() as txn:
                User.create(username='mickey')
                txn.commit()  # Changes are saved and a new transaction begins.

                User.create(username='huey')
                txn.rollback()  # "huey" will not be saved.

                User.create(username='zaizee')

            # Print the usernames of all users.
            print([u.username for u in User.select()])

            # Prints ["mickey", "zaizee"]
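        As noted above, ``atomic`` may also be applied as a decorator, in
        which case the decorated function executes inside a transaction. A
        minimal sketch (assuming a ``User`` model bound to ``db``):

        .. code-block:: python

            @db.atomic()
            def create_user(username):
                # This function runs inside a transaction.
                return User.create(username=username)

            create_user('mickey')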
    .. py:method:: manual_commit()

        Create a context-manager which disables all transaction management
        for the duration of the wrapped block.

        Example::

            with db.manual_commit():
                db.begin()  # Begin transaction explicitly.
                try:
                    user.delete_instance(recursive=True)
                except:
                    db.rollback()  # Rollback -- an error occurred.
                    raise
                else:
                    try:
                        db.commit()  # Attempt to commit changes.
                    except:
                        db.rollback()  # Error committing, rollback.
                        raise

        The above code is equivalent to the following::

            with db.atomic():
                user.delete_instance(recursive=True)

    .. py:method:: session_start()

        Begin a new transaction (without using a context-manager or
        decorator). This method is useful if you intend to execute a sequence
        of operations inside a transaction, but using a decorator or
        context-manager would not be appropriate.

        .. note::
            It is strongly advised that you use the :py:meth:`Database.atomic`
            method whenever possible for managing transactions/savepoints. The
            ``atomic`` method correctly manages nesting, uses the appropriate
            construction (e.g., transaction-vs-savepoint), and always cleans
            up after itself.

            The :py:meth:`~Database.session_start` method should only be used
            if the sequence of operations does not easily lend itself to
            wrapping using either a context-manager or decorator.

        .. warning::
            You must *always* call either :py:meth:`~Database.session_commit`
            or :py:meth:`~Database.session_rollback` after calling the
            ``session_start`` method.

    .. py:method:: session_commit()

        Commit any changes made during a transaction begun with
        :py:meth:`~Database.session_start`.

    .. py:method:: session_rollback()

        Roll back any changes made during a transaction begun with
        :py:meth:`~Database.session_start`.

    .. py:method:: transaction([...])

        Create a context-manager that runs all queries in the wrapped block
        in a transaction.

        Database-specific parameters: :py:class:`PostgresqlDatabase` and
        :py:class:`MySQLDatabase` accept an ``isolation_level`` parameter.
        :py:class:`SqliteDatabase` accepts a ``lock_type`` parameter.

        :param str isolation_level: Isolation strategy: SERIALIZABLE,
            READ COMMITTED, REPEATABLE READ, READ UNCOMMITTED
        :param str lock_type: Locking strategy: DEFERRED, IMMEDIATE,
            EXCLUSIVE.

        .. warning::
            Calls to ``transaction`` cannot be nested. Only the top-most call
            will take effect. Rolling-back or committing a nested transaction
            context-manager has undefined behavior.

    .. py:method:: savepoint()

        Create a context-manager that runs all queries in the wrapped block
        in a savepoint. Savepoints can be nested arbitrarily.

        .. warning::
            Calls to ``savepoint`` must occur inside of a transaction.

    .. py:method:: begin()

        Begin a transaction when using manual-commit mode.

        .. note::
            This method should only be used in conjunction with the
            :py:meth:`~Database.manual_commit` context manager.

    .. py:method:: commit()

        Manually commit the currently-active transaction.

        .. note::
            This method should only be used in conjunction with the
            :py:meth:`~Database.manual_commit` context manager.

    .. py:method:: rollback()

        Manually roll-back the currently-active transaction.

        .. note::
            This method should only be used in conjunction with the
            :py:meth:`~Database.manual_commit` context manager.
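        A minimal sketch of the ``session_start()`` / ``session_commit()`` /
        ``session_rollback()`` flow described above (assuming a ``User`` model
        bound to ``db``):

        .. code-block:: python

            db.session_start()
            try:
                User.create(username='mickey')
            except Exception:
                db.session_rollback()
                raise
            else:
                db.session_commit()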
    .. py:method:: batch_commit(it, n)

        :param iterable it: an iterable whose items will be yielded.
        :param int n: commit every *n* items.
        :return: an equivalent iterable to the one provided, with the addition
            that groups of *n* items will be yielded in a transaction.

        The purpose of this method is to simplify batching large operations,
        such as inserts, updates, etc. You pass in an iterable and the number
        of items-per-batch, and the items will be returned by an equivalent
        iterator that wraps each batch in a transaction.

        Example:

        .. code-block:: python

            # Some list or iterable containing data to insert.
            row_data = [{'username': 'u1'}, {'username': 'u2'}, ...]

            # Insert all data, committing every 100 rows. If, for example,
            # there are 789 items in the list, then there will be a total of
            # 8 transactions (7x100 and 1x89).
            for row in db.batch_commit(row_data, 100):
                User.create(**row)

        An alternative that may be more efficient is to batch the data into a
        multi-value ``INSERT`` statement (for example, using
        :py:meth:`Model.insert_many`):

        .. code-block:: python

            with db.atomic():
                for idx in range(0, len(row_data), 100):
                    # Insert 100 rows at a time.
                    rows = row_data[idx:idx + 100]
                    User.insert_many(rows).execute()

    .. py:method:: table_exists(table[, schema=None])

        :param str table: Table name.
        :param str schema: Schema name (optional).
        :returns: ``bool`` indicating whether table exists.

    .. py:method:: get_tables([schema=None])

        :param str schema: Schema name (optional).
        :returns: a list of table names in the database.

    .. py:method:: get_indexes(table[, schema=None])

        :param str table: Table name.
        :param str schema: Schema name (optional).

        Return a list of :py:class:`IndexMetadata` tuples.

        Example::

            print(db.get_indexes('entry'))
            [IndexMetadata(
                 name='entry_public_list',
                 sql='CREATE INDEX "entry_public_list" ...',
                 columns=['timestamp'],
                 unique=False,
                 table='entry'),
             IndexMetadata(
                 name='entry_slug',
                 sql='CREATE UNIQUE INDEX "entry_slug" ON "entry" ("slug")',
                 columns=['slug'],
                 unique=True,
                 table='entry')]

    .. py:method:: get_columns(table[, schema=None])

        :param str table: Table name.
        :param str schema: Schema name (optional).

        Return a list of :py:class:`ColumnMetadata` tuples.

        Example::

            print(db.get_columns('entry'))
            [ColumnMetadata(
                 name='id',
                 data_type='INTEGER',
                 null=False,
                 primary_key=True,
                 table='entry'),
             ColumnMetadata(
                 name='title',
                 data_type='TEXT',
                 null=False,
                 primary_key=False,
                 table='entry'),
             ...]

    .. py:method:: get_primary_keys(table[, schema=None])

        :param str table: Table name.
        :param str schema: Schema name (optional).

        Return a list of column names that comprise the primary key.

        Example::

            print(db.get_primary_keys('entry'))
            ['id']

    .. py:method:: get_foreign_keys(table[, schema=None])

        :param str table: Table name.
        :param str schema: Schema name (optional).

        Return a list of :py:class:`ForeignKeyMetadata` tuples for keys
        present on the table.

        Example::

            print(db.get_foreign_keys('entrytag'))
            [ForeignKeyMetadata(
                 column='entry_id',
                 dest_table='entry',
                 dest_column='id',
                 table='entrytag'),
             ...]

    .. py:method:: get_views([schema=None])

        :param str schema: Schema name (optional).

        Return a list of :py:class:`ViewMetadata` tuples for VIEWs present in
        the database.

        Example::

            print(db.get_views())
            [ViewMetadata(
                 name='entries_public',
                 sql='CREATE VIEW entries_public AS SELECT ... '),
             ...]

    .. py:method:: sequence_exists(seq)

        :param str seq: Name of sequence.
        :returns: Whether sequence exists.
        :rtype: bool

    .. py:method:: create_tables(models[, **options])

        :param list models: A list of :py:class:`Model` classes.
        :param options: Options to specify when calling
            :py:meth:`Model.create_table`.

        Create tables, indexes and associated metadata for the given list of
        models.

        Dependencies are resolved so that tables are created in the
        appropriate order.

    .. py:method:: drop_tables(models[, **options])

        :param list models: A list of :py:class:`Model` classes.
        :param kwargs: Options to specify when calling
            :py:meth:`Model.drop_table`.

        Drop tables, indexes and associated metadata for the given list of
        models.

        Dependencies are resolved so that tables are dropped in the
        appropriate order.

    .. py:method:: bind(models[, bind_refs=True[, bind_backrefs=True]])

        :param list models: One or more :py:class:`Model` classes to bind.
        :param bool bind_refs: Bind related models.
        :param bool bind_backrefs: Bind back-reference related models.

        Bind the given list of models, and specified relations, to the
        database.

    .. py:method:: bind_ctx(models[, bind_refs=True[, bind_backrefs=True]])

        :param list models: List of models to bind to the database.
        :param bool bind_refs: Bind models that are referenced using
            foreign-keys.
        :param bool bind_backrefs: Bind models that reference the given model
            with a foreign-key.

        Create a context-manager that binds (associates) the given models
        with the current database for the duration of the wrapped block.

        Example:

        .. code-block:: python

            MODELS = (User, Account, Note)

            # Bind the given models to the db for the duration of wrapped
            # block.
            def use_test_database(fn):
                @wraps(fn)
                def inner(self):
                    with test_db.bind_ctx(MODELS):
                        test_db.create_tables(MODELS)
                        try:
                            fn(self)
                        finally:
                            test_db.drop_tables(MODELS)
                return inner


            class TestSomething(TestCase):
                @use_test_database
                def test_something(self):
                    # ... models are bound to test database ...
                    pass

    .. py:method:: extract_date(date_part, date_field)

        :param str date_part: date part to extract, e.g. 'year'.
        :param Node date_field: a SQL node containing a date/time, for example
            a :py:class:`DateTimeField`.
        :returns: a SQL node representing a function call that will return the
            provided date part.

        Provides a compatible interface for extracting a portion of a
        datetime.

    .. py:method:: truncate_date(date_part, date_field)

        :param str date_part: date part to truncate to, e.g. 'day'.
        :param Node date_field: a SQL node containing a date/time, for example
            a :py:class:`DateTimeField`.
        :returns: a SQL node representing a function call that will return the
            truncated date part.

        Provides a compatible interface for truncating a datetime to the given
        resolution.

    .. py:method:: random()

        :returns: a SQL node representing a function call that returns a
            random value.

        A compatible interface for calling the appropriate random number
        generation function provided by the database. For Postgres and Sqlite,
        this is equivalent to ``fn.random()``, for MySQL ``fn.rand()``.


.. py:class:: SqliteDatabase(database[, pragmas=None[, timeout=5[, returning_clause=None[, **kwargs]]]])

    :param pragmas: Either a dictionary or a list of 2-tuples containing
        pragma key and value to set every time a connection is opened.
    :param timeout: Set the busy-timeout on the SQLite driver (in seconds).
    :param bool returning_clause: Use `RETURNING` clause automatically for
        bulk INSERT queries (requires Sqlite 3.35 or newer).

    Sqlite database implementation. :py:class:`SqliteDatabase` provides some
    advanced features only offered by Sqlite:

    * Register custom aggregates, collations and functions
    * Load C extensions
    * Advanced transactions (specify lock type)
    * For even more features, see :py:class:`SqliteExtDatabase`.

    Example of initializing a database and configuring some PRAGMAs:

    .. code-block:: python

        db = SqliteDatabase('my_app.db', pragmas=(
            ('cache_size', -16000),  # 16MB
            ('journal_mode', 'wal'),  # Use write-ahead-log journal mode.
        ))

        # Alternatively, pragmas can be specified using a dictionary.
        db = SqliteDatabase('my_app.db', pragmas={'journal_mode': 'wal'})

    .. py:method:: pragma(key[, value=SENTINEL[, permanent=False]])

        :param key: Setting name.
        :param value: New value for the setting (optional).
        :param permanent: Apply this pragma whenever a connection is opened.

        Execute a PRAGMA query once on the active connection. If a value is
        not specified, then the current value will be returned.

        If ``permanent`` is specified, then the PRAGMA query will also be
        executed whenever a new connection is opened, ensuring it is always
        in-effect.

        .. note::
            By default this only affects the current connection. If the PRAGMA
            being executed is not persistent, then you must specify
            ``permanent=True`` to ensure the pragma is set on subsequent
            connections.
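        A short usage sketch, using the pragma names documented below:

        .. code-block:: python

            db = SqliteDatabase('my_app.db')

            # Read the current value of the cache_size pragma.
            print(db.pragma('cache_size'))

            # Set cache_size for the current connection only.
            db.pragma('cache_size', -8000)

            # Set foreign_keys for this and all subsequent connections.
            db.pragma('foreign_keys', 1, permanent=True)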
    .. py:attribute:: cache_size

        Get or set the cache_size pragma for the current connection.

    .. py:attribute:: foreign_keys

        Get or set the foreign_keys pragma for the current connection.

    .. py:attribute:: journal_mode

        Get or set the journal_mode pragma.

    .. py:attribute:: journal_size_limit

        Get or set the journal_size_limit pragma.

    .. py:attribute:: mmap_size

        Get or set the mmap_size pragma for the current connection.

    .. py:attribute:: page_size

        Get or set the page_size pragma.

    .. py:attribute:: read_uncommitted

        Get or set the read_uncommitted pragma for the current connection.

    .. py:attribute:: synchronous

        Get or set the synchronous pragma for the current connection.

    .. py:attribute:: wal_autocheckpoint

        Get or set the wal_autocheckpoint pragma for the current connection.

    .. py:attribute:: timeout

        Get or set the busy timeout (seconds).

    .. py:method:: register_aggregate(klass[, name=None[, num_params=-1]])

        :param klass: Class implementing aggregate API.
        :param str name: Aggregate function name (defaults to name of class).
        :param int num_params: Number of parameters the aggregate accepts, or
            -1 for any number.

        Register a user-defined aggregate function.

        The function will be registered each time a new connection is opened.
        Additionally, if a connection is already open, the aggregate will be
        registered with the open connection.

    .. py:method:: aggregate([name=None[, num_params=-1]])

        :param str name: Name of the aggregate (defaults to class name).
        :param int num_params: Number of parameters the aggregate accepts, or
            -1 for any number.

        Class decorator to register a user-defined aggregate function.

        Example:

        .. code-block:: python

            @db.aggregate('md5')
            class MD5(object):
                def initialize(self):
                    self.md5 = hashlib.md5()

                def step(self, value):
                    self.md5.update(value)

                def finalize(self):
                    return self.md5.hexdigest()

            @db.aggregate()
            class Product(object):
                '''Like SUM() except calculates cumulative product.'''
                def __init__(self):
                    self.product = 1

                def step(self, value):
                    self.product *= value

                def finalize(self):
                    return self.product

    .. py:method:: register_collation(fn[, name=None])

        :param fn: The collation function.
        :param str name: Name of collation (defaults to function name)

        Register a user-defined collation. The collation will be registered
        each time a new connection is opened. Additionally, if a connection is
        already open, the collation will be registered with the open
        connection.

    .. py:method:: collation([name=None])

        :param str name: Name of collation (defaults to function name)

        Decorator to register a user-defined collation.

        Example:

        .. code-block:: python

            @db.collation('reverse')
            def collate_reverse(s1, s2):
                # Note: the built-in cmp() exists only in Python 2; this
                # expression is the Python 3 equivalent of -cmp(s1, s2).
                return (s1 < s2) - (s1 > s2)

            # Usage:
            Book.select().order_by(collate_reverse.collation(Book.title))

            # Equivalent:
            Book.select().order_by(Book.title.asc(collation='reverse'))

        As you might have noticed, the original ``collate_reverse`` function
        has a special attribute called ``collation`` attached to it. This
        extra attribute provides a shorthand way to generate the SQL necessary
        to use our custom collation.

    .. py:method:: register_function(fn[, name=None[, num_params=-1[, deterministic=None]]])

        :param fn: The user-defined scalar function.
:param str name: Name of function (defaults to function name) :param int num_params: Number of arguments the function accepts, or -1 for any number. :param bool deterministic: Whether the function is deterministic for a given input (this is required to use the function in an index). Requires Sqlite 3.20 or newer, and ``sqlite3`` driver support (added to stdlib in Python 3.8). Register a user-defined scalar function. The function will be registered each time a new connection is opened. Additionally, if a connection is already open, the function will be registered with the open connection. .. py:method:: func([name=None[, num_params=-1[, deterministic=None]]]) :param str name: Name of the function (defaults to function name). :param int num_params: Number of parameters the function accepts, or -1 for any number. :param bool deterministic: Whether the function is deterministic for a given input (this is required to use the function in an index). Requires Sqlite 3.20 or newer, and ``sqlite3`` driver support (added to stdlib in Python 3.8). Decorator to register a user-defined scalar function. Example: .. code-block:: python @db.func('title_case') def title_case(s): return s.title() if s else '' # Usage: title_case_books = Book.select(fn.title_case(Book.title)) .. py:method:: register_window_function(klass[, name=None[, num_params=-1]]) :param klass: Class implementing window function API. :param str name: Window function name (defaults to name of class). :param int num_params: Number of parameters the function accepts, or -1 for any number. Register a user-defined window function. .. attention:: This feature requires SQLite >= 3.25.0 **and** `pysqlite3 `_ >= 0.2.0. The window function will be registered each time a new connection is opened. Additionally, if a connection is already open, the window function will be registered with the open connection. .. py:method:: window_function([name=None[, num_params=-1]]) :param str name: Name of the window function (defaults to class name). :param int num_params: Number of parameters the function accepts, or -1 for any number. Class decorator to register a user-defined window function. Window functions must define the following methods: * ``step()`` - receive values from a row and update state. * ``inverse()`` - inverse of ``step()`` for the given values. * ``value()`` - return the current value of the window function. * ``finalize()`` - return the final value of the window function. Example: .. code-block:: python @db.window_function('my_sum') class MySum(object): def __init__(self): self._value = 0 def step(self, value): self._value += value def inverse(self, value): self._value -= value def value(self): return self._value def finalize(self): return self._value .. py:method:: table_function([name=None]) Class-decorator for registering a :py:class:`TableFunction`. Table functions are user-defined functions that, rather than returning a single, scalar value, can return any number of rows of tabular data. Example: .. code-block:: python from playhouse.sqlite_ext import TableFunction @db.table_function('series') class Series(TableFunction): columns = ['value'] params = ['start', 'stop', 'step'] def initialize(self, start=0, stop=None, step=1): """ Table-functions declare an initialize() method, which is called with whatever arguments the user has called the function with. 
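                    For this example, calling series(0, 5, 2) will yield the
                    rows 0, 2 and 4 (see the usage below).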
""" self.start = self.current = start self.stop = stop or float('Inf') self.step = step def iterate(self, idx): """ Iterate is called repeatedly by the SQLite database engine until the required number of rows has been read **or** the function raises a `StopIteration` signalling no more rows are available. """ if self.current > self.stop: raise StopIteration ret, self.current = self.current, self.current + self.step return (ret,) # Usage: cursor = db.execute_sql('SELECT * FROM series(?, ?, ?)', (0, 5, 2)) for value, in cursor: print(value) # Prints: # 0 # 2 # 4 .. py:method:: unregister_aggregate(name) :param name: Name of the user-defined aggregate function. Unregister the user-defined aggregate function. .. py:method:: unregister_collation(name) :param name: Name of the user-defined collation. Unregister the user-defined collation. .. py:method:: unregister_function(name) :param name: Name of the user-defined scalar function. Unregister the user-defined scalar function. .. py:method:: unregister_table_function(name) :param name: Name of the user-defined table function. :returns: True or False, depending on whether the function was removed. Unregister the user-defined scalar function. .. py:method:: load_extension(extension_module) Load the given C extension. If a connection is currently open in the calling thread, then the extension will be loaded for that connection as well as all subsequent connections. For example, if you've compiled the closure table extension and wish to use it in your application, you might write: .. code-block:: python db = SqliteExtDatabase('my_app.db') db.load_extension('closure') .. py:method:: attach(filename, name) :param str filename: Database to attach (or ``:memory:`` for in-memory) :param str name: Schema name for attached database. :return: boolean indicating success Register another database file that will be attached to every database connection. If the main database is currently connected, the new database will be attached on the open connection. .. note:: Databases that are attached using this method will be attached every time a database connection is opened. .. py:method:: detach(name) :param str name: Schema name for attached database. :return: boolean indicating success Unregister another database file that was attached previously with a call to :py:meth:`~SqliteDatabase.attach`. If the main database is currently connected, the attached database will be detached from the open connection. .. py:method:: atomic([lock_type=None]) :param str lock_type: Locking strategy: DEFERRED, IMMEDIATE, EXCLUSIVE. Create an atomic context-manager, optionally using the specified locking strategy (if unspecified, DEFERRED is used). .. note:: Lock type only applies to the outermost ``atomic()`` block. .. py:method:: transaction([lock_type=None]) :param str lock_type: Locking strategy: DEFERRED, IMMEDIATE, EXCLUSIVE. Create a transaction context-manager using the specified locking strategy (defaults to DEFERRED). .. py:class:: PostgresqlDatabase(database[, register_unicode=True[, encoding=None[, isolation_level=None]]]) Postgresql database implementation. Additional optional keyword-parameters: :param bool register_unicode: Register unicode types. :param str encoding: Database encoding. :param int isolation_level: Isolation level constant, defined in the ``psycopg2.extensions`` module. .. py:method:: set_time_zone(timezone) :param str timezone: timezone name, e.g. "US/Central". :returns: no return value. Set the timezone on the current connection. 
If no connection is open, then one will be opened. .. py:method:: atomic([isolation_level=None]) :param str isolation_level: Isolation strategy: SERIALIZABLE, READ COMMITTED, REPEATABLE READ, READ UNCOMMITTED Create an atomic context-manager, optionally using the specified isolation level (if unspecified, the server default will be used). .. note:: Isolation level only applies to the outermost ``atomic()`` block. .. py:method:: transaction([isolation_level=None]) :param str isolation_level: Isolation strategy: SERIALIZABLE, READ COMMITTED, REPEATABLE READ, READ UNCOMMITTED Create a transaction context-manager, optionally using the specified isolation level (if unspecified, the server default will be used). .. py:class:: MySQLDatabase(database[, **kwargs]) MySQL database implementation. .. py:method:: atomic([isolation_level=None]) :param str isolation_level: Isolation strategy: SERIALIZABLE, READ COMMITTED, REPEATABLE READ, READ UNCOMMITTED Create an atomic context-manager, optionally using the specified isolation level (if unspecified, the server default will be used). .. note:: Isolation level only applies to the outermost ``atomic()`` block. .. py:method:: transaction([isolation_level=None]) :param str isolation_level: Isolation strategy: SERIALIZABLE, READ COMMITTED, REPEATABLE READ, READ UNCOMMITTED Create a transaction context-manager, optionally using the specified isolation level (if unspecified, the server default will be used). .. _query-builder-api: Query-builder ------------- .. py:class:: Node() Base-class for all components which make up the AST for a SQL query. .. py:staticmethod:: copy(method) Decorator to use with Node methods that mutate the node's state. This allows method-chaining, e.g.: .. code-block:: python query = MyModel.select() new_query = query.where(MyModel.field == 'value') .. py:method:: unwrap() API for recursively unwrapping "wrapped" nodes. Base case is to return self. .. py:method:: is_alias() API for determining if a node, at any point, has been explicitly aliased by the user. .. py:class:: Source([alias=None]) A source of row tuples, for example a table, join, or select query. By default provides a "magic" attribute named "c" that is a factory for column/attribute lookups, for example: .. code-block:: python User = Table('users') query = (User .select(User.c.username) .where(User.c.active == True) .order_by(User.c.username)) .. py:method:: alias(name) Returns a copy of the object with the given alias applied. .. py:method:: select(*columns) :param columns: :py:class:`Column` instances, expressions, functions, sub-queries, or anything else that you would like to select. Create a :py:class:`Select` query on the table. If the table explicitly declares columns and no columns are provided, then by default all the table's defined columns will be selected. .. py:method:: join(dest[, join_type='INNER'[, on=None]]) :param Source dest: Join the table with the given destination. :param str join_type: Join type. :param on: Expression to use as join predicate. :returns: a :py:class:`Join` instance. Join type may be one of: * ``JOIN.INNER`` * ``JOIN.LEFT_OUTER`` * ``JOIN.RIGHT_OUTER`` * ``JOIN.FULL`` * ``JOIN.FULL_OUTER`` * ``JOIN.CROSS`` .. py:method:: left_outer_join(dest[, on=None]) :param Source dest: Join the table with the given destination. :param on: Expression to use as join predicate. :returns: a :py:class:`Join` instance. Convenience method for calling :py:meth:`~Source.join` using a LEFT OUTER join. .. 
py:class:: BaseTable() Base class for table-like objects, which support JOINs via operator overloading. .. py:method:: __and__(dest) Perform an INNER join on ``dest``. .. py:method:: __add__(dest) Perform a LEFT OUTER join on ``dest``. .. py:method:: __sub__(dest) Perform a RIGHT OUTER join on ``dest``. .. py:method:: __or__(dest) Perform a FULL OUTER join on ``dest``. .. py:method:: __mul__(dest) Perform a CROSS join on ``dest``. .. py:class:: Table(name[, columns=None[, primary_key=None[, schema=None[, alias=None]]]]) Represents a table in the database (or a table-like object such as a view). :param str name: Database table name :param tuple columns: List of column names (optional). :param str primary_key: Name of primary key column. :param str schema: Schema name used to access table (if necessary). :param str alias: Alias to use for table in SQL queries. .. note:: If columns are specified, the magic "c" attribute will be disabled. When columns are not explicitly defined, tables have a special attribute "c" which is a factory that provides access to table columns dynamically. Example:: User = Table('users') query = (User .select(User.c.id, User.c.username) .order_by(User.c.username)) Equivalent example when columns **are** specified:: User = Table('users', ('id', 'username')) query = (User .select(User.id, User.username) .order_by(User.username)) .. py:method:: bind([database=None]) :param database: :py:class:`Database` object. Bind this table to the given database (or unbind by leaving empty). When a table is *bound* to a database, queries may be executed against it without the need to specify the database in the query's execute method. .. py:method:: bind_ctx([database=None]) :param database: :py:class:`Database` object. Return a context manager that will bind the table to the given database for the duration of the wrapped block. .. py:method:: select(*columns) :param columns: :py:class:`Column` instances, expressions, functions, sub-queries, or anything else that you would like to select. Create a :py:class:`Select` query on the table. If the table explicitly declares columns and no columns are provided, then by default all the table's defined columns will be selected. Example:: User = Table('users', ('id', 'username')) # Because columns were defined on the Table, we will default to # selecting both of the User table's columns. # Evaluates to SELECT id, username FROM users query = User.select() Note = Table('notes') query = (Note .select(Note.c.content, Note.c.timestamp, User.username) .join(User, on=(Note.c.user_id == User.id)) .where(Note.c.is_published == True) .order_by(Note.c.timestamp.desc())) # Using a function to select users and the number of notes they # have authored. query = (User .select( User.username, fn.COUNT(Note.c.id).alias('n_notes')) .join( Note, JOIN.LEFT_OUTER, on=(User.id == Note.c.user_id)) .order_by(fn.COUNT(Note.c.id).desc())) .. py:method:: insert([insert=None[, columns=None[, **kwargs]]]) :param insert: A dictionary mapping column to value, an iterable that yields dictionaries (i.e. list), or a :py:class:`Select` query. :param list columns: The list of columns to insert into when the data being inserted is not a dictionary. :param kwargs: Mapping of column-name to value. Create a :py:class:`Insert` query into the table. .. py:method:: replace([insert=None[, columns=None[, **kwargs]]]) :param insert: A dictionary mapping column to value, an iterable that yields dictionaries (i.e. list), or a :py:class:`Select` query. 
        :param list columns: The list of columns to insert into when the data
            being inserted is not a dictionary.
        :param kwargs: Mapping of column-name to value.

        Create a :py:class:`Insert` query into the table whose conflict
        resolution method is to replace.

    .. py:method:: update([update=None[, **kwargs]])

        :param update: A dictionary mapping column to value.
        :param kwargs: Mapping of column-name to value.

        Create a :py:class:`Update` query for the table.

    .. py:method:: delete()

        Create a :py:class:`Delete` query for the table.


.. py:class:: Join(lhs, rhs[, join_type=JOIN.INNER[, on=None[, alias=None]]])

    Represent a JOIN between two table-like objects.

    :param lhs: Left-hand side of the join.
    :param rhs: Right-hand side of the join.
    :param join_type: Type of join. e.g. JOIN.INNER, JOIN.LEFT_OUTER, etc.
    :param on: Expression describing the join predicate.
    :param str alias: Alias to apply to joined data.

    .. py:method:: on(predicate)

        :param Expression predicate: join predicate.

        Specify the predicate expression used for this join.


.. py:class:: ValuesList(values[, columns=None[, alias=None]])

    Represent a values list that can be used like a table.

    :param values: a list-of-lists containing the row data to represent.
    :param list columns: the names to give to the columns in each row.
    :param str alias: alias to use for values-list.

    Example:

    .. code-block:: python

        data = [(1, 'first'), (2, 'second')]
        vl = ValuesList(data, columns=('idx', 'name'))

        query = (vl
                 .select(vl.c.idx, vl.c.name)
                 .order_by(vl.c.idx))

        # Yields:
        # SELECT t1.idx, t1.name
        # FROM (VALUES (1, 'first'), (2, 'second')) AS t1(idx, name)
        # ORDER BY t1.idx

    .. py:method:: columns(*names)

        :param names: names to apply to the columns of data.

        Example:

        .. code-block:: python

            vl = ValuesList([(1, 'first'), (2, 'second')])
            vl = vl.columns('idx', 'name').alias('v')

            query = vl.select(vl.c.idx, vl.c.name)

            # Yields:
            # SELECT v.idx, v.name
            # FROM (VALUES (1, 'first'), (2, 'second')) AS v(idx, name)


.. py:class:: CTE(name, query[, recursive=False[, columns=None]])

    Represent a common-table-expression. For example queries, see :ref:`cte`.

    :param name: Name for the CTE.
    :param query: :py:class:`Select` query describing CTE.
    :param bool recursive: Whether the CTE is recursive.
    :param list columns: Explicit list of columns produced by CTE (optional).

    .. py:method:: select_from(*columns)

        Create a SELECT query that utilizes the given common table expression
        as the source for a new query.

        :param columns: One or more columns to select from the CTE.
        :return: :py:class:`Select` query utilizing the common table
            expression

    .. py:method:: union_all(other)

        Used on the base-case CTE to construct the recursive term of the CTE.

        :param other: recursive term, generally a :py:class:`Select` query.
        :return: a recursive :py:class:`CTE` with the given recursive term.


.. py:class:: ColumnBase()

    Base-class for column-like objects, attributes or expressions.

    Column-like objects can be composed using various operators and special
    methods.

    * ``&``: Logical AND
    * ``|``: Logical OR
    * ``+``: Addition
    * ``-``: Subtraction
    * ``*``: Multiplication
    * ``/``: Division
    * ``^``: Exclusive-OR
    * ``==``: Equality
    * ``!=``: Inequality
    * ``>``: Greater-than
    * ``<``: Less-than
    * ``>=``: Greater-than or equal
    * ``<=``: Less-than or equal
    * ``<<``: ``IN``
    * ``>>``: ``IS`` (i.e. ``IS NULL``)
    * ``%``: ``LIKE``
    * ``**``: ``ILIKE``
    * ``bin_and()``: Binary AND
    * ``bin_or()``: Binary OR
    * ``in_()``: ``IN``
    * ``not_in()``: ``NOT IN``
    * ``regexp()``: ``REGEXP``
    * ``is_null(True/False)``: ``IS NULL`` or ``IS NOT NULL``
    * ``contains(s)``: ``LIKE %s%``
    * ``startswith(s)``: ``LIKE s%``
    * ``endswith(s)``: ``LIKE %s``
    * ``between(low, high)``: ``BETWEEN low AND high``
    * ``concat()``: ``||``
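    A brief sketch combining several of these operators (assuming a ``User``
    model with ``username``, ``login_count`` and ``last_login`` fields; these
    names are illustrative only):

    .. code-block:: python

        # Logical OR of two comparisons. Parentheses are a good habit, since
        # Python's "&" and "|" bind more tightly than comparisons like "==".
        query = User.select().where(
            (User.username.startswith('h')) |
            (User.login_count.between(10, 100)))

        # IN and IS NULL, combined with logical AND.
        inactive = User.select().where(
            User.username.in_(['huey', 'mickey']) &
            User.last_login.is_null(True))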
.. py:method:: alias(alias) :param str alias: Alias for the given column-like object. :returns: a :py:class:`Alias` object. Indicate the alias that should be given to the specified column-like object. .. py:method:: cast(as_type) :param str as_type: Type name to cast to. :returns: a :py:class:`Cast` object. Create a ``CAST`` expression. .. py:method:: asc([collation=None[, nulls=None]]) :param str collation: Collation name to use for sorting. :param str nulls: Sort nulls (FIRST or LAST). :returns: an ascending :py:class:`Ordering` object for the column. .. py:method:: desc([collation=None[, nulls=None]]) :param str collation: Collation name to use for sorting. :param str nulls: Sort nulls (FIRST or LAST). :returns: a descending :py:class:`Ordering` object for the column. .. py:method:: __invert__() :returns: a :py:class:`Negated` wrapper for the column. .. py:class:: Column(source, name) :param Source source: Source for column. :param str name: Column name. Column on a table or a column returned by a sub-query. .. py:class:: Alias(node, alias) :param Node node: a column-like object. :param str alias: alias to assign to column. Create a named alias for the given column-like object. .. py:method:: alias([alias=None]) :param str alias: new name (or None) for aliased column. Create a new :py:class:`Alias` for the aliased column-like object. If the new alias is ``None``, then the original column-like object is returned. .. py:class:: Negated(node) Represents a negated column-like object. .. py:class:: Value(value[, converter=None[, unpack=True]]) :param value: Python object or scalar value. :param converter: Function used to convert value into type the database understands. :param bool unpack: Whether lists or tuples should be unpacked into a list of values or treated as-is. Value to be used in a parameterized query. It is the responsibility of the caller to ensure that the value passed in can be adapted to a type the database driver understands. .. py:function:: AsIs(value) Represents a :py:class:`Value` that is treated as-is, and passed directly back to the database driver. This may be useful if you are using database extensions that accept native Python data-types and you do not wish Peewee to impose any handling of the values. .. py:class:: Cast(node, cast) :param node: A column-like object. :param str cast: Type to cast to. Represents a ``CAST(<node> AS <cast>)`` expression. .. py:class:: Ordering(node, direction[, collation=None[, nulls=None]]) :param node: A column-like object. :param str direction: ASC or DESC. :param str collation: Collation name to use for sorting. :param str nulls: Sort nulls (FIRST or LAST). Represent ordering by a column-like object. Postgresql supports a non-standard clause ("NULLS FIRST/LAST"). Peewee will automatically use an equivalent ``CASE`` statement for databases that do not support this (Sqlite / MySQL). .. py:method:: collate([collation=None]) :param str collation: Collation name to use for sorting. .. py:function:: Asc(node[, collation=None[, nulls=None]]) Short-hand for instantiating an ascending :py:class:`Ordering` object.
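For example, a sketch of ordering with explicit NULL handling (reusing the hypothetical ``users`` table from above):

.. code-block:: python

    # Sort users by username A-Z, placing rows with NULL usernames last.
    query = User.select().order_by(Asc(User.username, nulls='LAST'))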
.. py:function:: Desc(node[, collation=None[, nulls=None]]) Short-hand for instantiating a descending :py:class:`Ordering` object. .. py:class:: Expression(lhs, op, rhs[, flat=True]) :param lhs: Left-hand side. :param op: Operation. :param rhs: Right-hand side. :param bool flat: Whether to avoid wrapping the expression in parentheses. Represent a binary expression of the form (lhs op rhs), e.g. (foo + 1). .. py:class:: Entity(*path) :param path: Components that make up the dotted-path of the entity name. Represent a quoted entity in a query, such as a table, column, alias. The name may consist of multiple components, e.g. "a_table"."column_name". .. py:method:: __getattr__(self, attr) Factory method for creating sub-entities. .. py:class:: SQL(sql[, params=None]) :param str sql: SQL query string. :param tuple params: Parameters for query (optional). Represent a parameterized SQL query or query-fragment. .. py:function:: Check(constraint[, name=None]) :param str constraint: Constraint SQL. :param str name: constraint name. Represent a CHECK constraint. .. warning:: MySQL may not support a ``name`` parameter when inlining the constraint along with the column definition. The solution is to just put the named ``Check`` constraint in the model's ``Meta.constraints`` list instead of in the field instance's ``constraints=[...]`` list. .. py:class:: Function(name, arguments[, coerce=True[, python_value=None]]) :param str name: Function name. :param tuple arguments: Arguments to function. :param bool coerce: Whether to coerce the function result to a particular data-type when reading function return values from the cursor. :param callable python_value: Function to use for converting the return value from the cursor. Represent an arbitrary SQL function call. .. note:: Rather than instantiating this class directly, it is recommended to use the ``fn`` helper. Example of using ``fn`` to call an arbitrary SQL function:: # Query users and count of tweets authored. query = (User .select(User.username, fn.COUNT(Tweet.id).alias('ct')) .join(Tweet, JOIN.LEFT_OUTER, on=(User.id == Tweet.user_id)) .group_by(User.username) .order_by(fn.COUNT(Tweet.id).desc())) .. py:method:: over([partition_by=None[, order_by=None[, start=None[, end=None[, frame_type=None[, window=None[, exclude=None]]]]]]]) :param list partition_by: List of columns to partition by. :param list order_by: List of columns / expressions to order window by. :param start: A :py:class:`SQL` instance or a string expressing the start of the window range. :param end: A :py:class:`SQL` instance or a string expressing the end of the window range. :param str frame_type: ``Window.RANGE``, ``Window.ROWS`` or ``Window.GROUPS``. :param Window window: A :py:class:`Window` instance. :param exclude: Frame exclusion, one of ``Window.CURRENT_ROW``, ``Window.GROUP``, ``Window.TIES`` or ``Window.NO_OTHERS``. .. note:: For an in-depth guide to using window functions with Peewee, see the :ref:`window-functions` section. Examples:: # Using a simple partition on a single column. query = (Sample .select( Sample.counter, Sample.value, fn.AVG(Sample.value).over([Sample.counter])) .order_by(Sample.counter)) # Equivalent example using a Window() instance instead. window = Window(partition_by=[Sample.counter]) query = (Sample .select( Sample.counter, Sample.value, fn.AVG(Sample.value).over(window)) .window(window) # Note call to ".window()" .order_by(Sample.counter)) # Example using bounded window.
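# (The frame below begins at the current row and, because following() is
# given no value, extends to UNBOUNDED FOLLOWING -- the end of the
# partition.)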
query = (Sample .select(Sample.value, fn.SUM(Sample.value).over( partition_by=[Sample.counter], start=Window.CURRENT_ROW, # current row end=Window.following())) # unbounded following .order_by(Sample.id)) .. py:method:: filter(where) :param where: Expression for filtering aggregate. Add a ``FILTER (WHERE...)`` clause to an aggregate function. The where expression is evaluated to determine which rows are fed to the aggregate function. This SQL feature is supported for Postgres and SQLite. .. py:method:: coerce([coerce=True]) :param bool coerce: Whether to attempt to coerce function-call result to a Python data-type. When coerce is ``True``, the target data-type is inferred using several heuristics. Read the source for ``BaseModelCursorWrapper._initialize_columns`` method to see how this works. .. py:method:: python_value([func=None]) :param callable python_value: Function to use for converting the return value from the cursor. Specify a particular function to use when converting values returned by the database cursor. For example: .. code-block:: python # Get user and a list of their tweet IDs. The tweet IDs are # returned as a comma-separated string by the db, so we'll split # the result string and convert the values to python ints. convert_ids = lambda s: [int(i) for i in (s or '').split(',') if i] tweet_ids = (fn .GROUP_CONCAT(Tweet.id) .python_value(convert_ids)) query = (User .select(User.username, tweet_ids.alias('tweet_ids')) .group_by(User.username)) for user in query: print(user.username, user.tweet_ids) # e.g., # huey [1, 4, 5, 7] # mickey [2, 3, 6] # zaizee [] .. py:function:: fn() The :py:func:`fn` helper is actually an instance of :py:class:`Function` that implements a ``__getattr__`` hook to provide a nice API for calling SQL functions. To create a node representative of a SQL function call, use the function name as an attribute on ``fn`` and then provide the arguments as you would if calling a Python function: .. code-block:: python # List users and the number of tweets they have authored, # from highest-to-lowest: sql_count = fn.COUNT(Tweet.id) query = (User .select(User, sql_count.alias('count')) .join(Tweet, JOIN.LEFT_OUTER) .group_by(User) .order_by(sql_count.desc())) # Get the timestamp of the most recent tweet: query = Tweet.select(fn.MAX(Tweet.timestamp)) max_timestamp = query.scalar() # Retrieve scalar result from query. Function calls can, like anything else, be composed and nested: .. code-block:: python # Get users whose username begins with "A" or "a": a_users = User.select().where(fn.LOWER(fn.SUBSTR(User.username, 1, 1)) == 'a') .. py:class:: Window([partition_by=None[, order_by=None[, start=None[, end=None[, frame_type=None[, extends=None[, exclude=None[, alias=None]]]]]]]]) :param list partition_by: List of columns to partition by. :param list order_by: List of columns to order by. :param start: A :py:class:`SQL` instance or a string expressing the start of the window range. :param end: A :py:class:`SQL` instance or a string expressing the end of the window range. :param str frame_type: ``Window.RANGE``, ``Window.ROWS`` or ``Window.GROUPS``. :param extends: A :py:class:`Window` definition to extend. Alternately, you may specify the window's alias instead. :param exclude: Frame exclusion, one of ``Window.CURRENT_ROW``, ``Window.GROUP``, ``Window.TIES`` or ``Window.NO_OTHERS``. :param str alias: Alias for the window. Represent a WINDOW clause. .. note:: For an in-depth guide to using window functions with Peewee, see the :ref:`window-functions` section. .. 
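code-block:: python

    # A sketch: one named window definition shared by two aggregate
    # functions (Sample is the same example table used elsewhere in this
    # section).
    window = Window(partition_by=[Sample.counter], order_by=[Sample.id])
    query = (Sample
             .select(Sample.counter,
                     fn.AVG(Sample.value).over(window),
                     fn.SUM(Sample.value).over(window))
             .window(window)
             .order_by(Sample.id))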
.. py:attribute:: RANGE .. py:attribute:: ROWS .. py:attribute:: GROUPS Specify the window ``frame_type``. See :ref:`window-frame-types`. .. py:attribute:: CURRENT_ROW Reference to current row for use in start/end clause or the frame exclusion parameter. .. py:attribute:: NO_OTHERS .. py:attribute:: GROUP .. py:attribute:: TIES Specify the window frame exclusion parameter. .. py:staticmethod:: preceding([value=None]) :param value: Number of rows preceding. If ``None``, the bound is UNBOUNDED. Convenience method for generating SQL suitable for passing in as the ``start`` parameter for a window range. .. py:staticmethod:: following([value=None]) :param value: Number of rows following. If ``None``, the bound is UNBOUNDED. Convenience method for generating SQL suitable for passing in as the ``end`` parameter for a window range. .. py:method:: as_rows() .. py:method:: as_range() .. py:method:: as_groups() Specify the frame type. .. py:method:: extends([window=None]) :param Window window: A :py:class:`Window` definition to extend. Alternately, you may specify the window's alias instead. .. py:method:: exclude([frame_exclusion=None]) :param frame_exclusion: Frame exclusion, one of ``Window.CURRENT_ROW``, ``Window.GROUP``, ``Window.TIES`` or ``Window.NO_OTHERS``. .. py:method:: alias([alias=None]) :param str alias: Alias to use for window. .. py:function:: Case(predicate, expression_tuples[, default=None]) :param predicate: Predicate for CASE query (optional). :param expression_tuples: One or more cases to evaluate. :param default: Default value (optional). :returns: Representation of CASE statement. Examples:: Number = Table('numbers', ('val',)) num_as_str = Case(Number.val, ( (1, 'one'), (2, 'two'), (3, 'three')), 'a lot') query = Number.select(Number.val, num_as_str.alias('num_str')) # The above is equivalent to: # SELECT "val", # CASE "val" # WHEN 1 THEN 'one' # WHEN 2 THEN 'two' # WHEN 3 THEN 'three' # ELSE 'a lot' END AS "num_str" # FROM "numbers" num_as_str = Case(None, ( (Number.val == 1, 'one'), (Number.val == 2, 'two'), (Number.val == 3, 'three')), 'a lot') query = Number.select(Number.val, num_as_str.alias('num_str')) # The above is equivalent to: # SELECT "val", # CASE # WHEN "val" = 1 THEN 'one' # WHEN "val" = 2 THEN 'two' # WHEN "val" = 3 THEN 'three' # ELSE 'a lot' END AS "num_str" # FROM "numbers" .. py:class:: NodeList(nodes[, glue=' '[, parens=False]]) :param list nodes: Zero or more nodes. :param str glue: How to join the nodes when converting to SQL. :param bool parens: Whether to wrap the resulting SQL in parentheses. Represent a list of nodes, a multi-part clause, a list of parameters, etc. .. py:function:: CommaNodeList(nodes) :param list nodes: Zero or more nodes. :returns: a :py:class:`NodeList` Represent a list of nodes joined by commas. .. py:function:: EnclosedNodeList(nodes) :param list nodes: Zero or more nodes. :returns: a :py:class:`NodeList` Represent a list of nodes joined by commas and wrapped in parentheses. .. py:class:: DQ(**query) :param query: Arbitrary filter expressions using Django-style lookups. Represent a composable Django-style filter expression suitable for use with the :py:meth:`Model.filter` or :py:meth:`ModelSelect.filter` methods. .. py:class:: Tuple(*args) Represent a SQL `row value <https://www.sqlite.org/rowvalue.html>`_. Row-values are supported by most databases.
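For example, a sketch comparing several columns at once using a row-value (the ``event_log`` table, with its date split across separate columns, is hypothetical):

.. code-block:: python

    EventLog = Table('event_log', ('id', 'year', 'month', 'day'))

    # WHERE ("year", "month", "day") = (2024, 1, 1)
    query = (EventLog
             .select()
             .where(Tuple(EventLog.year, EventLog.month, EventLog.day) ==
                    (2024, 1, 1)))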
.. py:class:: OnConflict([action=None[, update=None[, preserve=None[, where=None[, conflict_target=None[, conflict_where=None[, conflict_constraint=None]]]]]]]) :param str action: Action to take when resolving conflict. :param update: A dictionary mapping column to new value. :param preserve: A list of columns whose values should be preserved from the original INSERT. See also :py:class:`EXCLUDED`. :param where: Expression to restrict the conflict resolution. :param conflict_target: Column(s) that comprise the constraint. :param conflict_where: Expressions needed to match the constraint target if it is a partial index (index with a WHERE clause). :param str conflict_constraint: Name of constraint to use for conflict resolution. Currently only supported by Postgres. Represent a conflict resolution clause for a data-modification query. Depending on the database-driver being used, one or more of the above parameters may be required. .. py:method:: preserve(*columns) :param columns: Columns whose values should be preserved. .. py:method:: update([_data=None[, **kwargs]]) :param dict _data: Dictionary mapping column to new value. :param kwargs: Dictionary mapping column name to new value. The ``update()`` method supports being called with either a dictionary of column-to-value, **or** keyword arguments representing the same. .. py:method:: where(*expressions) :param expressions: Expressions that restrict the action of the conflict resolution clause. .. py:method:: conflict_target(*constraints) :param constraints: Column(s) to use as target for conflict resolution. .. py:method:: conflict_where(*expressions) :param expressions: Expressions that match the conflict target index, in the case the conflict target is a partial index. .. py:method:: conflict_constraint(constraint) :param str constraint: Name of constraint to use as target for conflict resolution. Currently only supported by Postgres. .. py:class:: EXCLUDED Helper object that exposes the ``EXCLUDED`` namespace that is used with ``INSERT ... ON CONFLICT`` to reference values in the conflicting data. This is a "magic" helper, such that one uses it by accessing attributes on it that correspond to a particular column. Example: .. code-block:: python class KV(Model): key = CharField(unique=True) value = IntegerField() # Create one row. KV.create(key='k1', value=1) # Demonstrate usage of EXCLUDED. # Here we will attempt to insert a new value for a given key. If that # key already exists, then we will update its value with the *sum* of its # original value and the value we attempted to insert -- provided that # the new value is larger than the original value. query = (KV.insert(key='k1', value=10) .on_conflict(conflict_target=[KV.key], update={KV.value: KV.value + EXCLUDED.value}, where=(EXCLUDED.value > KV.value))) # Executing the above query will result in the following data being # present in the "kv" table: # (key='k1', value=11) query.execute() # If we attempted to execute the query *again*, then nothing would be # updated, as the new value (10) is now less than the value in the # original row (11). .. py:class:: BaseQuery() The parent class from which all other query classes are derived. While you will not deal with :py:class:`BaseQuery` directly in your code, it implements some methods that are common across all query types. .. py:attribute:: default_row_type = ROW.DICT .. py:method:: bind([database=None]) :param Database database: Database to execute query against. Bind the query to the given database for execution. .. py:method:: dicts([as_dict=True]) :param bool as_dict: Specify whether to return rows as dictionaries. Return rows as dictionaries.
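For example, a sketch showing the different row-types side by side (reusing the plain ``users`` table and assuming a bound :py:class:`Database` named ``db``):

.. code-block:: python

    query = User.select(User.id, User.username)

    # Rows as dictionaries, e.g. {'id': 1, 'username': 'huey'}.
    for row in query.dicts().execute(db):
        print(row['id'], row['username'])

    # Rows as named tuples, e.g. Row(id=1, username='huey').
    for row in query.namedtuples().execute(db):
        print(row.id, row.username)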
.. py:method:: tuples([as_tuple=True]) :param bool as_tuple: Specify whether to return rows as tuples. Return rows as tuples. .. py:method:: namedtuples([as_namedtuple=True]) :param bool as_namedtuple: Specify whether to return rows as named tuples. Return rows as named tuples. .. py:method:: objects([constructor=None]) :param constructor: Function that accepts row dict and returns an arbitrary object. Return rows as arbitrary objects using the given constructor. .. py:method:: sql() :returns: A 2-tuple consisting of the query's SQL and parameters. .. py:method:: execute(database) :param Database database: Database to execute query against. Not required if query was previously bound to a database. Execute the query and return the result (the return value depends on the type of query being executed). For select queries, for example, the result will be an iterator over the query results. .. py:method:: iterator([database=None]) :param Database database: Database to execute query against. Not required if query was previously bound to a database. Execute the query and return an iterator over the result-set. For large result-sets this method is preferable as rows are not cached in-memory during iteration. .. note:: Because rows are not cached, the query may only be iterated over once. Subsequent iterations will return empty result-sets as the cursor will have been consumed. Example: .. code-block:: python query = StatTbl.select().order_by(StatTbl.timestamp).tuples() for row in query.iterator(db): process_row(row) .. py:method:: __iter__() Execute the query and return an iterator over the result-set. Unlike :py:meth:`~BaseQuery.iterator`, this method will cause rows to be cached in order to allow efficient iteration, indexing and slicing. .. py:method:: __getitem__(value) :param value: Either an integer index or a slice. Retrieve a row or range of rows from the result-set. .. py:method:: __len__() Return the number of rows in the result-set. .. warning:: This does not issue a ``COUNT()`` query. Instead, the result-set is loaded as it would be during normal iteration, and the length is determined from the size of the result set. .. py:class:: RawQuery([sql=None[, params=None[, **kwargs]]]) :param str sql: SQL query. :param tuple params: Parameters (optional). Create a query by directly specifying the SQL to execute.
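Example (a sketch; ``db`` is assumed to be a bound :py:class:`Database` for which SQLite-style ``?`` placeholders are appropriate, and the ``users`` table is assumed to exist):

.. code-block:: python

    query = RawQuery('SELECT id, username FROM users WHERE id = ?', (1,))

    # Rows are returned as dictionaries by default.
    for row in query.execute(db):
        print(row['id'], row['username'])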
.. py:class:: Query([where=None[, order_by=None[, limit=None[, offset=None[, **kwargs]]]]]) :param where: Representation of WHERE clause. :param tuple order_by: Columns or values to order by. :param int limit: Value of LIMIT clause. :param int offset: Value of OFFSET clause. Base-class for queries that support method-chaining APIs. .. py:method:: with_cte(*cte_list) :param cte_list: zero or more :py:class:`CTE` objects. Include the given common-table expressions in the query. Any previously specified CTEs will be overwritten. For examples of common-table expressions, see :ref:`cte`. .. py:method:: cte(name[, recursive=False[, columns=None]]) :param str name: Alias for common table expression. :param bool recursive: Will this be a recursive CTE? :param list columns: List of column names (as strings). Indicate that a query will be used as a common table expression. For example, if we are modelling a category tree and are using a parent-link foreign key, we can retrieve all categories and their absolute depths using a recursive CTE: .. code-block:: python class Category(Model): name = TextField() parent = ForeignKeyField('self', backref='children', null=True) # The base case of our recursive CTE will be categories that are at # the root level -- in other words, categories without parents. roots = (Category .select(Category.name, Value(0).alias('level')) .where(Category.parent.is_null()) .cte(name='roots', recursive=True)) # The recursive term will select the category name and increment # the depth, joining on the base term so that the recursive term # consists of all children of the base category. RTerm = Category.alias() recursive = (RTerm .select(RTerm.name, (roots.c.level + 1).alias('level')) .join(roots, on=(RTerm.parent == roots.c.id))) # Express <base-case> UNION ALL <recursive-term>. cte = roots.union_all(recursive) # Select name and level from the recursive CTE. query = (cte .select_from(cte.c.name, cte.c.level) .order_by(cte.c.name)) for category in query: print(category.name, category.level) For more examples of CTEs, see :ref:`cte`. .. py:method:: where(*expressions) :param expressions: zero or more expressions to include in the WHERE clause. Include the given expressions in the WHERE clause of the query. The expressions will be AND-ed together with any previously-specified WHERE expressions. Example selecting users where the username is equal to 'somebody': .. code-block:: python sq = User.select().where(User.username == 'somebody') Example selecting tweets made by users who are either editors or administrators: .. code-block:: python sq = Tweet.select().join(User).where( (User.is_editor == True) | (User.is_admin == True)) Example of deleting tweets by users who are no longer active: .. code-block:: python inactive_users = User.select().where(User.active == False) dq = (Tweet .delete() .where(Tweet.user.in_(inactive_users))) dq.execute() # Return number of tweets deleted. .. note:: :py:meth:`~Query.where` calls are chainable. Multiple calls will be "AND"-ed together. .. py:method:: orwhere(*expressions) :param expressions: zero or more expressions to include in the WHERE clause. Include the given expressions in the WHERE clause of the query. This method is the same as the :py:meth:`Query.where` method, except that the expressions will be OR-ed together with any previously-specified WHERE expressions. .. py:method:: order_by(*values) :param values: zero or more Column-like objects to order by. Define the ORDER BY clause. Any previously-specified values will be overwritten. .. py:method:: order_by_extend(*values) :param values: zero or more Column-like objects to order by. Extend any previously-specified ORDER BY clause with the given values. .. py:method:: limit([value=None]) :param int value: specify value for LIMIT clause. .. py:method:: offset([value=None]) :param int value: specify value for OFFSET clause. .. py:method:: paginate(page[, paginate_by=20]) :param int page: Page number of results (starting from 1). :param int paginate_by: Rows-per-page. Convenience method for specifying the LIMIT and OFFSET in a more intuitive way. This feature is designed with web-site pagination in mind, so the first page starts with ``page=1``. .. py:class:: SelectQuery() Select query helper-class that implements operator-overloads for creating compound queries. .. py:method:: select_from(*columns) :param columns: one or more columns to select from the inner query. :return: a new query that wraps the calling query. Create a new query that wraps the current (calling) query. For example, suppose you have a simple ``UNION`` query, and need to apply an aggregation on the union result-set. To do this, you need to write something like: .. code-block:: sql SELECT "u"."owner", COUNT("u"."id") AS "ct" FROM ( SELECT "id", "owner", ... FROM "cars" UNION SELECT "id", "owner", ...
FROM "motorcycles" UNION SELECT "id", "owner", ... FROM "boats") AS "u" GROUP BY "u"."owner" The :py:meth:`~SelectQuery.select_from` method is designed to simplify constructing this type of query. Example peewee code: .. code-block:: python class Car(Model): owner = ForeignKeyField(Owner, backref='cars') # ... car-specific fields, etc ... class Motorcycle(Model): owner = ForeignKeyField(Owner, backref='motorcycles') # ... motorcycle-specific fields, etc ... class Boat(Model): owner = ForeignKeyField(Owner, backref='boats') # ... boat-specific fields, etc ... cars = Car.select(Car.owner) motorcycles = Motorcycle.select(Motorcycle.owner) boats = Boat.select(Boat.owner) union = cars | motorcycles | boats query = (union .select_from(union.c.owner, fn.COUNT(union.c.id)) .group_by(union.c.owner)) .. py:method:: union_all(dest) Create a UNION ALL query with ``dest``. .. py:method:: __add__(dest) Create a UNION ALL query with ``dest``. .. py:method:: union(dest) Create a UNION query with ``dest``. .. py:method:: __or__(dest) Create a UNION query with ``dest``. .. py:method:: intersect(dest) Create an INTERSECT query with ``dest``. .. py:method:: __and__(dest) Create an INTERSECT query with ``dest``. .. py:method:: except_(dest) Create an EXCEPT query with ``dest``. Note that the method name has a trailing "_" character since ``except`` is a Python reserved word. .. py:method:: __sub__(dest) Create an EXCEPT query with ``dest``. .. py:class:: SelectBase() Base-class for :py:class:`Select` and :py:class:`CompoundSelect` queries. .. py:method:: peek(database[, n=1]) :param Database database: database to execute query against. :param int n: Number of rows to return. :returns: A single row if n = 1, else a list of rows. Execute the query and return the given number of rows from the start of the cursor. This function may be called multiple times safely, and will always return the first N rows of results. .. py:method:: first(database[, n=1]) :param Database database: database to execute query against. :param int n: Number of rows to return. :returns: A single row if n = 1, else a list of rows. Like the :py:meth:`~SelectBase.peek` method, except a ``LIMIT`` is applied to the query to ensure that only ``n`` rows are returned. Multiple calls for the same value of ``n`` will not result in multiple executions. The query is altered in-place so it is not possible to call :py:meth:`~SelectBase.first` and then later iterate over the full result-set using the same query object. Again, this is done to ensure that multiple calls to ``first()`` will not result in multiple query executions. .. py:method:: scalar(database[, as_tuple=False[, as_dict=False]]) :param Database database: database to execute query against. :param bool as_tuple: Return the result as a tuple? :param bool as_dict: Return the result as a dict? :returns: Single scalar value. If ``as_tuple = True``, a row tuple is returned. If ``as_dict = True``, a row dict is returned. Return a scalar value from the first row of results. If multiple scalar values are anticipated (e.g. multiple aggregations in a single query) then you may specify ``as_tuple=True`` to get the row tuple. Example:: query = Note.select(fn.MAX(Note.timestamp)) max_ts = query.scalar(db) query = Note.select(fn.MAX(Note.timestamp), fn.COUNT(Note.id)) max_ts, n_notes = query.scalar(db, as_tuple=True) query = Note.select(fn.COUNT(Note.id).alias('count')) assert query.scalar(db, as_dict=True) == {'count': 123} .. 
py:method:: count(database[, clear_limit=False]) :param Database database: database to execute query against. :param bool clear_limit: Clear any LIMIT clause when counting. :return: Number of rows in the query result-set. Return number of rows in the query result-set. Implemented by running SELECT COUNT(1) FROM (<current query>). .. py:method:: exists(database) :param Database database: database to execute query against. :return: Whether any results exist for the current query. Return a boolean indicating whether the current query has any results. .. py:method:: get(database) :param Database database: database to execute query against. :return: A single row from the database or ``None``. Execute the query and return the first row, if it exists. Multiple calls will result in multiple queries being executed. .. py:class:: CompoundSelectQuery(lhs, op, rhs) :param SelectBase lhs: A Select or CompoundSelect query. :param str op: Operation (e.g. UNION, INTERSECT, EXCEPT). :param SelectBase rhs: A Select or CompoundSelect query. Class representing a compound SELECT query. .. py:class:: Select([from_list=None[, columns=None[, group_by=None[, having=None[, distinct=None[, windows=None[, for_update=None[, for_update_of=None[, for_update_nowait=None[, **kwargs]]]]]]]]]]) :param list from_list: List of sources for FROM clause. :param list columns: Columns or values to select. :param list group_by: List of columns or values to group by. :param Expression having: Expression for HAVING clause. :param distinct: Either a boolean or a list of column-like objects. :param list windows: List of :py:class:`Window` clauses. :param for_update: Boolean or str indicating if SELECT...FOR UPDATE. :param for_update_of: One or more tables for FOR UPDATE OF clause. :param bool for_update_nowait: Specify NOWAIT locking. Class representing a SELECT query. .. note:: Rather than instantiating this directly, most commonly you will use a factory method like :py:meth:`Table.select` or :py:meth:`Model.select`. Methods on the select query can be chained together. Example selecting some user instances from the database. Only the ``id`` and ``username`` columns are selected. When iterated, the query will return instances of the ``User`` model: .. code-block:: python query = User.select(User.id, User.username) for user in query: print(user.username) Example selecting users and additionally the number of tweets made by the user. The ``User`` instances returned will have an additional attribute, 'count', that corresponds to the number of tweets made: .. code-block:: python query = (User .select(User, fn.COUNT(Tweet.id).alias('count')) .join(Tweet, JOIN.LEFT_OUTER) .group_by(User)) for user in query: print(user.username, 'has tweeted', user.count, 'times') .. note:: While it is possible to instantiate :py:class:`Select` directly, more commonly you will build the query using the method-chaining APIs. .. py:method:: columns(*columns) :param columns: Zero or more column-like objects to SELECT. Specify which columns or column-like values to SELECT. .. py:method:: select(*columns) :param columns: Zero or more column-like objects to SELECT. Same as :py:meth:`Select.columns`, provided for backwards-compatibility. .. py:method:: select_extend(*columns) :param columns: Zero or more column-like objects to SELECT. Extend the current selection with the given columns. Example: ..
code-block:: python def get_users(with_count=False): query = User.select() if with_count: query = (query .select_extend(fn.COUNT(Tweet.id).alias('count')) .join(Tweet, JOIN.LEFT_OUTER) .group_by(User)) return query .. py:method:: from_(*sources) :param sources: Zero or more sources for the FROM clause. Specify which table-like objects should be used in the FROM clause. .. code-block:: python User = Table('users') Tweet = Table('tweets') query = (User .select(User.c.username, Tweet.c.content) .from_(User, Tweet) .where(User.c.id == Tweet.c.user_id)) for row in query.execute(db): print(row['username'], '->', row['content']) .. py:method:: join(dest[, join_type='INNER'[, on=None]]) :param dest: A table or table-like object. :param str join_type: Type of JOIN, default is "INNER". :param Expression on: Join predicate. Join type may be one of: * ``JOIN.INNER`` * ``JOIN.LEFT_OUTER`` * ``JOIN.RIGHT_OUTER`` * ``JOIN.FULL`` * ``JOIN.FULL_OUTER`` * ``JOIN.CROSS`` Express a JOIN:: User = Table('users', ('id', 'username')) Note = Table('notes', ('id', 'user_id', 'content')) query = (Note .select(Note.content, User.username) .join(User, on=(Note.user_id == User.id))) .. py:method:: group_by(*columns) :param columns: zero or more Column-like objects to group by. Define the GROUP BY clause. Any previously-specified values will be overwritten. Additionally, to specify all columns on a given table, you can pass the table/model object in place of the individual columns. Example: .. code-block:: python query = (User .select(User, fn.Count(Tweet.id).alias('count')) .join(Tweet) .group_by(User)) .. py:method:: group_by_extend(*columns) :param columns: zero or more Column-like objects to group by. Extend the GROUP BY clause with the given columns. .. py:method:: having(*expressions) :param expressions: zero or more expressions to include in the HAVING clause. Include the given expressions in the HAVING clause of the query. The expressions will be AND-ed together with any previously-specified HAVING expressions. .. py:method:: distinct(*columns) :param columns: Zero or more column-like objects. Indicate whether this query should use a DISTINCT clause. By specifying a single value of ``True`` the query will use a simple SELECT DISTINCT. Specifying one or more columns will result in a SELECT DISTINCT ON. .. py:method:: window(*windows) :param windows: zero or more :py:class:`Window` objects. Define the WINDOW clause. Any previously-specified values will be overwritten. Example: .. code-block:: python # Equivalent example using a Window() instance instead. window = Window(partition_by=[Sample.counter]) query = (Sample .select( Sample.counter, Sample.value, fn.AVG(Sample.value).over(window)) .window(window) # Note call to ".window()" .order_by(Sample.counter)) .. py:method:: for_update([for_update=True[, of=None[, nowait=None]]]) :param for_update: Either a boolean or a string indicating the desired expression, e.g. "FOR SHARE". :param of: One or more models to restrict locking to. :param bool nowait: Specify NOWAIT option when locking.
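Example (a sketch; row locking requires a database that supports it, such as Postgres, and assumes a ``User`` model with a ``login_count`` field, bound to ``db``):

.. code-block:: python

    with db.atomic():
        # Lock the matching row until the transaction commits.
        user = (User
                .select()
                .where(User.username == 'huey')
                .for_update()
                .get())
        user.login_count += 1
        user.save()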
.. py:class:: _WriteQuery(table[, returning=None[, **kwargs]]) :param Table table: Table to write to. :param list returning: List of columns for RETURNING clause. Base-class for write queries. .. py:method:: returning(*returning) :param returning: Zero or more column-like objects for RETURNING clause. Specify the RETURNING clause of the query (if supported by your database). .. code-block:: python query = (User .insert_many([{'username': 'foo'}, {'username': 'bar'}, {'username': 'baz'}]) .returning(User.id, User.username) .namedtuples()) data = query.execute() for row in data: print('added:', row.username, 'with id=', row.id) .. py:class:: Update(table[, update=None[, **kwargs]]) :param Table table: Table to update. :param dict update: Data to update. Class representing an UPDATE query. Example: .. code-block:: python PageView = Table('page_views') query = (PageView .update({PageView.c.page_views: PageView.c.page_views + 1}) .where(PageView.c.url == url)) query.execute(database) .. py:method:: from_(*sources) :param Source sources: one or more :py:class:`Table`, :py:class:`Model`, query, or :py:class:`ValuesList` to join with. Specify additional tables to join with using the UPDATE ... FROM syntax, which is supported by Postgres. The `Postgres documentation <https://www.postgresql.org/docs/current/sql-update.html>`_ provides additional detail, but to summarize: When a ``FROM`` clause is present, what essentially happens is that the target table is joined to the tables mentioned in the from_list, and each output row of the join represents an update operation for the target table. When using ``FROM`` you should ensure that the join produces at most one output row for each row to be modified. Example: .. code-block:: python # Update multiple users in a single query. data = [('huey', True), ('mickey', False), ('zaizee', True)] vl = ValuesList(data, columns=('username', 'is_admin'), alias='vl') # Here we'll update the "is_admin" status of the above users, # "joining" the VALUES() on the "username" column. query = (User .update(is_admin=vl.c.is_admin) .from_(vl) .where(User.username == vl.c.username)) The above query produces the following SQL: .. code-block:: sql UPDATE "users" SET "is_admin" = "vl"."is_admin" FROM ( VALUES ('huey', t), ('mickey', f), ('zaizee', t)) AS "vl"("username", "is_admin") WHERE ("users"."username" = "vl"."username") .. py:class:: Insert(table[, insert=None[, columns=None[, on_conflict=None[, **kwargs]]]]) :param Table table: Table to INSERT data into. :param insert: Either a dict, a list, or a query. :param list columns: List of columns when ``insert`` is a list or query. :param on_conflict: Conflict resolution strategy. Class representing an INSERT query. .. py:method:: as_rowcount([as_rowcount=True]) :param bool as_rowcount: Whether to return the modified row count (as opposed to the last-inserted row id). By default, on databases that do *not* use RETURNING automatically (currently Sqlite and MySQL), Peewee versions 3.12 through 3.14.10 would return the modified row-count when executing a bulk insert. This change has been reverted so that bulk-inserts will, by default, return the value of ``cursor.lastrowid``. If you prefer to receive the inserted row-count, then specify ``as_rowcount()``: .. code-block:: python db = MySQLDatabase(...) query = User.insert_many([...]) # By default, the last rowid is returned: #last_id = query.execute() # To get the modified row-count: rowcount = query.as_rowcount().execute() .. py:method:: on_conflict_ignore([ignore=True]) :param bool ignore: Whether to add ON CONFLICT IGNORE clause. Specify IGNORE conflict resolution strategy. .. py:method:: on_conflict_replace([replace=True]) :param bool replace: Whether to add ON CONFLICT REPLACE clause. Specify REPLACE conflict resolution strategy.
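For example, a sketch using SQLite, where IGNORE and REPLACE conflict resolution are available (the ``User`` model is the same one used throughout this section):

.. code-block:: python

    # Insert a user; if the username is already taken, the insert is
    # silently ignored (INSERT OR IGNORE).
    User.insert(username='huey').on_conflict_ignore().execute()

    # Insert a user; if the username is already taken, the existing row
    # is deleted and the new row inserted (INSERT OR REPLACE).
    User.insert(username='huey').on_conflict_replace().execute()

..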
py:method:: on_conflict([action=None[, update=None[, preserve=None[, where=None[, conflict_target=None[, conflict_where=None[, conflict_constraint=None]]]]]]]) :param str action: Action to take when resolving conflict. If blank, action is assumed to be "update". :param update: A dictionary mapping column to new value. :param preserve: A list of columns whose values should be preserved from the original INSERT. :param where: Expression to restrict the conflict resolution. :param conflict_target: Column(s) that comprise the constraint. :param conflict_where: Expressions needed to match the constraint target if it is a partial index (index with a WHERE clause). :param str conflict_constraint: Name of constraint to use for conflict resolution. Currently only supported by Postgres. Specify the parameters for an :py:class:`OnConflict` clause to use for conflict resolution. Examples: .. code-block:: python class User(Model): username = TextField(unique=True) last_login = DateTimeField(null=True) login_count = IntegerField() def log_user_in(username): now = datetime.datetime.now() # INSERT a new row for the user with the current timestamp and # login count set to 1. If the user already exists, then we # will preserve the last_login value from the "insert()" clause # and atomically increment the login-count. userid = (User .insert(username=username, last_login=now, login_count=1) .on_conflict( conflict_target=[User.username], preserve=[User.last_login], update={User.login_count: User.login_count + 1}) .execute()) return userid Example using the special :py:class:`EXCLUDED` namespace: .. code-block:: python class KV(Model): key = CharField(unique=True) value = IntegerField() # Create one row. KV.create(key='k1', value=1) # Demonstrate usage of EXCLUDED. # Here we will attempt to insert a new value for a given key. If that # key already exists, then we will update its value with the *sum* of its # original value and the value we attempted to insert -- provided that # the new value is larger than the original value. query = (KV.insert(key='k1', value=10) .on_conflict(conflict_target=[KV.key], update={KV.value: KV.value + EXCLUDED.value}, where=(EXCLUDED.value > KV.value))) # Executing the above query will result in the following data being # present in the "kv" table: # (key='k1', value=11) query.execute() # If we attempted to execute the query *again*, then nothing would be # updated, as the new value (10) is now less than the value in the # original row (11). .. py:class:: Delete() Class representing a DELETE query. .. py:class:: Index(name, table, expressions[, unique=False[, safe=False[, where=None[, using=None]]]]) :param str name: Index name. :param Table table: Table to create index on. :param expressions: List of columns to index on (or expressions). :param bool unique: Whether index is UNIQUE. :param bool safe: Whether to add IF NOT EXISTS clause. :param Expression where: Optional WHERE clause for index. :param str using: Index algorithm. .. py:method:: safe([_safe=True]) :param bool _safe: Whether to add IF NOT EXISTS clause. .. py:method:: where(*expressions) :param expressions: zero or more expressions to include in the WHERE clause. Include the given expressions in the WHERE clause of the index. The expressions will be AND-ed together with any previously-specified WHERE expressions. .. py:method:: using([_using=None]) :param str _using: Specify index algorithm for USING clause. .. 
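code-block:: python

    # A sketch: declaring a partial index on a plain Table. The table and
    # column names here are hypothetical, and "db" is a bound database.
    Customer = Table('customers', ('id', 'email', 'active'))

    idx = (Index('idx_customer_email', Customer, (Customer.email,))
           .where(Customer.active == True))

    # Roughly: CREATE INDEX "idx_customer_email" ON "customers" ("email")
    #          WHERE ("active" = ?)
    db.execute(idx)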
.. py:class:: ModelIndex(model, fields[, unique=False[, safe=True[, where=None[, using=None[, name=None]]]]]) :param Model model: Model class to create index on. :param list fields: Fields to index. :param bool unique: Whether index is UNIQUE. :param bool safe: Whether to add IF NOT EXISTS clause. :param Expression where: Optional WHERE clause for index. :param str using: Index algorithm or type, e.g. 'BRIN', 'GiST' or 'GIN'. :param str name: Optional index name. Expressive method for declaring an index on a model. Examples: .. code-block:: python class Article(Model): name = TextField() timestamp = TimestampField() status = IntegerField() flags = BitField() is_sticky = flags.flag(1) is_favorite = flags.flag(2) # CREATE INDEX ... ON "article" ("name", "timestamp") idx = ModelIndex(Article, (Article.name, Article.timestamp)) # CREATE INDEX ... ON "article" ("name", "timestamp") WHERE "status" = 1 idx = idx.where(Article.status == 1) # CREATE UNIQUE INDEX ... ON "article" ("timestamp" DESC, "flags" & 2) WHERE "status" = 1 idx = ModelIndex( Article, (Article.timestamp.desc(), Article.flags.bin_and(2)), unique=True).where(Article.status == 1) You can also use :py:meth:`Model.index`: .. code-block:: python idx = Article.index(Article.name, Article.timestamp).where(Article.status == 1) To add an index to a model definition use :py:meth:`Model.add_index`: .. code-block:: python idx = Article.index(Article.name, Article.timestamp).where(Article.status == 1) # Add above index definition to the model definition. When you call # Article.create_table() (or database.create_tables([Article])), the # index will be created. Article.add_index(idx) .. _fields-api: Fields ------ .. py:class:: Field([null=False[, index=False[, unique=False[, column_name=None[, default=None[, primary_key=False[, constraints=None[, sequence=None[, collation=None[, unindexed=False[, choices=None[, help_text=None[, verbose_name=None[, index_type=None]]]]]]]]]]]]]]) :param bool null: Field allows NULLs. :param bool index: Create an index on field. :param bool unique: Create a unique index on field. :param str column_name: Specify column name for field. :param default: Default value (enforced in Python, not on server). :param bool primary_key: Field is the primary key. :param list constraints: List of constraints to apply to column, for example: ``[Check('price > 0')]``. :param str sequence: Sequence name for field. :param str collation: Collation name for field. :param bool unindexed: Declare field UNINDEXED (sqlite only). :param list choices: An iterable of 2-tuples mapping column values to display labels. Used for metadata purposes only, to help when displaying a dropdown of choices for field values, for example. :param str help_text: Help-text for field, metadata purposes only. :param str verbose_name: Verbose name for field, metadata purposes only. :param str index_type: Specify index type (postgres only), e.g. 'BRIN'. Fields on a :py:class:`Model` are analogous to columns on a table. .. py:attribute:: field_type = '<some field type>' Attribute used to map this field to a column type, e.g. "INT". See the ``FIELD`` object in the source for more information. .. py:attribute:: column Retrieve a reference to the underlying :py:class:`Column` object. .. py:attribute:: model The model the field is bound to. .. py:attribute:: name The name of the field. .. py:method:: db_value(value) Coerce a Python value into a value suitable for storage in the database. Sub-classes operating on special data-types will most likely want to override this method.
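For example, a sketch of a custom field that overrides both :py:meth:`~Field.db_value` and :py:meth:`~Field.python_value` to store Python ``set`` objects as comma-separated text (the ``SetField`` name is hypothetical, for illustration only):

.. code-block:: python

    class SetField(TextField):
        def db_value(self, value):
            # Serialize the set into comma-separated text for storage.
            if value is not None:
                return ','.join(sorted(value))

        def python_value(self, value):
            # Deserialize stored text back into a Python set.
            return set(value.split(',')) if value else set()

..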
py:method:: python_value(value) Coerce a value from the database into a Python object. Sub-classes operating on special data-types will most likely want to override this method. .. py:method:: coerce(value) This method is a shorthand that is used, by default, by both :py:meth:`~Field.db_value` and :py:meth:`~Field.python_value`. :param value: arbitrary data from app or backend :rtype: python data type .. py:class:: IntegerField Field class for storing integers. .. py:class:: BigIntegerField Field class for storing big integers (if supported by database). .. py:class:: SmallIntegerField Field class for storing small integers (if supported by database). .. py:class:: AutoField Field class for storing auto-incrementing primary keys. .. note:: In SQLite, for performance reasons, the default primary key type simply uses the max existing value + 1 for new values, as opposed to the max ever value + 1. This means deleted records can have their primary keys reused. In conjunction with SQLite having foreign keys disabled by default (meaning ON DELETE is ignored, even if you specify it explicitly), this can lead to surprising and dangerous behaviour. To avoid this, you may want to use one or both of :py:class:`AutoIncrementField` and ``pragmas=[('foreign_keys', 'on')]`` when you instantiate :py:class:`SqliteDatabase`. .. py:class:: BigAutoField Field class for storing auto-incrementing primary keys using 64-bits. .. py:class:: IdentityField([generate_always=False]) :param bool generate_always: if specified, then the identity will always be generated (and specifying the value explicitly during INSERT will raise a programming error). Otherwise, the identity value is only generated as-needed. Field class for storing auto-incrementing primary keys using the new Postgres 10 *IDENTITY* column type. The column definition ends up looking like this: .. code-block:: python id = IdentityField() # "id" INT GENERATED BY DEFAULT AS IDENTITY NOT NULL PRIMARY KEY .. attention:: Only supported by Postgres 10.0 and newer. .. py:class:: FloatField Field class for storing floating-point numbers. .. py:class:: DoubleField Field class for storing double-precision floating-point numbers. .. py:class:: DecimalField([max_digits=10[, decimal_places=5[, auto_round=False[, rounding=None[, **kwargs]]]]]) :param int max_digits: Maximum digits to store. :param int decimal_places: Maximum precision. :param bool auto_round: Automatically round values. :param rounding: Defaults to ``decimal.DefaultContext.rounding``. Field class for storing decimal numbers. Values are represented as ``decimal.Decimal`` objects. .. py:class:: CharField([max_length=255]) Field class for storing strings. .. note:: Values that exceed length are not truncated automatically. .. py:class:: FixedCharField Field class for storing fixed-length strings. .. note:: Values that exceed length are not truncated automatically. .. py:class:: TextField Field class for storing text. .. py:class:: BlobField Field class for storing binary data. .. py:class:: BitField Field class for storing options in a 64-bit integer column. Usage: .. code-block:: python class Post(Model): content = TextField() flags = BitField() is_favorite = flags.flag(1) is_sticky = flags.flag(2) is_minimized = flags.flag(4) is_deleted = flags.flag(8) >>> p = Post() >>> p.is_sticky = True >>> p.is_minimized = True >>> print(p.flags) # Prints 4 | 2 --> "6" 6 >>> p.is_favorite False >>> p.is_sticky True We can use the flags on the Post class to build expressions in queries as well: .. 
code-block:: python # Generates a WHERE clause that looks like: # WHERE (post.flags & 1 != 0) query = Post.select().where(Post.is_favorite) # Query for sticky + favorite posts: query = Post.select().where(Post.is_sticky & Post.is_favorite) When bulk-updating one or more bits in a :py:class:`BitField`, you can use bitwise operators to set or clear one or more bits: .. code-block:: python # Set the 4th bit on all Post objects. Post.update(flags=Post.flags | 8).execute() # Clear the 1st and 3rd bits on all Post objects. Post.update(flags=Post.flags & ~(1 | 4)).execute() For simple operations, the flags provide handy ``set()`` and ``clear()`` methods for setting or clearing an individual bit: .. code-block:: python # Set the "is_deleted" bit on all posts. Post.update(flags=Post.is_deleted.set()).execute() # Clear the "is_deleted" bit on all posts. Post.update(flags=Post.is_deleted.clear()).execute() .. py:method:: flag([value=None]) :param int value: Value associated with flag, typically a power of 2. Returns a descriptor that can get or set specific bits in the overall value. When accessed on the class itself, it returns a :py:class:`Expression` object suitable for use in a query. If the value is not provided, it is assumed that each flag will be an increasing power of 2, so if you had four flags, they would have the values 1, 2, 4, 8. .. py:class:: BigBitField Field class for storing arbitrarily-large bitmaps in a ``BLOB``. The field will grow the underlying buffer as necessary, ensuring there are enough bytes of data to support the number of bits of data being stored. Example usage: .. code-block:: python class Bitmap(Model): data = BigBitField() bitmap = Bitmap() # Sets the ith bit, e.g. the 1st bit, the 11th bit, the 63rd, etc. bits_to_set = (1, 11, 63, 31, 55, 48, 100, 99) for bit_idx in bits_to_set: bitmap.data.set_bit(bit_idx) # We can test whether a bit is set using "is_set": assert bitmap.data.is_set(11) assert not bitmap.data.is_set(12) # We can clear a bit: bitmap.data.clear_bit(11) assert not bitmap.data.is_set(11) # We can also "toggle" a bit. Recall that the 63rd bit was set earlier. assert bitmap.data.toggle_bit(63) is False assert bitmap.data.toggle_bit(63) is True assert bitmap.data.is_set(63) # BigBitField supports item accessor by bit-number, e.g.: assert bitmap.data[63] bitmap.data[0] = 1 del bitmap.data[0] # We can also combine bitmaps using bitwise operators, e.g. b = Bitmap(data=b'\x01') b.data |= b'\x02' assert list(b.data) == [1, 1, 0, 0, 0, 0, 0, 0] assert len(b.data) == 1 .. py:method:: clear() Clears the bitmap and sets length to 0. .. py:method:: set_bit(idx) :param int idx: Bit to set, indexed starting from zero. Sets the *idx*-th bit in the bitmap. .. py:method:: clear_bit(idx) :param int idx: Bit to clear, indexed starting from zero. Clears the *idx*-th bit in the bitmap. .. py:method:: toggle_bit(idx) :param int idx: Bit to toggle, indexed starting from zero. :returns: Whether the bit is set or not. Toggles the *idx*-th bit in the bitmap and returns whether the bit is set or not. Example: .. code-block:: pycon >>> bitmap = Bitmap() >>> bitmap.data.toggle_bit(10) # Toggle the 10th bit. True >>> bitmap.data.toggle_bit(10) # This will clear the 10th bit. False .. py:method:: is_set(idx) :param int idx: Bit index, indexed starting from zero. :returns: Whether the bit is set or not. Returns boolean indicating whether the *idx*-th bit is set or not. .. py:method:: __getitem__(idx) Same as :py:meth:`~BigBitField.is_set` .. 
py:method:: __setitem__(idx, value) Set the bit at ``idx`` to value (True or False). .. py:method:: __delitem__(idx) Same as :py:meth:`~BigBitField.clear_bit` .. py:method:: __len__() Return the length of the bitmap **in bytes**. .. py:method:: __iter__() Returns an iterator yielding 1 or 0 for each bit in the bitmap. .. py:method:: __and__(other) :param other: Either :py:class:`BigBitField`, ``bytes``, ``bytearray`` or ``memoryview`` object. :returns: bitwise ``and`` of two bitmaps. .. py:method:: __or__(other) :param other: Either :py:class:`BigBitField`, ``bytes``, ``bytearray`` or ``memoryview`` object. :returns: bitwise ``or`` of two bitmaps. .. py:method:: __xor__(other) :param other: Either :py:class:`BigBitField`, ``bytes``, ``bytearray`` or ``memoryview`` object. :returns: bitwise ``xor`` of two bitmaps. .. py:class:: UUIDField Field class for storing ``uuid.UUID`` objects. With Postgres, the underlying column's data-type will be *UUID*. Since SQLite and MySQL do not have a native UUID type, the UUID is stored as a *VARCHAR* instead. .. py:class:: BinaryUUIDField Field class for storing ``uuid.UUID`` objects efficiently in 16-bytes. Uses the database's *BLOB* data-type (or *VARBINARY* in MySQL, or *BYTEA* in Postgres). .. py:class:: DateTimeField([formats=None[, **kwargs]]) :param list formats: A list of format strings to use when coercing a string to a date-time. Field class for storing ``datetime.datetime`` objects. Accepts a special parameter ``formats``, which contains a list of formats the datetime can be encoded with (for databases that do not have support for a native datetime data-type). The default supported formats are: .. code-block:: python '%Y-%m-%d %H:%M:%S.%f' # year-month-day hour-minute-second.microsecond '%Y-%m-%d %H:%M:%S' # year-month-day hour-minute-second '%Y-%m-%d' # year-month-day .. note:: SQLite does not have a native datetime data-type, so datetimes are stored as strings. This is handled transparently by Peewee, but if you have pre-existing data you should ensure it is stored as ``YYYY-mm-dd HH:MM:SS`` or one of the other supported formats. .. py:attribute:: year Reference the year of the value stored in the column in a query. .. code-block:: python Blog.select().where(Blog.pub_date.year == 2018) .. py:attribute:: month Reference the month of the value stored in the column in a query. .. py:attribute:: day Reference the day of the value stored in the column in a query. .. py:attribute:: hour Reference the hour of the value stored in the column in a query. .. py:attribute:: minute Reference the minute of the value stored in the column in a query. .. py:attribute:: second Reference the second of the value stored in the column in a query. .. py:method:: to_timestamp() Method that returns a database-specific function call that will allow you to work with the given date-time value as a numeric timestamp. This can sometimes simplify tasks like date math in a compatible way. Example: .. code-block:: python # Find all events that are exactly 1 hour long. query = (Event .select() .where((Event.start.to_timestamp() + 3600) == Event.stop.to_timestamp()) .order_by(Event.start)) .. py:method:: truncate(date_part) :param str date_part: year, month, day, hour, minute or second. :returns: expression node to truncate date/time to given resolution. Truncates the value in the column to the given part. This method is useful for finding all rows within a given month, for instance. .. 
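code-block:: python

    import datetime

    # A sketch: select all tweets from January 2024. Assumes a Tweet model
    # with a DateTimeField named "timestamp".
    first_of_month = datetime.datetime(2024, 1, 1)
    query = (Tweet
             .select()
             .where(Tweet.timestamp.truncate('month') == first_of_month))

..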
py:class:: DateField([formats=None[, **kwargs]]) :param list formats: A list of format strings to use when coercing a string to a date. Field class for storing ``datetime.date`` objects. Accepts a special parameter ``formats``, which contains a list of formats the datetime can be encoded with (for databases that do not have support for a native date data-type). The default supported formats are: .. code-block:: python '%Y-%m-%d' # year-month-day '%Y-%m-%d %H:%M:%S' # year-month-day hour-minute-second '%Y-%m-%d %H:%M:%S.%f' # year-month-day hour-minute-second.microsecond .. note:: If the incoming value does not match a format, it is returned as-is. .. py:attribute:: year Reference the year of the value stored in the column in a query. .. code-block:: python Person.select().where(Person.dob.year == 1983) .. py:attribute:: month Reference the month of the value stored in the column in a query. .. py:attribute:: day Reference the day of the value stored in the column in a query. .. py:method:: to_timestamp() See :py:meth:`DateTimeField.to_timestamp`. .. py:method:: truncate(date_part) See :py:meth:`DateTimeField.truncate`. Note that only *year*, *month*, and *day* are meaningful for :py:class:`DateField`. .. py:class:: TimeField([formats=None[, **kwargs]]) :param list formats: A list of format strings to use when coercing a string to a time. Field class for storing ``datetime.time`` objects (not ``timedelta``). Accepts a special parameter ``formats``, which contains a list of formats the datetime can be encoded with (for databases that do not have support for a native time data-type). The default supported formats are: .. code-block:: python '%H:%M:%S.%f' # hour:minute:second.microsecond '%H:%M:%S' # hour:minute:second '%H:%M' # hour:minute '%Y-%m-%d %H:%M:%S.%f' # year-month-day hour-minute-second.microsecond '%Y-%m-%d %H:%M:%S' # year-month-day hour-minute-second .. note:: If the incoming value does not match a format, it is returned as-is. .. py:attribute:: hour Reference the hour of the value stored in the column in a query. .. code-block:: python evening_events = Event.select().where(Event.time.hour > 17) .. py:attribute:: minute Reference the minute of the value stored in the column in a query. .. py:attribute:: second Reference the second of the value stored in the column in a query. .. py:class:: TimestampField([resolution=1[, utc=False[, **kwargs]]]) :param resolution: Can be provided as either a power of 10, or as an exponent indicating how many decimal places to store. :param bool utc: Treat timestamps as UTC. Field class for storing date-times as integer timestamps. Sub-second resolution is supported by multiplying by a power of 10 to get an integer. If the ``resolution`` parameter is ``0`` *or* ``1``, then the timestamp is stored using second resolution. A resolution between ``2`` and ``6`` is treated as the number of decimal places, e.g. ``resolution=3`` corresponds to milliseconds. Alternatively, the decimal can be provided as a multiple of 10, such that ``resolution=10`` will store 1/10th of a second resolution. The ``resolution`` parameter can be either 0-6 *or* 10, 100, etc up to 1000000 (for microsecond resolution). This allows sub-second precision while still using an :py:class:`IntegerField` for storage. The default is second resolution. Also accepts a boolean parameter ``utc``, used to indicate whether the timestamps should be UTC. Default is ``False``. Finally, the field ``default`` is the current timestamp. 
If you do not want this behavior, then explicitly pass in ``default=None``. .. py:class:: IPField Field class for storing IPv4 addresses efficiently (as integers). .. py:class:: BooleanField Field class for storing boolean values. .. py:class:: BareField([coerce=None[, **kwargs]]) :param coerce: Optional function to use for converting raw values into a specific format. Field class that does not specify a data-type (**SQLite-only**). Since data-types are not enforced, you can declare fields without *any* data-type. It is also common for SQLite virtual tables to use meta-columns or untyped columns, so for those cases as well you may wish to use an untyped field. Accepts a special ``coerce`` parameter, a function that takes a value coming from the database and converts it into the appropriate Python type. .. py:class:: ForeignKeyField(model[, field=None[, backref=None[, on_delete=None[, on_update=None[, deferrable=None[, object_id_name=None[, lazy_load=True[, constraint_name=None[, **kwargs]]]]]]]]]) :param Model model: Model to reference or the string 'self' if declaring a self-referential foreign key. :param Field field: Field to reference on ``model`` (default is primary key). :param str backref: Accessor name for back-reference, or "+" to disable the back-reference accessor. :param str on_delete: ON DELETE action, e.g. ``'CASCADE'``. :param str on_update: ON UPDATE action. :param str deferrable: Control when constraint is enforced, e.g. ``'INITIALLY DEFERRED'``. :param str object_id_name: Name for object-id accessor. :param bool lazy_load: Fetch the related object when the foreign-key field attribute is accessed (if it was not already loaded). If this is disabled, accessing the foreign-key field will return the value stored in the foreign-key column. :param str constraint_name: (optional) name to use for foreign-key constraint. Field class for storing a foreign key. .. code-block:: python class User(Model): name = TextField() class Tweet(Model): user = ForeignKeyField(User, backref='tweets') content = TextField() # "user" attribute >>> some_tweet.user # "tweets" backref attribute >>> for tweet in charlie.tweets: ... print(tweet.content) Some tweet Another tweet Yet another tweet For an in-depth discussion of foreign-keys, joins and relationships between models, refer to :ref:`relationships`. .. note:: Foreign keys do not have a particular ``field_type`` as they will take their field type depending on the type of primary key on the model they are related to. .. note:: If you manually specify a ``field``, that field must be either a primary key or have a unique constraint. .. note:: Take care with foreign keys in SQLite. By default, ON DELETE has no effect, which can have surprising (and usually unwanted) effects on your database integrity. This can affect you even if you don't specify ``on_delete``, since the default ON DELETE behaviour (to fail without modifying your data) does not happen, and your data can be silently relinked. The safest thing to do is to specify ``pragmas={'foreign_keys': 1}`` when you instantiate :py:class:`SqliteDatabase`. .. py:class:: DeferredForeignKey(rel_model_name[, **kwargs]) :param str rel_model_name: Model name to reference. Field class for representing a deferred foreign key. Useful for circular foreign-key references, for example: ..
code-block:: python class Husband(Model): name = TextField() wife = DeferredForeignKey('Wife', deferrable='INITIALLY DEFERRED') class Wife(Model): name = TextField() husband = ForeignKeyField(Husband, deferrable='INITIALLY DEFERRED') In the above example, when the ``Wife`` model is declared, the foreign-key ``Husband.wife`` is automatically resolved and turned into a regular :py:class:`ForeignKeyField`. .. warning:: :py:class:`DeferredForeignKey` references are resolved when model classes are declared and created. This means that if you declare a :py:class:`DeferredForeignKey` to a model class that has already been imported and created, the deferred foreign key instance will never be resolved. For example: .. code-block:: python class User(Model): username = TextField() class Tweet(Model): # This will never actually be resolved, because the User # model has already been declared. user = DeferredForeignKey('user', backref='tweets') content = TextField() In cases like these you should use the regular :py:class:`ForeignKeyField` *or* you can manually resolve deferred foreign keys like so: .. code-block:: python # Tweet.user will be resolved into a ForeignKeyField: DeferredForeignKey.resolve(User) .. py:class:: ManyToManyField(model[, backref=None[, through_model=None[, on_delete=None[, on_update=None]]]]) :param Model model: Model to create relationship with. :param str backref: Accessor name for back-reference :param Model through_model: :py:class:`Model` to use for the intermediary table. If not provided, a simple through table will be automatically created. :param str on_delete: ON DELETE action, e.g. ``'CASCADE'``. Will be used for foreign-keys in through model. :param str on_update: ON UPDATE action. Will be used for foreign-keys in through model. The :py:class:`ManyToManyField` provides a simple interface for working with many-to-many relationships, inspired by Django. A many-to-many relationship is typically implemented by creating a junction table with foreign keys to the two models being related. For instance, if you were building a syllabus manager for college students, the relationship between students and courses would be many-to-many. Here is the schema using standard APIs: .. attention:: This is not a field in the sense that there is no column associated with it. Rather, it provides a convenient interface for accessing rows of data related via a through model. Standard way of declaring a many-to-many relationship (without the use of the :py:class:`ManyToManyField`): .. code-block:: python class Student(Model): name = CharField() class Course(Model): name = CharField() class StudentCourse(Model): student = ForeignKeyField(Student) course = ForeignKeyField(Course) To query the courses for a particular student, you would join through the junction table: .. code-block:: python # List the courses that "Huey" is enrolled in: courses = (Course .select() .join(StudentCourse) .join(Student) .where(Student.name == 'Huey')) for course in courses: print(course.name) The :py:class:`ManyToManyField` is designed to simplify this use-case by providing a *field-like* API for querying and modifying data in the junction table. Here is how our code looks using :py:class:`ManyToManyField`: .. code-block:: python class Student(Model): name = CharField() class Course(Model): name = CharField() students = ManyToManyField(Student, backref='courses') .. 
note:: It does not matter from Peewee's perspective which model the :py:class:`ManyToManyField` goes on, since the back-reference is just the mirror image. In order to write valid Python, though, you will need to add the ``ManyToManyField`` on the second model so that the name of the first model is in the scope. We still need a junction table to store the relationships between students and courses. This model can be accessed by calling the :py:meth:`~ManyToManyField.get_through_model` method. This is useful when creating tables. .. code-block:: python # Create tables for the students, courses, and relationships between # the two. db.create_tables([ Student, Course, Course.students.get_through_model()]) When accessed from a model instance, the :py:class:`ManyToManyField` exposes a :py:class:`ModelSelect` representing the set of related objects. Let's use the interactive shell to see how all this works: .. code-block:: pycon >>> huey = Student.get(Student.name == 'huey') >>> [course.name for course in huey.courses] ['English 101', 'CS 101'] >>> engl_101 = Course.get(Course.name == 'English 101') >>> [student.name for student in engl_101.students] ['Huey', 'Mickey', 'Zaizee'] To add new relationships between objects, you can either assign the objects directly to the ``ManyToManyField`` attribute, or call the :py:meth:`~ManyToManyField.add` method. The difference between the two is that simply assigning will clear out any existing relationships, whereas ``add()`` can preserve existing relationships. .. code-block:: pycon >>> huey.courses = Course.select().where(Course.name.contains('english')) >>> for course in huey.courses.order_by(Course.name): ... print(course.name) English 101 English 151 English 201 English 221 >>> cs_101 = Course.get(Course.name == 'CS 101') >>> cs_151 = Course.get(Course.name == 'CS 151') >>> huey.courses.add([cs_101, cs_151]) >>> [course.name for course in huey.courses.order_by(Course.name)] ['CS 101', 'CS 151', 'English 101', 'English 151', 'English 201', 'English 221'] This is quite a few courses, so let's remove the 200-level english courses. To remove objects, use the :py:meth:`~ManyToManyField.remove` method. .. code-block:: pycon >>> huey.courses.remove(Course.select().where(Course.name.contains('2'))) 2 >>> [course.name for course in huey.courses.order_by(Course.name)] ['CS 101', 'CS 151', 'English 101', 'English 151'] To remove all relationships from a collection, you can use the :py:meth:`~ManyToManyField.clear` method. Let's say that English 101 is canceled, so we need to remove all the students from it: .. code-block:: pycon >>> engl_101 = Course.get(Course.name == 'English 101') >>> engl_101.students.clear() .. note:: For an overview of implementing many-to-many relationships using standard Peewee APIs, check out the :ref:`manytomany` section. For all but the most simple cases, you will be better off implementing many-to-many using the standard APIs. .. py:attribute:: through_model The :py:class:`Model` representing the many-to-many junction table. Will be auto-generated if not explicitly declared. .. py:method:: add(value[, clear_existing=True]) :param value: Either a :py:class:`Model` instance, a list of model instances, or a :py:class:`SelectQuery`. :param bool clear_existing: Whether to remove existing relationships. Associate ``value`` with the current instance. You can pass in a single model instance, a list of model instances, or even a :py:class:`ModelSelect`. Example code: ..
code-block:: python # Huey needs to enroll in a bunch of courses, including all # the English classes, and a couple Comp-Sci classes. huey = Student.get(Student.name == 'Huey') # We can add all the objects represented by a query. english_courses = Course.select().where( Course.name.contains('english')) huey.courses.add(english_courses) # We can also add lists of individual objects. cs101 = Course.get(Course.name == 'CS 101') cs151 = Course.get(Course.name == 'CS 151') huey.courses.add([cs101, cs151]) .. py:method:: remove(value) :param value: Either a :py:class:`Model` instance, a list of model instances, or a :py:class:`ModelSelect`. Disassociate ``value`` from the current instance. Like :py:meth:`~ManyToManyField.add`, you can pass in a model instance, a list of model instances, or even a :py:class:`ModelSelect`. Example code: .. code-block:: python # Huey is currently enrolled in a lot of english classes # as well as some Comp-Sci. He is changing majors, so we # will remove all his courses. english_courses = Course.select().where( Course.name.contains('english')) huey.courses.remove(english_courses) # Remove the two Comp-Sci classes Huey is enrolled in. cs101 = Course.get(Course.name == 'CS 101') cs151 = Course.get(Course.name == 'CS 151') huey.courses.remove([cs101, cs151]) .. py:method:: clear() Remove all associated objects. Example code: .. code-block:: python # English 101 is canceled this semester, so remove all # the enrollments. english_101 = Course.get(Course.name == 'English 101') english_101.students.clear() .. py:method:: get_through_model() Return the :py:class:`Model` representing the many-to-many junction table. This can be specified manually when the field is being instantiated using the ``through_model`` parameter. If a ``through_model`` is not specified, one will automatically be created. When creating tables for an application that uses :py:class:`ManyToManyField`, **you must create the through table explicitly**. .. code-block:: python # Get a reference to the automatically-created through table. StudentCourseThrough = Course.students.get_through_model() # Create tables for our two models as well as the through model. db.create_tables([ Student, Course, StudentCourseThrough]) .. py:class:: DeferredThroughModel() Place-holder for a through-model in cases where, due to a dependency, you cannot declare either a model or a many-to-many field without introducing NameErrors. Example: .. code-block:: python class Note(BaseModel): content = TextField() NoteThroughDeferred = DeferredThroughModel() class User(BaseModel): username = TextField() notes = ManyToManyField(Note, through_model=NoteThroughDeferred) # Cannot declare this before "User" since it has a foreign-key to # the User model. class NoteThrough(BaseModel): note = ForeignKeyField(Note) user = ForeignKeyField(User) # Resolve dependencies. NoteThroughDeferred.set_model(NoteThrough) .. py:class:: CompositeKey(*field_names) :param field_names: Names of fields that comprise the primary key. A primary key composed of multiple columns. Unlike the other fields, a composite key is defined in the model's ``Meta`` class after the fields have been defined. It takes as parameters the string names of the fields to use as the primary key: .. code-block:: python class BlogTagThrough(Model): blog = ForeignKeyField(Blog, backref='tags') tag = ForeignKeyField(Tag, backref='blogs') class Meta: primary_key = CompositeKey('blog', 'tag') Schema Manager -------------- ..
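A model's :py:class:`SchemaManager` is accessible via its ``_schema`` attribute, as used in the ``create_foreign_key()`` example below. A minimal sketch of typical usage, assuming ``Event`` is a model bound to a database:

.. code-block:: python

    # Create the table, indexes and any sequences (IF NOT EXISTS).
    Event._schema.create_all(safe=True)

    # Later: drop the table and associated indexes (IF EXISTS).
    Event._schema.drop_all(safe=True)

..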
py:class:: SchemaManager(model[, database=None[, **context_options]]) :param Model model: Model class. :param Database database: If unspecified defaults to model._meta.database. Provides methods for managing the creation and deletion of tables and indexes for the given model. .. py:method:: create_table([safe=True[, **options]]) :param bool safe: Specify IF NOT EXISTS clause. :param options: Arbitrary options. Execute CREATE TABLE query for the given model. .. py:method:: drop_table([safe=True[, drop_sequences=True[, **options]]]) :param bool safe: Specify IF EXISTS clause. :param bool drop_sequences: Drop any sequences associated with the columns on the table (postgres only). :param options: Arbitrary options. Execute DROP TABLE query for the given model. .. py:method:: truncate_table([restart_identity=False[, cascade=False]]) :param bool restart_identity: Restart the id sequence (postgres-only). :param bool cascade: Truncate related tables as well (postgres-only). Execute TRUNCATE TABLE for the given model. If the database is Sqlite, which does not support TRUNCATE, then an equivalent DELETE query will be executed. .. py:method:: create_indexes([safe=True]) :param bool safe: Specify IF NOT EXISTS clause. Execute CREATE INDEX queries for the indexes defined for the model. .. py:method:: drop_indexes([safe=True]) :param bool safe: Specify IF EXISTS clause. Execute DROP INDEX queries for the indexes defined for the model. .. py:method:: create_sequence(field) :param Field field: Field instance which specifies a sequence. Create sequence for the given :py:class:`Field`. .. py:method:: drop_sequence(field) :param Field field: Field instance which specifies a sequence. Drop sequence for the given :py:class:`Field`. .. py:method:: create_foreign_key(field) :param ForeignKeyField field: Foreign-key field constraint to add. Add a foreign-key constraint for the given field. This method should not be necessary in most cases, as foreign-key constraints are created as part of table creation. The exception is when you are creating a circular foreign-key relationship using :py:class:`DeferredForeignKey`. In those cases, it is necessary to first create the tables, then add the constraint for the deferred foreign-key: .. code-block:: python class Language(Model): name = TextField() selected_snippet = DeferredForeignKey('Snippet') class Snippet(Model): code = TextField() language = ForeignKeyField(Language, backref='snippets') # Creates both tables but does not create the constraint for the # Language.selected_snippet foreign key (because of the circular # dependency). db.create_tables([Language, Snippet]) # Explicitly create the constraint: Language._schema.create_foreign_key(Language.selected_snippet) For more information, see documentation on :ref:`circular-fks`. .. warning:: Because SQLite has limited support for altering existing tables, it is not possible to add a foreign-key constraint to an existing SQLite table. .. py:method:: create_all([safe=True[, **table_options]]) :param bool safe: Whether to specify IF NOT EXISTS. Create sequence(s), index(es) and table for the model. .. py:method:: drop_all([safe=True[, drop_sequences=True[, **options]]]) :param bool safe: Whether to specify IF EXISTS. :param bool drop_sequences: Drop any sequences associated with the columns on the table (postgres only). :param options: Arbitrary options. Drop table for the model and associated indexes. Model ----- .. 
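The options accepted by :py:class:`Metadata`, documented next, are declared using a model's inner ``Meta`` class, and are then readable via ``Model._meta``. A minimal sketch, where ``Event`` and ``db`` are illustrative names:

.. code-block:: python

    class Event(Model):
        name = TextField()

        class Meta:
            database = db          # Bind the model to a database.
            table_name = 'events'  # Override the derived table name.

    # Declared options are available via Model._meta:
    assert Event._meta.table_name == 'events'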
.. py:class:: Metadata(model[, database=None[, table_name=None[, indexes=None[, primary_key=None[, constraints=None[, schema=None[, only_save_dirty=False[, depends_on=None[, options=None[, without_rowid=False[, strict_tables=False[, **kwargs]]]]]]]]]]]]) :param Model model: Model class. :param Database database: database model is bound to. :param str table_name: Specify table name for model. :param list indexes: List of :py:class:`ModelIndex` objects. :param primary_key: Primary key for model (only specified if this is a :py:class:`CompositeKey` or ``False`` for no primary key). :param list constraints: List of table constraints. :param str schema: Schema table exists in. :param bool only_save_dirty: When :py:meth:`~Model.save` is called, only save the fields which have been modified. :param dict options: Arbitrary options for the model. :param bool without_rowid: Specify WITHOUT ROWID (sqlite only). :param bool strict_tables: Specify STRICT (sqlite only, requires 3.37+). :param kwargs: Arbitrary setting attributes and values. Store metadata for a :py:class:`Model`. This class should not be instantiated directly, but is instantiated using the attributes of a :py:class:`Model` class' inner ``Meta`` class. Metadata attributes are then available on ``Model._meta``. .. py:attribute:: table Return a reference to the underlying :py:class:`Table` object. .. py:method:: model_graph([refs=True[, backrefs=True[, depth_first=True]]]) :param bool refs: Follow foreign-key references. :param bool backrefs: Follow foreign-key back-references. :param bool depth_first: Do a depth-first search (``False`` for breadth-first). Traverse the model graph and return a list of 3-tuples, consisting of ``(foreign key field, model class, is_backref)``. .. py:method:: set_database(database) :param Database database: database object to bind Model to. Bind the model class to the given :py:class:`Database` instance. .. warning:: This API should not need to be used. Instead, to change a :py:class:`Model` database at run-time, use one of the following: * :py:meth:`Model.bind` * :py:meth:`Model.bind_ctx` (bind for scope of a context manager). * :py:meth:`Database.bind` * :py:meth:`Database.bind_ctx` .. py:method:: set_table_name(table_name) :param str table_name: table name to bind Model to. Bind the model class to the given table name at run-time. .. py:class:: SubclassAwareMetadata Metadata subclass that tracks :py:class:`Model` subclasses. Useful for when you need to track all models in a project. Example: .. code-block:: python from peewee import SubclassAwareMetadata class Base(Model): class Meta: database = db model_metadata_class = SubclassAwareMetadata # Create 3 model classes that inherit from Base. class A(Base): pass class B(Base): pass class C(Base): pass # Now let's make a helper for changing the `schema` for each Model. def change_schema(schema): def _update(model): model._meta.schema = schema return _update # Set all models to use "schema1", e.g. "schema1.a", "schema1.b", etc. # Will apply the function to every subclass of Base. Base._meta.map_models(change_schema('schema1')) # Set all models to use "schema2", e.g. "schema2.a", "schema2.b", etc. Base._meta.map_models(change_schema('schema2')) .. py:method:: map_models(fn) Apply a function to all subclasses. .. py:class:: Model(**kwargs) :param kwargs: Mapping of field-name to value to initialize model with. Model class provides a high-level abstraction for working with database tables.
Models are a one-to-one mapping with a database table (or a table-like object, such as a view). Subclasses of ``Model`` declare any number of :py:class:`Field` instances as class attributes. These fields correspond to columns on the table. Table-level operations, such as :py:meth:`~Model.select`, :py:meth:`~Model.update`, :py:meth:`~Model.insert` and :py:meth:`~Model.delete`, are implemented as classmethods. Row-level operations, such as :py:meth:`~Model.save` and :py:meth:`~Model.delete_instance`, are implemented as instance methods. Example: .. code-block:: python db = SqliteDatabase(':memory:') class User(Model): username = TextField() join_date = DateTimeField(default=datetime.datetime.now) is_admin = BooleanField(default=False) admin = User(username='admin', is_admin=True) admin.save() .. py:classmethod:: alias([alias=None]) :param str alias: Optional name for alias. :returns: :py:class:`ModelAlias` instance. Create an alias to the model-class. Model aliases allow you to reference the same :py:class:`Model` multiple times in a query, for example when doing a self-join or sub-query. Example: .. code-block:: python Parent = Category.alias() sq = (Category .select(Category, Parent) .join(Parent, on=(Category.parent == Parent.id)) .where(Parent.name == 'parent category')) .. py:classmethod:: select(*fields) :param fields: A list of model classes, field instances, functions or expressions. If no arguments are provided, all columns for the given model will be selected by default. :returns: :py:class:`ModelSelect` query. Create a SELECT query. If no fields are explicitly provided, the query will by default SELECT all the fields defined on the model, unless you are using the query as a sub-query, in which case only the primary key will be selected by default. Example of selecting all columns: .. code-block:: python query = User.select().where(User.active == True).order_by(User.username) Example of selecting all columns on *Tweet* and the parent model, *User*. When the ``user`` foreign key is accessed on a *Tweet* instance, no additional query will be needed (see the documentation on avoiding N+1 queries for more details): .. code-block:: python query = (Tweet .select(Tweet, User) .join(User) .order_by(Tweet.created_date.desc())) for tweet in query: print(tweet.user.username, '->', tweet.content) Example of subquery only selecting the primary key: .. code-block:: python inactive_users = User.select().where(User.active == False) # Here, instead of defaulting to all columns, Peewee will default # to only selecting the primary key. Tweet.delete().where(Tweet.user.in_(inactive_users)).execute() .. py:classmethod:: update([__data=None[, **update]]) :param dict __data: ``dict`` of fields to values. :param update: Field-name to value mapping. Create an UPDATE query. Example showing users being marked inactive if their registration has expired: .. code-block:: python q = (User .update({User.active: False}) .where(User.registration_expired == True)) q.execute() # Execute the query, returning number of rows updated. Example showing an atomic update: .. code-block:: python q = (PageView .update({PageView.count: PageView.count + 1}) .where(PageView.url == url)) q.execute() # Execute the query. .. note:: When an update query is executed, the number of rows modified will be returned. .. py:classmethod:: insert([__data=None[, **insert]]) :param dict __data: ``dict`` of fields to values to insert. :param insert: Field-name to value mapping. Create an INSERT query. Insert a new row into the database.
If any fields on the model have default values, these values will be used if the fields are not explicitly set in the ``insert`` dictionary. Example showing creation of a new user: .. code-block:: python q = User.insert(username='admin', active=True, registration_expired=False) q.execute() # perform the insert. You can also use :py:class:`Field` objects as the keys: .. code-block:: python new_id = User.insert({User.username: 'admin'}).execute() If you have a model with a default value on one of the fields, and that field is not specified in the ``insert`` parameter, the default will be used: .. code-block:: python class User(Model): username = CharField() active = BooleanField(default=True) # This INSERT query will automatically specify `active=True`: User.insert(username='charlie') .. note:: When an insert query is executed on a table with an auto-incrementing primary key, the primary key of the new row will be returned. .. py:classmethod:: insert_many(rows[, fields=None]) :param rows: An iterable that yields rows to insert. :param list fields: List of fields being inserted. :return: number of rows modified (see note). INSERT multiple rows of data. The ``rows`` parameter must be an iterable that yields dictionaries or tuples, where the ordering of the tuple values corresponds to the fields specified in the ``fields`` argument. As with :py:meth:`~Model.insert`, fields that are not specified in the dictionary will use their default value, if one exists. .. note:: Due to the nature of bulk inserts, each row must contain the same fields. The following will not work: .. code-block:: python Person.insert_many([ {'first_name': 'Peewee', 'last_name': 'Herman'}, {'first_name': 'Huey'}, # Missing "last_name"! ]).execute() Example of inserting multiple Users: .. code-block:: python data = [ ('charlie', True), ('huey', False), ('zaizee', False)] query = User.insert_many(data, fields=[User.username, User.is_admin]) query.execute() Equivalent example using dictionaries: .. code-block:: python data = [ {'username': 'charlie', 'is_admin': True}, {'username': 'huey', 'is_admin': False}, {'username': 'zaizee', 'is_admin': False}] # Insert new rows. User.insert_many(data).execute() Because the ``rows`` parameter can be an arbitrary iterable, you can also use a generator: .. code-block:: python def get_usernames(): for username in ['charlie', 'huey', 'peewee']: yield {'username': username} User.insert_many(get_usernames()).execute() .. warning:: If you are using SQLite, your SQLite library must be version 3.7.11 or newer to take advantage of bulk inserts. .. note:: SQLite has a default limit on the number of bound variables per statement. This limit can be modified at compile-time or at run-time, **but** if modifying at run-time, you can only specify a *lower* value than the default limit. For more information, check out the following SQLite documents: * `Max variable number limit <https://www.sqlite.org/limits.html#max_variable_number>`_ * `Changing run-time limits <https://www.sqlite.org/c3ref/limit.html>`_ * `SQLite compile-time flags <https://www.sqlite.org/compile.html>`_ .. note:: The default return value is the number of rows modified. However, when using Postgres, Peewee will return a cursor by default that yields the primary-keys of the inserted rows. To disable this functionality with Postgres, use ``as_rowcount()``. .. py:classmethod:: insert_from(query, fields) :param Select query: SELECT query to use as source of data. :param fields: Fields to insert data into. :return: number of rows modified (see note). INSERT data using a SELECT query as the source. This API should be used for queries of the form *INSERT INTO ... SELECT FROM ...*.
Example of inserting data across tables for denormalization purposes: .. code-block:: python source = (User .select(User.username, fn.COUNT(Tweet.id)) .join(Tweet, JOIN.LEFT_OUTER) .group_by(User.username)) UserTweetDenorm.insert_from( source, [UserTweetDenorm.username, UserTweetDenorm.num_tweets]).execute() .. note:: The default return value is the number of rows modified. However, when using Postgres, Peewee will return a cursor by default that yields the primary-keys of the inserted rows. To disable this functionality with Postgres, use ``as_rowcount()``. .. py:classmethod:: replace([__data=None[, **insert]]) :param dict __data: ``dict`` of fields to values to insert. :param insert: Field-name to value mapping. Create an INSERT query that uses REPLACE for conflict-resolution. See :py:meth:`Model.insert` for examples. .. py:classmethod:: replace_many(rows[, fields=None]) :param rows: An iterable that yields rows to insert. :param list fields: List of fields being inserted. INSERT multiple rows of data using REPLACE for conflict-resolution. See :py:meth:`Model.insert_many` for examples. .. py:classmethod:: raw(sql, *params) :param str sql: SQL query to execute. :param params: Parameters for query. Execute a SQL query directly. Example selecting rows from the User table: .. code-block:: python q = User.raw('select id, username from users') for user in q: print(user.id, user.username) .. note:: Generally the use of ``raw`` is reserved for those cases where you can significantly optimize a select query. It is useful for select queries since it will return instances of the model. .. py:classmethod:: delete() Create a DELETE query. Example showing the deletion of all inactive users: .. code-block:: python q = User.delete().where(User.active == False) q.execute() # Remove the rows, return number of rows removed. .. warning:: This method performs a delete on the *entire table*. To delete a single instance, see :py:meth:`Model.delete_instance`. .. py:classmethod:: create(**query) :param query: Mapping of field-name to value. INSERT new row into table and return corresponding model instance. Example showing the creation of a user (a row will be added to the database): .. code-block:: python user = User.create(username='admin', password='test') .. note:: The create() method is a shorthand for instantiate-then-save. .. py:classmethod:: bulk_create(model_list[, batch_size=None]) :param iterable model_list: a list or other iterable of unsaved :py:class:`Model` instances. :param int batch_size: number of rows to batch per insert. If unspecified, all models will be inserted in a single query. :returns: no return value. Efficiently INSERT multiple unsaved model instances into the database. Unlike :py:meth:`~Model.insert_many`, which accepts row data as a list of either dictionaries or lists, this method accepts a list of unsaved model instances. Example: .. code-block:: python # List of 10 unsaved users. user_list = [User(username='u%s' % i) for i in range(10)] # All 10 users are inserted in a single query. User.bulk_create(user_list) Batches: .. code-block:: python user_list = [User(username='u%s' % i) for i in range(10)] with database.atomic(): # Will execute 4 INSERT queries (3 batches of 3, 1 batch of 1). User.bulk_create(user_list, batch_size=3) .. warning:: * The primary-key value for the newly-created models will only be set if you are using Postgresql (which supports the ``RETURNING`` clause). 
* SQLite generally has a limit on the number of bound parameters for a query, so the maximum batch size should be param-limit / number-of-fields. This limit is typically 999 for Sqlite < 3.32.0, and 32766 for newer versions. * When a batch-size is provided, it is **strongly recommended** that you wrap the call in a transaction or savepoint using :py:meth:`Database.atomic`. Otherwise an error in a batch mid-way through could leave the database in an inconsistent state. .. py:classmethod:: bulk_update(model_list, fields[, batch_size=None]) :param iterable model_list: a list or other iterable of :py:class:`Model` instances. :param list fields: list of fields to update. :param int batch_size: number of rows to batch per update. If unspecified, all rows will be updated in a single query. :returns: total number of rows updated. Efficiently UPDATE multiple model instances. Example: .. code-block:: python # First, create 3 users. u1, u2, u3 = [User.create(username='u%s' % i) for i in (1, 2, 3)] # Now let's modify their usernames. u1.username = 'u1-x' u2.username = 'u2-y' u3.username = 'u3-z' # Update all three rows using a single UPDATE query. User.bulk_update([u1, u2, u3], fields=[User.username]) This will result in executing the following SQL: .. code-block:: sql UPDATE "users" SET "username" = CASE "users"."id" WHEN 1 THEN "u1-x" WHEN 2 THEN "u2-y" WHEN 3 THEN "u3-z" END WHERE "users"."id" IN (1, 2, 3); If you have a large number of objects to update, it is strongly recommended that you specify a ``batch_size`` and wrap the operation in a transaction: .. code-block:: python with database.atomic(): User.bulk_update(user_list, fields=['username'], batch_size=50) .. warning:: * SQLite generally has a limit on the number of bound parameters for a query. This limit is typically 999 for Sqlite < 3.32.0, and 32766 for newer versions. * When a batch-size is provided, it is **strongly recommended** that you wrap the call in a transaction or savepoint using :py:meth:`Database.atomic`. Otherwise an error in a batch mid-way through could leave the database in an inconsistent state. .. py:classmethod:: get(*query, **filters) :param query: Zero or more :py:class:`Expression` objects. :param filters: Mapping of field-name to value for Django-style filter. :raises: :py:class:`DoesNotExist` :returns: Model instance matching the specified filters. Retrieve a single model instance matching the given filters. If no model is returned, a :py:class:`DoesNotExist` is raised. .. code-block:: python user = User.get(User.username == username, User.active == True) This method is also exposed via the :py:class:`SelectQuery`, though it takes no parameters: .. code-block:: python active = User.select().where(User.active == True) try: user = active.where( (User.username == username) & (User.active == True) ).get() except User.DoesNotExist: user = None .. note:: The :py:meth:`~Model.get` method is shorthand for selecting with a limit of 1. It has the added behavior of raising an exception when no matching row is found. If more than one row is found, the first row returned by the database cursor will be used. .. py:classmethod:: get_or_none(*query, **filters) Identical to :py:meth:`Model.get` but returns ``None`` if no model matches the given filters. .. py:classmethod:: get_by_id(pk) :param pk: Primary-key value. Short-hand for calling :py:meth:`Model.get` specifying a lookup by primary key. Raises a :py:class:`DoesNotExist` if an instance with the given primary-key value does not exist. Example: ..
code-block:: python user = User.get_by_id(1) # Returns user with id = 1. .. py:classmethod:: set_by_id(key, value) :param key: Primary-key value. :param dict value: Mapping of field to value to update. Short-hand for updating the data with the given primary-key. If no row exists with the given primary key, no exception will be raised. Example: .. code-block:: python # Set "is_admin" to True on user with id=3. User.set_by_id(3, {'is_admin': True}) .. py:classmethod:: delete_by_id(pk) :param pk: Primary-key value. Short-hand for deleting the row with the given primary-key. If no row exists with the given primary key, no exception will be raised. .. py:classmethod:: get_or_create(**kwargs) :param kwargs: Mapping of field-name to value. :param defaults: Default values to use if creating a new row. :returns: Tuple of :py:class:`Model` instance and boolean indicating if a new object was created. Attempt to get the row matching the given filters. If no matching row is found, create a new row. .. warning:: Race-conditions are possible when using this method. Example **without** ``get_or_create``: .. code-block:: python # Without `get_or_create`, we might write: try: person = Person.get( (Person.first_name == 'John') & (Person.last_name == 'Lennon')) except Person.DoesNotExist: person = Person.create( first_name='John', last_name='Lennon', birthday=datetime.date(1940, 10, 9)) Equivalent code using ``get_or_create``: .. code-block:: python person, created = Person.get_or_create( first_name='John', last_name='Lennon', defaults={'birthday': datetime.date(1940, 10, 9)}) .. py:classmethod:: filter(*dq_nodes, **filters) :param dq_nodes: Zero or more :py:class:`DQ` objects. :param filters: Django-style filters. :returns: :py:class:`ModelSelect` query. .. py:method:: get_id() :returns: The primary-key of the model instance. .. py:method:: save([force_insert=False[, only=None]]) :param bool force_insert: Force INSERT query. :param list only: Only save the given :py:class:`Field` instances. :returns: Number of rows modified. Save the data in the model instance. By default, the presence of a primary-key value will cause an UPDATE query to be executed. Example showing saving a model instance: .. code-block:: python user = User() user.username = 'some-user' # does not touch the database user.save() # change is persisted to the db .. py:attribute:: dirty_fields Return list of fields that have been modified. :rtype: list .. note:: If you just want to persist modified fields, you can call ``model.save(only=model.dirty_fields)``. If you **always** want to only save a model's dirty fields, you can use the Meta option ``only_save_dirty = True``. Then, any time you call :py:meth:`Model.save()`, by default only the dirty fields will be saved, e.g. .. code-block:: python class Person(Model): first_name = CharField() last_name = CharField() dob = DateField() class Meta: database = db only_save_dirty = True .. warning:: Peewee determines whether a field is "dirty" by observing when the field attribute is set on a model instance. If the field contains a value that is mutable, such as a dictionary instance, and that dictionary is then modified, Peewee will not notice the change. .. py:method:: is_dirty() Return boolean indicating whether any fields were manually set. .. py:method:: delete_instance([recursive=False[, delete_nullable=False]]) :param bool recursive: Delete related models. :param bool delete_nullable: Delete related models that have a null foreign key. If ``False`` nullable relations will be set to NULL. 
Delete the given instance. Any foreign keys set to cascade on delete will be deleted automatically. For more programmatic control, you can specify ``recursive=True``, which will delete any non-nullable related models (those that *are* nullable will be set to NULL). If you wish to delete all dependencies regardless of whether they are nullable, set ``delete_nullable=True``. Example: .. code-block:: python some_obj.delete_instance() # it is gone forever .. py:classmethod:: bind(database[, bind_refs=True[, bind_backrefs=True]]) :param Database database: database to bind to. :param bool bind_refs: Bind related models. :param bool bind_backrefs: Bind back-reference related models. Bind the model (and specified relations) to the given database. See also: :py:meth:`Database.bind`. .. py:classmethod:: bind_ctx(database[, bind_refs=True[, bind_backrefs=True]]) Like :py:meth:`~Model.bind`, but returns a context manager that only binds the models for the duration of the wrapped block. See also: :py:meth:`Database.bind_ctx`. .. py:classmethod:: table_exists() :returns: boolean indicating whether the table exists. .. py:classmethod:: create_table([safe=True[, **options]]) :param bool safe: If set to ``True``, the create table query will include an ``IF NOT EXISTS`` clause. Create the model table, indexes, constraints and sequences. Example: .. code-block:: python with database: SomeModel.create_table() # Execute the create table query. .. py:classmethod:: drop_table([safe=True[, **options]]) :param bool safe: If set to ``True``, the drop table query will include an ``IF EXISTS`` clause. Drop the model table. .. py:method:: truncate_table([restart_identity=False[, cascade=False]]) :param bool restart_identity: Restart the id sequence (postgres-only). :param bool cascade: Truncate related tables as well (postgres-only). Truncate (delete all rows from) the table for the model. .. py:classmethod:: index(*fields[, unique=False[, safe=True[, where=None[, using=None[, name=None]]]]]) :param fields: Fields to index. :param bool unique: Whether index is UNIQUE. :param bool safe: Whether to add IF NOT EXISTS clause. :param Expression where: Optional WHERE clause for index. :param str using: Index algorithm. :param str name: Optional index name. Expressive method for declaring an index on a model. Wraps the declaration of a :py:class:`ModelIndex` instance. Examples: .. code-block:: python class Article(Model): name = TextField() timestamp = TimestampField() status = IntegerField() flags = BitField() is_sticky = flags.flag(1) is_favorite = flags.flag(2) # CREATE INDEX ... ON "article" ("name", "timestamp" DESC) idx = Article.index(Article.name, Article.timestamp.desc()) # Be sure to add the index to the model: Article.add_index(idx) # CREATE UNIQUE INDEX ... ON "article" ("timestamp" DESC, "flags" & 2) # WHERE ("status" = 1) idx = (Article .index(Article.timestamp.desc(), Article.flags.bin_and(2), unique=True) .where(Article.status == 1)) # Add index to model: Article.add_index(idx) .. py:classmethod:: add_index(*args, **kwargs) :param args: a :py:class:`ModelIndex` instance, Field(s) to index, or a :py:class:`SQL` instance that contains the SQL for creating the index. :param kwargs: Keyword arguments passed to :py:class:`ModelIndex` constructor. Add an index to the model's definition. .. note:: This method does not actually create the index in the database. Rather, it adds the index definition to the model's metadata, so that a subsequent call to :py:meth:`~Model.create_table` will create the new index (along with the table).
Examples: .. code-block:: python class Article(Model): name = TextField() timestamp = TimestampField() status = IntegerField() flags = BitField() is_sticky = flags.flag(1) is_favorite = flags.flag(2) # CREATE INDEX ... ON "article" ("name", "timestamp") WHERE "status" = 1 idx = Article.index(Article.name, Article.timestamp).where(Article.status == 1) Article.add_index(idx) # CREATE UNIQUE INDEX ... ON "article" ("timestamp" DESC, "flags" & 2) ts_flags_idx = Article.index( Article.timestamp.desc(), Article.flags.bin_and(2), unique=True) Article.add_index(ts_flags_idx) # You can also specify a list of fields and use the same keyword # arguments that the ModelIndex constructor accepts: Article.add_index( Article.name, Article.timestamp.desc(), where=(Article.status == 1)) # Or even specify a SQL query directly: Article.add_index(SQL('CREATE INDEX ...')) .. py:method:: dependencies([search_nullable=False]) :param bool search_nullable: Search models related via a nullable foreign key. :rtype: Generator expression yielding queries and foreign key fields. Generate a list of queries of dependent models. Yields a 2-tuple containing the query and corresponding foreign key field. Useful for searching dependencies of a model, i.e. things that would be orphaned in the event of a delete. .. py:method:: __iter__() :returns: a :py:class:`ModelSelect` for the given class. Convenience function for iterating over all instances of a model. Example: .. code-block:: python Setting.insert_many([ {'key': 'host', 'value': '192.168.1.2'}, {'key': 'port', 'value': '1337'}, {'key': 'user', 'value': 'nuggie'}]).execute() # Load settings from db into dict. settings = {setting.key: setting.value for setting in Setting} .. py:method:: __len__() :returns: Count of rows in table. Example: .. code-block:: python n_accounts = len(Account) # Is equivalent to: n_accounts = Account.select().count() .. py:class:: ModelAlias(model[, alias=None]) :param Model model: Model class to reference. :param str alias: (optional) name for alias. Provide a separate reference to a model in a query. .. py:class:: ModelSelect(model, fields_or_models) :param Model model: Model class to select. :param fields_or_models: List of fields or model classes to select. Model-specific implementation of SELECT query. .. py:method:: switch([ctx=None]) :param ctx: A :py:class:`Model`, :py:class:`ModelAlias`, subquery, or other object that was joined-on. Switch the *join context* - the source which subsequent calls to :py:meth:`~ModelSelect.join` will be joined against. Used for specifying multiple joins against a single table. If the ``ctx`` is not given, then the query's model will be used. The following example selects from tweet and joins on both user and tweet-flag: .. code-block:: python sq = Tweet.select().join(User).switch(Tweet).join(TweetFlag) # Equivalent (since Tweet is the query's model) sq = Tweet.select().join(User).switch().join(TweetFlag) .. py:method:: objects([constructor=None]) :param constructor: Constructor (defaults to returning model instances) Return result rows as objects created using the given constructor. The default behavior is to create model instances. .. note:: This method can be used, when selecting field data from multiple sources/models, to make all data available as attributes on the model being queried (as opposed to constructing the graph of joined model instances). For very complex queries this can have a positive performance impact, especially iterating large result sets.
Similarly, you can use :py:meth:`~BaseQuery.dicts`, :py:meth:`~BaseQuery.tuples` or :py:meth:`~BaseQuery.namedtuples` to achieve even more performance. .. py:method:: join(dest[, join_type='INNER'[, on=None[, src=None[, attr=None]]]]) :param dest: A :py:class:`Model`, :py:class:`ModelAlias`, :py:class:`Select` query, or other object to join to. :param str join_type: Join type, defaults to INNER. :param on: Join predicate or a :py:class:`ForeignKeyField` to join on. :param src: Explicitly specify the source of the join. If not specified then the current *join context* will be used. :param str attr: Attribute to use when projecting columns from the joined model. Join with another table-like object. Join type may be one of: * ``JOIN.INNER`` * ``JOIN.LEFT_OUTER`` * ``JOIN.RIGHT_OUTER`` * ``JOIN.FULL`` * ``JOIN.FULL_OUTER`` * ``JOIN.CROSS`` Example selecting tweets and joining on user in order to restrict to only those tweets made by "admin" users: .. code-block:: python sq = Tweet.select().join(User).where(User.is_admin == True) Example selecting users and joining on a particular foreign key field. See the example app in the documentation for a real-life usage: .. code-block:: python sq = User.select().join(Relationship, on=Relationship.to_user) For an in-depth discussion of foreign-keys, joins and relationships between models, refer to :ref:`relationships`. .. py:method:: join_from(src, dest[, join_type='INNER'[, on=None[, attr=None]]]) :param src: Source for join. :param dest: Table to join to. Uses the same parameter order as :py:meth:`~ModelSelect.join`, and bypasses the *join context* by requiring the join source to be specified explicitly. .. py:method:: filter(*args, **kwargs) :param args: Zero or more :py:class:`DQ` objects. :param kwargs: Django-style keyword-argument filters. Use Django-style filters to express a WHERE clause. Filters can traverse joins by chaining foreign-key field names, separated by double underscores (e.g. ``user__username``). The supported operations are: * ``eq`` - equals * ``ne`` - not equals * ``lt``, ``lte`` - less-than, less-than or equal-to * ``gt``, ``gte`` - greater-than, greater-than or equal-to * ``in`` - IN set of values * ``is`` - IS (e.g. IS NULL). * ``like``, ``ilike`` - LIKE and ILIKE (case-insensitive) * ``regexp`` - regular expression match Examples: .. code-block:: python # Get all tweets by user with username="peewee". q = Tweet.filter(user__username='peewee') # Get all posts that are draft or published, and written after 2023. q = Post.filter( (DQ(status='draft') | DQ(status='published')), timestamp__gte=datetime.date(2023, 1, 1)) .. py:method:: prefetch(*subqueries[, prefetch_type=PREFETCH_TYPE.WHERE]) :param subqueries: A list of :py:class:`Model` classes or select queries to prefetch. :param prefetch_type: Query type to use for the subqueries. :returns: a list of models with selected relations prefetched. Execute the query, prefetching the given additional resources. Prefetch type may be one of: * ``PREFETCH_TYPE.WHERE`` * ``PREFETCH_TYPE.JOIN`` See also :py:func:`prefetch` standalone function. Example: .. code-block:: python # Fetch all Users and prefetch their associated tweets. query = User.select().prefetch(Tweet) for user in query: print(user.username) for tweet in user.tweets: print(' *', tweet.content) .. note:: Because ``prefetch`` must reconstruct a graph of models, it is necessary to be sure that the foreign-key/primary-key of any related models are selected, so that the related objects can be mapped correctly.
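For example, a minimal sketch (using the ``User`` and ``Tweet`` models from earlier examples) showing that the primary- and foreign-key columns are kept even when limiting the selected columns:

.. code-block:: python

    # Tweet.user (the foreign-key) and User.id (the primary-key) must
    # be selected so prefetch() can associate each tweet with its user.
    users = User.select(User.id, User.username)
    tweets = Tweet.select(Tweet.id, Tweet.user, Tweet.content)

    for user in users.prefetch(tweets):
        for tweet in user.tweets:
            print(user.username, tweet.content)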
.. py:function:: prefetch(sq, *subqueries[, prefetch_type=PREFETCH_TYPE.WHERE]) :param sq: Query to use as starting-point. :param subqueries: One or more models or :py:class:`ModelSelect` queries to eagerly fetch. :param prefetch_type: Query type to use for the subqueries. :returns: a list of models with selected relations prefetched. Eagerly fetch related objects, allowing efficient querying of multiple tables when a 1-to-many relationship exists. The prefetch type changes how the subqueries are constructed, which may be desirable depending on the database engine in use. Prefetch type may be one of: * ``PREFETCH_TYPE.WHERE`` * ``PREFETCH_TYPE.JOIN`` For example, it is simple to query a many-to-1 relationship efficiently:: query = (Tweet .select(Tweet, User) .join(User)) for tweet in query: # Looking up tweet.user.username does not require a query since # the related user's columns were selected. print(tweet.user.username, '->', tweet.content) To efficiently do the inverse, query users and their tweets, you can use prefetch:: query = User.select() for user in prefetch(query, Tweet): print(user.username) for tweet in user.tweets: # Does not require additional query. print(' ', tweet.content) .. note:: Because ``prefetch`` must reconstruct a graph of models, it is necessary to be sure that the foreign-key/primary-key of any related models are selected, so that the related objects can be mapped correctly. Query-builder Internals ----------------------- .. py:class:: AliasManager() Manages the aliases assigned to :py:class:`Source` objects in SELECT queries, so as to avoid ambiguous references when multiple sources are used in a single query. .. py:method:: add(source) Add a source to the AliasManager's internal registry at the current scope; an alias will be generated for it automatically. :param Source source: Make the manager aware of a new source. If the source has already been added, the call is a no-op. .. py:method:: get(source[, any_depth=False]) Return the alias for the source in the current scope. If the source does not have an alias, it will be given the next available alias. :param Source source: The source whose alias should be retrieved. :returns: The alias already assigned to the source, or the next available alias. :rtype: str .. py:method:: __setitem__(source, alias) Manually set the alias for the source at the current scope. :param Source source: The source for which we set the alias. .. py:method:: push() Push a new scope onto the stack. .. py:method:: pop() Pop scope from the stack. .. py:class:: State(scope[, parentheses=False[, subquery=False[, **kwargs]]]) Lightweight object for representing the state at a given scope. During SQL generation, each object visited by the :py:class:`Context` can inspect the state. The :py:class:`State` class allows Peewee to do things like: * Use a common interface for field types or SQL expressions, but use vendor-specific data-types or operators. * Compile a :py:class:`Column` instance into a fully-qualified attribute, as a named alias, etc, depending on the value of the ``scope``. * Ensure parentheses are used appropriately. :param int scope: The scope rules to be applied while the state is active. :param bool parentheses: Wrap the contained SQL in parentheses. :param bool subquery: Whether the current state is a child of an outer query. :param dict kwargs: Arbitrary settings which should be applied in the current state.
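To see scope and state in action, a query can be parsed using a bare :py:class:`Context`, described next. A minimal sketch; the SQL shown in the comments is illustrative, since quoting and column order vary by database:

.. code-block:: python

    query = User.select().where(User.username == 'huey')

    # parse() converts the query AST into a 2-tuple of (sql, params).
    sql, params = Context().parse(query)
    # sql    -> 'SELECT "t1"."id", "t1"."username" FROM "user" AS "t1" ...'
    # params -> ['huey']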
.. py:class:: Context(**settings) Converts Peewee structures into parameterized SQL queries. Peewee structures should all implement a `__sql__` method, which will be called by the `Context` class during SQL generation. The `__sql__` method accepts a single parameter, the `Context` instance, which allows for recursive descent and introspection of scope and state. .. py:attribute:: scope Return the currently-active scope rules. .. py:attribute:: parentheses Return whether the current state is wrapped in parentheses. .. py:attribute:: subquery Return whether the current state is the child of another query. .. py:method:: scope_normal([**kwargs]) The default scope. Sources are referred to by alias, columns by dotted-path from the source. .. py:method:: scope_source([**kwargs]) Scope used when defining sources, e.g. in the column list and FROM clause of a SELECT query. This scope is used for defining the fully-qualified name of the source and assigning an alias. .. py:method:: scope_values([**kwargs]) Scope used for UPDATE, INSERT or DELETE queries, where instead of referencing a source by an alias, we refer to it directly. Similarly, since there is a single table, columns do not need to be referenced by dotted-path. .. py:method:: scope_cte([**kwargs]) Scope used when generating the contents of a common-table-expression. Used after a WITH statement, when generating the definition for a CTE (as opposed to merely a reference to one). .. py:method:: scope_column([**kwargs]) Scope used when generating SQL for a column. Ensures that the column is rendered with its correct alias. This scope was needed because, when referencing the inner projection of a sub-select, Peewee would render the full SELECT query as the "source" of the column (instead of the query's alias + . + column). This scope allows us to avoid rendering the full query when we only need the alias. .. py:method:: sql(obj) Append a composable Node object, sub-context, or other object to the query AST. Python values, such as integers, strings, floats, etc. are treated as parameterized values. :returns: The updated Context object. .. py:method:: literal(keyword) Append a string-literal to the current query AST. :returns: The updated Context object. .. py:method:: parse(node) :param Node node: Instance of a Node subclass. :returns: a 2-tuple consisting of (sql, parameters). Convert the given node to a SQL AST and return a 2-tuple consisting of the SQL query and the parameters. .. py:method:: query() :returns: a 2-tuple consisting of (sql, parameters) for the context. Constants and Helpers --------------------- .. py:class:: Proxy() Create a proxy or placeholder for another object. .. py:method:: initialize(obj) :param obj: Object to proxy to. Bind the proxy to the given object. Afterwards all attribute lookups and method calls on the proxy will be sent to the given object. Any callbacks that have been registered will be called. .. py:method:: attach_callback(callback) :param callback: A function that accepts a single parameter, the bound object. :returns: self Add a callback to be executed when the proxy is initialized. .. py:class:: DatabaseProxy() Proxy subclass that is suitable to use as a placeholder for a :py:class:`Database` instance. See :ref:`dynamic_db` for details on usage. .. py:function:: chunked(iterable, n) :param iterable: an iterable that is the source of the data to be chunked. :param int n: chunk size :returns: a new iterable that yields *n*-length chunks of the source data.
Efficient implementation for breaking up large lists of data into smaller-sized chunks. Usage: .. code-block:: python it = range(10) # An iterable that yields 0...9. # Break the iterable into chunks of length 4. for chunk in chunked(it, 4): print(', '.join(str(num) for num in chunk)) # PRINTS: # 0, 1, 2, 3 # 4, 5, 6, 7 # 8, 9 peewee-3.17.7/docs/peewee/changes.rst000066400000000000000000000151461470346076600174530ustar00rootroot00000000000000.. _changes: Changes in 3.0 ============== This document describes changes to be aware of when switching from 2.x to 3.x. Backwards-incompatible ---------------------- I tried to keep changes backwards-compatible as much as possible. In some places, APIs that have changed will trigger a ``DeprecationWarning``. Database ^^^^^^^^ * ``get_conn()`` has changed to :py:meth:`Database.connection` * ``get_cursor()`` has changed to :py:meth:`Database.cursor` * ``execution_context()`` is replaced by simply using the database instance as a context-manager. * For a connection context *without* a transaction, use :py:meth:`Database.connection_context`. * :py:meth:`Database.create_tables` and :py:meth:`Database.drop_tables`, as well as :py:meth:`Model.create_table` and :py:meth:`Model.drop_table` all default to ``safe=True`` (``create_table`` will create if not exists, ``drop_table`` will drop if exists). * ``connect_kwargs`` attribute has been renamed to ``connect_params`` * initialization parameter for custom field-type definitions has changed from ``fields`` to ``field_types``. Model Meta options ^^^^^^^^^^^^^^^^^^ * ``db_table`` has changed to ``table_name`` * ``db_table_func`` has changed to ``table_function`` * ``order_by`` has been removed (used for specifying a default ordering to be applied to SELECT queries). * ``validate_backrefs`` has been removed. Back-references are no longer validated. Models ^^^^^^ * :py:class:`BaseModel` has been renamed to :py:class:`ModelBase` * Accessing raw model data is now done using ``__data__`` instead of ``_data`` * The ``_prepare_instance()`` Model method has been removed. * The ``sqlall()`` method, which output the DDL statements to generate a model and its associated indexes, has been removed. Fields ^^^^^^ * ``db_column`` has changed to ``column_name`` * ``db_field`` class attribute changed to ``field_type`` (used if you are implementing custom field subclasses) * ``model_class`` attribute has changed to ``model`` * :py:class:`PrimaryKeyField` has been renamed to :py:class:`AutoField` * :py:class:`ForeignKeyField` constructor has the following changes: * ``rel_model`` has changed to ``model`` * ``to_field`` has changed to ``field`` * ``related_name`` has changed to ``backref`` * :py:class:`ManyToManyField` is now included in the main ``peewee.py`` module * Removed the extension fields ``PasswordField``, ``PickledField`` and ``AESEncryptedField``. Querying ^^^^^^^^ ``JOIN_INNER``, ``JOIN_LEFT_OUTER``, etc are now ``JOIN.INNER``, ``JOIN.LEFT_OUTER``, etc. The C extension that contained implementations of the query result wrappers has been removed. Additionally, :py:meth:`Select.aggregate_rows` has been removed. This helper was used to de-duplicate left-join queries to give the appearance of efficiency when iterating a model and its relations. In practice, the complexity of the code and its somewhat limited usefulness convinced me to scrap it. You can instead use :py:func:`prefetch` to achieve the same result. 
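As a brief sketch of the replacement pattern (the ``User`` and ``Tweet`` models here are illustrative):

.. code-block:: python

    # 2.x: User.select().join(Tweet).aggregate_rows()
    # 3.x: eagerly load each user's tweets with prefetch().
    users = prefetch(User.select(), Tweet.select())
    for user in users:
        for tweet in user.tweets:
            print(user.username, tweet.content)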
* :py:class:`Select` query attribute ``_select`` has changed to ``_returning``
* The ``naive()`` method is now :py:meth:`~BaseQuery.objects`, which defaults
  to using the model class as the constructor, but accepts any callable to
  use as an alternate constructor.
* The ``annotate()`` query method is no longer supported.

The :py:func:`Case` helper has moved from the ``playhouse.shortcuts`` module
into the main peewee module.

The :py:meth:`~BaseColumn.cast` method is no longer a function, but instead
is a method on all column-like objects.

The ``InsertQuery.return_id_list()`` method has been replaced by a more
general pattern of using :py:meth:`_WriteQuery.returning`.

The ``InsertQuery.upsert()`` method has been replaced by the more general and
flexible :py:meth:`Insert.on_conflict` method.

When using :py:func:`prefetch`, the collected instances will be stored in the
same attribute as the foreign-key's ``backref``. Previously, you would access
joined instances using ``(backref)_prefetch``.

The :py:class:`SQL` object, used to create a composable SQL string, now
expects the second parameter to be a list/tuple of parameters.

Removed Extensions
^^^^^^^^^^^^^^^^^^

The following extensions are no longer included in the ``playhouse``:

* ``berkeleydb``
* ``csv_utils``
* ``djpeewee``
* ``gfk``
* ``kv``
* ``pskel``
* ``read_slave``

SQLite Extension
^^^^^^^^^^^^^^^^

The SQLite extension module's :py:class:`VirtualModel` class accepts slightly
different ``Meta`` options:

* ``arguments`` - used to specify arbitrary arguments appended after any
  columns being defined on the virtual table. Should be a list of strings.
* ``extension_module`` (unchanged)
* ``options`` (replaces ``extension_options``) - arbitrary options for the
  virtual table that appear after columns and ``arguments``.
* ``prefix_arguments`` - a list of strings that should appear before any
  arguments or columns in the virtual table declaration.

So, when declaring a model for a virtual table, it will be constructed
roughly like this:

.. code-block:: sql

    CREATE VIRTUAL TABLE "table name" USING extension_module (
        prefix arguments,
        field definitions,
        arguments,
        options)

Postgresql Extension
^^^^^^^^^^^^^^^^^^^^

The `PostgresqlExtDatabase` no longer registers the `hstore` extension by
default. To use the `hstore` extension in 3.0 and onwards, pass
`register_hstore=True` when initializing the database object.

Signals Extension
^^^^^^^^^^^^^^^^^

The ``post_init`` signal has been removed.

New stuff
---------

The query-builder has been rewritten from the ground-up to be more flexible
and powerful. There is now a generic, :ref:`lower-level API ` for
constructing queries.

SQLite
^^^^^^

Many SQLite-specific features have been moved from the
``playhouse.sqlite_ext`` module into ``peewee``, such as:

* User-defined functions, aggregates, collations, and table-functions.
* Loading extensions.
* Specifying pragmas.

See the :ref:`"Using SQLite" section ` and :ref:`"SQLite extensions" `
documents for more details.

SQLite Extension
^^^^^^^^^^^^^^^^

The virtual-table implementation from `sqlite-vtfunc `_ has been folded into
the peewee codebase.

* Support for SQLite online backup API.
* Murmurhash implementation has been corrected.
* A couple of small quirks in the BM25 ranking code have been addressed.
* Numerous user-defined functions for hashing and ranking are now included.
* :py:class:`BloomFilter` implementation.
* Incremental :py:class:`Blob` I/O support.
* Support for update, commit and rollback hooks.
* :py:class:`LSMTable` implementation to support the lsm1 extension.

peewee-3.17.7/docs/peewee/contributing.rst

.. _contributing:

Contributing
============

In order to continually improve, Peewee needs the help of developers like
you. Whether it's contributing patches, submitting bug reports, or just
asking and answering questions, you are helping to make Peewee a better
library.

In this document I'll describe some of the ways you can help.

Patches
-------

Do you have an idea for a new feature, or is there a clunky API you'd like
to improve? Before coding it up and submitting a pull-request, `open a new
issue `_ on GitHub describing your proposed changes. This doesn't have to be
anything formal, just a description of what you'd like to do and why.

When you're ready, you can submit a pull-request with your changes.
Successful patches will have the following:

* Unit tests.
* Documentation, both prose form and general :ref:`API documentation `.
* Code that conforms stylistically with the rest of the Peewee codebase.

Bugs
----

If you've found a bug, please check to see if it has `already been
reported `_, and if not `create an issue on GitHub `_. The more information
you include, the more quickly the bug will get fixed, so please try to
include the following:

* Traceback and the error message (please `format your code `_!)
* Relevant portions of your code or code to reproduce the error
* Peewee version: ``python -c "from peewee import __version__; print(__version__)"``
* Which database you're using

If you have found a bug in the code and submit a failing test-case, then
hats-off to you, you are a hero!

Questions
---------

If you have questions about how to do something with peewee, then I
recommend either:

* Ask on StackOverflow. I check SO just about every day for new peewee
  questions and try to answer them. This has the benefit also of preserving
  the question and answer for other people to find.
* Ask on the mailing list, https://groups.google.com/group/peewee-orm

peewee-3.17.7/docs/peewee/crdb.rst

.. _crdb:

Cockroach Database
------------------

`CockroachDB `_ (CRDB) is well supported by peewee.

.. code-block:: python

    from playhouse.cockroachdb import CockroachDatabase

    db = CockroachDatabase('my_app', user='root', host='10.1.0.8')

If you are using `Cockroach Cloud `_, you may find it easier to specify the
connection parameters using a connection-string:

.. code-block:: python

    db = CockroachDatabase('postgresql://root:secret@host:26257/defaultdb...')

.. note::
    CockroachDB requires the ``psycopg2`` (postgres) Python driver.

.. note::
    CockroachDB installation and getting-started guide can be found here:
    https://www.cockroachlabs.com/docs/stable/install-cockroachdb.html

.. _crdb_ssl:

SSL Configuration
^^^^^^^^^^^^^^^^^

SSL certificates are strongly recommended when running a Cockroach cluster.
Psycopg2 supports SSL out-of-the-box, but you may need to specify some
additional options when initializing your database:

.. code-block:: python

    db = CockroachDatabase(
        'my_app',
        user='root',
        host='10.1.0.8',
        sslmode='verify-full',  # Verify the cert common-name.
        sslrootcert='/path/to/root.crt')

    # Or, alternatively, specified as part of a connection-string:
    db = CockroachDatabase('postgresql://root:secret@host:26257/dbname'
                           '?sslmode=verify-full&sslrootcert=/path/to/root.crt'
                           '&options=--cluster=my-cluster-xyz')

More details about client verification can be found on the `libpq docs `_.

Cockroach Extension APIs
^^^^^^^^^^^^^^^^^^^^^^^^

The ``playhouse.cockroachdb`` extension module provides the following classes
and helpers:

* :py:class:`CockroachDatabase` - a subclass of
  :py:class:`PostgresqlDatabase`, designed specifically for working with CRDB.
* :py:class:`PooledCockroachDatabase` - like the above, but implements
  connection-pooling.
* :py:meth:`~CockroachDatabase.run_transaction` - runs a function inside a
  transaction and provides automatic client-side retry logic.

Special field-types that may be useful when using CRDB:

* :py:class:`UUIDKeyField` - a primary-key field implementation that uses
  CRDB's ``UUID`` type with a default randomly-generated UUID.
* :py:class:`RowIDField` - a primary-key field implementation that uses
  CRDB's ``INT`` type with a default ``unique_rowid()``.
* :py:class:`JSONField` - same as the Postgres :py:class:`BinaryJSONField`,
  as CRDB treats JSON as JSONB.
* :py:class:`ArrayField` - same as the Postgres extension (but does not
  support multi-dimensional arrays).

CRDB is compatible with Postgres' wire protocol and exposes a very similar
SQL interface, so it is possible (though **not recommended**) to use
:py:class:`PostgresqlDatabase` with CRDB:

1. CRDB does not support nested transactions (savepoints), so the
   :py:meth:`~Database.atomic` method has been implemented to enforce this
   when using :py:class:`CockroachDatabase`. For more info, see
   :ref:`crdb-transactions`.
2. CRDB may have subtle differences in field-types, date functions and
   introspection from Postgres.
3. CRDB-specific features are exposed by the :py:class:`CockroachDatabase`,
   such as specifying a transaction priority or the ``AS OF SYSTEM TIME``
   clause.

.. _crdb-transactions:

CRDB Transactions
^^^^^^^^^^^^^^^^^

CRDB does not support nested transactions (savepoints), so the
:py:meth:`~Database.atomic` method on the :py:class:`CockroachDatabase` has
been modified to raise an exception if an invalid nesting is encountered. If
you would like to be able to nest transactional code, you can use the
:py:meth:`~Database.transaction` method, which will ensure that the
outer-most block will manage the transaction (e.g., exiting a nested-block
will not cause an early commit).

Example:

.. code-block:: python

    @db.transaction()
    def create_user(username):
        return User.create(username=username)

    def some_other_function():
        with db.transaction() as txn:
            # do some stuff...

            # This function is wrapped in a transaction, but the nested
            # transaction will be ignored and folded into the outer
            # transaction, as we are already in a wrapped-block (via the
            # context manager).
            create_user('some_user@example.com')

            # do other stuff.

        # At this point we have exited the outer-most block and the
        # transaction will be committed.
        return

CRDB provides client-side transaction retries, which are available using a
special :py:meth:`~CockroachDatabase.run_transaction` helper. This helper
method accepts a callable, which is responsible for executing any
transactional statements that may need to be retried.

Simplest possible example of :py:meth:`~CockroachDatabase.run_transaction`:

..
code-block:: python def create_user(email): # Callable that accepts a single argument (the database instance) and # which is responsible for executing the transactional SQL. def callback(db_ref): return User.create(email=email) return db.run_transaction(callback, max_attempts=10) huey = create_user('huey@example.com') .. note:: The ``cockroachdb.ExceededMaxAttempts`` exception will be raised if the transaction cannot be committed after the given number of attempts. If the SQL is mal-formed, violates a constraint, etc., then the function will raise the exception to the caller. Example of using :py:meth:`~CockroachDatabase.run_transaction` to implement client-side retries for a transaction that transfers an amount from one account to another: .. code-block:: python from playhouse.cockroachdb import CockroachDatabase db = CockroachDatabase('my_app') def transfer_funds(from_id, to_id, amt): """ Returns a 3-tuple of (success?, from balance, to balance). If there are not sufficient funds, then the original balances are returned. """ def thunk(db_ref): src, dest = (Account .select() .where(Account.id.in_([from_id, to_id]))) if src.id != from_id: src, dest = dest, src # Swap order. # Cannot perform transfer, insufficient funds! if src.balance < amt: return False, src.balance, dest.balance # Update each account, returning the new balance. src, = (Account .update(balance=Account.balance - amt) .where(Account.id == from_id) .returning(Account.balance) .execute()) dest, = (Account .update(balance=Account.balance + amt) .where(Account.id == to_id) .returning(Account.balance) .execute()) return True, src.balance, dest.balance # Perform the queries that comprise a logical transaction. In the # event the transaction fails due to contention, it will be auto- # matically retried (up to 10 times). return db.run_transaction(thunk, max_attempts=10) CRDB APIs ^^^^^^^^^ .. py:class:: CockroachDatabase(database[, **kwargs]) CockroachDB implementation, based on the :py:class:`PostgresqlDatabase` and using the ``psycopg2`` driver. Additional keyword arguments are passed to the psycopg2 connection constructor, and may be used to specify the database ``user``, ``port``, etc. Alternatively, the connection details can be specified in URL-form. .. py:method:: run_transaction(callback[, max_attempts=None[, system_time=None[, priority=None]]]) :param callback: callable that accepts a single ``db`` parameter (which will be the database instance this method is called from). :param int max_attempts: max number of times to try before giving up. :param datetime system_time: execute the transaction ``AS OF SYSTEM TIME`` with respect to the given value. :param str priority: either "low", "normal" or "high". :return: returns the value returned by the callback. :raises: ``ExceededMaxAttempts`` if ``max_attempts`` is exceeded. Run SQL in a transaction with automatic client-side retries. User-provided ``callback``: * **Must** accept one parameter, the ``db`` instance representing the connection the transaction is running under. * **Must** not attempt to commit, rollback or otherwise manage the transaction. * **May** be called more than one time. * **Should** ideally only contain SQL operations. Additionally, the database must not have any open transactions at the time this function is called, as CRDB does not support nested transactions. Attempting to do so will raise a ``NotImplementedError``. Simplest possible example: .. 
code-block:: python

    def create_user(email):
        def callback(db_ref):
            return User.create(email=email)

        return db.run_transaction(callback, max_attempts=10)

    user = create_user('huey@example.com')

.. py:class:: PooledCockroachDatabase(database[, **kwargs])

    CockroachDB connection-pooling implementation, based on
    :py:class:`PooledPostgresqlDatabase`. Implements the same APIs as
    :py:class:`CockroachDatabase`, but will do client-side connection pooling.

.. py:function:: run_transaction(db, callback[, max_attempts=None[, system_time=None[, priority=None]]])

    Run SQL in a transaction with automatic client-side retries.

    See :py:meth:`CockroachDatabase.run_transaction` for details.

    :param CockroachDatabase db: database instance.
    :param callback: callable that accepts a single ``db`` parameter (which
        will be the same as the value passed above).

    .. note::
        This function is equivalent to the identically-named method on the
        :py:class:`CockroachDatabase` class.

.. py:class:: UUIDKeyField()

    UUID primary-key field that uses the CRDB ``gen_random_uuid()`` function
    to automatically populate the initial value.

.. py:class:: RowIDField()

    Auto-incrementing integer primary-key field that uses the CRDB
    ``unique_rowid()`` function to automatically populate the initial value.

See also:

* :py:class:`BinaryJSONField` from the Postgresql extension (available in
  the ``cockroachdb`` extension module, and aliased to ``JSONField``).
* :py:class:`ArrayField` from the Postgresql extension.

peewee-3.17.7/docs/peewee/database.rst

.. _database:

Database
========

The Peewee :py:class:`Database` object represents a connection to a database.

The :py:class:`Database` class is instantiated with all the information
needed to open a connection to a database, and then can be used to:

* Open and close connections.
* Execute queries.
* Manage transactions (and savepoints).
* Introspect tables, columns, indexes, and constraints.

Peewee comes with support for SQLite, MySQL, MariaDB and Postgres. Each
database class provides some basic, database-specific configuration options.

.. code-block:: python

    from peewee import *

    # SQLite database using WAL journal mode and 64MB cache.
    sqlite_db = SqliteDatabase('/path/to/app.db', pragmas={
        'journal_mode': 'wal',
        'cache_size': -1024 * 64})

    # Connect to a MySQL database on network.
    mysql_db = MySQLDatabase('my_app', user='app', password='db_password',
                             host='10.1.0.8', port=3306)

    # Connect to a Postgres database.
    pg_db = PostgresqlDatabase('my_app', user='postgres', password='secret',
                               host='10.1.0.9', port=5432)

Peewee provides advanced support for SQLite, Postgres and CockroachDB via
database-specific extension modules. To use the extended-functionality,
import the appropriate database-specific module and use the database class
provided:

.. code-block:: python

    from playhouse.sqlite_ext import SqliteExtDatabase

    # Use SQLite (will register a REGEXP function and set busy timeout to 3s).
    db = SqliteExtDatabase('/path/to/app.db', regexp_function=True, timeout=3,
                           pragmas={'journal_mode': 'wal'})

    from playhouse.postgres_ext import PostgresqlExtDatabase

    # Use Postgres (and register hstore extension).
    db = PostgresqlExtDatabase('my_app', user='postgres', register_hstore=True)

    from playhouse.cockroachdb import CockroachDatabase

    # Use CockroachDB.
db = CockroachDatabase('my_app', user='root', port=26257, host='10.1.0.8') # CockroachDB connections may require a number of parameters, which can # alternatively be specified using a connection-string. db = CockroachDatabase('postgresql://...') For more information on database extensions, see: * :ref:`postgres_ext` * :ref:`sqlite_ext` * :ref:`crdb` * :ref:`sqlcipher_ext` (encrypted SQLite database). * :ref:`apsw` * :ref:`sqliteq` Initializing a Database ----------------------- The :py:class:`Database` initialization method expects the name of the database as the first parameter. Subsequent keyword arguments are passed to the underlying database driver when establishing the connection, allowing you to pass vendor-specific parameters easily. For instance, with Postgresql it is common to need to specify the ``host``, ``user`` and ``password`` when creating your connection. These are not standard Peewee :py:class:`Database` parameters, so they will be passed directly back to ``psycopg2`` when creating connections: .. code-block:: python db = PostgresqlDatabase( 'database_name', # Required by Peewee. user='postgres', # Will be passed directly to psycopg2. password='secret', # Ditto. host='db.mysite.com') # Ditto. As another example, the ``pymysql`` driver accepts a ``charset`` parameter which is not a standard Peewee :py:class:`Database` parameter. To set this value, simply pass in ``charset`` alongside your other values: .. code-block:: python db = MySQLDatabase('database_name', user='www-data', charset='utf8mb4') Consult your database driver's documentation for the available parameters: * Postgres: `psycopg2 `_ * MySQL: `pymysql `_ * MySQL: `mysqlclient `_ * SQLite: `sqlite3 `_ * CockroachDB: see `psycopg2 `_ .. _using_postgresql: Using Postgresql ---------------- To connect to a Postgresql database, we will use :py:class:`PostgresqlDatabase`. The first parameter is always the name of the database, and after that you can specify arbitrary `psycopg2 parameters `_. .. code-block:: python psql_db = PostgresqlDatabase('my_database', user='postgres') class BaseModel(Model): """A base model that will use our Postgresql database""" class Meta: database = psql_db class User(BaseModel): username = CharField() The :ref:`playhouse` contains a :ref:`Postgresql extension module ` which provides many postgres-specific features such as: * :ref:`Arrays ` * :ref:`HStore ` * :ref:`JSON ` * :ref:`Server-side cursors ` * And more! If you would like to use these awesome features, use the :py:class:`PostgresqlExtDatabase` from the ``playhouse.postgres_ext`` module: .. code-block:: python from playhouse.postgres_ext import PostgresqlExtDatabase psql_db = PostgresqlExtDatabase('my_database', user='postgres') Isolation level ^^^^^^^^^^^^^^^ As of Peewee 3.9.7, the isolation level can be specified as an initialization parameter, using the symbolic constants in ``psycopg2.extensions``: .. code-block:: python from psycopg2.extensions import ISOLATION_LEVEL_SERIALIZABLE db = PostgresqlDatabase('my_app', user='postgres', host='db-host', isolation_level=ISOLATION_LEVEL_SERIALIZABLE) .. note:: In older versions, you can manually set the isolation level on the underlying psycopg2 connection. This can be done in a one-off fashion: .. code-block:: python db = PostgresqlDatabase(...) conn = db.connection() # returns current connection. 
        from psycopg2.extensions import ISOLATION_LEVEL_SERIALIZABLE
        conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)

    To run this every time a connection is created, subclass and implement
    the ``_initialize_connection()`` hook, which is designed for this
    purpose:

    .. code-block:: python

        class SerializedPostgresqlDatabase(PostgresqlDatabase):
            def _initialize_connection(self, conn):
                conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)

.. _using_crdb:

Using CockroachDB
-----------------

Connect to CockroachDB (CRDB) using the :py:class:`CockroachDatabase`
database class, defined in ``playhouse.cockroachdb``:

.. code-block:: python

    from playhouse.cockroachdb import CockroachDatabase

    db = CockroachDatabase('my_app', user='root', port=26257, host='localhost')

If you are using `Cockroach Cloud `_, you may find it easier to specify the
connection parameters using a connection-string:

.. code-block:: python

    db = CockroachDatabase('postgresql://root:secret@host:26257/defaultdb...')

.. note::
    CockroachDB requires the ``psycopg2`` (postgres) Python driver.

.. note::
    CockroachDB installation and getting-started guide can be found here:
    https://www.cockroachlabs.com/docs/stable/install-cockroachdb.html

CRDB provides client-side transaction retries, which are available using a
special :py:meth:`CockroachDatabase.run_transaction` helper-method. This
method accepts a callable, which is responsible for executing any
transactional statements that may need to be retried.

Simplest possible example of :py:meth:`~CockroachDatabase.run_transaction`:

.. code-block:: python

    def create_user(email):
        # Callable that accepts a single argument (the database instance) and
        # which is responsible for executing the transactional SQL.
        def callback(db_ref):
            return User.create(email=email)

        return db.run_transaction(callback, max_attempts=10)

    huey = create_user('huey@example.com')

.. note::
    The ``cockroachdb.ExceededMaxAttempts`` exception will be raised if the
    transaction cannot be committed after the given number of attempts. If
    the SQL is mal-formed, violates a constraint, etc., then the function
    will raise the exception to the caller.

For more information, see:

* :ref:`CRDB extension documentation `
* :ref:`SSL configuration with CockroachDB `
* :ref:`Arrays ` (postgres-specific, but applies to CRDB)
* :ref:`JSON ` (postgres-specific, but applies to CRDB)

.. _using_sqlite:

Using SQLite
------------

To connect to a SQLite database, we will use :py:class:`SqliteDatabase`. The
first parameter is the filename containing the database, or the string
``':memory:'`` to create an in-memory database. After the database filename,
you can specify a list of pragmas or any other arbitrary
`sqlite3 parameters `_.

.. code-block:: python

    sqlite_db = SqliteDatabase('my_app.db', pragmas={'journal_mode': 'wal'})

    class BaseModel(Model):
        """A base model that will use our Sqlite database."""
        class Meta:
            database = sqlite_db

    class User(BaseModel):
        username = TextField()
        # etc, etc

Peewee includes a :ref:`SQLite extension module ` which provides many
SQLite-specific features such as :ref:`full-text search `, :ref:`json
extension support `, and much, much more. If you would like to use these
awesome features, use the :py:class:`SqliteExtDatabase` from the
``playhouse.sqlite_ext`` module:

.. code-block:: python

    from playhouse.sqlite_ext import SqliteExtDatabase

    sqlite_db = SqliteExtDatabase('my_app.db', pragmas={
        'journal_mode': 'wal',  # WAL-mode.
        'cache_size': -64 * 1000,  # 64MB cache.
        'synchronous': 0})  # Let the OS manage syncing.
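As a quick taste of the extension module, here is a minimal full-text search
sketch; the ``NoteIndex`` model and its data are hypothetical:

.. code-block:: python

    from playhouse.sqlite_ext import SqliteExtDatabase, FTSModel, SearchField

    db = SqliteExtDatabase('my_app.db')

    class NoteIndex(FTSModel):
        content = SearchField()

        class Meta:
            database = db

    db.create_tables([NoteIndex])
    NoteIndex.insert({NoteIndex.content: 'peewee is a lightweight orm'}).execute()

    # Query the full-text index using the MATCH operator.
    for note in NoteIndex.select().where(NoteIndex.match('peewee')):
        print(note.content)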
.. _sqlite-pragma:

PRAGMA statements
^^^^^^^^^^^^^^^^^

SQLite allows run-time configuration of a number of parameters through
``PRAGMA`` statements (`SQLite documentation `_). These statements are
typically run when a new database connection is created.

To run one or more ``PRAGMA`` statements against new connections, you can
specify them as a dictionary or a list of 2-tuples containing the pragma
name and value:

.. code-block:: python

    db = SqliteDatabase('my_app.db', pragmas={
        'journal_mode': 'wal',
        'cache_size': 10000,  # 10000 pages, or ~40MB
        'foreign_keys': 1,  # Enforce foreign-key constraints
    })

PRAGMAs may also be configured dynamically using either the
:py:meth:`~SqliteDatabase.pragma` method or the special properties exposed
on the :py:class:`SqliteDatabase` object:

.. code-block:: python

    # Set cache size to 64MB for *current connection*.
    db.pragma('cache_size', -1024 * 64)

    # Same as above.
    db.cache_size = -1024 * 64

    # Read the value of several pragmas:
    print('cache_size:', db.cache_size)
    print('foreign_keys:', db.foreign_keys)
    print('journal_mode:', db.journal_mode)
    print('page_size:', db.page_size)

    # Set foreign_keys pragma on current connection *AND* on all
    # connections opened subsequently.
    db.pragma('foreign_keys', 1, permanent=True)

.. attention::
    Pragmas set using the :py:meth:`~SqliteDatabase.pragma` method, by
    default, do not persist after the connection is closed. To configure a
    pragma to be run whenever a connection is opened, specify
    ``permanent=True``.

.. note::
    A full list of PRAGMA settings, their meaning and accepted values can be
    found in the SQLite documentation: http://sqlite.org/pragma.html

Recommended Settings
^^^^^^^^^^^^^^^^^^^^

The following settings are what I use with SQLite for a typical web
application database.

========================= =================== ===============================================
pragma                    recommended setting explanation
========================= =================== ===============================================
journal_mode              wal                 allow readers and writers to co-exist
cache_size                -1 * data_size_kb   set page-cache size in KiB, e.g. -32000 = 32MB
foreign_keys              1                   enforce foreign-key constraints
ignore_check_constraints  0                   enforce CHECK constraints
synchronous               0                   let OS handle fsync (use with caution)
========================= =================== ===============================================

Example database using the above options:

.. code-block:: python

    db = SqliteDatabase('my_app.db', pragmas={
        'journal_mode': 'wal',
        'cache_size': -1 * 64000,  # 64MB
        'foreign_keys': 1,
        'ignore_check_constraints': 0,
        'synchronous': 0})

.. _sqlite-user-functions:

User-defined functions
^^^^^^^^^^^^^^^^^^^^^^

SQLite can be extended with user-defined Python code. The
:py:class:`SqliteDatabase` class supports three types of user-defined
extensions:

* Functions - which take any number of parameters and return a single value.
* Aggregates - which aggregate parameters from multiple rows and return a
  single value.
* Collations - which describe how to sort some value.

.. note::
    For even more extension support, see :py:class:`SqliteExtDatabase`,
    which is in the ``playhouse.sqlite_ext`` module.

Example user-defined function:

..
code-block:: python db = SqliteDatabase('analytics.db') from urllib.parse import urlparse @db.func('hostname') def hostname(url): if url is not None: return urlparse(url).netloc # Call this function in our code: # The following finds the most common hostnames of referrers by count: query = (PageView .select(fn.hostname(PageView.referrer), fn.COUNT(PageView.id)) .group_by(fn.hostname(PageView.referrer)) .order_by(fn.COUNT(PageView.id).desc())) Example user-defined aggregate: .. code-block:: python from hashlib import md5 @db.aggregate('md5') class MD5Checksum(object): def __init__(self): self.checksum = md5() def step(self, value): self.checksum.update(value.encode('utf-8')) def finalize(self): return self.checksum.hexdigest() # Usage: # The following computes an aggregate MD5 checksum for files broken # up into chunks and stored in the database. query = (FileChunk .select(FileChunk.filename, fn.MD5(FileChunk.data)) .group_by(FileChunk.filename) .order_by(FileChunk.filename, FileChunk.sequence)) Example collation: .. code-block:: python @db.collation('ireverse') def collate_reverse(s1, s2): # Case-insensitive reverse. s1, s2 = s1.lower(), s2.lower() return (s1 < s2) - (s1 > s2) # Equivalent to -cmp(s1, s2) # To use this collation to sort books in reverse order... Book.select().order_by(collate_reverse.collation(Book.title)) # Or... Book.select().order_by(Book.title.asc(collation='reverse')) Example user-defined table-value function (see :py:class:`TableFunction` and :py:class:`~SqliteDatabase.table_function`) for additional details: .. code-block:: python from playhouse.sqlite_ext import TableFunction db = SqliteDatabase('my_app.db') @db.table_function('series') class Series(TableFunction): columns = ['value'] params = ['start', 'stop', 'step'] def initialize(self, start=0, stop=None, step=1): """ Table-functions declare an initialize() method, which is called with whatever arguments the user has called the function with. """ self.start = self.current = start self.stop = stop or float('Inf') self.step = step def iterate(self, idx): """ Iterate is called repeatedly by the SQLite database engine until the required number of rows has been read **or** the function raises a `StopIteration` signalling no more rows are available. """ if self.current > self.stop: raise StopIteration ret, self.current = self.current, self.current + self.step return (ret,) # Usage: cursor = db.execute_sql('SELECT * FROM series(?, ?, ?)', (0, 5, 2)) for value, in cursor: print(value) # Prints: # 0 # 2 # 4 For more information, see: * :py:meth:`SqliteDatabase.func` * :py:meth:`SqliteDatabase.aggregate` * :py:meth:`SqliteDatabase.collation` * :py:meth:`SqliteDatabase.table_function` * For even more SQLite extensions, see :ref:`sqlite_ext` .. _sqlite-locking: Set locking mode for transaction ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ SQLite transactions can be opened in three different modes: * *Deferred* (**default**) - only acquires lock when a read or write is performed. The first read creates a `shared lock `_ and the first write creates a `reserved lock `_. Because the acquisition of the lock is deferred until actually needed, it is possible that another thread or process could create a separate transaction and write to the database after the BEGIN on the current thread has executed. * *Immediate* - a `reserved lock `_ is acquired immediately. In this mode, no other database may write to the database or open an *immediate* or *exclusive* transaction. Other processes can continue to read from the database, however. 
* *Exclusive* - opens an `exclusive lock `_ which prevents all (except for
  read uncommitted) connections from accessing the database until the
  transaction is complete.

Example specifying the locking mode:

.. code-block:: python

    db = SqliteDatabase('app.db')

    with db.atomic('EXCLUSIVE'):
        do_something()

    @db.atomic('IMMEDIATE')
    def some_other_function():
        # This function is wrapped in an "IMMEDIATE" transaction.
        do_something_else()

For more information, see the SQLite `locking documentation `_. To learn
more about transactions in Peewee, see the :ref:`transactions`
documentation.

APSW, an Advanced SQLite Driver
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Peewee also comes with an alternate SQLite database that uses :ref:`apsw`.
More information on APSW can be obtained on the `APSW project website `_.
APSW provides special features like:

* Virtual tables, virtual file-systems, Blob I/O, backups and file control.
* Connections can be shared across threads without any additional locking.
* Transactions are managed explicitly by your code.
* Unicode is handled *correctly*.
* APSW is faster than the standard library sqlite3 module.
* Exposes pretty much the entire SQLite C API to your Python app.

If you would like to use APSW, use the :py:class:`APSWDatabase` from the
`apsw_ext` module:

.. code-block:: python

    from playhouse.apsw_ext import APSWDatabase

    apsw_db = APSWDatabase('my_app.db')

.. _using_mariadb:

Using MariaDB
-------------

Peewee supports MariaDB. To use MariaDB, use the MySQL backend, which is
shared between the two. See :ref:`"Using MySQL" ` for more details.

.. _using_mysql:

Using MySQL
-----------

To connect to a MySQL database, we will use :py:class:`MySQLDatabase`. After
the database name, you can specify arbitrary connection parameters that will
be passed back to the driver (e.g. ``pymysql`` or ``mysqlclient``).

.. code-block:: python

    mysql_db = MySQLDatabase('my_database')

    class BaseModel(Model):
        """A base model that will use our MySQL database"""
        class Meta:
            database = mysql_db

    class User(BaseModel):
        username = CharField()
        # etc, etc

Driver information:

* `pymysql `_ is a pure-python mysql client that works with python 2 and 3.
  Peewee will attempt to use pymysql first.
* `mysqlclient `_ uses a C extension and supports python 3. It exposes a
  ``MySQLdb`` module. Peewee will attempt to use this module if pymysql is
  not installed.
* ``mysql-python`` is also called `MySQLdb1 `_ and is legacy and should not
  be used. Since it shares the ``MySQLdb`` module name with mysqlclient, the
  same fallback behavior applies.
* `mysql-connector python `_ is a pure-python driver that supports python 3.
  To use this driver you can use :ref:`MySQLConnectorDatabase` from the
  ``playhouse.mysql_ext`` extension.

Error 2006: MySQL server has gone away
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

This particular error can occur when MySQL kills an idle database
connection. This typically happens with web apps that do not explicitly
manage database connections. What happens is your application starts, a
connection is opened to handle the first query that executes, and, since
that connection is never closed, it remains open, waiting for more queries.

To fix this, make sure you are explicitly connecting to the database when
you need to execute queries, and close your connection when you are done. In
a web-application, this typically means you will open a connection when a
request comes in, and close the connection when you return a response.
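For example, a minimal sketch of this connect/close pattern, where
``handle_request`` and the query stand in for your own request-handling code:

.. code-block:: python

    mysql_db = MySQLDatabase('my_app', user='app', password='db_password',
                             host='10.1.0.8', port=3306)

    def handle_request():
        # Open the connection at the start of the request...
        mysql_db.connect(reuse_if_open=True)
        try:
            return list(User.select())  # Run whatever queries are needed.
        finally:
            # ...and always close it when the response is ready.
            if not mysql_db.is_closed():
                mysql_db.close()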
See the :ref:`framework-integration` section for examples of configuring common web frameworks to manage database connections. Connecting using a Database URL ------------------------------- The playhouse module :ref:`db_url` provides a helper :py:func:`connect` function that accepts a database URL and returns a :py:class:`Database` instance. Example code: .. code-block:: python import os from peewee import * from playhouse.db_url import connect # Connect to the database URL defined in the environment, falling # back to a local Sqlite database if no database URL is specified. db = connect(os.environ.get('DATABASE') or 'sqlite:///default.db') class BaseModel(Model): class Meta: database = db Example database URLs: * ``sqlite:///my_database.db`` will create a :py:class:`SqliteDatabase` instance for the file ``my_database.db`` in the current directory. * ``sqlite:///:memory:`` will create an in-memory :py:class:`SqliteDatabase` instance. * ``postgresql://postgres:my_password@localhost:5432/my_database`` will create a :py:class:`PostgresqlDatabase` instance. A username and password are provided, as well as the host and port to connect to. * ``mysql://user:passwd@ip:port/my_db`` will create a :py:class:`MySQLDatabase` instance for the local MySQL database *my_db*. * :ref:`More examples in the db_url documentation `. .. _deferring_initialization: Run-time database configuration ------------------------------- Sometimes the database connection settings are not known until run-time, when these values may be loaded from a configuration file or the environment. In these cases, you can *defer* the initialization of the database by specifying ``None`` as the database_name. .. code-block:: python database = PostgresqlDatabase(None) # Un-initialized database. class SomeModel(Model): class Meta: database = database If you try to connect or issue any queries while your database is uninitialized you will get an exception: .. code-block:: python >>> database.connect() Exception: Error, database not properly initialized before opening connection To initialize your database, call the :py:meth:`~Database.init` method with the database name and any additional keyword arguments: .. code-block:: python database_name = input('What is the name of the db? ') database.init(database_name, host='localhost', user='postgres') For even more control over initializing your database, see the next section, :ref:`dynamic_db`. .. _dynamic_db: Dynamically defining a database ------------------------------- For even more control over how your database is defined/initialized, you can use the :py:class:`DatabaseProxy` helper. :py:class:`DatabaseProxy` objects act as a placeholder, and then at run-time you can swap it out for a different object. In the example below, we will swap out the database depending on how the app is configured: .. code-block:: python database_proxy = DatabaseProxy() # Create a proxy for our db. class BaseModel(Model): class Meta: database = database_proxy # Use proxy for our DB. class User(BaseModel): username = CharField() # Based on configuration, use a different database. if app.config['DEBUG']: database = SqliteDatabase('local.db') elif app.config['TESTING']: database = SqliteDatabase(':memory:') else: database = PostgresqlDatabase('mega_production_db') # Configure our proxy to use the db we specified in config. database_proxy.initialize(database) .. warning:: Only use this method if your actual database driver varies at run-time. 
For instance, if your tests and local dev environment run on SQLite, but your deployed app uses PostgreSQL, you can use the :py:class:`DatabaseProxy` to swap out engines at run-time. However, if it is only connection values that vary at run-time, such as the path to the database file, or the database host, you should instead use :py:meth:`Database.init`. See :ref:`deferring_initialization` for more details. .. note:: It may be easier to avoid the use of :py:class:`DatabaseProxy` and instead use :py:meth:`Database.bind` and related methods to set or change the database. See :ref:`binding_database` for details. .. _binding_database: Setting the database at run-time -------------------------------- We have seen three ways that databases can be configured with Peewee: .. code-block:: python # The usual way: db = SqliteDatabase('my_app.db', pragmas={'journal_mode': 'wal'}) # Specify the details at run-time: db = SqliteDatabase(None) ... db.init(db_filename, pragmas={'journal_mode': 'wal'}) # Or use a placeholder: db = DatabaseProxy() ... db.initialize(SqliteDatabase('my_app.db', pragmas={'journal_mode': 'wal'})) Peewee can also set or change the database for your model classes. This technique is used by the Peewee test suite to bind test model classes to various database instances when running the tests. There are two sets of complementary methods: * :py:meth:`Database.bind` and :py:meth:`Model.bind` - bind one or more models to a database. * :py:meth:`Database.bind_ctx` and :py:meth:`Model.bind_ctx` - which are the same as their ``bind()`` counterparts, but return a context-manager and are useful when the database should only be changed temporarily. As an example, we'll declare two models **without** specifying any database: .. code-block:: python class User(Model): username = TextField() class Tweet(Model): user = ForeignKeyField(User, backref='tweets') content = TextField() timestamp = TimestampField() Bind the models to a database at run-time: .. code-block:: python postgres_db = PostgresqlDatabase('my_app', user='postgres') sqlite_db = SqliteDatabase('my_app.db') # At this point, the User and Tweet models are NOT bound to any database. # Let's bind them to the Postgres database: postgres_db.bind([User, Tweet]) # Now we will temporarily bind them to the sqlite database: with sqlite_db.bind_ctx([User, Tweet]): # User and Tweet are now bound to the sqlite database. assert User._meta.database is sqlite_db # User and Tweet are once again bound to the Postgres database. assert User._meta.database is postgres_db The :py:meth:`Model.bind` and :py:meth:`Model.bind_ctx` methods work the same for binding a given model class: .. code-block:: python # Bind the user model to the sqlite db. By default, Peewee will also # bind any models that are related to User via foreign-key as well. User.bind(sqlite_db) assert User._meta.database is sqlite_db assert Tweet._meta.database is sqlite_db # Related models bound too. # Here we will temporarily bind *just* the User model to the postgres db. with User.bind_ctx(postgres_db, bind_backrefs=False): assert User._meta.database is postgres_db assert Tweet._meta.database is sqlite_db # Has not changed. # And now User is back to being bound to the sqlite_db. assert User._meta.database is sqlite_db The :ref:`testing` section of this document also contains some examples of using the ``bind()`` methods. 
Thread-Safety and Multiple Databases
------------------------------------

If you plan to change the database at run-time in a multi-threaded
application, storing the model's database in a thread-local will prevent
race-conditions. This can be accomplished with a custom model ``Metadata``
class (see :py:class:`ThreadSafeDatabaseMetadata`, included in
``playhouse.shortcuts``):

.. code-block:: python

    from peewee import *
    from playhouse.shortcuts import ThreadSafeDatabaseMetadata

    class BaseModel(Model):
        class Meta:
            # Instruct peewee to use our thread-safe metadata implementation.
            model_metadata_class = ThreadSafeDatabaseMetadata

The database can now be swapped safely while running in a multi-threaded
environment using the familiar :py:meth:`Database.bind` or
:py:meth:`Database.bind_ctx` methods.

.. _connection_management:

Connection Management
---------------------

To open a connection to a database, use the :py:meth:`Database.connect`
method:

.. code-block:: pycon

    >>> db = SqliteDatabase(':memory:')  # In-memory SQLite database.
    >>> db.connect()
    True

If we try to call ``connect()`` on an already-open database, we get an
:py:class:`OperationalError`:

.. code-block:: pycon

    >>> db.connect()
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "/home/charles/pypath/peewee.py", line 2390, in connect
        raise OperationalError('Connection already opened.')
    peewee.OperationalError: Connection already opened.

To prevent this exception from being raised, we can call ``connect()`` with
an additional argument, ``reuse_if_open``:

.. code-block:: pycon

    >>> db.close()  # Close connection.
    True
    >>> db.connect()
    True
    >>> db.connect(reuse_if_open=True)
    False

Note that the call to ``connect()`` returns ``False`` if the database
connection was already open.

To close a connection, use the :py:meth:`Database.close` method:

.. code-block:: pycon

    >>> db.close()
    True

Calling ``close()`` on an already-closed connection will not result in an
exception, but will return ``False``:

.. code-block:: pycon

    >>> db.connect()  # Open connection.
    True
    >>> db.close()  # Close connection.
    True
    >>> db.close()  # Connection already closed, returns False.
    False

You can test whether the database is closed using the
:py:meth:`Database.is_closed` method:

.. code-block:: pycon

    >>> db.is_closed()
    True

Using autoconnect
^^^^^^^^^^^^^^^^^

It is not necessary to explicitly connect to the database before using it if
the database is initialized with ``autoconnect=True`` (the default).
Managing connections explicitly is considered a **best practice**, therefore
you may consider disabling the ``autoconnect`` behavior.

It is very helpful to be explicit about your connection lifetimes. If the
connection fails, for instance, the exception will be caught when the
connection is being opened, rather than some arbitrary time later when a
query is executed. Furthermore, if using a :ref:`connection pool `, it is
necessary to call :py:meth:`~Database.connect` and
:py:meth:`~Database.close` to ensure connections are recycled properly.

For the best guarantee of correctness, disable ``autoconnect``:

.. code-block:: python

    db = PostgresqlDatabase('my_app', user='postgres', autoconnect=False)

Thread Safety
^^^^^^^^^^^^^

Peewee keeps track of the connection state using thread-local storage,
making the Peewee :py:class:`Database` object safe to use with multiple
threads. Each thread will have its own connection, and as a result any given
thread will only have a single connection open at a given time.
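To illustrate, here is a small sketch, assuming the ``db`` and ``User``
objects from the examples above, in which several threads each manage their
own connection independently:

.. code-block:: python

    import threading

    def worker():
        # Each thread opens (and is responsible for closing) its own
        # connection; the state is tracked in thread-local storage.
        db.connect(reuse_if_open=True)
        try:
            print(threading.current_thread().name, User.select().count())
        finally:
            db.close()

    threads = [threading.Thread(target=worker) for _ in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()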
Context managers
^^^^^^^^^^^^^^^^

The database object itself can be used as a context-manager, which opens a
connection for the duration of the wrapped block of code. Additionally, a
transaction is opened at the start of the wrapped block and committed before
the connection is closed (unless an error occurs, in which case the
transaction is rolled back).

.. code-block:: pycon

    >>> db.is_closed()
    True
    >>> with db:
    ...     print(db.is_closed())  # db is open inside context manager.
    ...
    False
    >>> db.is_closed()  # db is closed.
    True

If you want to manage transactions separately, you can use the
:py:meth:`Database.connection_context` context manager.

.. code-block:: pycon

    >>> with db.connection_context():
    ...     # db connection is open.
    ...     pass
    ...
    >>> db.is_closed()  # db connection is closed.
    True

The ``connection_context()`` method can also be used as a decorator:

.. code-block:: python

    @db.connection_context()
    def prepare_database():
        # DB connection will be managed by the decorator, which opens
        # a connection, calls function, and closes upon returning.
        db.create_tables(MODELS)  # Create schema.
        load_fixture_data(db)

DB-API Connection Object
^^^^^^^^^^^^^^^^^^^^^^^^

To obtain a reference to the underlying DB-API 2.0 connection, use the
:py:meth:`Database.connection` method. This method will return the
currently-open connection object, if one exists, otherwise it will open a
new connection.

.. code-block:: pycon

    >>> db.connection()

.. _connection_pooling:

Connection Pooling
------------------

Connection pooling is provided by the :ref:`pool module `, included in the
:ref:`playhouse ` extensions library. The pool supports:

* Timeout after which connections will be recycled.
* Upper bound on the number of open connections.

.. code-block:: python

    from playhouse.pool import PooledPostgresqlExtDatabase

    db = PooledPostgresqlExtDatabase(
        'my_database',
        max_connections=8,
        stale_timeout=300,
        user='postgres')

    class BaseModel(Model):
        class Meta:
            database = db

The following pooled database classes are available:

* :py:class:`PooledPostgresqlDatabase`
* :py:class:`PooledPostgresqlExtDatabase`
* :py:class:`PooledMySQLDatabase`
* :py:class:`PooledSqliteDatabase`
* :py:class:`PooledSqliteExtDatabase`

For an in-depth discussion of peewee's connection pool, see the :ref:`pool`
section of the :ref:`playhouse ` documentation.

.. _testing:

Testing Peewee Applications
---------------------------

When writing tests for an application that uses Peewee, it may be desirable
to use a special database for tests. Another common practice is to run tests
against a clean database, which means ensuring tables are empty at the start
of each test.

To bind your models to a database at run-time, you can use the following
methods:

* :py:meth:`Database.bind_ctx`, which returns a context-manager that will
  bind the given models to the database instance for the duration of the
  wrapped block.
* :py:meth:`Model.bind_ctx`, which likewise returns a context-manager that
  binds the model (and optionally its dependencies) to the given database
  for the duration of the wrapped block.
* :py:meth:`Database.bind`, which is a one-time operation that binds the
  models (and optionally their dependencies) to the given database.
* :py:meth:`Model.bind`, which is a one-time operation that binds the model
  (and optionally its dependencies) to the given database.

Depending on your use-case, one of these options may make more sense. For
the examples below, I will use :py:meth:`Database.bind`.

Example test-case setup:
 
.. code-block:: python

    # tests.py
    import unittest

    from peewee import SqliteDatabase
    from my_app.models import EventLog, Relationship, Tweet, User

    MODELS = [User, Tweet, EventLog, Relationship]

    # use an in-memory SQLite for tests.
    test_db = SqliteDatabase(':memory:')

    class BaseTestCase(unittest.TestCase):
        def setUp(self):
            # Bind model classes to test db. Since we have a complete list of
            # all models, we do not need to recursively bind dependencies.
            test_db.bind(MODELS, bind_refs=False, bind_backrefs=False)

            test_db.connect()
            test_db.create_tables(MODELS)

        def tearDown(self):
            # Not strictly necessary since SQLite in-memory databases only live
            # for the duration of the connection, and in the next step we close
            # the connection...but a good practice all the same.
            test_db.drop_tables(MODELS)

            # Close connection to db.
            test_db.close()

            # If we wanted, we could re-bind the models to their original
            # database here. But for tests this is probably not necessary.

As an aside, and speaking from experience, I recommend testing your
application using the same database backend you use in production, so as to
avoid any potential compatibility issues.

If you'd like to see some more examples of how to run tests using Peewee,
check out Peewee's own `test-suite `_.

Async with Gevent
-----------------

`gevent `_ is recommended for doing asynchronous I/O with Postgresql or
MySQL. Reasons I prefer gevent:

* No need for special-purpose "loop-aware" re-implementations of
  *everything*. Third-party libraries using asyncio usually have to
  re-implement layers and layers of code as well as re-implementing the
  protocols themselves.
* Gevent allows you to write your application in normal, clean, idiomatic
  Python. No need to litter every line with "async", "await" and other
  noise. No callbacks, futures, tasks, promises. No cruft.
* Gevent works with both Python 2 *and* Python 3.
* Gevent is *Pythonic*. Asyncio is an un-pythonic abomination.

Besides monkey-patching socket, no special steps are required if you are
using **MySQL** with a pure Python driver like `pymysql `_ or are using
`mysql-connector `_ in pure-python mode. MySQL drivers written in C will
require special configuration which is beyond the scope of this document.

For **Postgres** and `psycopg2 `_, which is a C extension, you can use the
following code snippet to register event hooks that will make your
connection async:

.. code-block:: python

    from gevent.socket import wait_read, wait_write
    from psycopg2 import extensions

    # Call this function after monkey-patching socket (etc).
    def patch_psycopg2():
        extensions.set_wait_callback(_psycopg2_gevent_callback)

    def _psycopg2_gevent_callback(conn, timeout=None):
        while True:
            state = conn.poll()
            if state == extensions.POLL_OK:
                break
            elif state == extensions.POLL_READ:
                wait_read(conn.fileno(), timeout=timeout)
            elif state == extensions.POLL_WRITE:
                wait_write(conn.fileno(), timeout=timeout)
            else:
                raise ValueError('poll() returned unexpected result')

**SQLite**, because it is embedded in the Python application itself, does
not do any socket operations that would be a candidate for non-blocking.
Async has no effect one way or the other on SQLite databases.

.. _framework-integration:

Framework Integration
---------------------

For web applications, it is common to open a connection when a request is
received, and to close the connection when the response is delivered. In
this section I will describe how to add hooks to your web app to ensure the
database connection is handled properly.
These steps will ensure that regardless of whether you're using a simple
SQLite database, or a pool of multiple Postgres connections, peewee will
handle the connections correctly.

.. note::
    Applications that receive lots of traffic may benefit from using a
    :ref:`connection pool ` to mitigate the cost of setting up and tearing
    down connections on every request.

Flask
^^^^^

Flask and peewee are a great combo and my go-to for projects of any size.
Flask provides two hooks which we will use to open and close our db
connection. We'll open the connection when a request is received, then close
it when the response is returned.

.. code-block:: python

    from flask import Flask
    from peewee import *

    database = SqliteDatabase('my_app.db')
    app = Flask(__name__)

    # This hook ensures that a connection is opened to handle any queries
    # generated by the request.
    @app.before_request
    def _db_connect():
        database.connect()

    # This hook ensures that the connection is closed when we've finished
    # processing the request.
    @app.teardown_request
    def _db_close(exc):
        if not database.is_closed():
            database.close()

Django
^^^^^^

While it's less common to see peewee used with Django, it is actually very
easy to use the two. To manage your peewee database connections with Django,
the easiest way in my opinion is to add a middleware to your app. The
middleware should be the very first in the list of middlewares, to ensure it
runs first when a request is handled, and last when the response is
returned.

If you have a django project named *my_blog* and your peewee database is
defined in the module ``my_blog.db``, you might add the following middleware
class:

.. code-block:: python

    # middleware.py
    from my_blog.db import database  # Import the peewee database instance.

    def PeeweeConnectionMiddleware(get_response):
        def middleware(request):
            database.connect()
            try:
                response = get_response(request)
            finally:
                if not database.is_closed():
                    database.close()
            return response
        return middleware

    # Older Django < 1.10 middleware.
    class PeeweeConnectionMiddleware(object):
        def process_request(self, request):
            database.connect()

        def process_response(self, request, response):
            if not database.is_closed():
                database.close()
            return response

To ensure this middleware gets executed, add it to your ``settings`` module:

.. code-block:: python

    # settings.py
    MIDDLEWARE_CLASSES = (
        # Our custom middleware appears first in the list.
        'my_blog.middleware.PeeweeConnectionMiddleware',

        # These are the default Django 1.7 middlewares. Yours may differ,
        # but the important thing is that our Peewee middleware comes first.
        'django.middleware.common.CommonMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
    )

    # ... other Django settings ...

Bottle
^^^^^^

I haven't used bottle myself, but looking at the documentation I believe the
following code should ensure the database connections are properly managed:

.. code-block:: python

    # app.py
    from bottle import hook  #, route, etc, etc.
    from peewee import *

    db = SqliteDatabase('my-bottle-app.db')

    @hook('before_request')
    def _connect_db():
        db.connect()

    @hook('after_request')
    def _close_db():
        if not db.is_closed():
            db.close()

    # Rest of your bottle app goes here.

Web.py
^^^^^^

See the documentation for `application processors `_.
.. code-block:: python

    db = SqliteDatabase('my_webpy_app.db')

    def connection_processor(handler):
        db.connect()
        try:
            return handler()
        finally:
            if not db.is_closed():
                db.close()

    app.add_processor(connection_processor)

Tornado
^^^^^^^

It looks like Tornado's ``RequestHandler`` class implements two hooks which
can be used to open and close connections when a request is handled.

.. code-block:: python

    from tornado.web import RequestHandler

    db = SqliteDatabase('my_db.db')

    class PeeweeRequestHandler(RequestHandler):
        def prepare(self):
            db.connect()
            return super(PeeweeRequestHandler, self).prepare()

        def on_finish(self):
            if not db.is_closed():
                db.close()
            return super(PeeweeRequestHandler, self).on_finish()

In your app, instead of extending the default ``RequestHandler``, you can
now extend ``PeeweeRequestHandler``.

Note that this does not address how to use peewee asynchronously with
Tornado or another event loop.

Wheezy.web
^^^^^^^^^^

The connection handling code can be placed in a `middleware `_.

.. code-block:: python

    def peewee_middleware(request, following):
        db.connect()
        try:
            response = following(request)
        finally:
            if not db.is_closed():
                db.close()
        return response

    app = WSGIApplication(middleware=[
        lambda x: peewee_middleware,
        # ... other middlewares ...
    ])

Thanks to GitHub user *@tuukkamustonen* for submitting this code.

Falcon
^^^^^^

The connection handling code can be placed in a `middleware component `_.

.. code-block:: python

    import falcon
    from peewee import *

    database = SqliteDatabase('my_app.db')

    class PeeweeConnectionMiddleware(object):
        def process_request(self, req, resp):
            database.connect()

        def process_response(self, req, resp, resource, req_succeeded):
            if not database.is_closed():
                database.close()

    application = falcon.API(middleware=[
        PeeweeConnectionMiddleware(),
        # ... other middlewares ...
    ])

Pyramid
^^^^^^^

Set up a Request factory that handles database connection lifetime as
follows:

.. code-block:: python

    from pyramid.request import Request

    db = SqliteDatabase('pyramidapp.db')

    class MyRequest(Request):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            db.connect()
            self.add_finished_callback(self.finish)

        def finish(self, request):
            if not db.is_closed():
                db.close()

In your application `main()` make sure `MyRequest` is used as
`request_factory`:

.. code-block:: python

    def main(global_settings, **settings):
        config = Configurator(settings=settings, ...)
        config.set_request_factory(MyRequest)

CherryPy
^^^^^^^^

See `Publish/Subscribe pattern `_.

.. code-block:: python

    def _db_connect():
        db.connect()

    def _db_close():
        if not db.is_closed():
            db.close()

    cherrypy.engine.subscribe('before_request', _db_connect)
    cherrypy.engine.subscribe('after_request', _db_close)

Sanic
^^^^^

In Sanic, the connection handling code can be placed in the request and
response middleware (see `sanic middleware `_).

.. code-block:: python

    # app.py
    @app.middleware('request')
    async def handle_request(request):
        db.connect()

    @app.middleware('response')
    async def handle_response(request, response):
        if not db.is_closed():
            db.close()

FastAPI
^^^^^^^

FastAPI is an asyncio-compatible framework. Peewee relies on thread locals
(which are also compatible with gevent) to manage the connection state
across requests. For use with asyncio, some overrides are necessary to
replace the thread-local behavior with an asyncio-compatible context-local.
Peewee recommends Flask + gevent as a lightweight alternative to an async
web-framework.

Other frameworks
^^^^^^^^^^^^^^^^

Don't see your framework here?
Other frameworks ^^^^^^^^^^^^^^^^ Don't see your framework here? Please `open a GitHub ticket `_ and I'll see about adding a section, or better yet, submit a documentation pull-request. Executing Queries ----------------- SQL queries will typically be executed by calling ``execute()`` on a query constructed using the query-builder APIs (or by simply iterating over a query object in the case of a :py:class:`Select` query). For cases where you wish to execute SQL directly, you can use the :py:meth:`Database.execute_sql` method. .. code-block:: python db = SqliteDatabase('my_app.db') db.connect() # Example of executing a simple query and ignoring the results. db.execute_sql("ATTACH DATABASE ':memory:' AS cache;") # Example of iterating over the results of a query using the cursor. cursor = db.execute_sql('SELECT * FROM users WHERE status = ?', (ACTIVE,)) for row in cursor.fetchall(): # Do something with row, which is a tuple containing column data. pass .. _transactions: Managing Transactions --------------------- Peewee provides several interfaces for working with transactions. The most general is the :py:meth:`Database.atomic` method, which also supports nested transactions. :py:meth:`~Database.atomic` blocks will be run in a transaction or savepoint, depending on the level of nesting. If an unhandled exception occurs in a wrapped block, the current transaction/savepoint will be rolled back. Otherwise the statements will be committed at the end of the wrapped block. Examples: .. code-block:: python # Transaction will commit automatically at the end of the "with" block: with db.atomic() as txn: User.create(username='u1') # Unhandled exceptions will cause transaction to be rolled-back: with db.atomic() as txn: User.create(username='huey') # User has been INSERTed into the database but the transaction is not # yet committed because we haven't left the scope of the "with" block. raise ValueError('uh-oh') # This exception is unhandled - the transaction will be rolled-back and # the ValueError will be raised. .. note:: While inside a block wrapped by the :py:meth:`~Database.atomic` context manager, you can explicitly rollback or commit at any point by calling :py:meth:`Transaction.rollback` or :py:meth:`Transaction.commit`. When you do this inside a wrapped block of code, a new transaction will be started automatically. .. code-block:: python with db.atomic() as transaction: # Opens new transaction. error_saving = False try: save_some_objects() except ErrorSavingData: # Because this block of code is wrapped with "atomic", a # new transaction will begin automatically after the call # to rollback(). transaction.rollback() error_saving = True create_report(error_saving=error_saving) # Note: no need to call commit. Since this marks the end of the # wrapped block of code, the `atomic` context manager will # automatically call commit for us. .. note:: :py:meth:`~Database.atomic` can be used as either a **context manager** or a **decorator**. .. note:: Peewee's behavior differs from the DB-API 2.0 behavior you may be used to (see PEP-249 for details). By default, Peewee puts all connections into **autocommit-mode** and transaction management is handled by Peewee. Context manager ^^^^^^^^^^^^^^^ Using ``atomic`` as context manager: .. code-block:: python db = SqliteDatabase(':memory:') with db.atomic() as txn: # This is the outer-most level, so this block corresponds to # a transaction. User.create(username='charlie') with db.atomic() as nested_txn: # This block corresponds to a savepoint. User.create(username='huey') # This will roll back the above create() query.
nested_txn.rollback() User.create(username='mickey') # When the block ends, the transaction is committed (assuming no error # occurs). At that point there will be two users, "charlie" and "mickey". You can use the ``atomic`` method to perform *get or create* operations as well: .. code-block:: python try: with db.atomic(): user = User.create(username=username) return 'Success' except peewee.IntegrityError: return 'Failure: %s is already in use.' % username Decorator ^^^^^^^^^ Using ``atomic`` as a decorator: .. code-block:: python @db.atomic() def create_user(username): # This statement will run in a transaction. If the caller is already # running in an `atomic` block, then a savepoint will be used instead. return User.create(username=username) create_user('charlie') Nesting Transactions ^^^^^^^^^^^^^^^^^^^^ :py:meth:`~Database.atomic` provides transparent nesting of transactions. When using :py:meth:`~Database.atomic`, the outer-most call will be wrapped in a transaction, and any nested calls will use savepoints. .. code-block:: python with db.atomic() as txn: perform_operation() with db.atomic() as nested_txn: perform_another_operation() Peewee supports nested transactions through the use of savepoints (for more information, see :py:meth:`~Database.savepoint`). Explicit transaction ^^^^^^^^^^^^^^^^^^^^ If you wish to explicitly run code in a transaction, you can use :py:meth:`~Database.transaction`. Like :py:meth:`~Database.atomic`, :py:meth:`~Database.transaction` can be used as a context manager or as a decorator. If an exception occurs in a wrapped block, the transaction will be rolled back. Otherwise the statements will be committed at the end of the wrapped block. .. code-block:: python db = SqliteDatabase(':memory:') with db.transaction() as txn: # Delete the user and their associated tweets. user.delete_instance(recursive=True) Transactions can be explicitly committed or rolled-back within the wrapped block. When this happens, a new transaction will be started. .. code-block:: python with db.transaction() as txn: User.create(username='mickey') txn.commit() # Changes are saved and a new transaction begins. User.create(username='huey') # Roll back. "huey" will not be saved, but since "mickey" was already # committed, that row will remain in the database. txn.rollback() with db.transaction() as txn: User.create(username='whiskers') # Roll back changes, which removes "whiskers". txn.rollback() # Create a new row for "mr. whiskers" which will be implicitly committed # at the end of the `with` block. User.create(username='mr. whiskers') .. note:: If you attempt to nest transactions with peewee using the :py:meth:`~Database.transaction` context manager, only the outer-most transaction will be used. If an exception occurs in a nested block, the transaction will NOT be rolled-back -- only exceptions that bubble-up to the outer-most transaction will trigger a rollback. As this may lead to unpredictable behavior, it is recommended that you use :py:meth:`~Database.atomic`. Explicit Savepoints ^^^^^^^^^^^^^^^^^^^ Just as you can explicitly create transactions, you can also explicitly create savepoints using the :py:meth:`~Database.savepoint` method. Savepoints must occur within a transaction, but can be nested arbitrarily deep. .. code-block:: python with db.transaction() as txn: with db.savepoint() as sp: User.create(username='mickey') with db.savepoint() as sp2: User.create(username='zaizee') sp2.rollback() # "zaizee" will not be saved, but "mickey" will be. .. 
warning:: If you manually commit or roll back a savepoint, a new savepoint **will not** automatically be created. This differs from the behavior of :py:class:`transaction`, which will automatically open a new transaction after manual commit/rollback. Autocommit Mode ^^^^^^^^^^^^^^^ By default, Peewee operates in *autocommit mode*, such that any statements executed outside of a transaction are run in their own transaction. To group multiple statements into a transaction, Peewee provides the :py:meth:`~Database.atomic` context-manager/decorator. This should cover all use-cases, but in the unlikely event you want to temporarily disable Peewee's transaction management completely, you can use the :py:meth:`Database.manual_commit` context-manager/decorator. Here is how you might emulate the behavior of the :py:meth:`~Database.transaction` context manager: .. code-block:: python with db.manual_commit(): db.begin() # Have to begin transaction explicitly. try: user.delete_instance(recursive=True) except: db.rollback() # Rollback! An error occurred. raise else: try: db.commit() # Commit changes. except: db.rollback() raise Again -- I don't anticipate anyone needing this, but it's here just in case. .. _database-errors: Database Errors --------------- The Python DB-API 2.0 spec describes `several types of exceptions `_. Because most database drivers have their own implementations of these exceptions, Peewee simplifies things by providing its own wrappers around any implementation-specific exception classes. That way, you don't need to worry about importing any special exception classes, you can just use the ones from peewee: * ``DatabaseError`` * ``DataError`` * ``IntegrityError`` * ``InterfaceError`` * ``InternalError`` * ``NotSupportedError`` * ``OperationalError`` * ``ProgrammingError`` .. note:: All of these error classes extend ``PeeweeException``. Logging queries --------------- All queries are logged to the *peewee* namespace using the standard library ``logging`` module. Queries are logged using the *DEBUG* level. If you're interested in doing something with the queries, you can simply register a handler. .. code-block:: python # Print all queries to stderr. import logging logger = logging.getLogger('peewee') logger.addHandler(logging.StreamHandler()) logger.setLevel(logging.DEBUG) Adding a new Database Driver ---------------------------- Peewee comes with built-in support for Postgres, MySQL, MariaDB and SQLite. These databases are very popular and run the gamut from fast, embeddable databases to heavyweight servers suitable for large-scale deployments. That being said, there are a ton of cool databases out there and adding support for your database-of-choice should be really easy, provided the driver supports the `DB-API 2.0 spec `_. .. warning:: Peewee requires the database connection be put into autocommit-mode. The DB-API 2.0 spec should be familiar to you if you've used the standard library sqlite3 driver, psycopg2 or the like. Peewee currently relies on a handful of parts: * `Connection.commit` * `Connection.execute` * `Connection.rollback` * `Cursor.description` * `Cursor.fetchone` These methods are generally wrapped up in higher-level abstractions and exposed by the :py:class:`Database`, so even if your driver doesn't do these exactly you can still get a lot of mileage out of peewee. An example is the `apsw sqlite driver `_ in the "playhouse" module. 
The first thing is to provide a subclass of :py:class:`Database` that will open a connection, and ensure the connection is in autocommit-mode (thus disabling all the DB-API transaction semantics): .. code-block:: python from peewee import Database import foodb # Our fictional DB-API 2.0 driver. class FooDatabase(Database): def _connect(self): return foodb.connect(self.database, autocommit=True, **self.connect_params) The :py:class:`Database` provides a higher-level API and is responsible for executing queries, creating tables and indexes, and introspecting the database to get lists of tables. The above implementation is the absolute minimum needed, though some features will not work -- for best results you will want to additionally add a method for extracting a list of tables and indexes for a table from the database. We'll pretend that ``FooDB`` is a lot like MySQL and has special "SHOW" statements: .. code-block:: python class FooDatabase(Database): def _connect(self): return foodb.connect(self.database, autocommit=True, **self.connect_params) def get_tables(self): res = self.execute('SHOW TABLES;') return [r[0] for r in res.fetchall()] Other things the database handles that are not covered here include: * :py:meth:`~Database.last_insert_id` and :py:meth:`~Database.rows_affected` * :py:attr:`~Database.param` and :py:attr:`~Database.quote`, which tell the SQL-generating code how to add parameter placeholders and quote entity names. * :py:attr:`~Database.field_types` for mapping data-types like INT or TEXT to their vendor-specific type names. * :py:attr:`~Database.operations` for mapping operations such as "LIKE/ILIKE" to their database equivalent Refer to the :py:class:`Database` API reference or the `source code `_ for details. .. note:: If your driver conforms to the DB-API 2.0 spec, there shouldn't be much work needed to get up and running. Our new database can be used just like any of the other database subclasses: .. code-block:: python from peewee import * from foodb_ext import FooDatabase db = FooDatabase('my_database', user='foo', password='secret') class BaseModel(Model): class Meta: database = db class Blog(BaseModel): title = CharField() contents = TextField() pub_date = DateTimeField()
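To round out the example, here is a hedged sketch of how the class-level hooks mentioned above might be filled in for our fictional FooDB. The parameter marker, quote characters, and type/operation mappings shown are invented for illustration (real values would come from the vendor's documentation):

.. code-block:: python

    class FooDatabase(Database):
        # How FooDB marks query parameters and quotes entity names.
        param = '?'
        quote = '""'

        # Map peewee's generic data-types to FooDB's vendor-specific
        # type names (invented mappings).
        field_types = {'TEXT': 'CLOB', 'UUID': 'FOO_UUID'}

        # Map peewee operations to their FooDB equivalents.
        operations = {'ILIKE': 'LIKE'}

        def _connect(self):
            return foodb.connect(self.database, autocommit=True,
                                 **self.connect_params)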
peewee-3.17.7/docs/peewee/example.rst000066400000000000000000000355721470346076600175020ustar00rootroot00000000000000.. _example-app: Example app =========== We'll be building a simple *twitter*-like site. The source code for the example can be found in the ``examples/twitter`` directory. You can also `browse the source-code `_ on github. There is also an example `blog app `_ if that's more to your liking, however it is not covered in this guide. The example app uses the `flask `_ web framework which is very easy to get started with. If you don't have flask already, you will need to install it to run the example: .. code-block:: console pip install flask Running the example ------------------- .. image:: tweepee.jpg After ensuring that flask is installed, ``cd`` into the twitter example directory and execute the ``run_example.py`` script: .. code-block:: console python run_example.py The example app will be accessible at http://localhost:5000/ Diving into the code -------------------- For simplicity all example code is contained within a single module, ``examples/twitter/app.py``. For a guide on structuring larger Flask apps with peewee, check out `Structuring Flask Apps `_. .. _example-app-models: Models ^^^^^^ In the spirit of the popular web framework Django, peewee uses declarative model definitions. If you're not familiar with Django, the idea is that you declare a model class for each table. The model class then defines one or more field attributes which correspond to the table's columns. For the twitter clone, there are just three models: *User*: Represents a user account and stores the username and password, an email address for generating avatars using *gravatar*, and a datetime field indicating when that account was created. *Relationship*: This is a utility model that contains two foreign-keys to the *User* model and stores which users follow one another. *Message*: Analogous to a tweet. The Message model stores the text content of the tweet, when it was created, and who posted it (foreign key to User). If you like UML, these are the tables and relationships: .. image:: schema.jpg In order to create these models we need to instantiate a :py:class:`SqliteDatabase` object. Then we define our model classes, specifying the columns as :py:class:`Field` instances on the class. .. code-block:: python # create a peewee database instance -- our models will use this database to # persist information database = SqliteDatabase(DATABASE) # model definitions -- the standard "pattern" is to define a base model class # that specifies which database to use. then, any subclasses will automatically # use the correct storage. class BaseModel(Model): class Meta: database = database # the user model specifies its fields (or columns) declaratively, like django class User(BaseModel): username = CharField(unique=True) password = CharField() email = CharField() join_date = DateTimeField() # this model contains two foreign keys to user -- it essentially allows us to # model a "many-to-many" relationship between users. by querying and joining # on different columns we can expose who a user is "related to" and who is # "related to" a given user class Relationship(BaseModel): from_user = ForeignKeyField(User, backref='relationships') to_user = ForeignKeyField(User, backref='related_to') class Meta: # `indexes` is a tuple of 2-tuples, where the 2-tuples are # a tuple of column names to index and a boolean indicating # whether the index is unique or not. indexes = ( # Specify a unique multi-column index on from/to-user. (('from_user', 'to_user'), True), ) # a dead simple one-to-many relationship: one user has 0..n messages, exposed by # the foreign key. a user's messages will be accessible as a special attribute, # User.messages. class Message(BaseModel): user = ForeignKeyField(User, backref='messages') content = TextField() pub_date = DateTimeField() .. note:: Note that we create a *BaseModel* class that simply defines what database we would like to use. All other models then extend this class and will also use the correct database connection. Peewee supports many different :ref:`field types ` which map to different column types commonly supported by database engines. Conversion between python types and those used in the database is handled transparently, allowing you to use the following in your application: * Strings (unicode or otherwise) * Integers, floats, and ``Decimal`` numbers. * Boolean values * Dates, times and datetimes * ``None`` (NULL) * Binary data Creating tables ^^^^^^^^^^^^^^^ In order to start using the models, it's necessary to create the tables. This is a one-time operation and can be done quickly using the interactive interpreter.
We can create a small helper function to accomplish this: .. code-block:: python def create_tables(): with database: database.create_tables([User, Relationship, Message]) Open a python shell in the directory alongside the example app and execute the following: .. code-block:: python >>> from app import * >>> create_tables() .. note:: If you encounter an *ImportError* it means that either *flask* or *peewee* was not found and may not be installed correctly. Check the :ref:`installation` document for instructions on installing peewee. Every model has a :py:meth:`~Model.create_table` classmethod which runs a SQL *CREATE TABLE* statement in the database. This method will create the table, including all columns, foreign-key constraints, indexes, and sequences. Usually this is something you'll only do once, whenever a new model is added. Peewee provides a helper method :py:meth:`Database.create_tables` which will resolve inter-model dependencies and call :py:meth:`~Model.create_table` on each model, ensuring the tables are created in order. .. note:: Adding fields after the table has been created will require you to either drop the table and re-create it or manually add the columns using an *ALTER TABLE* query. Alternatively, you can use the :ref:`schema migrations ` extension to alter your database schema using Python. Establishing a database connection ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ You may have noticed in the above model code that there is a class defined on the base model named *Meta* that sets the ``database`` attribute. Peewee allows every model to specify which database it uses. There are many :ref:`Meta options ` you can specify which control the behavior of your model. This is a peewee idiom: .. code-block:: python DATABASE = 'tweepee.db' # Create a database instance that will manage the connection and # execute queries database = SqliteDatabase(DATABASE) # Create a base-class all our models will inherit, which defines # the database we'll be using. class BaseModel(Model): class Meta: database = database When developing a web application, it's common to open a connection when a request starts, and close it when the response is returned. **You should always manage your connections explicitly**. For instance, if you are using a :ref:`connection pool `, connections will only be recycled correctly if you call :py:meth:`~Database.connect` and :py:meth:`~Database.close`. We will tell flask that during the request/response cycle we need to create a connection to the database. Flask provides some handy decorators to make this a snap: .. code-block:: python @app.before_request def before_request(): database.connect() @app.after_request def after_request(response): database.close() return response .. note:: Peewee uses thread local storage to manage connection state, so this pattern can be used with multi-threaded WSGI servers. Making queries ^^^^^^^^^^^^^^ In the *User* model there are a few instance methods that encapsulate some user-specific functionality: * ``following()``: who is this user following? * ``followers()``: who is following this user? These methods are similar in their implementation but with an important difference in the SQL *JOIN* and *WHERE* clauses: .. 
code-block:: python def following(self): # query other users through the "relationship" table return (User .select() .join(Relationship, on=Relationship.to_user) .where(Relationship.from_user == self) .order_by(User.username)) def followers(self): return (User .select() .join(Relationship, on=Relationship.from_user) .where(Relationship.to_user == self) .order_by(User.username)) Creating new objects ^^^^^^^^^^^^^^^^^^^^ When a new user wants to join the site we need to make sure the username is available, and if so, create a new *User* record. Looking at the *join()* view, we can see that our application attempts to create the User using :py:meth:`Model.create`. We defined the *User.username* field with a unique constraint, so if the username is taken the database will raise an ``IntegrityError``. .. code-block:: python try: with database.atomic(): # Attempt to create the user. If the username is taken, due to the # unique constraint, the database will raise an IntegrityError. user = User.create( username=request.form['username'], password=md5(request.form['password']).hexdigest(), email=request.form['email'], join_date=datetime.datetime.now()) # mark the user as being 'authenticated' by setting the session vars auth_user(user) return redirect(url_for('homepage')) except IntegrityError: flash('That username is already taken') We will use a similar approach when a user wishes to follow someone. To indicate a following relationship, we create a row in the *Relationship* table pointing from one user to another. Due to the unique index on ``from_user`` and ``to_user``, we will be sure not to end up with duplicate rows: .. code-block:: python user = get_object_or_404(User, username=username) try: with database.atomic(): Relationship.create( from_user=get_current_user(), to_user=user) except IntegrityError: pass Performing subqueries ^^^^^^^^^^^^^^^^^^^^^ If you are logged-in and visit the twitter homepage, you will see tweets from the users that you follow. In order to implement this cleanly, we can use a subquery: .. note:: The subquery, ``user.following()``, would ordinarily select all the columns on the ``User`` model. Because we're using it as a subquery, peewee will only select the primary key. .. code-block:: python # python code user = get_current_user() messages = (Message .select() .where(Message.user.in_(user.following())) .order_by(Message.pub_date.desc())) This code corresponds to the following SQL query: .. code-block:: sql SELECT t1."id", t1."user_id", t1."content", t1."pub_date" FROM "message" AS t1 WHERE t1."user_id" IN ( SELECT t2."id" FROM "user" AS t2 INNER JOIN "relationship" AS t3 ON t2."id" = t3."to_user_id" WHERE t3."from_user_id" = ? ) Other topics of interest ^^^^^^^^^^^^^^^^^^^^^^^^ There are a couple other neat things going on in the example app that are worth mentioning briefly. * Support for paginating lists of results is implemented in a simple function called ``object_list`` (after its counterpart in Django). This function is used by all the views that return lists of objects. .. code-block:: python def object_list(template_name, qr, var_name='object_list', **kwargs): kwargs.update( page=int(request.args.get('page', 1)), pages=qr.count() // 20 + 1) kwargs[var_name] = qr.paginate(kwargs['page']) return render_template(template_name, **kwargs) * Simple authentication system with a ``login_required`` decorator. The first function simply adds user data into the current session when a user successfully logs in.
The decorator ``login_required`` can be used to wrap view functions, checking whether the session is authenticated and, if not, redirecting to the login page. .. code-block:: python def auth_user(user): session['logged_in'] = True session['user'] = user session['username'] = user.username flash('You are logged in as %s' % (user.username)) def login_required(f): @wraps(f) def inner(*args, **kwargs): if not session.get('logged_in'): return redirect(url_for('login')) return f(*args, **kwargs) return inner * Return a 404 response instead of throwing exceptions when an object is not found in the database. .. code-block:: python def get_object_or_404(model, *expressions): try: return model.get(*expressions) except model.DoesNotExist: abort(404) .. note:: To avoid having to frequently copy/paste :py:func:`object_list` or :py:func:`get_object_or_404`, these functions are included as part of the playhouse :ref:`flask extension module `. .. code-block:: python from playhouse.flask_utils import get_object_or_404, object_list More examples ------------- There are more examples included in the peewee `examples directory `_, including: * `Example blog app `_ using Flask and peewee. Also see `accompanying blog post `_. * `An encrypted command-line diary `_. There is a `companion blog post `_ you might enjoy as well. * `Analytics web-service `_ (like a lite version of Google Analytics). Also check out the `companion blog post `_. .. note:: Like these snippets and interested in more? Check out `flask-peewee `_ - a flask plugin that provides a django-like Admin interface, RESTful API, Authentication and more for your peewee models. peewee-3.17.7/docs/peewee/hacks.rst000066400000000000000000000433071470346076600171340ustar00rootroot00000000000000.. _hacks: Hacks ===== Collected hacks using peewee. Have a cool hack you'd like to share? Open `an issue on GitHub `_ or `contact me `_. .. _optimistic_locking: Optimistic Locking ------------------ Optimistic locking is useful in situations where you might ordinarily use a *SELECT FOR UPDATE* (or in SQLite, *BEGIN IMMEDIATE*). For example, you might fetch a user record from the database, make some modifications, then save the modified user record. Typically this scenario would require us to lock the user record for the duration of the transaction, from the moment we select it, to the moment we save our changes. In optimistic locking, on the other hand, we do *not* acquire any lock and instead rely on an internal *version* column in the row we're modifying. At read time, we see what version the row is currently at, and on save, we ensure that the update takes place only if the version is the same as the one we initially read. If the version is higher, then some other process must have snuck in and changed the row -- saving our modified version could result in the loss of important changes. It's quite simple to implement optimistic locking in Peewee. Here is a base class that you can use as a starting point: .. code-block:: python from peewee import * class ConflictDetectedException(Exception): pass class BaseVersionedModel(Model): version = IntegerField(default=1, index=True) def save_optimistic(self): if not self.id: # This is a new record, so the default logic is to perform an # INSERT. Ideally your model would also have a unique # constraint that made it impossible for two INSERTs to happen # at the same time. return self.save() # Update any data that has changed and bump the version counter.
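# (A note on the attributes used below: ``self.__data__`` holds this # instance's raw column values, and ``dirty_fields`` is the list of fields # modified since the row was loaded, so the UPDATE touches only changed # columns.)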
field_data = dict(self.__data__) current_version = field_data.pop('version', 1) self._populate_unsaved_relations(field_data) field_data = self._prune_fields(field_data, self.dirty_fields) if not field_data: raise ValueError('No changes have been made.') ModelClass = type(self) field_data['version'] = ModelClass.version + 1 # Atomic increment. query = ModelClass.update(**field_data).where( (ModelClass.version == current_version) & (ModelClass.id == self.id)) if query.execute() == 0: # No rows were updated, indicating another process has saved # a new version. How you handle this situation is up to you, # but for simplicity I'm just raising an exception. raise ConflictDetectedException() else: # Increment local version to match what is now in the db. self.version += 1 return True Here's an example of how this works. Let's assume we have the following model definition. Note that there's a unique constraint on the username -- this is important as it provides a way to prevent double-inserts. .. code-block:: python class User(BaseVersionedModel): username = CharField(unique=True) favorite_animal = CharField() Example: .. code-block:: pycon >>> u = User(username='charlie', favorite_animal='cat') >>> u.save_optimistic() True >>> u.version 1 >>> u.save_optimistic() Traceback (most recent call last): File "", line 1, in File "x.py", line 18, in save_optimistic raise ValueError('No changes have been made.') ValueError: No changes have been made. >>> u.favorite_animal = 'kitten' >>> u.save_optimistic() True # Simulate a separate thread coming in and updating the model. >>> u2 = User.get(User.username == 'charlie') >>> u2.favorite_animal = 'macaw' >>> u2.save_optimistic() True # Now, attempt to change and re-save the original instance: >>> u.favorite_animal = 'little parrot' >>> u.save_optimistic() Traceback (most recent call last): File "", line 1, in File "x.py", line 30, in save_optimistic raise ConflictDetectedException() ConflictDetectedException: current version is out of sync
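How to recover from a conflict is application-specific, but one plausible pattern (a sketch using the ``User`` model and exception class defined above) is to re-fetch the row and re-apply the change in a small retry loop:

.. code-block:: python

    def update_favorite_animal(user_id, animal, retries=3):
        for _ in range(retries):
            user = User.get_by_id(user_id)  # Re-read the current version.
            user.favorite_animal = animal
            try:
                user.save_optimistic()
            except ConflictDetectedException:
                continue  # Another writer won the race -- try again.
            else:
                return user
        raise ConflictDetectedException('gave up after %d attempts' % retries)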
.. _top_item_per_group: Top object per group -------------------- These examples describe several ways to query the single top item per group. For a thorough discussion of various techniques, check out my blog post `Querying the top item by group with Peewee ORM `_. If you are interested in the more general problem of querying the top *N* items, see the section below :ref:`top_n_per_group`. In these examples we will use the *User* and *Tweet* models to find each user and their most-recent tweet. The most efficient method I found in my testing uses the ``MAX()`` aggregate function. We will perform the aggregation in a non-correlated subquery, so we can be confident this method will be performant. The idea is that we will select the posts, grouped by their author, whose timestamp is equal to the max observed timestamp for that user. .. code-block:: python # When referencing a table multiple times, we'll call Model.alias() to create # a secondary reference to the table. TweetAlias = Tweet.alias() # Create a subquery that will calculate the maximum Tweet created_date for each # user. subquery = (TweetAlias .select( TweetAlias.user, fn.MAX(TweetAlias.created_date).alias('max_ts')) .group_by(TweetAlias.user) .alias('tweet_max_subquery')) # Query for tweets and join using the subquery to match the tweet's user # and created_date. query = (Tweet .select(Tweet, User) .join(User) .switch(Tweet) .join(subquery, on=( (Tweet.created_date == subquery.c.max_ts) & (Tweet.user == subquery.c.user_id)))) SQLite and MySQL are a bit more lax and permit grouping by a subset of the columns that are selected. This means we can do away with the subquery and express it quite concisely: .. code-block:: python query = (Tweet .select(Tweet, User) .join(User) .group_by(Tweet.user) .having(Tweet.created_date == fn.MAX(Tweet.created_date))) .. _top_n_per_group: Top N objects per group ----------------------- These examples describe several ways to query the top *N* items per group reasonably efficiently. For a thorough discussion of various techniques, check out my blog post `Querying the top N objects per group with Peewee ORM `_. In these examples we will use the *User* and *Tweet* models to find each user and their three most-recent tweets. Postgres lateral joins ^^^^^^^^^^^^^^^^^^^^^^ `Lateral joins `_ are a neat Postgres feature that allow reasonably efficient correlated subqueries. They are often described as SQL ``for each`` loops. The desired SQL is: .. code-block:: sql SELECT * FROM (SELECT id, username FROM user) AS uq LEFT JOIN LATERAL (SELECT message, created_date FROM tweet WHERE (user_id = uq.id) ORDER BY created_date DESC LIMIT 3) AS pq ON true To accomplish this with peewee is quite straightforward: .. code-block:: python subq = (Tweet .select(Tweet.message, Tweet.created_date) .where(Tweet.user == User.id) .order_by(Tweet.created_date.desc()) .limit(3)) query = (User .select(User, subq.c.message, subq.c.created_date) .join(subq, JOIN.LEFT_LATERAL) .order_by(User.username, subq.c.created_date.desc())) # We queried from the "perspective" of user, so the rows are User instances # with the addition of a "message" and "created_date" attribute for each of # the (up-to) 3 most-recent tweets for each user. for row in query: print(row.username, row.message, row.created_date) To implement an equivalent query from the "perspective" of the Tweet model, we can instead write: .. code-block:: python # subq is the same as the above example. subq = (Tweet .select(Tweet.message, Tweet.created_date) .where(Tweet.user == User.id) .order_by(Tweet.created_date.desc()) .limit(3)) query = (Tweet .select(User.username, subq.c.message, subq.c.created_date) .from_(User) .join(subq, JOIN.LEFT_LATERAL) .order_by(User.username, subq.c.created_date.desc())) # Each row is a "tweet" instance with an additional "username" attribute. # This will print the (up-to) 3 most-recent tweets from each user. for tweet in query: print(tweet.username, tweet.message, tweet.created_date) Window functions ^^^^^^^^^^^^^^^^ `Window functions `_, which are :ref:`supported by peewee `, provide scalable, efficient performance. The desired SQL is: .. code-block:: sql SELECT subq.message, subq.username FROM ( SELECT t2.message, t3.username, RANK() OVER ( PARTITION BY t2.user_id ORDER BY t2.created_date DESC ) AS rnk FROM tweet AS t2 INNER JOIN user AS t3 ON (t2.user_id = t3.id) ) AS subq WHERE (subq.rnk <= 3) To accomplish this with peewee, we will wrap the ranked Tweets in an outer query that performs the filtering. .. code-block:: python TweetAlias = Tweet.alias() # The subquery will select the relevant data from the Tweet and # User table, as well as ranking the tweets by user from newest # to oldest.
subquery = (TweetAlias .select( TweetAlias.message, User.username, fn.RANK().over( partition_by=[TweetAlias.user], order_by=[TweetAlias.created_date.desc()]).alias('rnk')) .join(User, on=(TweetAlias.user == User.id)) .alias('subq')) # Since we can't filter on the rank, we are wrapping it in a query # and performing the filtering in the outer query. query = (Tweet .select(subquery.c.message, subquery.c.username) .from_(subquery) .where(subquery.c.rnk <= 3)) Other methods ^^^^^^^^^^^^^ If you're not using Postgres, then unfortunately you're left with options that exhibit less-than-ideal performance. For a more complete overview of common methods, check out `this blog post `_. Below I will summarize the approaches and the corresponding SQL. Using ``COUNT``, we can get all tweets where there exist fewer than *N* tweets with more recent timestamps: .. code-block:: python TweetAlias = Tweet.alias() # Create a correlated subquery that calculates the number of # tweets with a higher (newer) timestamp than the tweet we're # looking at in the outer query. subquery = (TweetAlias .select(fn.COUNT(TweetAlias.id)) .where( (TweetAlias.created_date >= Tweet.created_date) & (TweetAlias.user == Tweet.user))) # Wrap the subquery and filter on the count. query = (Tweet .select(Tweet, User) .join(User) .where(subquery <= 3)) We can achieve similar results by doing a self-join and performing the filtering in the ``HAVING`` clause: .. code-block:: python TweetAlias = Tweet.alias() # Use a self-join and join predicates to count the number of # newer tweets. query = (Tweet .select(Tweet.id, Tweet.message, Tweet.user, User.username) .join(User) .switch(Tweet) .join(TweetAlias, on=( (TweetAlias.user == Tweet.user) & (TweetAlias.created_date >= Tweet.created_date))) .group_by(Tweet.id, Tweet.message, Tweet.user, User.username) .having(fn.COUNT(Tweet.id) <= 3)) The last example uses a ``LIMIT`` clause in a correlated subquery. .. code-block:: python TweetAlias = Tweet.alias() # The subquery here will calculate, for the user who created the # tweet in the outer loop, the three newest tweets. The expression # will evaluate to `True` if the outer-loop tweet is in the set of # tweets represented by the inner query. query = (Tweet .select(Tweet, User) .join(User) .where(Tweet.id << ( TweetAlias .select(TweetAlias.id) .where(TweetAlias.user == Tweet.user) .order_by(TweetAlias.created_date.desc()) .limit(3)))) Writing custom functions with SQLite ------------------------------------ SQLite is very easy to extend with custom functions written in Python, which are then callable from your SQL statements. By using the :py:class:`SqliteExtDatabase` and the :py:meth:`~SqliteExtDatabase.func` decorator, you can very easily define your own functions. Here is an example function that generates a hashed version of a user-supplied password. We can also use this to implement ``login`` functionality for matching a user and password. ..
code-block:: python from hashlib import sha1 from random import random from playhouse.sqlite_ext import SqliteExtDatabase db = SqliteExtDatabase('my-blog.db') def get_hexdigest(salt, raw_password): data = salt + raw_password return sha1(data.encode('utf8')).hexdigest() @db.func() def make_password(raw_password): salt = get_hexdigest(str(random()), str(random()))[:5] hsh = get_hexdigest(salt, raw_password) return '%s$%s' % (salt, hsh) @db.func() def check_password(raw_password, enc_password): salt, hsh = enc_password.split('$', 1) return hsh == get_hexdigest(salt, raw_password) Here is how you can use the function to add a new user, storing a hashed password: .. code-block:: python query = User.insert( username='charlie', password=fn.make_password('testing')).execute() If we retrieve the user from the database, the password that's stored is hashed and salted: .. code-block:: pycon >>> user = User.get(User.username == 'charlie') >>> print(user.password) b76fa$88be1adcde66a1ac16054bc17c8a297523170949 To implement ``login``-type functionality, you could write something like this: .. code-block:: python def login(username, password): try: return (User .select() .where( (User.username == username) & (fn.check_password(password, User.password) == True)) .get()) except User.DoesNotExist: # Incorrect username and/or password. return False .. _datemath: Date math --------- Each of the databases supported by Peewee implements its own set of functions and semantics for date/time arithmetic. This section will provide a short scenario and example code demonstrating how you might utilize Peewee to do dynamic date manipulation in SQL. Scenario: we need to run certain tasks every *X* seconds, and both the task intervals and the tasks themselves are defined in the database. We need to write some code that will tell us which tasks we should run at a given time: .. code-block:: python class Schedule(Model): interval = IntegerField() # Run this schedule every X seconds. class Task(Model): schedule = ForeignKeyField(Schedule, backref='tasks') command = TextField() # Run this command. last_run = DateTimeField() # When was this run last? Our logic will essentially boil down to: .. code-block:: python # e.g., if the task was last run at 12:00:05, and the associated interval # is 10 seconds, the next occurrence should be 12:00:15. So we check # whether the current time (now) is 12:00:15 or later. now >= task.last_run + schedule.interval So we can write the following code: .. code-block:: python next_occurrence = something # ??? how do we define this ??? # We can express the current time as a Python datetime value, or we could # alternatively use the appropriate SQL function/name. now = Value(datetime.datetime.now()) # Or SQL('current_timestamp'), e.g. query = (Task .select(Task, Schedule) .join(Schedule) .where(now >= next_occurrence)) For Postgresql we multiply the schedule's interval by a static 1-second SQL ``INTERVAL`` to calculate the offset dynamically: .. code-block:: python second = SQL("INTERVAL '1 second'") next_occurrence = Task.last_run + (Schedule.interval * second) For MySQL we can reference the schedule's interval directly: .. code-block:: python from peewee import NodeList # Needed to construct sql entity. interval = NodeList((SQL('INTERVAL'), Schedule.interval, SQL('SECOND'))) next_occurrence = fn.date_add(Task.last_run, interval) For SQLite, things are slightly tricky because SQLite does not have a dedicated datetime type. So for SQLite, we convert to a unix timestamp, add the schedule seconds, then convert back to a comparable datetime representation: .. code-block:: python next_ts = fn.strftime('%s', Task.last_run) + Schedule.interval next_occurrence = fn.datetime(next_ts, 'unixepoch')
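Putting the pieces together for SQLite, a short sketch of the full query (using the ``Task``/``Schedule`` models above; the other databases would simply substitute their own ``next_occurrence`` expression):

.. code-block:: python

    import datetime

    now = Value(datetime.datetime.now())

    # Convert last_run to a unix timestamp, add the interval (in seconds),
    # then convert back so it compares against the current time.
    next_ts = fn.strftime('%s', Task.last_run) + Schedule.interval
    next_occurrence = fn.datetime(next_ts, 'unixepoch')

    due_tasks = (Task
                 .select(Task, Schedule)
                 .join(Schedule)
                 .where(now >= next_occurrence))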
peewee-3.17.7/docs/peewee/installation.rst000066400000000000000000000100071470346076600205330ustar00rootroot00000000000000.. _installation: Installing and Testing ====================== Most users will want to simply install the latest version, hosted on PyPI: .. code-block:: console pip install peewee Peewee comes with a couple of C extensions that will be built if Cython is available. * Sqlite extensions, which include Cython implementations of the SQLite date manipulation functions, the REGEXP operator, and full-text search result ranking algorithms. Installing with git ------------------- The project is hosted at https://github.com/coleifer/peewee and can be installed using git: .. code-block:: console git clone https://github.com/coleifer/peewee.git cd peewee python setup.py install .. note:: On some systems you may need to use ``sudo python setup.py install`` to install peewee system-wide. If you would like to build the SQLite extension in a git checkout, you can run: .. code-block:: console # Build the C extension and place shared libraries alongside other modules. python setup.py build_ext -i Running tests ------------- You can test your installation by running the test suite. .. code-block:: console python runtests.py You can test specific features or specific database drivers using the ``runtests.py`` script. To view the available test runner options, use: .. code-block:: console python runtests.py --help .. note:: To run tests against Postgres or MySQL you need to create a database named "peewee_test". To test the Postgres extension module, you will also want to install the HStore extension in the postgres test database: .. code-block:: sql -- install the hstore extension on the peewee_test postgres db. CREATE EXTENSION hstore; Optional dependencies --------------------- .. note:: To use Peewee, you typically won't need anything outside the standard library, since most Python distributions are compiled with SQLite support. You can test by running ``import sqlite3`` in the Python console. If you wish to use another database, there are many DB-API 2.0-compatible drivers out there, such as ``pymysql`` or ``psycopg2`` for MySQL and Postgres respectively. * `Cython `_: used to expose additional functionality when using SQLite and to implement things like search result ranking in a performant manner. Since the generated C files are included with the package distribution, Cython is no longer required to use the C extensions. * `apsw `_: an optional 3rd-party SQLite binding offering greater performance and comprehensive support for SQLite's C APIs. Use with :py:class:`APSWDatabase`. * `gevent `_ is an optional dependency for :py:class:`SqliteQueueDatabase` (though it works with ``threading`` just fine). * `BerkeleyDB `_ can be compiled with a SQLite frontend, which works with Peewee. Compiling can be tricky so `here are instructions `_. * Lastly, if you use the *Flask* framework, there are helper extension modules available. Note on the SQLite extensions ----------------------------- Peewee includes two SQLite-specific C extensions which provide additional functionality and improved performance for SQLite database users.
Peewee will attempt to determine ahead-of-time if SQLite3 is installed, and only build the SQLite extensions if the SQLite shared-library is available on your system. If, however, you receive errors like the following when attempting to install Peewee, you can explicitly disable the compilation of the SQLite C extensions by setting the ``NO_SQLITE`` environment variable. .. code-block:: console fatal error: sqlite3.h: No such file or directory Here is how to install Peewee with the SQLite extensions explicitly disabled: .. code-block:: console $ NO_SQLITE=1 python setup.py install peewee-3.17.7/docs/peewee/interactive.rst000066400000000000000000000102121470346076600203450ustar00rootroot00000000000000.. _interactive: Using Peewee Interactively ========================== Peewee contains helpers for working interactively from a Python interpreter or something like a Jupyter notebook. For this example, we'll assume that we have a pre-existing Sqlite database with the following simple schema: .. code-block:: sql CREATE TABLE IF NOT EXISTS "event" ( "id" INTEGER NOT NULL PRIMARY KEY, "key" TEXT NOT NULL, "timestamp" DATETIME NOT NULL, "metadata" TEXT NOT NULL); To experiment with querying this database from an interactive interpreter session, we would start our interpreter and import the following helpers: * ``peewee.SqliteDatabase`` - to reference the "events.db" * ``playhouse.reflection.generate_models`` - to generate models from an existing database. * ``playhouse.reflection.print_model`` - to view the model definition. * ``playhouse.reflection.print_table_sql`` - to view the table SQL. Our terminal session might look like this: .. code-block:: pycon >>> from peewee import SqliteDatabase >>> from playhouse.reflection import generate_models, print_model, print_table_sql >>> The :py:func:`generate_models` function will introspect the database and generate model classes for all the tables that are found. This is a handy way to get started and can save a lot of typing. The function returns a dictionary keyed by the table name, with the generated model as the corresponding value: .. code-block:: pycon >>> db = SqliteDatabase('events.db') >>> models = generate_models(db) >>> list(models.items()) [('events', )] >>> globals().update(models) # Inject models into global namespace. >>> event To take a look at the model definition, which lists the model's fields and data-type, we can use the :py:func:`print_model` function: .. code-block:: pycon >>> print_model(event) event id AUTO key TEXT timestamp DATETIME metadata TEXT We can also generate a SQL ``CREATE TABLE`` for the introspected model, if you find that easier to read. This should match the actual table definition in the introspected database: .. code-block:: pycon >>> print_table_sql(event) CREATE TABLE IF NOT EXISTS "event" ( "id" INTEGER NOT NULL PRIMARY KEY, "key" TEXT NOT NULL, "timestamp" DATETIME NOT NULL, "metadata" TEXT NOT NULL) Now that we are familiar with the structure of the table we're working with, we can run some queries on the generated ``event`` model: .. code-block:: pycon >>> for e in event.select().order_by(event.timestamp).limit(5): ... print(e.key, e.timestamp) ...
e00 2019-01-01 00:01:00 e01 2019-01-01 00:02:00 e02 2019-01-01 00:03:00 e03 2019-01-01 00:04:00 e04 2019-01-01 00:05:00 >>> event.select(fn.MIN(event.timestamp), fn.MAX(event.timestamp)).scalar(as_tuple=True) (datetime.datetime(2019, 1, 1, 0, 1), datetime.datetime(2019, 1, 1, 1, 0)) >>> event.select().count() # Or, len(event) 60 For more information about these APIs and other similar reflection utilities, see the :ref:`reflection` section of the :ref:`playhouse extensions ` document. To generate an actual Python module containing model definitions for an existing database, you can use the command-line :ref:`pwiz ` tool. Here is a quick example: .. code-block:: console $ pwiz -e sqlite events.db > events.py The ``events.py`` file will now be an import-able module containing a database instance (referencing the ``events.db``) along with model definitions for any tables found in the database. ``pwiz`` does some additional nice things like introspecting indexes and adding proper flags for ``NULL``/``NOT NULL`` constraints, etc. The APIs discussed in this section: * :py:func:`generate_models` * :py:func:`print_model` * :py:func:`print_table_sql` More low-level APIs are also available on the :py:class:`Database` instance: * :py:meth:`Database.get_tables` * :py:meth:`Database.get_indexes` * :py:meth:`Database.get_columns` (for a given table) * :py:meth:`Database.get_primary_keys` (for a given table) * :py:meth:`Database.get_foreign_keys` (for a given table) peewee-3.17.7/docs/peewee/models.rst000066400000000000000000001421351470346076600173250ustar00rootroot00000000000000.. _models: Models and Fields ================= :py:class:`Model` classes, :py:class:`Field` instances and model instances all map to database concepts: ================= ================================= Thing Corresponds to... ================= ================================= Model class Database table Field instance Column on a table Model instance Row in a database table ================= ================================= The following code shows the typical way you will define your database connection and model classes. .. _blog-models: .. code-block:: python import datetime from peewee import * db = SqliteDatabase('my_app.db') class BaseModel(Model): class Meta: database = db class User(BaseModel): username = CharField(unique=True) class Tweet(BaseModel): user = ForeignKeyField(User, backref='tweets') message = TextField() created_date = DateTimeField(default=datetime.datetime.now) is_published = BooleanField(default=True) 1. Create an instance of a :py:class:`Database`. .. code-block:: python db = SqliteDatabase('my_app.db') The ``db`` object will be used to manage the connections to the Sqlite database. In this example we're using :py:class:`SqliteDatabase`, but you could also use one of the other :ref:`database engines `. 2. Create a base model class which specifies our database. .. code-block:: python class BaseModel(Model): class Meta: database = db It is good practice to define a base model class which establishes the database connection. This makes your code DRY as you will not have to specify the database for subsequent models. Model configuration is kept namespaced in a special class called ``Meta``. This convention is borrowed from Django. :ref:`Meta ` configuration is passed on to subclasses, so our project's models will all subclass *BaseModel*. There are :ref:`many different attributes ` you can configure using *Model.Meta*. 3. Define a model class. .. 
code-block:: python class User(BaseModel): username = CharField(unique=True) Model definition uses the declarative style seen in other popular ORMs like SQLAlchemy or Django. Note that we are extending the *BaseModel* class so the *User* model will inherit the database connection. We have explicitly defined a single *username* column with a unique constraint. Because we have not specified a primary key, peewee will automatically add an auto-incrementing integer primary key field named *id*. .. note:: If you would like to start using peewee with an existing database, you can use :ref:`pwiz` to automatically generate model definitions. .. _fields: Fields ------ The :py:class:`Field` class is used to describe the mapping of :py:class:`Model` attributes to database columns. Each field type has a corresponding SQL storage class (i.e. varchar, int), and conversion between python data types and underlying storage is handled transparently. When creating a :py:class:`Model` class, fields are defined as class attributes. This should look familiar to users of the django framework. Here's an example: .. code-block:: python class User(Model): username = CharField() join_date = DateTimeField() about_me = TextField() In the above example, because none of the fields are initialized with ``primary_key=True``, an auto-incrementing primary key will automatically be created and named "id". Peewee uses :py:class:`AutoField` to signify an auto-incrementing integer primary key, which implies ``primary_key=True``. There is one special type of field, :py:class:`ForeignKeyField`, which allows you to represent foreign-key relationships between models in an intuitive way: .. code-block:: python class Message(Model): user = ForeignKeyField(User, backref='messages') body = TextField() send_date = DateTimeField(default=datetime.datetime.now) This allows you to write code like the following: .. code-block:: python >>> print(some_message.user.username) Some User >>> for message in some_user.messages: ... print(message.body) some message another message yet another message .. note:: Refer to the :ref:`relationships` document for an in-depth discussion of foreign-keys, joins and relationships between models. For full documentation on fields, see the :ref:`Fields API notes ` .. 
_field_types_table: Field types table ^^^^^^^^^^^^^^^^^ ===================== ================= ================= ================= Field Type Sqlite Postgresql MySQL ===================== ================= ================= ================= ``AutoField`` integer serial integer ``BigAutoField`` integer bigserial bigint ``IntegerField`` integer integer integer ``BigIntegerField`` integer bigint bigint ``SmallIntegerField`` integer smallint smallint ``IdentityField`` not supported int identity not supported ``FloatField`` real real real ``DoubleField`` real double precision double precision ``DecimalField`` decimal numeric numeric ``CharField`` varchar varchar varchar ``FixedCharField`` char char char ``TextField`` text text text ``BlobField`` blob bytea blob ``BitField`` integer bigint bigint ``BigBitField`` blob bytea blob ``UUIDField`` text uuid varchar(40) ``BinaryUUIDField`` blob bytea varbinary(16) ``DateTimeField`` datetime timestamp datetime ``DateField`` date date date ``TimeField`` time time time ``TimestampField`` integer integer integer ``IPField`` integer bigint bigint ``BooleanField`` integer boolean bool ``BareField`` untyped not supported not supported ``ForeignKeyField`` integer integer integer ===================== ================= ================= ================= .. note:: Don't see the field you're looking for in the above table? It's easy to create custom field types and use them with your models. * :ref:`custom-fields` * :py:class:`Database`, particularly the ``fields`` parameter. Field initialization arguments ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Parameters accepted by all field types and their default values: * ``null = False`` -- allow null values * ``index = False`` -- create an index on this column * ``unique = False`` -- create a unique index on this column. See also :ref:`adding composite indexes `. * ``column_name = None`` -- explicitly specify the column name in the database. * ``default = None`` -- any value or callable to use as a default for uninitialized models * ``primary_key = False`` -- primary key for the table * ``constraints = None`` - one or more constraints, e.g. ``[Check('price > 0')]`` * ``sequence = None`` -- sequence name (if backend supports it) * ``collation = None`` -- collation to use for ordering the field / index * ``unindexed = False`` -- indicate field on virtual table should be unindexed (**SQLite-only**) * ``choices = None`` -- optional iterable containing 2-tuples of ``value``, ``display`` * ``help_text = None`` -- string representing any helpful text for this field * ``verbose_name = None`` -- string representing the "user-friendly" name of this field * ``index_type = None`` -- specify a custom index-type, e.g. for Postgres you might specify a ``'BRIN'`` or ``'GIN'`` index. Some fields take special parameters... 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +--------------------------------+------------------------------------------------+ | Field type | Special Parameters | +================================+================================================+ | :py:class:`CharField` | ``max_length`` | +--------------------------------+------------------------------------------------+ | :py:class:`FixedCharField` | ``max_length`` | +--------------------------------+------------------------------------------------+ | :py:class:`DateTimeField` | ``formats`` | +--------------------------------+------------------------------------------------+ | :py:class:`DateField` | ``formats`` | +--------------------------------+------------------------------------------------+ | :py:class:`TimeField` | ``formats`` | +--------------------------------+------------------------------------------------+ | :py:class:`TimestampField` | ``resolution``, ``utc`` | +--------------------------------+------------------------------------------------+ | :py:class:`DecimalField` | ``max_digits``, ``decimal_places``, | | | ``auto_round``, ``rounding`` | +--------------------------------+------------------------------------------------+ | :py:class:`ForeignKeyField` | ``model``, ``field``, ``backref``, | | | ``on_delete``, ``on_update``, ``deferrable`` | | | ``lazy_load`` | +--------------------------------+------------------------------------------------+ | :py:class:`BareField` | ``adapt`` | +--------------------------------+------------------------------------------------+ .. note:: Both ``default`` and ``choices`` could be implemented at the database level as *DEFAULT* and *CHECK CONSTRAINT* respectively, but any application change would require a schema change. Because of this, ``default`` is implemented purely in python and ``choices`` are not validated but exist for metadata purposes only. To add database (server-side) constraints, use the ``constraints`` parameter. Default field values ^^^^^^^^^^^^^^^^^^^^ Peewee can provide default values for fields when objects are created. For example to have an ``IntegerField`` default to zero rather than ``NULL``, you could declare the field with a default value: .. code-block:: python class Message(Model): context = TextField() read_count = IntegerField(default=0) In some instances it may make sense for the default value to be dynamic. A common scenario is using the current date and time. Peewee allows you to specify a function in these cases, whose return value will be used when the object is created. Note we only provide the function, we do not actually *call* it: .. code-block:: python class Message(Model): context = TextField() timestamp = DateTimeField(default=datetime.datetime.now) .. note:: If you are using a field that accepts a mutable type (`list`, `dict`, etc), and would like to provide a default, it is a good idea to wrap your default value in a simple function so that multiple model instances are not sharing a reference to the same underlying object: .. code-block:: python def house_defaults(): return {'beds': 0, 'baths': 0} class House(Model): number = TextField() street = TextField() attributes = JSONField(default=house_defaults) The database can also provide the default value for a field. While peewee does not explicitly provide an API for setting a server-side default value, you can use the ``constraints`` parameter to specify the server default: .. 
code-block:: python

    class Message(Model):
        context = TextField()
        timestamp = DateTimeField(constraints=[SQL('DEFAULT CURRENT_TIMESTAMP')])

.. note::
    **Remember:** when using the ``default`` parameter, the values are set by
    Peewee rather than being a part of the actual table and column definition.

ForeignKeyField
^^^^^^^^^^^^^^^

:py:class:`ForeignKeyField` is a special field type that allows one model to
reference another. Typically a foreign key will contain the primary key of
the model it relates to (though you can point at a different column by
specifying a ``field``). Foreign keys allow data to be
`normalized <https://en.wikipedia.org/wiki/Database_normalization>`_.

In our example models, there is a foreign key from ``Tweet`` to ``User``.
This means that all the users are stored in their own table, as are the
tweets, and the foreign key from tweet to user allows each tweet to *point*
to a particular user object.

.. note::
    Refer to the :ref:`relationships` document for an in-depth discussion of
    foreign keys, joins and relationships between models.

In peewee, accessing the value of a :py:class:`ForeignKeyField` will return
the entire related object, e.g.:

.. code-block:: python

    tweets = (Tweet
              .select(Tweet, User)
              .join(User)
              .order_by(Tweet.created_date.desc()))
    for tweet in tweets:
        print(tweet.user.username, tweet.message)

.. note::
    In the example above the ``User`` data was selected as part of the query.
    For more examples of this technique, see the :ref:`Avoiding N+1 `
    document.

If we did not select the ``User``, though, then an **additional query** would
be issued to fetch the associated ``User`` data:

.. code-block:: python

    tweets = Tweet.select().order_by(Tweet.created_date.desc())
    for tweet in tweets:
        # WARNING: an additional query will be issued for EACH tweet
        # to fetch the associated User data.
        print(tweet.user.username, tweet.message)

Sometimes you only need the associated primary key value from the foreign key
column. In this case, Peewee follows the convention established by Django, of
allowing you to access the raw foreign key value by appending ``"_id"`` to
the foreign key field's name:

.. code-block:: python

    tweets = Tweet.select()
    for tweet in tweets:
        # Instead of "tweet.user", we will just get the raw ID value stored
        # in the column.
        print(tweet.user_id, tweet.message)

To prevent accidentally resolving a foreign-key and triggering an additional
query, :py:class:`ForeignKeyField` supports an initialization parameter
``lazy_load`` which, when disabled, behaves like the ``"_id"`` attribute. For
example:

.. code-block:: python

    class Tweet(Model):
        # ... same fields, except we declare the user FK to have
        # lazy-load disabled:
        user = ForeignKeyField(User, backref='tweets', lazy_load=False)

    for tweet in Tweet.select():
        print(tweet.user, tweet.message)

    # With lazy-load disabled, accessing tweet.user will not perform an extra
    # query and the user ID value is returned instead.
    # e.g.:
    # 1  tweet from user1
    # 1  another from user1
    # 2  tweet from user2

    # However, if we eagerly load the related user object, then the user
    # foreign key will behave like usual:
    for tweet in Tweet.select(Tweet, User).join(User):
        print(tweet.user.username, tweet.message)

    # user1  tweet from user1
    # user1  another from user1
    # user2  tweet from user2

ForeignKeyField Back-references
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

:py:class:`ForeignKeyField` allows for a backreferencing property to be bound
to the target model.
Implicitly, this property will be named ``classname_set``, where
``classname`` is the lowercase name of the class, but it can be overridden
using the ``backref`` parameter:

.. code-block:: python

    class Message(Model):
        from_user = ForeignKeyField(User, backref='outbox')
        to_user = ForeignKeyField(User, backref='inbox')
        text = TextField()

    for message in some_user.outbox:
        # We are iterating over all Messages whose from_user is some_user.
        print(message)

    for message in some_user.inbox:
        # We are iterating over all Messages whose to_user is some_user.
        print(message)

DateTimeField, DateField and TimeField
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The three fields devoted to working with dates and times have special
properties which allow access to things like the year, month, hour, etc.

:py:class:`DateField` has properties for:

* ``year``
* ``month``
* ``day``

:py:class:`TimeField` has properties for:

* ``hour``
* ``minute``
* ``second``

:py:class:`DateTimeField` has all of the above.

These properties can be used just like any other expression. Let's say we
have an events calendar and want to highlight all the days in the current
month that have an event attached:

.. code-block:: python

    # Get the current time.
    now = datetime.datetime.now()

    # Get days that have events for the current month.
    Event.select(Event.event_date.day.alias('day')).where(
        (Event.event_date.year == now.year) &
        (Event.event_date.month == now.month))

.. note::
    SQLite does not have a native date type, so dates are stored in formatted
    text columns. To ensure that comparisons work correctly, the dates need
    to be formatted so they are sorted lexicographically. That is why they
    are stored, by default, as ``YYYY-MM-DD HH:MM:SS``.

BitField and BigBitField
^^^^^^^^^^^^^^^^^^^^^^^^

The :py:class:`BitField` and :py:class:`BigBitField` are new as of 3.0.0. The
former provides a subclass of :py:class:`IntegerField` that is suitable for
storing feature toggles as an integer bitmask. The latter is suitable for
storing a bitmap for a large data-set, e.g. expressing membership or
bitmap-type data.

As an example of using :py:class:`BitField`, let's say we have a *Post* model
and we wish to store certain True/False flags about the post. We could store
all these feature toggles in their own :py:class:`BooleanField` objects, or
we could use :py:class:`BitField` instead:

.. code-block:: python

    class Post(Model):
        content = TextField()
        flags = BitField()

        is_favorite = flags.flag(1)
        is_sticky = flags.flag(2)
        is_minimized = flags.flag(4)
        is_deleted = flags.flag(8)

Using these flags is quite simple:

.. code-block:: pycon

    >>> p = Post()
    >>> p.is_sticky = True
    >>> p.is_minimized = True
    >>> print(p.flags)  # Prints 4 | 2 --> "6"
    6
    >>> p.is_favorite
    False
    >>> p.is_sticky
    True

We can also use the flags on the Post class to build expressions in queries:

.. code-block:: python

    # Generates a WHERE clause that looks like:
    # WHERE (post.flags & 1 != 0)
    favorites = Post.select().where(Post.is_favorite)

    # Query for sticky + favorite posts:
    sticky_faves = Post.select().where(Post.is_sticky & Post.is_favorite)

Since the :py:class:`BitField` is stored in an integer, there is a maximum of
64 flags you can represent (64 bits being the common size of an integer
column). For storing arbitrarily large bitmaps, you can instead use
:py:class:`BigBitField`, which uses an automatically managed buffer of bytes,
stored in a :py:class:`BlobField`.

When bulk-updating one or more bits in a :py:class:`BitField`, you can use
bitwise operators to set or clear one or more bits:

..
code-block:: python # Set the 4th bit on all Post objects. Post.update(flags=Post.flags | 8).execute() # Clear the 1st and 3rd bits on all Post objects. Post.update(flags=Post.flags & ~(1 | 4)).execute() For simple operations, the flags provide handy ``set()`` and ``clear()`` methods for setting or clearing an individual bit: .. code-block:: python # Set the "is_deleted" bit on all posts. Post.update(flags=Post.is_deleted.set()).execute() # Clear the "is_deleted" bit on all posts. Post.update(flags=Post.is_deleted.clear()).execute() Example usage: .. code-block:: python class Bitmap(Model): data = BigBitField() bitmap = Bitmap() # Sets the ith bit, e.g. the 1st bit, the 11th bit, the 63rd, etc. bits_to_set = (1, 11, 63, 31, 55, 48, 100, 99) for bit_idx in bits_to_set: bitmap.data.set_bit(bit_idx) # We can test whether a bit is set using "is_set": assert bitmap.data.is_set(11) assert not bitmap.data.is_set(12) # We can clear a bit: bitmap.data.clear_bit(11) assert not bitmap.data.is_set(11) # We can also "toggle" a bit. Recall that the 63rd bit was set earlier. assert bitmap.data.toggle_bit(63) is False assert bitmap.data.toggle_bit(63) is True assert bitmap.data.is_set(63) # BigBitField supports item accessor by bit-number, e.g.: assert bitmap.data[63] bitmap.data[0] = 1 del bitmap.data[0] # We can also combine bitmaps using bitwise operators, e.g. b = Bitmap(data=b'\x01') b.data |= b'\x02' assert list(b.data) == [1, 1, 0, 0, 0, 0, 0, 0] assert len(b.data) == 1 BareField ^^^^^^^^^ The :py:class:`BareField` class is intended to be used only with SQLite. Since SQLite uses dynamic typing and data-types are not enforced, it can be perfectly fine to declare fields without *any* data-type. In those cases you can use :py:class:`BareField`. It is also common for SQLite virtual tables to use meta-columns or untyped columns, so for those cases as well you may wish to use an untyped field (although for full-text search, you should use :py:class:`SearchField` instead!). :py:class:`BareField` accepts a special parameter ``adapt``. This parameter is a function that takes a value coming from the database and converts it into the appropriate Python type. For instance, if you have a virtual table with an un-typed column but you know that it will return ``int`` objects, you can specify ``adapt=int``. Example: .. code-block:: python db = SqliteDatabase(':memory:') class Junk(Model): anything = BareField() class Meta: database = db # Store multiple data-types in the Junk.anything column: Junk.create(anything='a string') Junk.create(anything=12345) Junk.create(anything=3.14159) .. _custom-fields: Creating a custom field ^^^^^^^^^^^^^^^^^^^^^^^ It is easy to add support for custom field types in peewee. In this example we will create a UUID field for postgresql (which has a native UUID column type). To add a custom field type you need to first identify what type of column the field data will be stored in. If you just want to add python behavior atop, say, a decimal field (for instance to make a currency field) you would just subclass :py:class:`DecimalField`. On the other hand, if the database offers a custom column type you will need to let peewee know. This is controlled by the :py:attr:`Field.field_type` attribute. .. note:: Peewee ships with a :py:class:`UUIDField`, the following code is intended only as an example. Let's start by defining our UUID field: .. code-block:: python class UUIDField(Field): field_type = 'uuid' We will store the UUIDs in a native UUID column. 
Since psycopg2 treats the data as a string by default, we will add two
methods to the field to handle:

* The data coming out of the database to be used in our application
* The data from our python app going into the database

.. code-block:: python

    import uuid

    class UUIDField(Field):
        field_type = 'uuid'

        def db_value(self, value):
            return value.hex  # convert UUID to hex string.

        def python_value(self, value):
            return uuid.UUID(value)  # convert hex string to UUID

**This step is optional.** By default, the ``field_type`` value will be used
for the column's data-type in the database schema. If you need to support
multiple databases which use different data-types for your field-data, you
need to let the database know how to map this *uuid* label to an actual
*uuid* column type in the database. Specify the overrides in the
:py:class:`Database` constructor:

.. code-block:: python

    # Postgres, we use UUID data-type.
    db = PostgresqlDatabase('my_db', field_types={'uuid': 'uuid'})

    # Sqlite doesn't have a UUID type, so we use text type.
    db = SqliteDatabase('my_db', field_types={'uuid': 'text'})

That is it! Some fields may support exotic operations; for example, the
postgresql HStore field acts like a key/value store and has custom operators
for things like *contains* and *update*. You can specify :ref:`custom
operations ` as well. For example code, check out the source code for the
:py:class:`HStoreField`, in ``playhouse.postgres_ext``.

Field-naming conflicts
^^^^^^^^^^^^^^^^^^^^^^

:py:class:`Model` classes implement a number of class- and instance-methods,
for example :py:meth:`Model.save` or :py:meth:`Model.create`. If you declare
a field whose name coincides with a model method, it could cause problems.
Consider:

.. code-block:: python

    class LogEntry(Model):
        event = TextField()
        create = TimestampField()  # Uh-oh.
        update = TimestampField()  # Uh-oh.

To avoid this problem while still using the desired column name in the
database schema, explicitly specify the ``column_name`` while providing an
alternative name for the field attribute:

.. code-block:: python

    class LogEntry(Model):
        event = TextField()
        create_ = TimestampField(column_name='create')
        update_ = TimestampField(column_name='update')

Creating model tables
---------------------

In order to start using our models, it's necessary to open a connection to
the database and create the tables first. Peewee will run the necessary
*CREATE TABLE* queries, additionally creating any constraints and indexes.

.. code-block:: python

    # Connect to our database.
    db.connect()

    # Create the tables.
    db.create_tables([User, Tweet])

.. note::
    Strictly speaking, it is not necessary to call
    :py:meth:`~Database.connect` but it is good practice to be explicit. That
    way if something goes wrong, the error occurs at the connect step, rather
    than some arbitrary time later.

.. note::
    By default, Peewee includes an ``IF NOT EXISTS`` clause when creating
    tables. If you want to disable this, specify ``safe=False``.

After you have created your tables, if you choose to modify your database
schema (by adding, removing or otherwise changing the columns) you will need
to either:

* Drop the table and re-create it.
* Run one or more *ALTER TABLE* queries. Peewee comes with a schema migration
  tool which can greatly simplify this. Check the :ref:`schema migrations `
  docs for details.
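Putting the pieces together, here is a minimal runnable sketch. The in-memory
SQLite database and the pared-down models are illustrative assumptions, not
part of any fixed API:

.. code-block:: python

    from peewee import *

    db = SqliteDatabase(':memory:')  # Illustrative; any backend works the same way.

    class BaseModel(Model):
        class Meta:
            database = db

    class User(BaseModel):
        username = CharField(unique=True)

    class Tweet(BaseModel):
        user = ForeignKeyField(User, backref='tweets')
        message = TextField()

    db.connect()
    db.create_tables([User, Tweet])  # Emits CREATE TABLE IF NOT EXISTS ...

    # Verify what was created by introspecting the connection.
    print(db.get_tables())  # ['tweet', 'user']

..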
_model-options:

Model options and table metadata
--------------------------------

In order not to pollute the model namespace, model-specific configuration is
placed in a special class called *Meta* (a convention borrowed from the
django framework):

.. code-block:: python

    from peewee import *

    contacts_db = SqliteDatabase('contacts.db')

    class Person(Model):
        name = CharField()

        class Meta:
            database = contacts_db

This instructs peewee to use the contacts database whenever a query is
executed on *Person*.

.. note::
    Take a look at :ref:`the sample models ` - you will notice that we
    created a ``BaseModel`` that defined the database, and then extended it.
    This is the preferred way to define a database and create models.

Once the class is defined, you should not access ``ModelClass.Meta``, but
instead use ``ModelClass._meta``:

.. code-block:: pycon

    >>> Person.Meta
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    AttributeError: type object 'Person' has no attribute 'Meta'

    >>> Person._meta
    <peewee.ModelOptions object at 0x...>

The :py:class:`ModelOptions` class implements several methods which may be of
use for retrieving model metadata (such as lists of fields, foreign key
relationships, and more).

.. code-block:: pycon

    >>> Person._meta.fields
    {'id': <AutoField: Person.id>, 'name': <CharField: Person.name>}

    >>> Person._meta.primary_key
    <AutoField: Person.id>

    >>> Person._meta.database
    <peewee.SqliteDatabase object at 0x...>

There are several options you can specify as ``Meta`` attributes. While most
options are inheritable, some are table-specific and will not be inherited by
subclasses.

====================== ====================================================== ====================
Option                 Meaning                                                Inheritable?
====================== ====================================================== ====================
``database``           database for model                                     yes
``table_name``         name of the table to store data                        no
``table_function``     function to generate table name dynamically            yes
``indexes``            a list of fields to index                              yes
``primary_key``        a :py:class:`CompositeKey` instance                    yes
``constraints``        a list of table constraints                            yes
``schema``             the database schema for the model                      yes
``only_save_dirty``    when calling model.save(), only save dirty fields      yes
``options``            dictionary of options for create table extensions      yes
``table_settings``     list of setting strings to go after close parentheses  yes
``temporary``          indicate temporary table                               yes
``legacy_table_names`` use legacy table name generation (enabled by default)  yes
``depends_on``         indicate this table depends on another for creation    no
``without_rowid``      indicate table should not have rowid (SQLite only)     no
``strict_tables``      indicate strict data-types (SQLite only, 3.37+)        yes
====================== ====================================================== ====================

Here is an example showing inheritable versus non-inheritable attributes:

.. code-block:: pycon

    >>> db = SqliteDatabase(':memory:')
    >>> class ModelOne(Model):
    ...     class Meta:
    ...         database = db
    ...         table_name = 'model_one_tbl'
    ...
    >>> class ModelTwo(ModelOne):
    ...     pass
    ...
    >>> ModelOne._meta.database is ModelTwo._meta.database
    True
    >>> ModelOne._meta.table_name == ModelTwo._meta.table_name
    False

Meta.primary_key
^^^^^^^^^^^^^^^^

The ``Meta.primary_key`` attribute is used to specify either a
:py:class:`CompositeKey` or to indicate that the model has *no* primary key.
Composite primary keys are discussed in more detail here:
:ref:`composite-key`. To indicate that a model should not have a primary key,
then set ``primary_key = False``.

Examples:

..
code-block:: python class BlogToTag(Model): """A simple "through" table for many-to-many relationship.""" blog = ForeignKeyField(Blog) tag = ForeignKeyField(Tag) class Meta: primary_key = CompositeKey('blog', 'tag') class NoPrimaryKey(Model): data = IntegerField() class Meta: primary_key = False .. _table_names: Table Names ^^^^^^^^^^^ By default Peewee will automatically generate a table name based on the name of your model class. The way the table-name is generated depends on the value of ``Meta.legacy_table_names``. By default, ``legacy_table_names=True`` so as to avoid breaking backwards-compatibility. However, if you wish to use the new and improved table-name generation, you can specify ``legacy_table_names=False``. This table shows the differences in how a model name is converted to a SQL table name, depending on the value of ``legacy_table_names``: =================== ========================= ============================== Model name legacy_table_names=True legacy_table_names=False (new) =================== ========================= ============================== User user user UserProfile userprofile user_profile APIResponse apiresponse api_response WebHTTPRequest webhttprequest web_http_request mixedCamelCase mixedcamelcase mixed_camel_case Name2Numbers3XYZ name2numbers3xyz name2_numbers3_xyz =================== ========================= ============================== .. attention:: To preserve backwards-compatibility, the current release (Peewee 3.x) specifies ``legacy_table_names=True`` by default. In the next major release (Peewee 4.0), ``legacy_table_names`` will have a default value of ``False``. To explicitly specify the table name for a model class, use the ``table_name`` Meta option. This feature can be useful for dealing with pre-existing database schemas that may have used awkward naming conventions: .. code-block:: python class UserProfile(Model): class Meta: table_name = 'user_profile_tbl' If you wish to implement your own naming convention, you can specify the ``table_function`` Meta option. This function will be called with your model class and should return the desired table name as a string. Suppose our company specifies that table names should be lower-cased and end with "_tbl", we can implement this as a table function: .. code-block:: python def make_table_name(model_class): model_name = model_class.__name__ return model_name.lower() + '_tbl' class BaseModel(Model): class Meta: table_function = make_table_name class User(BaseModel): # table_name will be "user_tbl". class UserProfile(BaseModel): # table_name will be "userprofile_tbl". .. _model_indexes: Indexes and Constraints ----------------------- Peewee can create indexes on single or multiple columns, optionally including a *UNIQUE* constraint. Peewee also supports user-defined constraints on both models and fields. Single-column indexes and constraints ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Single column indexes are defined using field initialization parameters. The following example adds a unique index on the *username* field, and a normal index on the *email* field: .. code-block:: python class User(Model): username = CharField(unique=True) email = CharField(index=True) To add a user-defined constraint on a column, you can pass it in using the ``constraints`` parameter. You may wish to specify a default value as part of the schema, or add a ``CHECK`` constraint, for example: .. 
code-block:: python class Product(Model): name = CharField(unique=True) price = DecimalField(constraints=[Check('price < 10000')]) created = DateTimeField( constraints=[SQL("DEFAULT (datetime('now'))")]) Multi-column indexes ^^^^^^^^^^^^^^^^^^^^ Multi-column indexes may be defined as *Meta* attributes using a nested tuple. Each database index is a 2-tuple, the first part of which is a tuple of the names of the fields, the second part a boolean indicating whether the index should be unique. .. code-block:: python class Transaction(Model): from_acct = CharField() to_acct = CharField() amount = DecimalField() date = DateTimeField() class Meta: indexes = ( # create a unique on from/to/date (('from_acct', 'to_acct', 'date'), True), # create a non-unique on from/to (('from_acct', 'to_acct'), False), ) .. note:: Remember to add a **trailing comma** if your tuple of indexes contains only one item: .. code-block:: python class Meta: indexes = ( (('first_name', 'last_name'), True), # Note the trailing comma! ) Advanced Index Creation ^^^^^^^^^^^^^^^^^^^^^^^ Peewee supports a more structured API for declaring indexes on a model using the :py:meth:`Model.add_index` method or by directly using the :py:class:`ModelIndex` helper class. Examples: .. code-block:: python class Article(Model): name = TextField() timestamp = TimestampField() status = IntegerField() flags = IntegerField() # Add an index on "name" and "timestamp" columns. Article.add_index(Article.name, Article.timestamp) # Add a partial index on name and timestamp where status = 1. Article.add_index(Article.name, Article.timestamp, where=(Article.status == 1)) # Create a unique index on timestamp desc, status & 4. idx = Article.index( Article.timestamp.desc(), Article.flags.bin_and(4), unique=True) Article.add_index(idx) .. warning:: SQLite does not support parameterized ``CREATE INDEX`` queries. This means that when using SQLite to create an index that involves an expression or scalar value, you will need to declare the index using the :py:class:`SQL` helper: .. code-block:: python # SQLite does not support parameterized CREATE INDEX queries, so # we declare it manually. Article.add_index(SQL('CREATE INDEX ...')) See :py:meth:`~Model.add_index` for details. For more information, see: * :py:meth:`Model.add_index` * :py:meth:`Model.index` * :py:class:`ModelIndex` * :py:class:`Index` Table constraints ^^^^^^^^^^^^^^^^^ Peewee allows you to add arbitrary constraints to your :py:class:`Model`, that will be part of the table definition when the schema is created. For instance, suppose you have a *people* table with a composite primary key of two columns, the person's first and last name. You wish to have another table relate to the *people* table, and to do this, you will need to define a foreign key constraint: .. code-block:: python class Person(Model): first = CharField() last = CharField() class Meta: primary_key = CompositeKey('first', 'last') class Pet(Model): owner_first = CharField() owner_last = CharField() pet_name = CharField() class Meta: constraints = [SQL('FOREIGN KEY(owner_first, owner_last) ' 'REFERENCES person(first, last)')] You can also implement ``CHECK`` constraints at the table level: .. code-block:: python class Product(Model): name = CharField(unique=True) price = DecimalField() class Meta: constraints = [Check('price < 10000')] .. 
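Constraint violations surface when you attempt to write. As a brief sketch
(assuming the ``Product`` model above is bound to a database whose table has
been created), inserting a row that violates the ``CHECK`` raises peewee's
``IntegrityError``:

.. code-block:: python

    from peewee import IntegrityError

    # The CHECK constraint is enforced by the database itself; peewee
    # surfaces the failed INSERT as an IntegrityError.
    try:
        Product.create(name='solid gold toilet', price=2500000)
    except IntegrityError:
        print('Refused: price must be less than 10000.')

..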
_non_integer_primary_keys:

Primary Keys, Composite Keys and other Tricks
---------------------------------------------

The :py:class:`AutoField` is used to identify an auto-incrementing integer
primary key. If you do not specify a primary key, Peewee will automatically
create an auto-incrementing primary key named "id".

To specify an auto-incrementing ID using a different field name, you can
write:

.. code-block:: python

    class Event(Model):
        event_id = AutoField()  # Event.event_id will be auto-incrementing PK.
        name = CharField()
        timestamp = DateTimeField(default=datetime.datetime.now)
        metadata = BlobField()

You can identify a different field as the primary key, in which case an "id"
column will not be created. In this example we will use a person's email
address as the primary key:

.. code-block:: python

    class Person(Model):
        email = CharField(primary_key=True)
        name = TextField()
        dob = DateField()

.. warning::
    I frequently see people write the following, expecting an
    auto-incrementing integer primary key:

    .. code-block:: python

        class MyModel(Model):
            id = IntegerField(primary_key=True)

    Peewee understands the above model declaration as a model with an integer
    primary key, but the value of that ID is determined by the application.
    To create an auto-incrementing integer primary key, you would instead
    write:

    .. code-block:: python

        class MyModel(Model):
            id = AutoField()  # primary_key=True is implied.

Composite primary keys can be declared using :py:class:`CompositeKey`. Note
that doing this may cause issues with :py:class:`ForeignKeyField`, as Peewee
does not support the concept of a "composite foreign-key". As such, I've
found it only advisable to use composite primary keys in a handful of
situations, such as trivial many-to-many junction tables:

.. code-block:: python

    class Image(Model):
        filename = TextField()
        mimetype = CharField()

    class Tag(Model):
        label = CharField()

    class ImageTag(Model):  # Many-to-many relationship.
        image = ForeignKeyField(Image)
        tag = ForeignKeyField(Tag)

        class Meta:
            primary_key = CompositeKey('image', 'tag')

In the extremely rare case you wish to declare a model with *no* primary key,
you can specify ``primary_key = False`` in the model ``Meta`` options.

Non-integer primary keys
^^^^^^^^^^^^^^^^^^^^^^^^

If you would like to use a non-integer primary key (which I generally don't
recommend), you can specify ``primary_key=True`` when creating a field. When
you wish to create a new instance for a model using a non-autoincrementing
primary key, you need to be sure you call :py:meth:`~Model.save` specifying
``force_insert=True``.

.. code-block:: python

    from peewee import *

    class UUIDModel(Model):
        id = UUIDField(primary_key=True)

Auto-incrementing IDs are, as their name says, automatically generated for
you when you insert a new row into the database. When you call
:py:meth:`~Model.save`, peewee determines whether to do an *INSERT* versus an
*UPDATE* based on the presence of a primary key value. Since, with our uuid
example, the database driver won't generate a new ID, we need to specify it
manually. When we call save() for the first time, pass in
``force_insert = True``:

.. code-block:: python

    # This works because .create() will specify `force_insert=True`.
    obj1 = UUIDModel.create(id=uuid.uuid4())

    # This will not work, however. Peewee will attempt to do an update:
    obj2 = UUIDModel(id=uuid.uuid4())
    obj2.save()  # WRONG

    obj2.save(force_insert=True)  # CORRECT

    # Once the object has been created, you can call save() normally.
    obj2.save()
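Here is the complete non-integer primary-key flow as a minimal runnable
sketch. The in-memory SQLite database is an illustrative assumption (on
SQLite the UUID is stored as text):

.. code-block:: python

    import uuid
    from peewee import *

    db = SqliteDatabase(':memory:')

    class UUIDModel(Model):
        id = UUIDField(primary_key=True)

        class Meta:
            database = db

    db.create_tables([UUIDModel])

    # .create() implies force_insert=True, so an INSERT is issued.
    obj = UUIDModel.create(id=uuid.uuid4())

    # The UUID round-trips: fetching by primary key returns a uuid.UUID.
    same_obj = UUIDModel.get(UUIDModel.id == obj.id)
    assert same_obj.id == obj.id

..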
note:: Any foreign keys to a model with a non-integer primary key will have a ``ForeignKeyField`` use the same underlying storage type as the primary key they are related to. .. _composite-key: Composite primary keys ^^^^^^^^^^^^^^^^^^^^^^ Peewee has very basic support for composite keys. In order to use a composite key, you must set the ``primary_key`` attribute of the model options to a :py:class:`CompositeKey` instance: .. code-block:: python class BlogToTag(Model): """A simple "through" table for many-to-many relationship.""" blog = ForeignKeyField(Blog) tag = ForeignKeyField(Tag) class Meta: primary_key = CompositeKey('blog', 'tag') .. warning:: Peewee does not support foreign-keys to models that define a :py:class:`CompositeKey` primary key. If you wish to add a foreign-key to a model that has a composite primary key, replicate the columns on the related model and add a custom accessor (e.g. a property). Manually specifying primary keys ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Sometimes you do not want the database to automatically generate a value for the primary key, for instance when bulk loading relational data. To handle this on a *one-off* basis, you can simply tell peewee to turn off ``auto_increment`` during the import: .. code-block:: python data = load_user_csv() # load up a bunch of data User._meta.auto_increment = False # turn off auto incrementing IDs with db.atomic(): for row in data: u = User(id=row[0], username=row[1]) u.save(force_insert=True) # <-- force peewee to insert row User._meta.auto_increment = True Although a better way to accomplish the above, without resorting to hacks, is to use the :py:meth:`Model.insert_many` API: .. code-block:: python data = load_user_csv() fields = [User.id, User.username] with db.atomic(): User.insert_many(data, fields=fields).execute() If you *always* want to have control over the primary key, simply do not use the :py:class:`AutoField` field type, but use a normal :py:class:`IntegerField` (or other column type): .. code-block:: python class User(BaseModel): id = IntegerField(primary_key=True) username = CharField() >>> u = User.create(id=999, username='somebody') >>> u.id 999 >>> User.get(User.username == 'somebody').id 999 Models without a Primary Key ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If you wish to create a model with no primary key, you can specify ``primary_key = False`` in the inner ``Meta`` class: .. code-block:: python class MyData(BaseModel): timestamp = DateTimeField() value = IntegerField() class Meta: primary_key = False This will yield the following DDL: .. code-block:: sql CREATE TABLE "mydata" ( "timestamp" DATETIME NOT NULL, "value" INTEGER NOT NULL ) .. warning:: Some model APIs may not work correctly for models without a primary key, for instance :py:meth:`~Model.save` and :py:meth:`~Model.delete_instance` (you can instead use :py:meth:`~Model.insert`, :py:meth:`~Model.update` and :py:meth:`~Model.delete`). Self-referential foreign keys ----------------------------- When creating a hierarchical structure it is necessary to create a self-referential foreign key which links a child object to its parent. Because the model class is not defined at the time you instantiate the self-referential foreign key, use the special string ``'self'`` to indicate a self-referential foreign key: .. code-block:: python class Category(Model): name = CharField() parent = ForeignKeyField('self', null=True, backref='children') As you can see, the foreign key points *upward* to the parent object and the back-reference is named *children*. .. 
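A short sketch of building and traversing a small tree with this model
(assuming ``Category`` is bound to a database and its table has been
created):

.. code-block:: python

    # Top-level categories have no parent, which is why the foreign key
    # is declared with null=True.
    root = Category.create(name='Electronics', parent=None)
    Category.create(name='Laptops', parent=root)
    Category.create(name='Phones', parent=root)

    # The "children" back-reference selects all rows whose parent is root.
    for child in root.children.order_by(Category.name):
        print(child.name)
    # Laptops
    # Phones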
.. attention::
    Self-referential foreign-keys should always be ``null=True``.

When querying against a model that contains a self-referential foreign key
you may sometimes need to perform a self-join. In those cases you can use
:py:meth:`Model.alias` to create a table reference. Here is how you might
query the category and parent model using a self-join:

.. code-block:: python

    Parent = Category.alias()
    GrandParent = Category.alias()
    query = (Category
             .select(Category, Parent)
             .join(Parent, on=(Category.parent == Parent.id))
             .join(GrandParent, on=(Parent.parent == GrandParent.id))
             .where(GrandParent.name == 'some category')
             .order_by(Category.name))

.. _circular-fks:

Circular foreign key dependencies
---------------------------------

Sometimes it happens that you will create a circular dependency between two
tables.

.. note::
    My personal opinion is that circular foreign keys are a code smell and
    should be refactored (by adding an intermediary table, for instance).

Adding circular foreign keys with peewee is a bit tricky because at the time
you are defining either foreign key, the model it points to will not have
been defined yet, causing a ``NameError``.

.. code-block:: python

    class User(Model):
        username = CharField()
        favorite_tweet = ForeignKeyField(Tweet, null=True)  # NameError!!

    class Tweet(Model):
        message = TextField()
        user = ForeignKeyField(User, backref='tweets')

One option is to simply use an :py:class:`IntegerField` to store the raw ID:

.. code-block:: python

    class User(Model):
        username = CharField()
        favorite_tweet_id = IntegerField(null=True)

By using :py:class:`DeferredForeignKey` we can get around the problem and
still use a foreign key field:

.. code-block:: python

    class User(Model):
        username = CharField()
        # Tweet has not been defined yet so use the deferred reference.
        favorite_tweet = DeferredForeignKey('Tweet', null=True)

    class Tweet(Model):
        message = TextField()
        user = ForeignKeyField(User, backref='tweets')

    # Now that Tweet is defined, "favorite_tweet" has been converted into
    # a ForeignKeyField.
    print(User.favorite_tweet)

There is one more quirk to watch out for, though. When you call
:py:meth:`~Model.create_table` we will again encounter the same issue. For
this reason peewee will not automatically create a foreign key constraint for
any *deferred* foreign keys.

To create the tables *and* the foreign-key constraint, you can use the
:py:meth:`SchemaManager.create_foreign_key` method to create the constraint
after creating the tables:

.. code-block:: python

    # Will create the User and Tweet tables, but does *not* create a
    # foreign-key constraint on User.favorite_tweet.
    db.create_tables([User, Tweet])

    # Create the foreign-key constraint:
    User._schema.create_foreign_key(User.favorite_tweet)

.. note::
    Because SQLite has limited support for altering tables, foreign-key
    constraints cannot be added to a table after it has been created.

.. _playhouse:

Playhouse, extensions to Peewee
===============================

Peewee comes with numerous extension modules which are collected under the
``playhouse`` namespace. Despite the silly name, there are some very useful
extensions, particularly those that expose vendor-specific database features
like the :ref:`sqlite_ext` and :ref:`postgres_ext` extensions.

Below you will find a loosely organized listing of the various modules that
make up the ``playhouse``.
**Database drivers / vendor-specific database functionality** * :ref:`sqlite_ext` (on its own page) * :ref:`sqliteq` * :ref:`sqlite_udf` * :ref:`apsw` * :ref:`sqlcipher_ext` * :ref:`postgres_ext` * :ref:`crdb` * :ref:`mysql_ext` **High-level features** * :ref:`extra-fields` * :ref:`shortcuts` * :ref:`hybrid` * :ref:`kv` * :ref:`signals` * :ref:`dataset` **Database management and framework integration** * :ref:`pwiz` * :ref:`migrate` * :ref:`pool` * :ref:`reflection` * :ref:`db_url` * :ref:`test_utils` * :ref:`flask_utils` Sqlite Extensions ----------------- The Sqlite extensions have been moved to :ref:`their own page `. .. _sqliteq: SqliteQ ------- The ``playhouse.sqliteq`` module provides a subclass of :py:class:`SqliteExtDatabase`, that will serialize concurrent writes to a SQLite database. :py:class:`SqliteQueueDatabase` can be used as a drop-in replacement for the regular :py:class:`SqliteDatabase` if you want simple **read and write** access to a SQLite database from **multiple threads**. SQLite only allows one connection to write to the database at any given time. As a result, if you have a multi-threaded application (like a web-server, for example) that needs to write to the database, you may see occasional errors when one or more of the threads attempting to write cannot acquire the lock. :py:class:`SqliteQueueDatabase` is designed to simplify things by sending all write queries through a single, long-lived connection. The benefit is that you get the appearance of multiple threads writing to the database without conflicts or timeouts. The downside, however, is that you cannot issue write transactions that encompass multiple queries -- all writes run in autocommit mode, essentially. .. note:: The module gets its name from the fact that all write queries get put into a thread-safe queue. A single worker thread listens to the queue and executes all queries that are sent to it. Transactions ^^^^^^^^^^^^ Because all queries are serialized and executed by a single worker thread, it is possible for transactional SQL from separate threads to be executed out-of-order. In the example below, the transaction started by thread "B" is rolled back by thread "A" (with bad consequences!): * Thread A: UPDATE transplants SET organ='liver', ...; * Thread B: BEGIN TRANSACTION; * Thread B: UPDATE life_support_system SET timer += 60 ...; * Thread A: ROLLBACK; -- Oh no.... Since there is a potential for queries from separate transactions to be interleaved, the :py:meth:`~SqliteQueueDatabase.transaction` and :py:meth:`~SqliteQueueDatabase.atomic` methods are disabled on :py:class:`SqliteQueueDatabase`. For cases when you wish to temporarily write to the database from a different thread, you can use the :py:meth:`~SqliteQueueDatabase.pause` and :py:meth:`~SqliteQueueDatabase.unpause` methods. These methods block the caller until the writer thread is finished with its current workload. The writer then disconnects and the caller takes over until ``unpause`` is called. The :py:meth:`~SqliteQueueDatabase.stop`, :py:meth:`~SqliteQueueDatabase.start`, and :py:meth:`~SqliteQueueDatabase.is_stopped` methods can also be used to control the writer thread. .. note:: Take a look at SQLite's `isolation `_ documentation for more information about how SQLite handles concurrent connections. Code sample ^^^^^^^^^^^ Creating a database instance does not require any special handling. The :py:class:`SqliteQueueDatabase` accepts some special parameters which you should be aware of, though. 
If you are using `gevent `_, you must specify ``use_gevent=True`` when instantiating your database -- this way Peewee will know to use the appropriate objects for handling queueing, thread creation, and locking. .. code-block:: python from playhouse.sqliteq import SqliteQueueDatabase db = SqliteQueueDatabase( 'my_app.db', use_gevent=False, # Use the standard library "threading" module. autostart=False, # The worker thread now must be started manually. queue_max_size=64, # Max. # of pending writes that can accumulate. results_timeout=5.0) # Max. time to wait for query to be executed. If ``autostart=False``, as in the above example, you will need to call :py:meth:`~SqliteQueueDatabase.start` to bring up the worker threads that will do the actual write query execution. .. code-block:: python @app.before_first_request def _start_worker_threads(): db.start() If you plan on performing SELECT queries or generally wanting to access the database, you will need to call :py:meth:`~Database.connect` and :py:meth:`~Database.close` as you would with any other database instance. When your application is ready to terminate, use the :py:meth:`~SqliteQueueDatabase.stop` method to shut down the worker thread. If there was a backlog of work, then this method will block until all pending work is finished (though no new work is allowed). .. code-block:: python import atexit @atexit.register def _stop_worker_threads(): db.stop() Lastly, the :py:meth:`~SqliteQueueDatabase.is_stopped` method can be used to determine whether the database writer is up and running. .. _sqlite_udf: Sqlite User-Defined Functions ----------------------------- The ``sqlite_udf`` playhouse module contains a number of user-defined functions, aggregates, and table-valued functions, which you may find useful. The functions are grouped in collections and you can register these user-defined extensions individually, by collection, or register everything. Scalar functions are functions which take a number of parameters and return a single value. For example, converting a string to upper-case, or calculating the MD5 hex digest. Aggregate functions are like scalar functions that operate on multiple rows of data, producing a single result. For example, calculating the sum of a list of integers, or finding the smallest value in a particular column. Table-valued functions are simply functions that can return multiple rows of data. For example, a regular-expression search function that returns all the matches in a given string, or a function that accepts two dates and generates all the intervening days. .. note:: To use table-valued functions, you will need to build the ``playhouse._sqlite_ext`` C extension. Registering user-defined functions: .. code-block:: python db = SqliteDatabase('my_app.db') # Register *all* functions. register_all(db) # Alternatively, you can register individual groups. This will just # register the DATE and MATH groups of functions. register_groups(db, 'DATE', 'MATH') # If you only wish to register, say, the aggregate functions for a # particular group or groups, you can: register_aggregate_groups(db, 'DATE') # If you only wish to register a single function, then you can: from playhouse.sqlite_udf import gzip, gunzip db.register_function(gzip, 'gzip') db.register_function(gunzip, 'gunzip') Using a library function ("hostname"): .. code-block:: python # Assume we have a model, Link, that contains lots of arbitrary URLs. # We want to discover the most common hosts that have been linked. 
query = (Link .select(fn.hostname(Link.url).alias('host'), fn.COUNT(Link.id)) .group_by(fn.hostname(Link.url)) .order_by(fn.COUNT(Link.id).desc()) .tuples()) # Print the hostname along with number of links associated with it. for host, count in query: print('%s: %s' % (host, count)) Functions, listed by collection name ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Scalar functions are indicated by ``(f)``, aggregate functions by ``(a)``, and table-valued functions by ``(t)``. **CONTROL_FLOW** .. py:function:: if_then_else(cond, truthy[, falsey=None]) Simple ternary-type operator, where, depending on the truthiness of the ``cond`` parameter, either the ``truthy`` or ``falsey`` value will be returned. **DATE** .. py:function:: strip_tz(date_str) :param date_str: A datetime, encoded as a string. :returns: The datetime with any timezone info stripped off. The time is not adjusted in any way, the timezone is simply removed. .. py:function:: humandelta(nseconds[, glue=', ']) :param int nseconds: Number of seconds, total, in timedelta. :param str glue: Fragment to join values. :returns: Easy-to-read description of timedelta. Example, 86471 -> "1 day, 1 minute, 11 seconds" .. py:function:: mintdiff(datetime_value) :param datetime_value: A date-time. :returns: Minimum difference between any two values in list. Aggregate function that computes the minimum difference between any two datetimes. .. py:function:: avgtdiff(datetime_value) :param datetime_value: A date-time. :returns: Average difference between values in list. Aggregate function that computes the average difference between consecutive values in the list. .. py:function:: duration(datetime_value) :param datetime_value: A date-time. :returns: Duration from smallest to largest value in list, in seconds. Aggregate function that computes the duration from the smallest to the largest value in the list, returned in seconds. .. py:function:: date_series(start, stop[, step_seconds=86400]) :param datetime start: Start datetime :param datetime stop: Stop datetime :param int step_seconds: Number of seconds comprising a step. Table-value function that returns rows consisting of the date/+time values encountered iterating from start to stop, ``step_seconds`` at a time. Additionally, if start does not have a time component and step_seconds is greater-than-or-equal-to one day (86400 seconds), the values returned will be dates. Conversely, if start does not have a date component, values will be returned as times. Otherwise values are returned as datetimes. Example: .. code-block:: sql SELECT * FROM date_series('2017-01-28', '2017-02-02'); value ----- 2017-01-28 2017-01-29 2017-01-30 2017-01-31 2017-02-01 2017-02-02 **FILE** .. py:function:: file_ext(filename) :param str filename: Filename to extract extension from. :return: Returns the file extension, including the leading ".". .. py:function:: file_read(filename) :param str filename: Filename to read. :return: Contents of the file. **HELPER** .. py:function:: gzip(data[, compression=9]) :param bytes data: Data to compress. :param int compression: Compression level (9 is max). :returns: Compressed binary data. .. py:function:: gunzip(data) :param bytes data: Compressed data. :returns: Uncompressed binary data. .. py:function:: hostname(url) :param str url: URL to extract hostname from. :returns: hostname portion of URL .. py:function:: toggle(key) :param key: Key to toggle. Toggle a key between True/False state. Example: .. 
code-block:: pycon

    >>> toggle('my-key')
    True
    >>> toggle('my-key')
    False
    >>> toggle('my-key')
    True

.. py:function:: setting(key[, value=None])

    :param key: Key to set/retrieve.
    :param value: Value to set.
    :returns: Value associated with key.

    Store/retrieve a setting in memory and persist during lifetime of
    application. To get the current value, only specify the key. To set a
    new value, call with key and new value.

.. py:function:: clear_toggles()

    Clears all state associated with the :py:func:`toggle` function.

.. py:function:: clear_settings()

    Clears all state associated with the :py:func:`setting` function.

**MATH**

.. py:function:: randomrange(start[, stop=None[, step=None]])

    :param int start: Start of range (inclusive)
    :param int stop: End of range (not inclusive)
    :param int step: Interval at which to return a value.

    Return a random integer between ``[start, stop)``.

.. py:function:: gauss_distribution(mean, sigma)

    :param float mean: Mean value
    :param float sigma: Standard deviation

.. py:function:: sqrt(n)

    Calculate the square root of ``n``.

.. py:function:: tonumber(s)

    :param str s: String to convert to number.
    :returns: Integer, floating-point or NULL on failure.

.. py:function:: mode(val)

    :param val: Numbers in list.
    :returns: The mode, or most-common, number observed.

    Aggregate function which calculates *mode* of values.

.. py:function:: minrange(val)

    :param val: Value
    :returns: Min difference between two values.

    Aggregate function which calculates the minimal distance between two
    numbers in the sequence.

.. py:function:: avgrange(val)

    :param val: Value
    :returns: Average difference between values.

    Aggregate function which calculates the average distance between two
    consecutive numbers in the sequence.

.. py:function:: range(val)

    :param val: Value
    :returns: The range from the smallest to largest value in sequence.

    Aggregate function which returns range of values observed.

.. py:function:: median(val)

    :param val: Value
    :returns: The median, or middle, value in a sequence.

    Aggregate function which calculates the middle value in a sequence.

    .. note:: Only available if you compiled the ``_sqlite_udf`` extension.

**STRING**

.. py:function:: substr_count(haystack, needle)

    Returns number of times ``needle`` appears in ``haystack``.

.. py:function:: strip_chars(haystack, chars)

    Strips any characters in ``chars`` from beginning and end of
    ``haystack``.

.. py:function:: damerau_levenshtein_dist(s1, s2)

    Computes the edit distance from s1 to s2 using the damerau variant of
    the levenshtein algorithm.

    .. note:: Only available if you compiled the ``_sqlite_udf`` extension.

.. py:function:: levenshtein_dist(s1, s2)

    Computes the edit distance from s1 to s2 using the levenshtein
    algorithm.

    .. note:: Only available if you compiled the ``_sqlite_udf`` extension.

.. py:function:: str_dist(s1, s2)

    Computes the edit distance from s1 to s2 using the standard library
    SequenceMatcher's algorithm.

    .. note:: Only available if you compiled the ``_sqlite_udf`` extension.

.. py:function:: regex_search(regex, search_string)

    :param str regex: Regular expression
    :param str search_string: String to search for instances of regex.

    Table-value function that searches a string for substrings that match
    the provided ``regex``. Returns rows for each match found.

    Example:

    .. code-block:: sql

        SELECT * FROM regex_search('\w+', 'extract words, ignore! symbols');

        value
        -----
        extract
        words
        ignore
        symbols
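To tie the listing together, here is a sketch of registering a collection of
these functions and calling one from an ordinary peewee query. The ``Sale``
model and its data are illustrative assumptions; ``mode()`` comes from the
**MATH** collection above:

.. code-block:: python

    from peewee import *
    from playhouse.sqlite_udf import register_groups

    db = SqliteDatabase(':memory:')
    register_groups(db, 'MATH')  # Registers the MATH collection (mode(), minrange(), ...).

    class Sale(Model):
        amount = IntegerField()

        class Meta:
            database = db

    db.create_tables([Sale])
    for amount in (10, 20, 20, 30):
        Sale.create(amount=amount)

    # User-defined aggregates are invoked like any SQL function, via fn.
    most_common = Sale.select(fn.mode(Sale.amount)).scalar()
    print(most_common)  # 20

..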
_apsw:

apsw, an advanced sqlite driver
-------------------------------

The ``apsw_ext`` module contains a database class suitable for use with the
apsw sqlite driver.

APSW Project page: https://github.com/rogerbinns/apsw

APSW is a really neat library that provides a thin wrapper on top of SQLite's
C interface, making it possible to use all of SQLite's advanced features.

Here are just a few reasons to use APSW, taken from the documentation:

* APSW gives all functionality of SQLite, including virtual tables, virtual
  file system, blob i/o, backups and file control.
* Connections can be shared across threads without any additional locking.
* Transactions are managed explicitly by your code.
* APSW can handle nested transactions.
* Unicode is handled correctly.
* APSW is faster.

For more information on the differences between apsw and pysqlite, check
`the apsw docs `_.

How to use the APSWDatabase
^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code-block:: python

    from playhouse.apsw_ext import *

    db = APSWDatabase(':memory:')

    class BaseModel(Model):
        class Meta:
            database = db

    class SomeModel(BaseModel):
        col1 = CharField()
        col2 = DateTimeField()

apsw_ext API notes
^^^^^^^^^^^^^^^^^^

:py:class:`APSWDatabase` extends the :py:class:`SqliteExtDatabase` and
inherits its advanced features.

.. py:class:: APSWDatabase(database, **connect_kwargs)

    :param string database: filename of sqlite database
    :param connect_kwargs: keyword arguments passed to apsw when opening a
        connection

    .. py:method:: register_module(mod_name, mod_inst)

        Provides a way of globally registering a module. For more
        information, see the `documentation on virtual tables `_.

        :param string mod_name: name to use for module
        :param object mod_inst: an object implementing the
            `Virtual Table `_ interface

    .. py:method:: unregister_module(mod_name)

        Unregister a module.

        :param string mod_name: name to use for module

.. note::
    Be sure to use the ``Field`` subclasses defined in the ``apsw_ext``
    module, as they will properly handle adapting the data types for storage.
    For example, instead of using ``peewee.DateTimeField``, be sure you are
    importing and using ``playhouse.apsw_ext.DateTimeField``.

.. _sqlcipher_ext:

Sqlcipher backend
-----------------

.. note::
    Although this extension's code is short, it has not been properly
    peer-reviewed yet and may have introduced vulnerabilities. Also note that
    this code relies on sqlcipher3_ (python bindings) and sqlcipher_, and the
    code there might have vulnerabilities as well, but since these are widely
    used crypto modules, we can expect "short zero days" there.

.. _sqlcipher3: https://pypi.python.org/pypi/sqlcipher3
.. _pysqlcipher3: https://pypi.python.org/pypi/pysqlcipher3
.. _sqlcipher: http://sqlcipher.net

sqlcipher_ext API notes
^^^^^^^^^^^^^^^^^^^^^^^

.. py:class:: SqlCipherDatabase(database, passphrase, **kwargs)

    Subclass of :py:class:`SqliteDatabase` that stores the database
    encrypted. Instead of the standard ``sqlite3`` backend, it uses
    sqlcipher3_: a python wrapper for sqlcipher_, which -- in turn -- is an
    encrypted wrapper around ``sqlite3``, so the API is *identical* to
    :py:class:`SqliteDatabase`'s, except for object construction parameters:

    :param database: Path to encrypted database filename to open [or create].
    :param passphrase: Database encryption passphrase: should be at least 8
        characters long, but it is *strongly advised* to enforce better
        `passphrase strength`_ criteria in your implementation.

    * If the ``database`` file doesn't exist, it will be *created* with
      encryption by a key derived from ``passphrase``.
    * When trying to open an existing database, ``passphrase`` should be
      identical to the one used when it was created. If the passphrase is
      incorrect, an error will be raised when first attempting to access the
      database.

    .. py:method:: rekey(passphrase)

        :param str passphrase: New passphrase for database.

        Change the passphrase for the database.

.. _passphrase strength: https://en.wikipedia.org/wiki/Password_strength

.. note::
    SQLCipher can be configured using a number of extension PRAGMAs. The list
    of PRAGMAs and their descriptions can be found in the `SQLCipher
    documentation `_.

    For example to specify the number of PBKDF2 iterations for the key
    derivation (64K in SQLCipher 3.x, 256K in SQLCipher 4.x by default):

    .. code-block:: python

        # Use 1,000,000 iterations.
        db = SqlCipherDatabase('my_app.db', pragmas={'kdf_iter': 1000000})

    To use a cipher page-size of 16KB and a cache-size of 10,000 pages:

    .. code-block:: python

        db = SqlCipherDatabase('my_app.db', passphrase='secret!!!',
                               pragmas={'cipher_page_size': 1024 * 16,
                                        'cache_size': 10000})  # 10,000 16KB pages, or 160MB.

Example of prompting the user for a passphrase:

.. code-block:: python

    db = SqlCipherDatabase(None)

    class BaseModel(Model):
        """Parent for all app's models"""
        class Meta:
            # We won't have a valid db until user enters passphrase.
            database = db

    # Derive our model subclasses
    class Person(BaseModel):
        name = TextField(primary_key=True)

    right_passphrase = False
    while not right_passphrase:
        db.init('testsqlcipher.db', passphrase=get_passphrase_from_user())

        try:
            # Actually execute a query against the db to test passphrase.
            db.get_tables()
        except DatabaseError as exc:
            # This error indicates the password was wrong.
            if exc.args[0] == 'file is encrypted or is not a database':
                tell_user_the_passphrase_was_wrong()
                db.init(None)  # Reset the db.
            else:
                raise exc
        else:
            # The password was correct.
            right_passphrase = True

See also: a slightly more elaborate `example `_.

.. _postgres_ext:

Postgresql Extensions
---------------------

The postgresql extensions module provides a number of "postgres-only"
functions, currently:

* :ref:`json support `, including *jsonb* for Postgres 9.4.
* :ref:`hstore support `
* :ref:`server-side cursors `
* :ref:`full-text search `
* :py:class:`ArrayField` field type, for storing arrays.
* :py:class:`HStoreField` field type, for storing key/value pairs.
* :py:class:`IntervalField` field type, for storing ``timedelta`` objects.
* :py:class:`JSONField` field type, for storing JSON data.
* :py:class:`BinaryJSONField` field type for the ``jsonb`` JSON data type.
* :py:class:`TSVectorField` field type, for storing full-text search data.
* :py:class:`DateTimeTZField` field type, a timezone-aware datetime field.

In the future I would like to add support for more of postgresql's features.
If there is a particular feature you would like to see added, please `open a
Github issue `_.

.. warning::
    In order to start using the features described below, you will need to
    use the extension :py:class:`PostgresqlExtDatabase` class instead of
    :py:class:`PostgresqlDatabase`.

The code below will assume you are using the following database and base
model:

.. code-block:: python

    from playhouse.postgres_ext import *

    ext_db = PostgresqlExtDatabase('peewee_test', user='postgres')

    class BaseExtModel(Model):
        class Meta:
            database = ext_db

.. _pgjson:

JSON Support
^^^^^^^^^^^^

peewee has basic support for Postgres' native JSON data type, in the form of
:py:class:`JSONField`.
As of version 2.4.7, peewee also supports the Postgres 9.4 binary json
``jsonb`` type, via :py:class:`BinaryJSONField`.

.. warning::
    Postgres supports a JSON data type natively as of 9.2 (full support in
    9.3). In order to use this functionality you must be using the correct
    version of Postgres with `psycopg2` version 2.5 or greater.

    To use :py:class:`BinaryJSONField`, which has many performance and
    querying advantages, you must have Postgres 9.4 or later.

.. note::
    You must be sure your database is an instance of
    :py:class:`PostgresqlExtDatabase` in order to use the `JSONField`.

Here is an example of how you might declare a model with a JSON field:

.. code-block:: python

    import json
    import urllib2
    from playhouse.postgres_ext import *

    db = PostgresqlExtDatabase('my_database')

    class APIResponse(Model):
        url = CharField()
        response = JSONField()

        class Meta:
            database = db

        @classmethod
        def request(cls, url):
            fh = urllib2.urlopen(url)
            return cls.create(url=url, response=json.loads(fh.read()))

    APIResponse.create_table()

    # Store a JSON response.
    offense = APIResponse.request('http://crime-api.com/api/offense/')
    booking = APIResponse.request('http://crime-api.com/api/booking/')

    # Query a JSON data structure using a nested key lookup:
    offense_responses = APIResponse.select().where(
        APIResponse.response['meta']['model'] == 'offense')

    # Retrieve a sub-key for each APIResponse. By calling .as_json(), the
    # data at the sub-key will be returned as Python objects (dicts, lists,
    # etc) instead of serialized JSON.
    q = (APIResponse
         .select(
           APIResponse.response['booking']['person'].as_json().alias('person'))
         .where(APIResponse.response['meta']['model'] == 'booking'))

    for result in q:
        print(result.person['name'], result.person['dob'])

The :py:class:`BinaryJSONField` works the same and supports the same
operations as the regular :py:class:`JSONField`, but provides several
additional operations for testing **containment**. Using the binary json
field, you can test whether your JSON data contains other partial JSON
structures (:py:meth:`~BinaryJSONField.contains`,
:py:meth:`~BinaryJSONField.contains_any`,
:py:meth:`~BinaryJSONField.contains_all`), or whether it is a subset of a
larger JSON document (:py:meth:`~BinaryJSONField.contained_by`).

For more examples, see the :py:class:`JSONField` and
:py:class:`BinaryJSONField` API documents below.

.. _hstore:

hstore support
^^^^^^^^^^^^^^

`Postgresql hstore `_ is an embedded key/value store. With hstore, you can
store arbitrary key/value pairs in your database alongside structured
relational data.

To use ``hstore``, you need to specify an additional parameter when
instantiating your :py:class:`PostgresqlExtDatabase`:

.. code-block:: python

    # Specify "register_hstore=True":
    db = PostgresqlExtDatabase('my_db', register_hstore=True)

Currently the ``postgres_ext`` module supports the following operations:

* Store and retrieve arbitrary dictionaries
* Filter by key(s) or partial dictionary
* Update/add one or more keys to an existing dictionary
* Delete one or more keys from an existing dictionary
* Select keys, values, or zip keys and values
* Retrieve a slice of keys/values
* Test for the existence of a key
* Test that a key has a non-NULL value

Using hstore
^^^^^^^^^^^^

To start with, you will need to import the custom database class and the
hstore functions from ``playhouse.postgres_ext`` (see above code snippet).
Then, it is as simple as adding a :py:class:`HStoreField` to your model:

..
code-block:: python class House(BaseExtModel): address = CharField() features = HStoreField() You can now store arbitrary key/value pairs on ``House`` instances: .. code-block:: pycon >>> h = House.create( ... address='123 Main St', ... features={'garage': '2 cars', 'bath': '2 bath'}) ... >>> h_from_db = House.get(House.id == h.id) >>> h_from_db.features {'bath': '2 bath', 'garage': '2 cars'} You can filter by individual key, multiple keys or partial dictionary: .. code-block:: pycon >>> query = House.select() >>> garage = query.where(House.features.contains('garage')) >>> garage_and_bath = query.where(House.features.contains(['garage', 'bath'])) >>> twocar = query.where(House.features.contains({'garage': '2 cars'})) Suppose you want to do an atomic update to the house: .. code-block:: pycon >>> new_features = House.features.update({'bath': '2.5 bath', 'sqft': '1100'}) >>> query = House.update(features=new_features) >>> query.where(House.id == h.id).execute() 1 >>> h = House.get(House.id == h.id) >>> h.features {'bath': '2.5 bath', 'garage': '2 cars', 'sqft': '1100'} Or, alternatively an atomic delete: .. code-block:: pycon >>> query = House.update(features=House.features.delete('bath')) >>> query.where(House.id == h.id).execute() 1 >>> h = House.get(House.id == h.id) >>> h.features {'garage': '2 cars', 'sqft': '1100'} Multiple keys can be deleted at the same time: .. code-block:: pycon >>> query = House.update(features=House.features.delete('garage', 'sqft')) You can select just keys, just values, or zip the two: .. code-block:: pycon >>> for h in House.select(House.address, House.features.keys().alias('keys')): ... print(h.address, h.keys) 123 Main St [u'bath', u'garage'] >>> for h in House.select(House.address, House.features.values().alias('vals')): ... print(h.address, h.vals) 123 Main St [u'2 bath', u'2 cars'] >>> for h in House.select(House.address, House.features.items().alias('mtx')): ... print(h.address, h.mtx) 123 Main St [[u'bath', u'2 bath'], [u'garage', u'2 cars']] You can retrieve a slice of data, for example, all the garage data: .. code-block:: pycon >>> query = House.select(House.address, House.features.slice('garage').alias('garage_data')) >>> for house in query: ... print(house.address, house.garage_data) 123 Main St {'garage': '2 cars'} You can check for the existence of a key and filter rows accordingly: .. code-block:: pycon >>> has_garage = House.features.exists('garage') >>> for house in House.select(House.address, has_garage.alias('has_garage')): ... print(house.address, house.has_garage) 123 Main St True >>> for house in House.select().where(House.features.exists('garage')): ... print(house.address, house.features['garage']) # <-- just houses w/garage data 123 Main St 2 cars Interval support ^^^^^^^^^^^^^^^^ Postgres supports durations through the ``INTERVAL`` data-type (`docs `_). .. py:class:: IntervalField([null=False, [...]]) Field class capable of storing Python ``datetime.timedelta`` instances. Example: .. code-block:: python from datetime import timedelta from playhouse.postgres_ext import * db = PostgresqlExtDatabase('my_db') class Event(Model): location = CharField() duration = IntervalField() start_time = DateTimeField() class Meta: database = db @classmethod def get_long_meetings(cls): return cls.select().where(cls.duration > timedelta(hours=1)) .. _server_side_cursors: Server-side cursors ^^^^^^^^^^^^^^^^^^^ When psycopg2 executes a query, normally all results are fetched and returned to the client by the backend. 
This can cause your application to use a lot of memory when making large
queries. Using server-side cursors, results are returned a little at a
time (by default 2000 records). For the definitive reference, please see
the `psycopg2 documentation `_.

.. note:: To use server-side (or named) cursors, you must be using
    :py:class:`PostgresqlExtDatabase`.

To execute a query using a server-side cursor, simply wrap your select
query using the :py:func:`ServerSide` helper:

.. code-block:: python

    large_query = PageView.select()  # Build query normally.

    # Iterate over large query inside a transaction.
    for page_view in ServerSide(large_query):
        # do some interesting analysis here.
        pass

    # Server-side resources are released.

If you would like all ``SELECT`` queries to automatically use a
server-side cursor, you can specify this when creating your
:py:class:`PostgresqlExtDatabase`:

.. code-block:: python

    from playhouse.postgres_ext import PostgresqlExtDatabase

    ss_db = PostgresqlExtDatabase('my_db', server_side_cursors=True)

.. note::
    Server-side cursors live only as long as the transaction, so for this
    reason peewee will not automatically call ``commit()`` after executing
    a ``SELECT`` query. If you do not ``commit`` after you are done
    iterating, you will not release the server-side resources until the
    connection is closed (or the transaction is committed later).
    Furthermore, since peewee will by default cache rows returned by the
    cursor, you should always call ``.iterator()`` when iterating over a
    large query. If you are using the :py:func:`ServerSide` helper, the
    transaction and call to ``iterator()`` will be handled transparently.
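To make the note above concrete, here is a hedged sketch of iterating a
large query by hand, roughly what :py:func:`ServerSide` automates for you
(``ss_db`` is the database from the previous example, and ``process()`` is
a hypothetical callback):

.. code-block:: python

    # Assumes ss_db was created with server_side_cursors=True.
    with ss_db.transaction():
        for page_view in PageView.select().iterator():
            process(page_view)  # Rows are not cached by peewee.
    # Committing the transaction releases the server-side cursor.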
.. _pg_fts:

Full-text search
^^^^^^^^^^^^^^^^

Postgresql provides `sophisticated full-text search `_ using special
data-types (``tsvector`` and ``tsquery``). Documents should be stored or
converted to the ``tsvector`` type, and search queries should be converted
to ``tsquery``.

For simple cases, you can use the :py:func:`Match` function, which will
automatically perform the appropriate conversions and requires no schema
changes:

.. code-block:: python

    def blog_search(search_term):
        return Blog.select().where(
            (Blog.status == Blog.STATUS_PUBLISHED) &
            Match(Blog.content, search_term))

The :py:func:`Match` function will automatically convert the left-hand
operand to a ``tsvector``, and the right-hand operand to a ``tsquery``.
For better performance, it is recommended you create a ``GIN`` index on
the column you plan to search:

.. code-block:: sql

    CREATE INDEX blog_full_text_search ON blog USING gin(to_tsvector(content));

Alternatively, you can use the :py:class:`TSVectorField` to maintain a
dedicated column for storing ``tsvector`` data:

.. code-block:: python

    class Blog(Model):
        content = TextField()
        search_content = TSVectorField()

.. note:: A :py:class:`TSVectorField` will automatically be created with
    a GIN index.

You will need to explicitly convert the incoming text data to ``tsvector``
when inserting or updating the ``search_content`` field:

.. code-block:: python

    content = 'Excellent blog post about peewee ORM.'
    blog_entry = Blog.create(
        content=content,
        search_content=fn.to_tsvector(content))

To perform a full-text search, use :py:meth:`TSVectorField.match`:

.. code-block:: python

    terms = 'python & (sqlite | postgres)'
    results = Blog.select().where(Blog.search_content.match(terms))

For more information, see the `Postgres full-text search docs `_.

postgres_ext API notes
^^^^^^^^^^^^^^^^^^^^^^

.. py:class:: PostgresqlExtDatabase(database[, server_side_cursors=False[, register_hstore=False[, ...]]])

    :param str database: Name of database to connect to.
    :param bool server_side_cursors: Whether ``SELECT`` queries should
        utilize server-side cursors.
    :param bool register_hstore: Register the HStore extension with the
        connection.

    Identical to :py:class:`PostgresqlDatabase` but required in order to
    support:

    * :ref:`server_side_cursors`
    * :py:class:`ArrayField`
    * :py:class:`DateTimeTZField`
    * :py:class:`JSONField`
    * :py:class:`BinaryJSONField`
    * :py:class:`HStoreField`
    * :py:class:`TSVectorField`

    If you wish to use the HStore extension, you must specify
    ``register_hstore=True``.

    If using ``server_side_cursors``, also be sure to wrap your queries
    with :py:func:`ServerSide`.

.. py:function:: ServerSide(select_query)

    :param select_query: a :py:class:`SelectQuery` instance.
    :rtype: generator

    Wrap the given select query in a transaction, and call its
    :py:meth:`~SelectQuery.iterator` method to avoid caching row
    instances. In order for the server-side resources to be released, be
    sure to exhaust the generator (iterate over all the rows).

    Usage:

    .. code-block:: python

        large_query = PageView.select()
        for page_view in ServerSide(large_query):
            # Do something interesting.
            pass

        # At this point server side resources are released.

.. _pgarrays:

.. py:class:: ArrayField([field_class=IntegerField[, field_kwargs=None[, dimensions=1[, convert_values=False]]]])

    :param field_class: a subclass of :py:class:`Field`, e.g.
        :py:class:`IntegerField`.
    :param dict field_kwargs: arguments to initialize ``field_class``.
    :param int dimensions: dimensions of array.
    :param bool convert_values: apply ``field_class`` value conversion to
        array data.

    Field capable of storing arrays of the provided ``field_class``.

    .. note:: By default ArrayField will use a GIN index. To disable this,
        initialize the field with ``index=False``.

    You can store and retrieve lists (or lists-of-lists):

    .. code-block:: python

        class BlogPost(BaseModel):
            content = TextField()
            tags = ArrayField(CharField)

        post = BlogPost(content='awesome', tags=['foo', 'bar', 'baz'])

    Additionally, you can use the ``__getitem__`` API to query values or
    slices in the database:

    .. code-block:: python

        # Get the first tag on a given blog post.
        first_tag = (BlogPost
                     .select(BlogPost.tags[0].alias('first_tag'))
                     .where(BlogPost.id == 1)
                     .dicts()
                     .get())

        # first_tag = {'first_tag': 'foo'}

    Get a slice of values:

    .. code-block:: python

        # Get the first two tags.
        two_tags = (BlogPost
                    .select(BlogPost.tags[:2].alias('two'))
                    .dicts()
                    .get())
        # two_tags = {'two': ['foo', 'bar']}

    .. py:method:: contains(*items)

        :param items: One or more items that must be in the given array
            field.

        .. code-block:: python

            # Get all blog posts that are tagged with both "python" and "django".
            Blog.select().where(Blog.tags.contains('python', 'django'))

    .. py:method:: contains_any(*items)

        :param items: One or more items to search for in the given array
            field.

        Like :py:meth:`~ArrayField.contains`, except will match rows where
        the array contains *any* of the given items.

        .. code-block:: python

            # Get all blog posts that are tagged with "flask" and/or "django".
            Blog.select().where(Blog.tags.contains_any('flask', 'django'))
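The ``dimensions`` parameter can be used to store nested
(multi-dimensional) arrays; a brief sketch, in which the ``Matrix`` model
is hypothetical:

.. code-block:: python

    class Matrix(BaseExtModel):
        # A two-dimensional array of integers.
        values = ArrayField(IntegerField, dimensions=2)

    Matrix.create(values=[[1, 2], [3, 4]])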
.. py:class:: DateTimeTZField(*args, **kwargs)

    A timezone-aware subclass of :py:class:`DateTimeField`.

.. py:class:: HStoreField(*args, **kwargs)

    A field for storing and retrieving arbitrary key/value pairs. For
    details on usage, see :ref:`hstore`.

    .. attention::
        To use the :py:class:`HStoreField` you will need to be sure the
        *hstore* extension is registered with the connection. To
        accomplish this, instantiate the :py:class:`PostgresqlExtDatabase`
        with ``register_hstore=True``.

    .. note:: By default ``HStoreField`` will use a *GiST* index. To
        disable this, initialize the field with ``index=False``.

    .. py:method:: keys()

        Returns the keys for a given row.

        .. code-block:: pycon

            >>> for h in House.select(House.address, House.features.keys().alias('keys')):
            ...     print(h.address, h.keys)

            123 Main St [u'bath', u'garage']

    .. py:method:: values()

        Return the values for a given row.

        .. code-block:: pycon

            >>> for h in House.select(House.address, House.features.values().alias('vals')):
            ...     print(h.address, h.vals)

            123 Main St [u'2 bath', u'2 cars']

    .. py:method:: items()

        Like Python's ``dict``, return the keys and values in a
        list-of-lists:

        .. code-block:: pycon

            >>> for h in House.select(House.address, House.features.items().alias('mtx')):
            ...     print(h.address, h.mtx)

            123 Main St [[u'bath', u'2 bath'], [u'garage', u'2 cars']]

    .. py:method:: slice(*args)

        Return a slice of data given a list of keys.

        .. code-block:: pycon

            >>> for h in House.select(House.address, House.features.slice('garage').alias('garage_data')):
            ...     print(h.address, h.garage_data)

            123 Main St {'garage': '2 cars'}

    .. py:method:: exists(key)

        Query for whether the given key exists.

        .. code-block:: pycon

            >>> for h in House.select(House.address, House.features.exists('garage').alias('has_garage')):
            ...     print(h.address, h.has_garage)

            123 Main St True

            >>> for h in House.select().where(House.features.exists('garage')):
            ...     print(h.address, h.features['garage'])  # <-- just houses w/garage data

            123 Main St 2 cars

    .. py:method:: defined(key)

        Query for whether the given key has a value associated with it.

    .. py:method:: update(**data)

        Perform an atomic update to the keys/values for a given row or
        rows.

        .. code-block:: pycon

            >>> query = House.update(features=House.features.update(
            ...     sqft=2000,
            ...     year_built=2012))
            >>> query.where(House.id == 1).execute()

    .. py:method:: delete(*keys)

        Delete the provided keys for a given row or rows.

        .. note:: This is expressed using an ``UPDATE`` query.

        .. code-block:: pycon

            >>> query = House.update(features=House.features.delete(
            ...     'sqft', 'year_built'))
            >>> query.where(House.id == 1).execute()

    .. py:method:: contains(value)

        :param value: Either a ``dict``, a ``list`` of keys, or a single
            key.

        Query rows for the existence of either:

        * a partial dictionary.
        * a list of keys.
        * a single key.

        .. code-block:: pycon

            >>> query = House.select()
            >>> has_garage = query.where(House.features.contains('garage'))
            >>> garage_bath = query.where(House.features.contains(['garage', 'bath']))
            >>> twocar = query.where(House.features.contains({'garage': '2 cars'}))

    .. py:method:: contains_any(*keys)

        :param keys: One or more keys to search for.

        Query rows for the existence of *any* key.
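        For example, continuing with the ``House`` model from above (the
        ``pool`` key is hypothetical):

        .. code-block:: pycon

            >>> # Houses with a "garage" key, a "pool" key, or both.
            >>> query = House.select().where(
            ...     House.features.contains_any('garage', 'pool'))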
.. py:class:: JSONField(dumps=None, *args, **kwargs)

    :param dumps: The function used to serialize data to JSON; defaults
        to ``json.dumps()``. Override this to customize how values are
        serialized.

    Field class suitable for storing and querying arbitrary JSON. When
    using this on a model, set the field's value to a Python object
    (either a ``dict`` or a ``list``). When you retrieve your value from
    the database it will be returned as a Python data structure.

    .. note:: You must be using Postgres 9.2 / psycopg2 2.5 or greater.

    .. note::
        If you are using Postgres 9.4, strongly consider using the
        :py:class:`BinaryJSONField` instead as it offers better
        performance and more powerful querying options.

    Example model declaration:

    .. code-block:: python

        db = PostgresqlExtDatabase('my_db')

        class APIResponse(Model):
            url = CharField()
            response = JSONField()

            class Meta:
                database = db

    Example of storing JSON data:

    .. code-block:: python

        url = 'http://foo.com/api/resource/'
        resp = json.loads(urlopen(url).read())
        APIResponse.create(url=url, response=resp)

        APIResponse.create(url='http://foo.com/baz/', response={'key': 'value'})

    To query, use Python's ``[]`` operators to specify nested key or array
    lookups:

    .. code-block:: python

        APIResponse.select().where(
            APIResponse.response['key1']['nested-key'] == 'some-value')

    To illustrate the use of the ``[]`` operators, imagine we have the
    following data stored in an ``APIResponse``:

    .. code-block:: javascript

        {
          "foo": {
            "bar": ["i1", "i2", "i3"],
            "baz": {
              "huey": "mickey",
              "peewee": "nugget"
            }
          }
        }

    Here are the results of a few queries:

    .. code-block:: python

        def get_data(expression):
            # Helper function to just retrieve the results of a
            # particular expression.
            query = (APIResponse
                     .select(expression.alias('my_data'))
                     .dicts()
                     .get())
            return query['my_data']

        # Accessing the foo -> bar subkey will return a JSON
        # representation of the list.
        get_data(APIResponse.response['foo']['bar'])
        # '["i1", "i2", "i3"]'

        # In order to retrieve this list as a Python list,
        # we will call .as_json() on the expression.
        get_data(APIResponse.response['foo']['bar'].as_json())
        # ['i1', 'i2', 'i3']

        # Similarly, accessing the foo -> baz subkey will
        # return a JSON representation of the dictionary.
        get_data(APIResponse.response['foo']['baz'])
        # '{"huey": "mickey", "peewee": "nugget"}'

        # Again, calling .as_json() will return an actual
        # python dictionary.
        get_data(APIResponse.response['foo']['baz'].as_json())
        # {'huey': 'mickey', 'peewee': 'nugget'}

        # When dealing with simple values, either way works as
        # you expect.
        get_data(APIResponse.response['foo']['bar'][0])
        # 'i1'

        # Calling .as_json() when the result is a simple value
        # will return the same thing as the previous example.
        get_data(APIResponse.response['foo']['bar'][0].as_json())
        # 'i1'

.. py:class:: BinaryJSONField(dumps=None, *args, **kwargs)

    :param dumps: The function used to serialize data to JSON; defaults
        to ``json.dumps()``. Override this to customize how values are
        serialized.

    Store and query arbitrary JSON documents. Data should be stored using
    normal Python ``dict`` and ``list`` objects, and when data is returned
    from the database, it will be returned using ``dict`` and ``list`` as
    well.

    For examples of basic query operations, see the above code samples for
    :py:class:`JSONField`. The example queries below will use the same
    ``APIResponse`` model described above.

    .. note:: By default BinaryJSONField will use a GIN index. To disable
        this, initialize the field with ``index=False``.

    .. note:: You must be using Postgres 9.4 / psycopg2 2.5 or newer. If
        you are using Postgres 9.2 or 9.3, you can use the regular
        :py:class:`JSONField` instead.

    .. py:method:: contains(other)

        Test whether the given JSON data contains the given JSON fragment
        or key.

        Example:

        .. code-block:: python

            search_fragment = {
                'foo': {'bar': ['i2']}
            }
            query = (APIResponse
                     .select()
                     .where(APIResponse.response.contains(search_fragment)))

            # If we're searching for a list, the list items do not need to
            # be ordered in a particular way:
            query = (APIResponse
                     .select()
                     .where(APIResponse.response.contains({
                         'foo': {'bar': ['i2', 'i1']}})))

        We can pass in simple keys as well. To find APIResponses that
        contain the key ``foo`` at the top-level:

        .. code-block:: python

            APIResponse.select().where(APIResponse.response.contains('foo'))

        We can also search sub-keys using square-brackets:

        .. code-block:: python

            APIResponse.select().where(
                APIResponse.response['foo']['bar'].contains(['i2', 'i1']))

    .. py:method:: contains_any(*items)

        Search for the presence of one or more of the given items.

        .. code-block:: python

            APIResponse.select().where(
                APIResponse.response.contains_any('foo', 'baz', 'nugget'))

        Like :py:meth:`~BinaryJSONField.contains`, we can also search
        sub-keys:

        .. code-block:: python

            APIResponse.select().where(
                APIResponse.response['foo']['bar'].contains_any('i2', 'ix'))

    .. py:method:: contains_all(*items)

        Search for the presence of all of the given items.

        .. code-block:: python

            APIResponse.select().where(
                APIResponse.response.contains_all('foo'))

        Like :py:meth:`~BinaryJSONField.contains_any`, we can also search
        sub-keys:

        .. code-block:: python

            APIResponse.select().where(
                APIResponse.response['foo']['bar'].contains_all('i1', 'i2', 'i3'))

    .. py:method:: contained_by(other)

        Test whether the field's JSON data is contained by (i.e. is a
        subset of) the given JSON document. This method is the inverse of
        :py:meth:`~BinaryJSONField.contains`.

        .. code-block:: python

            big_doc = {
                'foo': {
                    'bar': ['i1', 'i2', 'i3'],
                    'baz': {
                        'huey': 'mickey',
                        'peewee': 'nugget',
                    }
                },
                'other_key': ['nugget', 'bear', 'kitten'],
            }
            APIResponse.select().where(
                APIResponse.response.contained_by(big_doc))

    .. py:method:: concat(data)

        Concatenate the field's data with the provided data. Note that
        this operation does not merge or do a "deep concat".

    .. py:method:: has_key(key)

        Test whether the key exists at the top-level of the JSON object.

    .. py:method:: remove(*keys)

        Remove one or more keys from the top-level of the JSON object.
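        A hedged sketch of :py:meth:`~BinaryJSONField.has_key` and
        :py:meth:`~BinaryJSONField.remove` in action, re-using the
        ``APIResponse`` model (the ``other_key`` key is assumed to exist
        in the stored documents):

        .. code-block:: python

            # Select responses whose document has a top-level "foo" key.
            APIResponse.select().where(APIResponse.response.has_key('foo'))

            # Strip the top-level "other_key" key from every stored document.
            (APIResponse
             .update(response=APIResponse.response.remove('other_key'))
             .execute())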
.. py:function:: Match(field, query)

    Generate a full-text search expression, automatically converting the
    left-hand operand to a ``tsvector``, and the right-hand operand to a
    ``tsquery``.

    Example:

    .. code-block:: python

        def blog_search(search_term):
            return Blog.select().where(
                (Blog.status == Blog.STATUS_PUBLISHED) &
                Match(Blog.content, search_term))

.. py:class:: TSVectorField

    Field type suitable for storing ``tsvector`` data. This field will
    automatically be created with a ``GIN`` index for improved search
    performance.

    .. note:: Data stored in this field will still need to be manually
        converted to the ``tsvector`` type.

    .. note:: By default TSVectorField will use a GIN index. To disable
        this, initialize the field with ``index=False``.

    Example usage:

    .. code-block:: python

        class Blog(Model):
            content = TextField()
            search_content = TSVectorField()

        content = 'this is a sample blog entry.'
        blog_entry = Blog.create(
            content=content,
            search_content=fn.to_tsvector(content))  # Note `to_tsvector()`.

    .. py:method:: match(query[, language=None[, plain=False]])

        :param str query: the full-text search query.
        :param str language: language name (optional).
        :param bool plain: parse search query using plain (simple) parser.
        :returns: an expression representing full-text search/match.

        Example:

        .. code-block:: python

            # Perform a search using the "match" method.
            terms = 'python & (sqlite | postgres)'
            results = Blog.select().where(Blog.search_content.match(terms))
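        The optional parameters can be combined; a hedged sketch using the
        plain-text parser with an explicit search configuration:

        .. code-block:: python

            results = Blog.select().where(
                Blog.search_content.match('web framework',
                                          language='english',
                                          plain=True))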
code-block:: python # Perform a search using the "match" method. terms = 'python & (sqlite | postgres)' results = Blog.select().where(Blog.search_content.match(terms)) .. include:: crdb.rst .. _mysql_ext: MySQL Extensions ---------------- Peewee provides an alternate database implementation for using the `mysql-connector `_ driver or the `mariadb-connector `_. The implementations can be found in ``playhouse.mysql_ext``. .. py:class:: MySQLConnectorDatabase(database, **kwargs) Database implementation using `mysql-connector `_. Full list of supported `connection parameters `_. Example usage of mysql-connector: .. code-block:: python from playhouse.mysql_ext import MySQLConnectorDatabase # MySQL database implementation that utilizes mysql-connector driver. db = MySQLConnectorDatabase('my_database', host='1.2.3.4', user='mysql') .. py:class:: MariaDBConnectorDatabase(database, **kwargs) Database implementation using `mariadb-connector `_. Full list of supported `connection parameters `_. Example usage of mariadb-connector: .. code-block:: python from playhouse.mysql_ext import MariaDBConnectorDatabase # MySQL database implementation that utilizes mysql-connector driver. db = MariaDBConnectorDatabase('my_database', host='1.2.3.4', user='mysql') .. note:: The :py:class:`MariaDBConnectorDatabase` does **not** accept the following parameters: * ``charset`` (it is always utf8mb4) * ``sql_mode`` * ``use_unicode`` Additional MySQL-specific helpers: .. py:class:: JSONField() Extends :py:class:`TextField` and implements transparent JSON encoding and decoding in Python. .. py:method:: extract(path) :param str path: a JSON path, e.g. ``$.key1`` Extract a value from a JSON document at the given path. .. py:function:: Match(columns, expr[, modifier=None]) :param columns: a single :py:class:`Field` or a tuple of multiple fields. :param str expr: the full-text search expression. :param str modifier: optional modifiers for the search, e.g. *'in boolean mode'*. Helper class for constructing MySQL full-text search queries of the form: .. code-block:: sql MATCH (columns, ...) AGAINST (expr[ modifier]) .. _dataset: DataSet ------- The *dataset* module contains a high-level API for working with databases modeled after the popular `project of the same name `_. The aims of the *dataset* module are to provide: * A simplified API for working with relational data, along the lines of working with JSON. * An easy way to export relational data as JSON or CSV. * An easy way to import JSON or CSV data into a relational database. A minimal data-loading script might look like this: .. code-block:: python from playhouse.dataset import DataSet db = DataSet('sqlite:///:memory:') table = db['sometable'] table.insert(name='Huey', age=3) table.insert(name='Mickey', age=5, gender='male') huey = table.find_one(name='Huey') print(huey) # {'age': 3, 'gender': None, 'id': 1, 'name': 'Huey'} for obj in table: print(obj) # {'age': 3, 'gender': None, 'id': 1, 'name': 'Huey'} # {'age': 5, 'gender': 'male', 'id': 2, 'name': 'Mickey'} You can insert, update or delete using the dictionary APIs as well: .. code-block:: python huey = table.find_one(name='Huey') # {'age': 3, 'gender': None, 'id': 1, 'name': 'Huey'} # Perform an update by supplying a partial record of changes. 
.. _dataset:

DataSet
-------

The *dataset* module contains a high-level API for working with databases
modeled after the popular `project of the same name `_. The aims of the
*dataset* module are to provide:

* A simplified API for working with relational data, along the lines of
  working with JSON.
* An easy way to export relational data as JSON or CSV.
* An easy way to import JSON or CSV data into a relational database.

A minimal data-loading script might look like this:

.. code-block:: python

    from playhouse.dataset import DataSet

    db = DataSet('sqlite:///:memory:')

    table = db['sometable']
    table.insert(name='Huey', age=3)
    table.insert(name='Mickey', age=5, gender='male')

    huey = table.find_one(name='Huey')
    print(huey)
    # {'age': 3, 'gender': None, 'id': 1, 'name': 'Huey'}

    for obj in table:
        print(obj)
    # {'age': 3, 'gender': None, 'id': 1, 'name': 'Huey'}
    # {'age': 5, 'gender': 'male', 'id': 2, 'name': 'Mickey'}

You can insert, update or delete using the dictionary APIs as well:

.. code-block:: python

    huey = table.find_one(name='Huey')
    # {'age': 3, 'gender': None, 'id': 1, 'name': 'Huey'}

    # Perform an update by supplying a partial record of changes.
    table[1] = {'gender': 'male', 'age': 4}
    print(table[1])
    # {'age': 4, 'gender': 'male', 'id': 1, 'name': 'Huey'}

    # Or insert a new record:
    table[3] = {'name': 'Zaizee', 'age': 2}
    print(table[3])
    # {'age': 2, 'gender': None, 'id': 3, 'name': 'Zaizee'}

    # Or delete a record:
    del table[3]  # Remove the row we just added.

You can export or import data using :py:meth:`~DataSet.freeze` and
:py:meth:`~DataSet.thaw`:

.. code-block:: python

    # Export table content to the `users.json` file.
    db.freeze(table.all(), format='json', filename='users.json')

    # Import data from a CSV file into a new table. Columns will be
    # automatically created for each field in the CSV file.
    new_table = db['stats']
    new_table.thaw(format='csv', filename='monthly_stats.csv')

Getting started
^^^^^^^^^^^^^^^

:py:class:`DataSet` objects are initialized by passing in a database URL
of the format ``dialect://user:password@host/dbname``. See the
:ref:`db_url` section for examples of connecting to various databases.

.. code-block:: python

    # Create an in-memory SQLite database.
    db = DataSet('sqlite:///:memory:')

Storing data
^^^^^^^^^^^^

To store data, we must first obtain a reference to a table. If the table
does not exist, it will be created automatically:

.. code-block:: python

    # Get a table reference, creating the table if it does not exist.
    table = db['users']

We can now :py:meth:`~Table.insert` new rows into the table. If the
columns do not exist, they will be created automatically:

.. code-block:: python

    table.insert(name='Huey', age=3, color='white')
    table.insert(name='Mickey', age=5, gender='male')

To update existing entries in the table, pass in a dictionary containing
the new values and filter conditions. The list of columns to use as
filters is specified in the *columns* argument. If no filter columns are
specified, then all rows will be updated.

.. code-block:: python

    # Update the gender for "Huey".
    table.update(name='Huey', gender='male', columns=['name'])

    # Update all records. If the column does not exist, it will be created.
    table.update(favorite_orm='peewee')

Importing data
^^^^^^^^^^^^^^

To import data from an external source, such as a JSON or CSV file, you
can use the :py:meth:`~Table.thaw` method. By default, new columns will be
created for any attributes encountered. If you wish to only populate
columns that are already defined on a table, you can pass in
``strict=True`` (see the sketch below).

.. code-block:: python

    # Load data from a JSON file containing a list of objects.
    table = db['stock_prices']
    table.thaw(filename='stocks.json', format='json')
    table.all()[:3]

    # Might print...
    [{'id': 1, 'ticker': 'GOOG', 'price': 703},
     {'id': 2, 'ticker': 'AAPL', 'price': 109},
     {'id': 3, 'ticker': 'AMZN', 'price': 300}]
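A minimal sketch of a strict import, which skips any attributes in the
input file that do not map to existing columns:

.. code-block:: python

    # Unknown attributes in stocks.json are ignored rather than
    # triggering the creation of new columns.
    table.thaw(filename='stocks.json', format='json', strict=True)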
Using transactions
^^^^^^^^^^^^^^^^^^

DataSet supports nesting transactions using a simple context manager.

.. code-block:: python

    table = db['users']
    with db.transaction() as txn:
        table.insert(name='Charlie')

        with db.transaction() as nested_txn:
            # Set Charlie's favorite ORM to Django.
            table.update(name='Charlie', favorite_orm='django', columns=['name'])

            # jk/lol
            nested_txn.rollback()

Inspecting the database
^^^^^^^^^^^^^^^^^^^^^^^

You can use the :py:meth:`tables` method to list the tables in the current
database:

.. code-block:: pycon

    >>> print(db.tables)
    ['sometable', 'user']

And for a given table, you can print the columns:

.. code-block:: pycon

    >>> table = db['user']
    >>> print(table.columns)
    ['id', 'age', 'name', 'gender', 'favorite_orm']

We can also find out how many rows are in a table:

.. code-block:: pycon

    >>> print(len(db['user']))
    3

Reading data
^^^^^^^^^^^^

To retrieve all rows, you can use the :py:meth:`~Table.all` method:

.. code-block:: python

    # Retrieve all the users.
    users = db['user'].all()

    # We can iterate over all rows without calling `.all()`
    for user in db['user']:
        print(user['name'])

Specific objects can be retrieved using :py:meth:`~Table.find` and
:py:meth:`~Table.find_one`.

.. code-block:: python

    # Find all the users who like peewee.
    peewee_users = db['user'].find(favorite_orm='peewee')

    # Find Huey.
    huey = db['user'].find_one(name='Huey')

Exporting data
^^^^^^^^^^^^^^

To export data, use the :py:meth:`~DataSet.freeze` method, passing in the
query you wish to export:

.. code-block:: python

    peewee_users = db['user'].find(favorite_orm='peewee')
    db.freeze(peewee_users, format='json', filename='peewee_users.json')

API
^^^

.. py:class:: DataSet(url, **kwargs)

    :param url: A database URL or a :py:class:`Database` instance. For
        details on using a URL, see :ref:`db_url` for examples.
    :param kwargs: additional keyword arguments passed to
        :py:meth:`Introspector.generate_models` when introspecting the db.

    The *DataSet* class provides a high-level API for working with
    relational databases.

    .. py:attribute:: tables

        Return a list of tables stored in the database. This list is
        computed dynamically each time it is accessed.

    .. py:method:: __getitem__(table_name)

        Provide a :py:class:`Table` reference to the specified table. If
        the table does not exist, it will be created.

    .. py:method:: query(sql[, params=None[, commit=True]])

        :param str sql: A SQL query.
        :param list params: Optional parameters for the query.
        :param bool commit: Whether the query should be committed upon
            execution.
        :return: A database cursor.

        Execute the provided query against the database.

    .. py:method:: transaction()

        Create a context manager representing a new transaction (or
        savepoint).

    .. py:method:: freeze(query[, format='csv'[, filename=None[, file_obj=None[, encoding='utf8'[, **kwargs]]]]])

        :param query: A :py:class:`SelectQuery`, generated using
            :py:meth:`~Table.all` or :py:meth:`~Table.find`.
        :param format: Output format. By default, *csv* and *json* are
            supported.
        :param filename: Filename to write output to.
        :param file_obj: File-like object to write output to.
        :param str encoding: File encoding.
        :param kwargs: Arbitrary parameters for export-specific
            functionality.

    .. py:method:: thaw(table[, format='csv'[, filename=None[, file_obj=None[, strict=False[, encoding='utf8'[, **kwargs]]]]]])

        :param str table: The name of the table to load data into.
        :param format: Input format. By default, *csv* and *json* are
            supported.
        :param filename: Filename to read data from.
        :param file_obj: File-like object to read data from.
        :param bool strict: Whether to store values for columns that do
            not already exist on the table.
        :param str encoding: File encoding.
        :param kwargs: Arbitrary parameters for import-specific
            functionality.

    .. py:method:: connect()

        Open a connection to the underlying database. If a connection is
        not opened explicitly, one will be opened the first time a query
        is executed.

    .. py:method:: close()

        Close the connection to the underlying database.

.. py:class:: Table(dataset, name, model_class)
    :noindex:

    Provides a high-level API for working with rows in a given table.

    .. py:attribute:: columns

        Return a list of columns in the given table.

    .. py:attribute:: model_class

        A dynamically-created :py:class:`Model` class.

    .. py:method:: create_index(columns[, unique=False])

        Create an index on the given columns:

..
code-block:: python # Create a unique index on the `username` column. db['users'].create_index(['username'], unique=True) .. py:method:: insert(**data) Insert the given data dictionary into the table, creating new columns as needed. .. py:method:: update(columns=None, conjunction=None, **data) Update the table using the provided data. If one or more columns are specified in the *columns* parameter, then those columns' values in the *data* dictionary will be used to determine which rows to update. .. code-block:: python # Update all rows. db['users'].update(favorite_orm='peewee') # Only update Huey's record, setting his age to 3. db['users'].update(name='Huey', age=3, columns=['name']) .. py:method:: find(**query) Query the table for rows matching the specified equality conditions. If no query is specified, then all rows are returned. .. code-block:: python peewee_users = db['users'].find(favorite_orm='peewee') .. py:method:: find_one(**query) Return a single row matching the specified equality conditions. If no matching row is found then ``None`` will be returned. .. code-block:: python huey = db['users'].find_one(name='Huey') .. py:method:: all() Return all rows in the given table. .. py:method:: delete(**query) Delete all rows matching the given equality conditions. If no query is provided, then all rows will be deleted. .. code-block:: python # Adios, Django! db['users'].delete(favorite_orm='Django') # Delete all the secret messages. db['secret_messages'].delete() .. py:method:: freeze([format='csv'[, filename=None[, file_obj=None[, **kwargs]]]]) :param format: Output format. By default, *csv* and *json* are supported. :param filename: Filename to write output to. :param file_obj: File-like object to write output to. :param kwargs: Arbitrary parameters for export-specific functionality. .. py:method:: thaw([format='csv'[, filename=None[, file_obj=None[, strict=False[, **kwargs]]]]]) :param format: Input format. By default, *csv* and *json* are supported. :param filename: Filename to read data from. :param file_obj: File-like object to read data from. :param bool strict: Whether to store values for columns that do not already exist on the table. :param kwargs: Arbitrary parameters for import-specific functionality. .. _extra-fields: Fields ------ These fields can be found in the ``playhouse.fields`` module. .. py:class:: CompressedField([compression_level=6[, algorithm='zlib'[, **kwargs]]]) :param int compression_level: A value from 0 to 9. :param str algorithm: Either ``'zlib'`` or ``'bz2'``. Stores compressed data using the specified algorithm. This field extends :py:class:`BlobField`, transparently storing a compressed representation of the data in the database. .. py:class:: PickleField() Stores arbitrary Python data by transparently pickling and un-pickling data stored in the field. This field extends :py:class:`BlobField`. If the ``cPickle`` module is available, it will be used. .. _hybrid: Hybrid Attributes ----------------- Hybrid attributes encapsulate functionality that operates at both the Python *and* SQL levels. The idea for hybrid attributes comes from a feature of the `same name in SQLAlchemy `_. Consider the following example: .. 
code-block:: python class Interval(Model): start = IntegerField() end = IntegerField() @hybrid_property def length(self): return self.end - self.start @hybrid_method def contains(self, point): return (self.start <= point) & (point < self.end) The *hybrid attribute* gets its name from the fact that the ``length`` attribute will behave differently depending on whether it is accessed via the ``Interval`` class or an ``Interval`` instance. If accessed via an instance, then it behaves just as you would expect. If accessed via the ``Interval.length`` class attribute, however, the length calculation will be expressed as a SQL expression. For example: .. code-block:: python query = Interval.select().where(Interval.length > 5) This query will be equivalent to the following SQL: .. code-block:: sql SELECT "t1"."id", "t1"."start", "t1"."end" FROM "interval" AS t1 WHERE (("t1"."end" - "t1"."start") > 5) The ``playhouse.hybrid`` module also contains a decorator for implementing hybrid methods which can accept parameters. As with hybrid properties, when accessed via a model instance, then the function executes normally as-written. When the hybrid method is called on the class, however, it will generate a SQL expression. Example: .. code-block:: python query = Interval.select().where(Interval.contains(2)) This query is equivalent to the following SQL: .. code-block:: sql SELECT "t1"."id", "t1"."start", "t1"."end" FROM "interval" AS t1 WHERE (("t1"."start" <= 2) AND (2 < "t1"."end")) There is an additional API for situations where the python implementation differs slightly from the SQL implementation. Let's add a ``radius`` method to the ``Interval`` model. Because this method calculates an absolute value, we will use the Python ``abs()`` function for the instance portion and the ``fn.ABS()`` SQL function for the class portion. .. code-block:: python class Interval(Model): start = IntegerField() end = IntegerField() @hybrid_property def length(self): return self.end - self.start @hybrid_property def radius(self): return abs(self.length) / 2 @radius.expression def radius(cls): return fn.ABS(cls.length) / 2 What is neat is that both the ``radius`` implementations refer to the ``length`` hybrid attribute! When accessed via an ``Interval`` instance, the radius calculation will be executed in Python. When invoked via an ``Interval`` class, we will get the appropriate SQL. Example: .. code-block:: python query = Interval.select().where(Interval.radius < 3) This query is equivalent to the following SQL: .. code-block:: sql SELECT "t1"."id", "t1"."start", "t1"."end" FROM "interval" AS t1 WHERE ((abs("t1"."end" - "t1"."start") / 2) < 3) Pretty neat, right? Thanks for the cool idea, SQLAlchemy! Hybrid API ^^^^^^^^^^ .. py:class:: hybrid_method(func[, expr=None]) Method decorator that allows the definition of a Python object method with both instance-level and class-level behavior. Example: .. code-block:: python class Interval(Model): start = IntegerField() end = IntegerField() @hybrid_method def contains(self, point): return (self.start <= point) & (point < self.end) When called with an ``Interval`` instance, the ``contains`` method will behave as you would expect. When called as a classmethod, though, a SQL expression will be generated: .. code-block:: python query = Interval.select().where(Interval.contains(2)) Would generate the following SQL: .. code-block:: sql SELECT "t1"."id", "t1"."start", "t1"."end" FROM "interval" AS t1 WHERE (("t1"."start" <= 2) AND (2 < "t1"."end")) .. 
py:method:: expression(expr)

    Method decorator for specifying the SQL-expression producing method.

.. py:class:: hybrid_property(fget[, fset=None[, fdel=None[, expr=None]]])

    Method decorator that allows the definition of a Python object
    property with both instance-level and class-level behavior.

    Examples:

    .. code-block:: python

        class Interval(Model):
            start = IntegerField()
            end = IntegerField()

            @hybrid_property
            def length(self):
                return self.end - self.start

            @hybrid_property
            def radius(self):
                return abs(self.length) / 2

            @radius.expression
            def radius(cls):
                return fn.ABS(cls.length) / 2

    When accessed on an ``Interval`` instance, the ``length`` and
    ``radius`` properties will behave as you would expect. When accessed
    as class attributes, though, a SQL expression will be generated
    instead:

    .. code-block:: python

        query = (Interval
                 .select()
                 .where(
                     (Interval.length > 6) &
                     (Interval.radius >= 3)))

    Would generate the following SQL:

    .. code-block:: sql

        SELECT "t1"."id", "t1"."start", "t1"."end"
        FROM "interval" AS t1
        WHERE (
            (("t1"."end" - "t1"."start") > 6) AND
            ((abs("t1"."end" - "t1"."start") / 2) >= 3)
        )

.. _kv:

Key/Value Store
---------------

The ``playhouse.kv`` module contains the implementation of a persistent
dictionary.

.. py:class:: KeyValue([key_field=None[, value_field=None[, ordered=False[, database=None[, table_name='keyvalue']]]]])

    :param Field key_field: field to use for key. Defaults to
        :py:class:`CharField`. **Must have** ``primary_key=True``.
    :param Field value_field: field to use for value. Defaults to
        :py:class:`PickleField`.
    :param bool ordered: data should be returned in key-sorted order.
    :param Database database: database where key/value data is stored. If
        not specified, an in-memory SQLite database will be used.
    :param str table_name: table name for data storage.

    Dictionary-like API for storing key/value data. Like dictionaries,
    supports the expected APIs, but also has the added capability of
    accepting expressions for getting, setting and deleting items.

    The table is created automatically (if it doesn't exist) when the
    ``KeyValue`` is instantiated. Uses an efficient upsert implementation
    for setting and updating/overwriting key/value pairs.

    Basic examples:

    .. code-block:: python

        # Create a key/value store, which uses an in-memory SQLite database
        # for data storage.
        KV = KeyValue()

        # Set (or overwrite) the value for "k1".
        KV['k1'] = 'v1'

        # Set (or update) multiple keys at once (uses an efficient upsert).
        KV.update(k2='v2', k3='v3')

        # Getting values works as you'd expect.
        assert KV['k2'] == 'v2'

        # We can also do this:
        for value in KV[KV.key > 'k1']:
            print(value)

        # 'v2'
        # 'v3'

        # Update multiple values at once using expression:
        KV[KV.key > 'k1'] = 'vx'

        # What's stored in the KV?
        print(dict(KV))

        # {'k1': 'v1', 'k2': 'vx', 'k3': 'vx'}

        # Delete a single item.
        del KV['k2']

        # How many items are stored in the KV?
        print(len(KV))
        # 2

        # Delete items that match the given condition.
        del KV[KV.key > 'k1']

    .. py:method:: __contains__(expr)

        :param expr: a single key or an expression
        :returns: Boolean whether key/expression exists.

        Example:

        .. code-block:: pycon

            >>> KV = KeyValue()
            >>> KV.update(k1='v1', k2='v2')

            >>> 'k1' in KV
            True
            >>> 'kx' in KV
            False

            >>> (KV.key < 'k2') in KV
            True
            >>> (KV.key > 'k2') in KV
            False

    .. py:method:: __len__()

        :returns: Count of items stored.

    .. py:method:: __getitem__(expr)

        :param expr: a single key or an expression.
        :returns: value(s) corresponding to key/expression.
        :raises: ``KeyError`` if single key given and not found.

        Examples:

..
code-block:: pycon >>> KV = KeyValue() >>> KV.update(k1='v1', k2='v2', k3='v3') >>> KV['k1'] 'v1' >>> KV['kx'] KeyError: "kx" not found >>> KV[KV.key > 'k1'] ['v2', 'v3'] >>> KV[KV.key < 'k1'] [] .. py:method:: __setitem__(expr, value) :param expr: a single key or an expression. :param value: value to set for key(s) Set value for the given key. If ``expr`` is an expression, then any keys matching the expression will have their value updated. Example: .. code-block:: pycon >>> KV = KeyValue() >>> KV.update(k1='v1', k2='v2', k3='v3') >>> KV['k1'] = 'v1-x' >>> print(KV['k1']) 'v1-x' >>> KV[KV.key >= 'k2'] = 'v99' >>> dict(KV) {'k1': 'v1-x', 'k2': 'v99', 'k3': 'v99'} .. py:method:: __delitem__(expr) :param expr: a single key or an expression. Delete the given key. If an expression is given, delete all keys that match the expression. Example: .. code-block:: pycon >>> KV = KeyValue() >>> KV.update(k1=1, k2=2, k3=3) >>> del KV['k1'] # Deletes "k1". >>> del KV['k1'] KeyError: "k1" does not exist >>> del KV[KV.key > 'k2'] # Deletes "k3". >>> del KV[KV.key > 'k99'] # Nothing deleted, no keys match. .. py:method:: keys() :returns: an iterable of all keys in the table. .. py:method:: values() :returns: an iterable of all values in the table. .. py:method:: items() :returns: an iterable of all key/value pairs in the table. .. py:method:: update([__data=None[, **mapping]]) Efficiently bulk-insert or replace the given key/value pairs. Example: .. code-block:: pycon >>> KV = KeyValue() >>> KV.update(k1=1, k2=2) # Sets 'k1'=1, 'k2'=2. >>> dict(KV) {'k1': 1, 'k2': 2} >>> KV.update(k2=22, k3=3) # Updates 'k2'->22, sets 'k3'=3. >>> dict(KV) {'k1': 1, 'k2': 22, 'k3': 3} >>> KV.update({'k2': -2, 'k4': 4}) # Also can pass a dictionary. >>> dict(KV) {'k1': 1, 'k2': -2, 'k3': 3, 'k4': 4} .. py:method:: get(expr[, default=None]) :param expr: a single key or an expression. :param default: default value if key not found. :returns: value of given key/expr or default if single key not found. Get the value at the given key. If the key does not exist, the default value is returned, unless the key is an expression in which case an empty list will be returned. .. py:method:: pop(expr[, default=Sentinel]) :param expr: a single key or an expression. :param default: default value if key does not exist. :returns: value of given key/expr or default if single key not found. Get value and delete the given key. If the key does not exist, the default value is returned, unless the key is an expression in which case an empty list is returned. .. py:method:: clear() Remove all items from the key-value table. .. _shortcuts: Shortcuts --------- This module contains helper functions for expressing things that would otherwise be somewhat verbose or cumbersome using peewee's APIs. There are also helpers for serializing models to dictionaries and vice-versa. .. py:function:: model_to_dict(model[, recurse=True[, backrefs=False[, only=None[, exclude=None[, extra_attrs=None[, fields_from_query=None[, max_depth=None[, manytomany=False]]]]]]]]) :param bool recurse: Whether foreign-keys should be recursed. :param bool backrefs: Whether lists of related objects should be recursed. :param only: A list (or set) of field instances which should be included in the result dictionary. :param exclude: A list (or set) of field instances which should be excluded from the result dictionary. :param extra_attrs: A list of attribute or method names on the instance which should be included in the dictionary. 
:param Select fields_from_query: The :py:class:`SelectQuery` that created
    this model instance. Only the fields and values explicitly selected by
    the query will be serialized.
:param int max_depth: Maximum depth when recursing.
:param bool manytomany: Process many-to-many fields.

Convert a model instance (and optionally any related instances) to a
dictionary.

Examples:

.. code-block:: pycon

    >>> user = User.create(username='charlie')
    >>> model_to_dict(user)
    {'id': 1, 'username': 'charlie'}

    >>> model_to_dict(user, backrefs=True)
    {'id': 1, 'tweets': [], 'username': 'charlie'}

    >>> t1 = Tweet.create(user=user, message='tweet-1')
    >>> t2 = Tweet.create(user=user, message='tweet-2')

    >>> model_to_dict(user, backrefs=True)
    {
      'id': 1,
      'tweets': [
        {'id': 1, 'message': 'tweet-1'},
        {'id': 2, 'message': 'tweet-2'},
      ],
      'username': 'charlie'
    }

    >>> model_to_dict(t1)
    {
      'id': 1,
      'message': 'tweet-1',
      'user': {
        'id': 1,
        'username': 'charlie'
      }
    }

    >>> model_to_dict(t2, recurse=False)
    {'id': 2, 'message': 'tweet-2', 'user': 1}

The implementation of ``model_to_dict`` is fairly complex, owing to the
various usages it attempts to support. If you have a special usage, I
strongly advise that you do **not** attempt to shoe-horn some crazy
combination of parameters into this function. Just write a simple function
that accomplishes exactly what you're attempting to do.

.. py:function:: dict_to_model(model_class, data[, ignore_unknown=False])

    :param Model model_class: The model class to construct.
    :param dict data: A dictionary of data. Foreign keys can be included
        as nested dictionaries, and back-references as lists of
        dictionaries.
    :param bool ignore_unknown: Whether to allow unrecognized (non-field)
        attributes.

    Convert a dictionary of data to a model instance, creating related
    instances where appropriate.

    Examples:

    .. code-block:: pycon

        >>> user_data = {'id': 1, 'username': 'charlie'}
        >>> user = dict_to_model(User, user_data)
        >>> user
        <__main__.User at 0x7fea8fa4d490>

        >>> user.username
        'charlie'

        >>> note_data = {'id': 2, 'text': 'note text', 'user': user_data}
        >>> note = dict_to_model(Note, note_data)
        >>> note.text
        'note text'
        >>> note.user.username
        'charlie'

        >>> user_with_notes = {
        ...     'id': 1,
        ...     'username': 'charlie',
        ...     'notes': [{'id': 1, 'text': 'note-1'}, {'id': 2, 'text': 'note-2'}]}
        >>> user = dict_to_model(User, user_with_notes)
        >>> user.notes[0].text
        'note-1'
        >>> user.notes[0].user.username
        'charlie'

.. py:function:: update_model_from_dict(instance, data[, ignore_unknown=False])

    :param Model instance: The model instance to update.
    :param dict data: A dictionary of data. Foreign keys can be included
        as nested dictionaries, and back-references as lists of
        dictionaries.
    :param bool ignore_unknown: Whether to allow unrecognized (non-field)
        attributes.

    Update a model instance with the given data dictionary.

.. py:function:: resolve_multimodel_query(query[, key='_model_identifier'])

    :param query: a compound select query.
    :param str key: key to use for storing model identifier.
    :return: an iterable cursor that yields the proper model instance for
        each row selected in the compound select query.

    Helper for resolving rows returned in a compound select query to the
    correct model instance type. For example, if you have a union of two
    different tables, this helper will resolve each row to the proper
    model when iterating over the query results.
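    A hedged sketch of how this might be used; the ``Note`` and ``Todo``
    models are hypothetical, and each subquery selects a compatible set of
    columns:

    .. code-block:: python

        notes = Note.select(Note.id, Note.content)
        todos = Todo.select(Todo.id, Todo.content)

        # Rows from the UNION are resolved back to Note or Todo instances.
        for obj in resolve_multimodel_query(notes | todos):
            print(type(obj).__name__, obj.content)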
.. py:class:: ThreadSafeDatabaseMetadata()

    Model :py:class:`Metadata` implementation that provides thread-safe
    access to the ``database`` attribute, allowing applications to safely
    swap the database at run-time in a multi-threaded application.

    Usage:

    .. code-block:: python

        from playhouse.shortcuts import ThreadSafeDatabaseMetadata

        # Our multi-threaded application will sometimes swap out the
        # primary for the read-replica at run-time.
        primary = PostgresqlDatabase(...)
        read_replica = PostgresqlDatabase(...)

        class BaseModel(Model):
            class Meta:
                database = primary
                model_metadata_class = ThreadSafeDatabaseMetadata

.. _signals:

Signal support
--------------

Models with hooks for signals (a-la Django) are provided in
``playhouse.signals``. To use the signals, you will need all of your
project's models to be a subclass of ``playhouse.signals.Model``, which
overrides the necessary methods to provide support for the various
signals.

.. code-block:: python

    from playhouse.signals import Model, post_save


    class MyModel(Model):
        data = IntegerField()


    @post_save(sender=MyModel)
    def on_save_handler(model_class, instance, created):
        put_data_in_cache(instance.data)

.. warning::
    For what I hope are obvious reasons, Peewee signals do not work when
    you use the :py:meth:`Model.insert`, :py:meth:`Model.update`, or
    :py:meth:`Model.delete` methods. These methods generate queries that
    execute beyond the scope of the ORM, and the ORM does not know which
    model instances might or might not be affected when the query
    executes.

    Signals work by hooking into the higher-level peewee APIs like
    :py:meth:`Model.save` and :py:meth:`Model.delete_instance`, where the
    affected model instance is known ahead of time.

The following signals are provided:

``pre_save``
    Called immediately before an object is saved to the database. Provides
    an additional keyword argument ``created``, indicating whether the
    model is being saved for the first time or updated.
``post_save``
    Called immediately after an object is saved to the database. Provides
    an additional keyword argument ``created``, indicating whether the
    model is being saved for the first time or updated.
``pre_delete``
    Called immediately before an object is deleted from the database when
    :py:meth:`Model.delete_instance` is used.
``post_delete``
    Called immediately after an object is deleted from the database when
    :py:meth:`Model.delete_instance` is used.
``pre_init``
    Called when a model instance is first instantiated.

Connecting handlers
^^^^^^^^^^^^^^^^^^^

Whenever a signal is dispatched, it will call any handlers that have been
registered. This allows totally separate code to respond to events like
model save and delete.

The :py:class:`Signal` class provides a :py:meth:`~Signal.connect` method,
which takes a callback function and two optional parameters for "sender"
and "name". If specified, the "sender" parameter should be a single model
class and allows your callback to only receive signals from that one model
class. The "name" parameter is used as a convenient alias in the event you
wish to unregister your signal handler.

Example usage:

.. code-block:: python

    from playhouse.signals import *

    def post_save_handler(sender, instance, created):
        print('%s was just saved' % instance)

    # our handler will only be called when we save instances of SomeModel
    post_save.connect(post_save_handler, sender=SomeModel)

All signal handlers accept as their first two arguments ``sender`` and
``instance``, where ``sender`` is the model class and ``instance`` is the
actual model being acted upon.
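For instance, a handler registered with a ``name`` alias can also make use
of the extra ``created`` keyword argument supplied by ``post_save``; a
brief sketch (the handler and alias names are hypothetical):

.. code-block:: python

    def audit_handler(sender, instance, created):
        # "created" distinguishes an INSERT from an UPDATE.
        action = 'created' if created else 'updated'
        print('%s was just %s' % (instance, action))

    # The name alias makes it easy to disconnect the handler later.
    post_save.connect(audit_handler, name='project.audit', sender=SomeModel)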
If you'd like, you can also use a decorator to connect signal handlers.
This is functionally equivalent to the above example:

.. code-block:: python

    @post_save(sender=SomeModel)
    def post_save_handler(sender, instance, created):
        print('%s was just saved' % instance)

Signal API
^^^^^^^^^^

.. py:class:: Signal()

    Stores a list of receivers (callbacks) and calls them when the "send"
    method is invoked.

    .. py:method:: connect(receiver[, name=None[, sender=None]])

        :param callable receiver: a callable that takes at least two
            parameters, a "sender", which is the Model subclass that
            triggered the signal, and an "instance", which is the actual
            model instance.
        :param string name: a short alias.
        :param Model sender: if specified, only instances of this model
            class will trigger the receiver callback.

        Add the receiver to the internal list of receivers, which will be
        called whenever the signal is sent.

        .. code-block:: python

            from playhouse.signals import post_save
            from project.handlers import cache_buster

            post_save.connect(cache_buster, name='project.cache_buster')

    .. py:method:: disconnect([receiver=None[, name=None[, sender=None]]])

        :param callable receiver: the callback to disconnect.
        :param string name: a short alias.
        :param Model sender: disconnect model-specific handler.

        Disconnect the given receiver (or the receiver with the given name
        alias) so that it is no longer called. Either the receiver or the
        name must be provided.

        .. code-block:: python

            post_save.disconnect(name='project.cache_buster')

    .. py:method:: send(instance, *args, **kwargs)

        :param instance: a model instance.

        Iterates over the receivers, calling them in the order in which
        they were connected. If the receiver specified a sender, it will
        only be called if the instance is an instance of the sender.

    .. py:method:: __call__([name=None[, sender=None]])

        :param string name: a short alias.
        :param Model sender: if specified, only instances of this model
            class will trigger the receiver callback.

        Function decorator that is an alias for a signal's connect method:

        .. code-block:: python

            from playhouse.signals import post_save

            @post_save(name='project.cache_buster')
            def cache_bust_handler(sender, instance, *args, **kwargs):
                # bust the cache for this instance
                cache.delete(cache_key_for(instance))

.. _pwiz:

pwiz, a model generator
-----------------------

``pwiz`` is a little script that ships with peewee and is capable of
introspecting an existing database and generating model code suitable for
interacting with the underlying data. If you have a database already, pwiz
can give you a nice boost by generating skeleton code with correct column
affinities and foreign keys.

If you install peewee using ``setup.py install``, pwiz will be installed
as a "script" and you can just run:

.. code-block:: console

    python -m pwiz -e postgresql -u postgres my_postgres_db

This will print a bunch of models to standard output. So you can do this:

.. code-block:: console

    python -m pwiz -e postgresql my_postgres_db > mymodels.py
    python   # <-- fire up an interactive shell

..
code-block:: pycon >>> from mymodels import Blog, Entry, Tag, Whatever >>> print([blog.name for blog in Blog.select()]) Command-line options ^^^^^^^^^^^^^^^^^^^^ pwiz accepts the following command-line options: ====== =================================== ============================================ Option Meaning Example ====== =================================== ============================================ -h show help -e database backend -e mysql -H host to connect to -H remote.db.server -p port to connect on -p 9001 -u database user -u postgres -P database password -P (will be prompted for password) -s schema -s public -t tables to generate -t tweet,users,relationships -v generate models for VIEWs (no argument) -i add info metadata to generated file (no argument) -o table column order is preserved (no argument) ====== =================================== ============================================ The following are valid parameters for the ``engine`` (``-e``): * sqlite * mysql * postgresql .. warning:: If a password is required to access your database, you will be prompted to enter it using a secure prompt. **The password will be included in the output**. Specifically, at the top of the file a :py:class:`Database` will be defined along with any required parameters -- including the password. pwiz examples ^^^^^^^^^^^^^ Examples of introspecting various databases: .. code-block:: console # Introspect a Sqlite database. python -m pwiz -e sqlite path/to/sqlite_database.db # Introspect a MySQL database, logging in as root. You will be prompted # for a password ("-P"). python -m pwiz -e mysql -u root -P mysql_db_name # Introspect a Postgresql database on a remote server. python -m pwiz -e postgres -u postgres -H 10.1.0.3 pg_db_name Full example: .. code-block:: console $ sqlite3 example.db << EOM CREATE TABLE "user" ("id" INTEGER NOT NULL PRIMARY KEY, "username" TEXT NOT NULL); CREATE TABLE "tweet" ( "id" INTEGER NOT NULL PRIMARY KEY, "content" TEXT NOT NULL, "timestamp" DATETIME NOT NULL, "user_id" INTEGER NOT NULL, FOREIGN KEY ("user_id") REFERENCES "user" ("id")); CREATE UNIQUE INDEX "user_username" ON "user" ("username"); EOM $ python -m pwiz -e sqlite example.db Produces the following output: .. code-block:: python from peewee import * database = SqliteDatabase('example.db', **{}) class UnknownField(object): def __init__(self, *_, **__): pass class BaseModel(Model): class Meta: database = database class User(BaseModel): username = TextField(unique=True) class Meta: table_name = 'user' class Tweet(BaseModel): content = TextField() timestamp = DateTimeField() user = ForeignKeyField(column_name='user_id', field='id', model=User) class Meta: table_name = 'tweet' Observations: * The foreign-key ``Tweet.user_id`` is detected and mapped correctly. * The ``User.username`` UNIQUE constraint is detected. * Each model explicitly declares its table name, even in cases where it is not necessary (as Peewee would automatically translate the class name into the appropriate table name). * All the parameters of the :py:class:`ForeignKeyField` are explicitly declared, even though they follow the conventions Peewee uses by default. .. note:: The ``UnknownField`` is a placeholder that is used in the event your schema contains a column declaration that Peewee doesn't know how to map to a field class. .. _migrate: Schema Migrations ----------------- Peewee now supports schema migrations, with well-tested support for Postgresql, SQLite and MySQL. 
Unlike other schema migration tools, peewee's migrations do not handle
introspection and database "versioning". Rather, peewee provides a number of
helper functions for generating and running schema-altering statements. This
engine provides the basis on which a more sophisticated tool could someday
be built.

Migrations can be written as simple Python scripts and executed from the
command-line. Since the migrations only depend on your application's
:py:class:`Database` object, it should be easy to manage changing your model
definitions and maintaining a set of migration scripts without introducing
dependencies.

Example usage
^^^^^^^^^^^^^

Begin by importing the helpers from the ``migrate`` module:

.. code-block:: python

    from playhouse.migrate import *

Instantiate a ``migrator``. The :py:class:`SchemaMigrator` class is
responsible for generating schema-altering operations, which can then be run
sequentially by the :py:func:`migrate` helper.

.. code-block:: python

    # Postgres example:
    my_db = PostgresqlDatabase(...)
    migrator = PostgresqlMigrator(my_db)

    # SQLite example:
    my_db = SqliteDatabase('my_database.db')
    migrator = SqliteMigrator(my_db)

Use :py:func:`migrate` to execute one or more operations:

.. code-block:: python

    title_field = CharField(default='')
    status_field = IntegerField(null=True)

    migrate(
        migrator.add_column('some_table', 'title', title_field),
        migrator.add_column('some_table', 'status', status_field),
        migrator.drop_column('some_table', 'old_column'),
    )

.. warning::
    Migrations are not run inside a transaction. If you wish the migration
    to run in a transaction you will need to wrap the call to ``migrate`` in
    a :py:meth:`~Database.atomic` context-manager, e.g.

    .. code-block:: python

        with my_db.atomic():
            migrate(...)

Supported Operations
^^^^^^^^^^^^^^^^^^^^

Add new field(s) to an existing model:

.. code-block:: python

    # Create your field instances. For non-null fields you must specify a
    # default value.
    pubdate_field = DateTimeField(null=True)
    comment_field = TextField(default='')

    # Run the migration, specifying the database table, field name and field.
    migrate(
        migrator.add_column('comment_tbl', 'pub_date', pubdate_field),
        migrator.add_column('comment_tbl', 'comment', comment_field),
    )

.. note::
    Peewee follows the Django convention of, by default, appending ``_id``
    to the column name for a given :py:class:`ForeignKeyField`. When adding
    a foreign-key, you will want to ensure you give it the proper column
    name. For example, if I want to add a ``user`` foreign-key to a
    ``Tweet`` model:

    .. code-block:: python

        # Our desired model will look like this:
        class Tweet(BaseModel):
            user = ForeignKeyField(User)  # I want to add this field.
            # ... other fields ...

        # Migration code:
        user = ForeignKeyField(User, field=User.id, null=True)
        migrate(
            # Note that the column name given is "user_id".
            migrator.add_column(Tweet._meta.table_name, 'user_id', user),
        )

Renaming a field:

.. code-block:: python

    # Specify the table, original name of the column, and its new name.
    migrate(
        migrator.rename_column('story', 'pub_date', 'publish_date'),
        migrator.rename_column('story', 'mod_date', 'modified_date'),
    )

Dropping a field:

.. code-block:: python

    migrate(
        migrator.drop_column('story', 'some_old_field'),
    )

Making a field nullable or not nullable:

.. code-block:: python

    # Note that when making a field not null that field must not have any
    # NULL values present.
    migrate(
        # Make `pub_date` allow NULL values.
        migrator.drop_not_null('story', 'pub_date'),

        # Prevent `modified_date` from containing NULL values.
        migrator.add_not_null('story', 'modified_date'),
    )
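.. note::
    When adding a ``NOT NULL`` constraint to a column that already contains
    rows, those rows must first be given a value. A minimal sketch of
    back-filling before applying the constraint (the table, column and
    fallback value below are hypothetical):

    .. code-block:: python

        # Hypothetical: populate NULL rows before adding the constraint.
        my_db.execute_sql("UPDATE story SET modified_date = pub_date "
                          "WHERE modified_date IS NULL")
        migrate(migrator.add_not_null('story', 'modified_date'))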
Altering a field's data-type:

.. code-block:: python

    # Change a VARCHAR(50) field to a TEXT field.
    migrate(
        migrator.alter_column_type('person', 'email', TextField())
    )

Renaming a table:

.. code-block:: python

    migrate(
        migrator.rename_table('story', 'stories_tbl'),
    )

Adding an index:

.. code-block:: python

    # Specify the table, column names, and whether the index should be
    # UNIQUE or not.
    migrate(
        # Create an index on the `pub_date` column.
        migrator.add_index('story', ('pub_date',), False),

        # Create a multi-column index on the `pub_date` and `status` fields.
        migrator.add_index('story', ('pub_date', 'status'), False),

        # Create a unique index on the category and title fields.
        migrator.add_index('story', ('category_id', 'title'), True),
    )

Dropping an index:

.. code-block:: python

    # Specify the index name.
    migrate(migrator.drop_index('story', 'story_pub_date_status'))

Adding or dropping table constraints:

.. code-block:: python

    # Add a CHECK() constraint to enforce the price cannot be negative.
    migrate(migrator.add_constraint(
        'products',
        'price_check',
        Check('price >= 0')))

    # Remove the price check constraint.
    migrate(migrator.drop_constraint('products', 'price_check'))

    # Add a UNIQUE constraint on the first and last names.
    migrate(migrator.add_unique('person', 'first_name', 'last_name'))

Adding or dropping a database-level default value for a column:

.. code-block:: python

    # Add a default value for a status column.
    migrate(migrator.add_column_default(
        'entries',
        'status',
        'draft'))

    # Remove the default.
    migrate(migrator.drop_column_default('entries', 'status'))

    # Use a function for the default value (does not work with Sqlite):
    migrate(migrator.add_column_default(
        'entries',
        'timestamp',
        fn.now()))

    # Or alternatively (works with Sqlite):
    migrate(migrator.add_column_default(
        'entries',
        'timestamp',
        'now()'))

.. note::
    Postgres users may need to set the search-path when using a non-standard
    schema. This can be done as follows:

    .. code-block:: python

        new_field = TextField(default='', null=False)
        migrator = PostgresqlMigrator(db)
        migrate(migrator.set_search_path('my_schema_name'),
                migrator.add_column('table', 'field_name', new_field))

Migrations API
^^^^^^^^^^^^^^

.. py:function:: migrate(*operations)

    Execute one or more schema-altering operations.

    Usage:

    .. code-block:: python

        migrate(
            migrator.add_column('some_table', 'new_column', CharField(default='')),
            migrator.add_index('some_table', ('new_column',)),
        )

.. py:class:: SchemaMigrator(database)

    :param database: a :py:class:`Database` instance.

    The :py:class:`SchemaMigrator` is responsible for generating
    schema-altering statements.

    .. py:method:: add_column(table, column_name, field)

        :param str table: Name of the table to add column to.
        :param str column_name: Name of the new column.
        :param Field field: A :py:class:`Field` instance.

        Add a new column to the provided table. The ``field`` provided will
        be used to generate the appropriate column definition.

        .. note:: If the field is not nullable it must specify a default
            value.

        .. note::
            For non-null fields, the field will initially be added as a null
            field, then an ``UPDATE`` statement will be executed to populate
            the column with the default value. Finally, the column will be
            marked as not null.

    .. py:method:: drop_column(table, column_name[, cascade=True])

        :param str table: Name of the table to drop column from.
        :param str column_name: Name of the column to drop.
        :param bool cascade: Whether the column should be dropped with
            ``CASCADE``.
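        .. note::
            A quick sketch of the ``cascade`` flag (the table and column
            names here are hypothetical). On Postgres, ``CASCADE`` also
            drops objects, such as views, that depend on the column; pass
            ``cascade=False`` to omit the clause:

            .. code-block:: python

                # Drop the column without appending CASCADE.
                migrate(migrator.drop_column('story', 'old_col', cascade=False))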
    .. py:method:: rename_column(table, old_name, new_name)

        :param str table: Name of the table containing column to rename.
        :param str old_name: Current name of the column.
        :param str new_name: New name for the column.

    .. py:method:: add_not_null(table, column)

        :param str table: Name of table containing column.
        :param str column: Name of the column to make not nullable.

    .. py:method:: drop_not_null(table, column)

        :param str table: Name of table containing column.
        :param str column: Name of the column to make nullable.

    .. py:method:: add_column_default(table, column, default)

        :param str table: Name of table containing column.
        :param str column: Name of the column to add default to.
        :param default: New default value for column. See notes below.

        Peewee attempts to properly quote the default if it appears to be a
        string literal. Otherwise the default will be treated literally.
        Postgres and MySQL support specifying the default as a peewee
        expression, e.g. ``fn.NOW()``, but Sqlite users will need to use
        ``default='now()'`` instead.

    .. py:method:: drop_column_default(table, column)

        :param str table: Name of table containing column.
        :param str column: Name of the column to remove default from.

    .. py:method:: alter_column_type(table, column, field[, cast=None])

        :param str table: Name of the table.
        :param str column: Name of the column to modify.
        :param Field field: :py:class:`Field` instance representing new
            data type.
        :param cast: (postgres-only) specify a cast expression if the
            data-types are incompatible, e.g. ``column_name::int``. Can be
            provided as either a string or a :py:class:`Cast` instance.

        Alter the data-type of a column. This method should be used with
        care, as using incompatible types may not be well-supported by your
        database.

    .. py:method:: rename_table(old_name, new_name)

        :param str old_name: Current name of the table.
        :param str new_name: New name for the table.

    .. py:method:: add_index(table, columns[, unique=False[, using=None]])

        :param str table: Name of table on which to create the index.
        :param list columns: List of columns which should be indexed.
        :param bool unique: Whether the new index should specify a unique
            constraint.
        :param str using: Index type (where supported), e.g. GiST or GIN.

    .. py:method:: drop_index(table, index_name)

        :param str table: Name of the table containing the index to be
            dropped.
        :param str index_name: Name of the index to be dropped.

    .. py:method:: add_constraint(table, name, constraint)

        :param str table: Table to add constraint to.
        :param str name: Name used to identify the constraint.
        :param constraint: either a :py:func:`Check` constraint or, for
            adding an arbitrary constraint, a :py:class:`SQL` object.

    .. py:method:: drop_constraint(table, name)

        :param str table: Table to drop constraint from.
        :param str name: Name of constraint to drop.

    .. py:method:: add_unique(table, *column_names)

        :param str table: Table to add constraint to.
        :param str column_names: One or more columns for UNIQUE constraint.

.. py:class:: PostgresqlMigrator(database)

    Generate migrations for Postgresql databases.

    .. py:method:: set_search_path(schema_name)

        :param str schema_name: Schema to use.

        Set the search path (schema) for the subsequent operations.

.. py:class:: SqliteMigrator(database)

    Generate migrations for SQLite databases.

    SQLite has limited support for ``ALTER TABLE`` queries, so the following
    operations are currently not supported for SQLite:

    * ``add_constraint``
    * ``drop_constraint``
    * ``add_unique``
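    .. note::
        Although ``add_unique`` is unsupported here, SQLite *does* support
        unique indexes, which typically provide an equivalent guarantee. A
        sketch, assuming a hypothetical ``story`` table:

        .. code-block:: python

            migrator = SqliteMigrator(db)
            migrate(migrator.add_index('story', ('category_id', 'title'), True))

..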
py:class:: MySQLMigrator(database) Generate migrations for MySQL databases. .. _reflection: Reflection ---------- The reflection module contains helpers for introspecting existing databases. This module is used internally by several other modules in the playhouse, including :ref:`dataset` and :ref:`pwiz`. .. py:function:: generate_models(database[, schema=None[, **options]]) :param Database database: database instance to introspect. :param str schema: optional schema to introspect. :param options: arbitrary options, see :py:meth:`Introspector.generate_models` for details. :returns: a ``dict`` mapping table names to model classes. Generate models for the tables in the given database. For an example of how to use this function, see the section :ref:`interactive`. Example: .. code-block:: pycon >>> from peewee import * >>> from playhouse.reflection import generate_models >>> db = PostgresqlDatabase('my_app') >>> models = generate_models(db) >>> list(models.keys()) ['account', 'customer', 'order', 'orderitem', 'product'] >>> globals().update(models) # Inject models into namespace. >>> for cust in customer.select(): # Query using generated model. ... print(cust.name) ... Huey Kitty Mickey Dog .. py:function:: print_model(model) :param Model model: model class to print :returns: no return value Print a user-friendly description of a model class, useful for debugging or interactive use. Currently this prints the table name, and all fields along with their data-types. The :ref:`interactive` section contains an example. Example output: .. code-block:: pycon >>> from playhouse.reflection import print_model >>> print_model(User) user id AUTO PK email TEXT name TEXT dob DATE index(es) email UNIQUE >>> print_model(Tweet) tweet id AUTO PK user INT FK: User.id title TEXT content TEXT timestamp DATETIME is_published BOOL index(es) user_id is_published, timestamp .. py:function:: print_table_sql(model) :param Model model: model to print :returns: no return value Prints the SQL ``CREATE TABLE`` for the given model class, which may be useful for debugging or interactive use. See the :ref:`interactive` section for example usage. Note that indexes and constraints are not included in the output of this function. Example output: .. code-block:: pycon >>> from playhouse.reflection import print_table_sql >>> print_table_sql(User) CREATE TABLE IF NOT EXISTS "user" ( "id" INTEGER NOT NULL PRIMARY KEY, "email" TEXT NOT NULL, "name" TEXT NOT NULL, "dob" DATE NOT NULL ) >>> print_table_sql(Tweet) CREATE TABLE IF NOT EXISTS "tweet" ( "id" INTEGER NOT NULL PRIMARY KEY, "user_id" INTEGER NOT NULL, "title" TEXT NOT NULL, "content" TEXT NOT NULL, "timestamp" DATETIME NOT NULL, "is_published" INTEGER NOT NULL, FOREIGN KEY ("user_id") REFERENCES "user" ("id") ) .. py:class:: Introspector(metadata[, schema=None]) Metadata can be extracted from a database by instantiating an :py:class:`Introspector`. Rather than instantiating this class directly, it is recommended to use the factory method :py:meth:`~Introspector.from_database`. .. py:classmethod:: from_database(database[, schema=None]) :param database: a :py:class:`Database` instance. :param str schema: an optional schema (supported by some databases). Creates an :py:class:`Introspector` instance suitable for use with the given database. Usage: .. 
code-block:: python db = SqliteDatabase('my_app.db') introspector = Introspector.from_database(db) models = introspector.generate_models() # User and Tweet (assumed to exist in the database) are # peewee Model classes generated from the database schema. User = models['user'] Tweet = models['tweet'] .. py:method:: generate_models([skip_invalid=False[, table_names=None[, literal_column_names=False[, bare_fields=False[, include_views=False]]]]]) :param bool skip_invalid: Skip tables whose names are invalid python identifiers. :param list table_names: List of table names to generate. If unspecified, models are generated for all tables. :param bool literal_column_names: Use column-names as-is. By default, column names are "python-ized", i.e. mixed-case becomes lower-case. :param bare_fields: **SQLite-only**. Do not specify data-types for introspected columns. :param include_views: generate models for VIEWs as well. :return: A dictionary mapping table-names to model classes. Introspect the database, reading in the tables, columns, and foreign key constraints, then generate a dictionary mapping each database table to a dynamically-generated :py:class:`Model` class. .. _db_url: Database URL ------------ This module contains a helper function to generate a database connection from a URL connection string. .. py:function:: connect(url, **connect_params) Create a :py:class:`Database` instance from the given connection URL. Examples: * *sqlite:///my_database.db* will create a :py:class:`SqliteDatabase` instance for the file ``my_database.db`` in the current directory. * *sqlite:///:memory:* will create an in-memory :py:class:`SqliteDatabase` instance. * *postgresql://postgres:my_password@localhost:5432/my_database* will create a :py:class:`PostgresqlDatabase` instance. A username and password are provided, as well as the host and port to connect to. * *mysql://user:passwd@ip:port/my_db* will create a :py:class:`MySQLDatabase` instance for the local MySQL database *my_db*. * *mysql+pool://user:passwd@ip:port/my_db?max_connections=20&stale_timeout=300* will create a :py:class:`PooledMySQLDatabase` instance for the local MySQL database *my_db* with max_connections set to 20 and a stale_timeout setting of 300 seconds. Supported schemes: * ``apsw``: :py:class:`APSWDatabase` * ``cockroachdb``: :py:class:`CockroachDatabase` * ``cockroachdb+pool``: :py:class:`PooledCockroachDatabase` * ``mysql``: :py:class:`MySQLDatabase` * ``mysql+pool``: :py:class:`PooledMySQLDatabase` * ``postgres``: :py:class:`PostgresqlDatabase` * ``postgres+pool``: :py:class:`PooledPostgresqlDatabase` * ``postgresext``: :py:class:`PostgresqlExtDatabase` * ``postgresext+pool``: :py:class:`PooledPostgresqlExtDatabase` * ``psycopg3``: :py:class:`Psycopg3Database` * ``psycopg3+pool``: :py:class:`PooledPsycopg3Database` * ``sqlite``: :py:class:`SqliteDatabase` * ``sqliteext``: :py:class:`SqliteExtDatabase` * ``sqlite+pool``: :py:class:`PooledSqliteDatabase` * ``sqliteext+pool``: :py:class:`PooledSqliteExtDatabase` Usage: .. code-block:: python import os from playhouse.db_url import connect # Connect to the database URL defined in the environment, falling # back to a local Sqlite database if no database URL is specified. db = connect(os.environ.get('DATABASE') or 'sqlite:///default.db') .. py:function:: parse(url) Parse the information in the given URL into a dictionary containing ``database``, ``host``, ``port``, ``user`` and/or ``password``. Additional connection arguments can be passed in the URL query string. 
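For example, a quick sketch of what ``parse()`` might return (the URL below
is hypothetical, and the exact keys depend on which components appear in the
URL):

.. code-block:: pycon

    >>> from playhouse.db_url import parse
    >>> parse('postgresql://postgres:secret@db.example.com:5432/app_db')
    {'database': 'app_db', 'host': 'db.example.com', 'password': 'secret',
     'port': 5432, 'user': 'postgres'}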
If you are using a custom database class, you can use the ``parse()`` function to extract information from a URL which can then be passed in to your database object. .. py:function:: register_database(db_class, *names) :param db_class: A subclass of :py:class:`Database`. :param names: A list of names to use as the scheme in the URL, e.g. 'sqlite' or 'firebird' Register additional database class under the specified names. This function can be used to extend the ``connect()`` function to support additional schemes. Suppose you have a custom database class for ``Firebird`` named ``FirebirdDatabase``. .. code-block:: python from playhouse.db_url import connect, register_database register_database(FirebirdDatabase, 'firebird') db = connect('firebird://my-firebird-db') .. _pool: Connection pool --------------- The ``pool`` module contains a number of :py:class:`Database` classes that provide connection pooling for PostgreSQL, MySQL and SQLite databases. The pool works by overriding the methods on the :py:class:`Database` class that open and close connections to the backend. The pool can specify a timeout after which connections are recycled, as well as an upper bound on the number of open connections. In a multi-threaded application, up to `max_connections` will be opened. Each thread (or, if using gevent, greenlet) will have its own connection. In a single-threaded application, only one connection will be created. It will be continually recycled until either it exceeds the stale timeout or is closed explicitly (using `.manual_close()`). **By default, all your application needs to do is ensure that connections are closed when you are finished with them, and they will be returned to the pool**. For web applications, this typically means that at the beginning of a request, you will open a connection, and when you return a response, you will close the connection. Simple Postgres pool example code: .. code-block:: python # Use the special postgresql extensions. from playhouse.pool import PooledPostgresqlExtDatabase db = PooledPostgresqlExtDatabase( 'my_app', max_connections=32, stale_timeout=300, # 5 minutes. user='postgres') class BaseModel(Model): class Meta: database = db That's it! If you would like finer-grained control over the pool of connections, check out the :ref:`connection_management` section. Pool APIs ^^^^^^^^^ .. py:class:: PooledDatabase(database[, max_connections=20[, stale_timeout=None[, timeout=None[, **kwargs]]]]) :param str database: The name of the database or database file. :param int max_connections: Maximum number of connections. Provide ``None`` for unlimited. :param int stale_timeout: Number of seconds to allow connections to be used. :param int timeout: Number of seconds to block when pool is full. By default peewee does not block when the pool is full but simply throws an exception. To block indefinitely set this value to ``0``. :param kwargs: Arbitrary keyword arguments passed to database class. Mixin class intended to be used with a subclass of :py:class:`Database`. .. note:: Connections will not be closed exactly when they exceed their `stale_timeout`. Instead, stale connections are only closed when a new connection is requested. .. note:: If the number of open connections exceeds `max_connections`, a `ValueError` will be raised. .. py:method:: manual_close() Close the currently-open connection without returning it to the pool. .. py:method:: close_idle() Close all idle connections. 
This does not include any connections that are currently in-use -- only
those that were previously created but have since been returned to the pool.

.. py:method:: close_stale([age=600])

    :param int age: Age at which a connection should be considered stale.
    :returns: Number of connections closed.

    Close connections which are in-use but exceed the given age. **Use
    caution when calling this method!**

.. py:method:: close_all()

    Close all connections. This includes any connections that may be in use
    at the time. **Use caution when calling this method!**

.. py:class:: PooledPostgresqlDatabase

    Subclass of :py:class:`PostgresqlDatabase` that mixes in the
    :py:class:`PooledDatabase` helper.

.. py:class:: PooledPostgresqlExtDatabase

    Subclass of :py:class:`PostgresqlExtDatabase` that mixes in the
    :py:class:`PooledDatabase` helper. The :py:class:`PostgresqlExtDatabase`
    is a part of the :ref:`postgres_ext` module and provides support for
    many Postgres-specific features.

.. py:class:: PooledMySQLDatabase

    Subclass of :py:class:`MySQLDatabase` that mixes in the
    :py:class:`PooledDatabase` helper.

.. py:class:: PooledSqliteDatabase

    Persistent connections for SQLite apps.

.. py:class:: PooledSqliteExtDatabase

    Persistent connections for SQLite apps, using the :ref:`sqlite_ext`
    advanced database driver :py:class:`SqliteExtDatabase`.

.. _test_utils:

Test Utils
----------

Contains utilities helpful when testing peewee projects.

.. py:class:: count_queries([only_select=False])

    Context manager that will count the number of queries executed within
    the context.

    :param bool only_select: Only count *SELECT* queries.

    .. code-block:: python

        with count_queries() as counter:
            huey = User.get(User.username == 'huey')
            huey_tweets = [tweet.message for tweet in huey.tweets]

        assert counter.count == 2

    .. py:attribute:: count

        The number of queries executed.

    .. py:method:: get_queries()

        Return a list of 2-tuples consisting of the SQL query and a list of
        parameters.

.. py:function:: assert_query_count(expected[, only_select=False])

    Function or method decorator that will raise an ``AssertionError`` if
    the number of queries executed in the decorated function does not equal
    the expected number.

    .. code-block:: python

        class TestMyApp(unittest.TestCase):
            @assert_query_count(1)
            def test_get_popular_blogs(self):
                popular_blogs = Blog.get_popular()
                self.assertEqual(
                    [blog.title for blog in popular_blogs],
                    ["Peewee's Playhouse!", "All About Huey", "Mickey's Adventures"])

    This function can also be used as a context manager:

    .. code-block:: python

        class TestMyApp(unittest.TestCase):
            def test_expensive_operation(self):
                with assert_query_count(1):
                    perform_expensive_operation()

.. _flask_utils:

Flask Utils
-----------

The ``playhouse.flask_utils`` module contains several helpers for
integrating peewee with the Flask web framework.

Database Wrapper
^^^^^^^^^^^^^^^^

The :py:class:`FlaskDB` class is a wrapper for configuring and referencing a
Peewee database from within a Flask application. Don't let its name fool
you: it is **not the same thing as a peewee database**. ``FlaskDB`` is
designed to remove the following boilerplate from your flask app:

* Dynamically create a Peewee database instance based on app config data.
* Create a base class from which all your application's models will descend.
* Register hooks at the start and end of a request to handle opening and
  closing a database connection.

Basic usage:
.. code-block:: python

    import datetime
    from flask import Flask
    from peewee import *
    from playhouse.flask_utils import FlaskDB

    DATABASE = 'postgresql://postgres:password@localhost:5432/my_database'

    # If we want to exclude particular views from the automatic connection
    # management, we list them this way:
    FLASKDB_EXCLUDED_ROUTES = ('logout',)

    app = Flask(__name__)
    app.config.from_object(__name__)

    db_wrapper = FlaskDB(app)

    class User(db_wrapper.Model):
        username = CharField(unique=True)

    class Tweet(db_wrapper.Model):
        user = ForeignKeyField(User, backref='tweets')
        content = TextField()
        timestamp = DateTimeField(default=datetime.datetime.now)

The above code example will create and instantiate a peewee
:py:class:`PostgresqlDatabase` specified by the given database URL. Request
hooks will be configured to establish a connection when a request is
received, and automatically close the connection when the response is sent.
Lastly, the :py:class:`FlaskDB` class exposes a :py:attr:`FlaskDB.Model`
property which can be used as a base for your application's models.

Here is how you can access the wrapped Peewee database instance that is
configured for you by the ``FlaskDB`` wrapper:

.. code-block:: python

    # Obtain a reference to the Peewee database instance.
    peewee_db = db_wrapper.database

    @app.route('/transfer-funds/', methods=['POST'])
    def transfer_funds():
        with peewee_db.atomic():
            # ...
            return jsonify({'transfer-id': xid})

.. note::
    The actual peewee database can be accessed using the
    ``FlaskDB.database`` attribute.

Here is another way to configure a Peewee database using ``FlaskDB``:

.. code-block:: python

    app = Flask(__name__)
    db_wrapper = FlaskDB(app, 'sqlite:///my_app.db')

While the above examples show using a database URL, for more advanced usage
you can specify a dictionary of configuration options, or simply pass in a
peewee :py:class:`Database` instance:

.. code-block:: python

    DATABASE = {
        'name': 'my_app_db',
        'engine': 'playhouse.pool.PooledPostgresqlDatabase',
        'user': 'postgres',
        'max_connections': 32,
        'stale_timeout': 600,
    }

    app = Flask(__name__)
    app.config.from_object(__name__)

    wrapper = FlaskDB(app)
    pooled_postgres_db = wrapper.database

Using a peewee :py:class:`Database` object:

.. code-block:: python

    peewee_db = PostgresqlExtDatabase('my_app')
    app = Flask(__name__)
    db_wrapper = FlaskDB(app, peewee_db)

Database with Application Factory
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If you prefer to use the application factory pattern, the
:py:class:`FlaskDB` class implements an ``init_app()`` method.

Using as a factory:

.. code-block:: python

    db_wrapper = FlaskDB()

    # Even though the database is not yet initialized, you can still use the
    # `Model` property to create model classes.
    class User(db_wrapper.Model):
        username = CharField(unique=True)

    def create_app():
        app = Flask(__name__)
        app.config['DATABASE'] = 'sqlite:////home/code/apps/my-database.db'
        db_wrapper.init_app(app)
        return app

Query utilities
^^^^^^^^^^^^^^^

The ``flask_utils`` module provides several helpers for managing queries in
your web app. Some common patterns include:

.. py:function:: get_object_or_404(query_or_model, *query)

    :param query_or_model: Either a :py:class:`Model` class or a
        pre-filtered :py:class:`SelectQuery`.
    :param query: An arbitrarily complex peewee expression.

    Retrieve the object matching the given query, or return a 404 not found
    response. A common use-case might be a detail page for a weblog. You
    want to either retrieve the post matching the given URL, or return a
    404.

    Example:
.. code-block:: python

    @app.route('/blog/<slug>/')
    def post_detail(slug):
        public_posts = Post.select().where(Post.published == True)
        post = get_object_or_404(public_posts, (Post.slug == slug))
        return render_template('post_detail.html', post=post)

.. py:function:: object_list(template_name, query[, context_variable='object_list'[, paginate_by=20[, page_var='page'[, check_bounds=True[, **kwargs]]]]])

    :param template_name: The name of the template to render.
    :param query: A :py:class:`SelectQuery` instance to paginate.
    :param context_variable: The context variable name to use for the
        paginated object list.
    :param paginate_by: Number of objects per-page.
    :param page_var: The name of the ``GET`` argument which contains the
        page.
    :param check_bounds: Whether to check that the given page is a valid
        page. If ``check_bounds`` is ``True`` and an invalid page is
        specified, then a 404 will be returned.
    :param kwargs: Arbitrary key/value pairs to pass into the template
        context.

    Retrieve a paginated list of objects specified by the given query. The
    paginated object list will be dropped into the context using the given
    ``context_variable``, as well as metadata about the current page and
    total number of pages, and finally any arbitrary context data passed as
    keyword-arguments.

    The page is specified using the ``page`` ``GET`` argument, e.g.
    ``/my-object-list/?page=3`` would return the third page of objects.

    Example:

    .. code-block:: python

        @app.route('/blog/')
        def post_index():
            public_posts = (Post
                            .select()
                            .where(Post.published == True)
                            .order_by(Post.timestamp.desc()))

            return object_list(
                'post_index.html',
                query=public_posts,
                context_variable='post_list',
                paginate_by=10)

    The template will have the following context:

    * ``post_list``, which contains a list of up to 10 posts.
    * ``page``, which contains the current page based on the value of the
      ``page`` ``GET`` parameter.
    * ``pagination``, a :py:class:`PaginatedQuery` instance.

.. py:class:: PaginatedQuery(query_or_model, paginate_by[, page_var='page'[, check_bounds=False]])

    :param query_or_model: Either a :py:class:`Model` or a
        :py:class:`SelectQuery` instance containing the collection of
        records you wish to paginate.
    :param paginate_by: Number of objects per-page.
    :param page_var: The name of the ``GET`` argument which contains the
        page.
    :param check_bounds: Whether to check that the given page is a valid
        page. If ``check_bounds`` is ``True`` and an invalid page is
        specified, then a 404 will be returned.

    Helper class to perform pagination based on ``GET`` arguments.

    .. py:method:: get_page()

        Return the currently selected page, as indicated by the value of the
        ``page_var`` ``GET`` parameter. If no page is explicitly selected,
        then this method will return 1, indicating the first page.

    .. py:method:: get_page_count()

        Return the total number of possible pages.

    .. py:method:: get_object_list()

        Using the value of :py:meth:`~PaginatedQuery.get_page`, return the
        page of objects requested by the user. The return value is a
        :py:class:`SelectQuery` with the appropriate ``LIMIT`` and
        ``OFFSET`` clauses.

        If ``check_bounds`` was set to ``True`` and the requested page
        contains no objects, then a 404 will be raised.
peewee-3.17.7/docs/peewee/query_builder.rst000066400000000000000000000311741470346076600207150ustar00rootroot00000000000000
.. _query-builder:

Query Builder
=============

Peewee's high-level :py:class:`Model` and :py:class:`Field` APIs are built
upon lower-level :py:class:`Table` and :py:class:`Column` counterparts.
While these lower-level APIs are not documented in as much detail as their
high-level counterparts, this document will present an overview with
examples that should hopefully allow you to experiment.

We'll use the following schema:

.. code-block:: sql

    CREATE TABLE "person" (
        "id" INTEGER NOT NULL PRIMARY KEY,
        "first" TEXT NOT NULL,
        "last" TEXT NOT NULL);

    CREATE TABLE "note" (
        "id" INTEGER NOT NULL PRIMARY KEY,
        "person_id" INTEGER NOT NULL,
        "content" TEXT NOT NULL,
        "timestamp" DATETIME NOT NULL,
        FOREIGN KEY ("person_id") REFERENCES "person" ("id"));

    CREATE TABLE "reminder" (
        "id" INTEGER NOT NULL PRIMARY KEY,
        "note_id" INTEGER NOT NULL,
        "alarm" DATETIME NOT NULL,
        FOREIGN KEY ("note_id") REFERENCES "note" ("id"));

Declaring tables
----------------

There are two ways we can declare :py:class:`Table` objects for working with
these tables:

.. code-block:: python

    # Explicitly declare columns.
    Person = Table('person', ('id', 'first', 'last'))
    Note = Table('note', ('id', 'person_id', 'content', 'timestamp'))

    # Do not declare columns; they will be accessed using the magic ".c"
    # attribute.
    Reminder = Table('reminder')

Typically we will want to :py:meth:`~Table.bind` our tables to a database.
This saves us having to pass the database explicitly every time we wish to
execute a query on the table:

.. code-block:: python

    db = SqliteDatabase('my_app.db')
    Person = Person.bind(db)
    Note = Note.bind(db)
    Reminder = Reminder.bind(db)

Select queries
--------------

To select the first three notes and print their content, we can write:

.. code-block:: python

    query = Note.select().order_by(Note.timestamp).limit(3)
    for note_dict in query:
        print(note_dict['content'])

.. note::
    By default, rows will be returned as dictionaries. You can use the
    :py:meth:`~BaseQuery.tuples`, :py:meth:`~BaseQuery.namedtuples` or
    :py:meth:`~BaseQuery.objects` methods to specify a different container
    for the row data, if you wish.

Because we didn't specify any columns, all the columns we defined in the
note's :py:class:`Table` constructor will be selected. This won't work for
Reminder, as we didn't specify any columns at all.

To select all notes published in 2018 along with the name of the creator, we
will use :py:meth:`~BaseQuery.join`. We'll also request that rows be
returned as *namedtuple* objects:

.. code-block:: python

    query = (Note
             .select(Note.content, Note.timestamp, Person.first, Person.last)
             .join(Person, on=(Note.person_id == Person.id))
             .where(Note.timestamp >= datetime.date(2018, 1, 1))
             .order_by(Note.timestamp)
             .namedtuples())

    for row in query:
        print(row.timestamp, '-', row.content, '-', row.first, row.last)

Let's query for the most prolific people, that is, get the people who have
created the most notes. This introduces calling a SQL function (COUNT),
which is accomplished using the ``fn`` object:

.. code-block:: python

    name = Person.first.concat(' ').concat(Person.last)
    query = (Person
             .select(name.alias('name'), fn.COUNT(Note.id).alias('count'))
             .join(Note, JOIN.LEFT_OUTER, on=(Note.person_id == Person.id))
             .group_by(name)
             .order_by(fn.COUNT(Note.id).desc()))

    for row in query:
        print(row['name'], row['count'])

There are a couple things to note in the above query:

* We store an expression in a variable (``name``), then use it in the query.
* We call SQL functions using ``fn.<function_name>(...)``, passing arguments
  as if it were a normal Python function.
* The :py:meth:`~ColumnBase.alias` method is used to specify the name used
  for a column or calculation.
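Aggregate rows can also be filtered. Building on the query above, here is a
sketch that restricts the results with a ``HAVING`` clause (the
minimum-count threshold of two is arbitrary):

.. code-block:: python

    # Only report people who have created at least two notes.
    query = (Person
             .select(name.alias('name'), fn.COUNT(Note.id).alias('count'))
             .join(Note, JOIN.LEFT_OUTER, on=(Note.person_id == Person.id))
             .group_by(name)
             .having(fn.COUNT(Note.id) >= 2)
             .order_by(fn.COUNT(Note.id).desc()))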
As a more complex example, we'll generate a list of all people and the contents and timestamp of their most recently-published note. To do this, we will end up using the Note table twice in different contexts within the same query, which will require us to use a table alias. .. code-block:: python # Start with the query that calculates the timestamp of the most recent # note for each person. NA = Note.alias('na') max_note = (NA .select(NA.person_id, fn.MAX(NA.timestamp).alias('max_ts')) .group_by(NA.person_id) .alias('max_note')) # Now we'll select from the note table, joining on both the subquery and # on the person table to construct the result set. query = (Note .select(Note.content, Note.timestamp, Person.first, Person.last) .join(max_note, on=((max_note.c.person_id == Note.person_id) & (max_note.c.max_ts == Note.timestamp))) .join(Person, on=(Note.person_id == Person.id)) .order_by(Person.first, Person.last)) for row in query.namedtuples(): print(row.first, row.last, ':', row.timestamp, '-', row.content) In the join predicate for the join on the *max_note* subquery, we can reference columns in the subquery using the magical ".c" attribute. So, *max_note.c.max_ts* is translated into "the max_ts column value from the max_note subquery". We can also use the ".c" magic attribute to access columns on tables that do not explicitly define their columns, like we did with the Reminder table. Here's a simple query to get all reminders for today, along with their associated note content: .. code-block:: python today = datetime.date.today() tomorrow = today + datetime.timedelta(days=1) query = (Reminder .select(Reminder.c.alarm, Note.content) .join(Note, on=(Reminder.c.note_id == Note.id)) .where(Reminder.c.alarm.between(today, tomorrow)) .order_by(Reminder.c.alarm)) for row in query: print(row['alarm'], row['content']) .. note:: The ".c" attribute will not work on tables that explicitly define their columns, to prevent confusion. Insert queries -------------- Inserting data is straightforward. We can specify data to :py:meth:`~Table.insert` in two different ways (in both cases, the ID of the new row is returned): .. code-block:: python # Using keyword arguments: zaizee_id = Person.insert(first='zaizee', last='cat').execute() # Using column: value mappings: Note.insert({ Note.person_id: zaizee_id, Note.content: 'meeeeowwww', Note.timestamp: datetime.datetime.now()}).execute() It is easy to bulk-insert data, just pass in either: * A list of dictionaries (all must have the same keys/columns). * A list of tuples, if the columns are specified explicitly. Examples: .. code-block:: python people = [ {'first': 'Bob', 'last': 'Foo'}, {'first': 'Herb', 'last': 'Bar'}, {'first': 'Nuggie', 'last': 'Bar'}] # Inserting multiple rows returns the ID of the last-inserted row. last_id = Person.insert(people).execute() # We can also specify row tuples, so long as we tell Peewee which # columns the tuple values correspond to: people = [ ('Bob', 'Foo'), ('Herb', 'Bar'), ('Nuggie', 'Bar')] Person.insert(people, columns=[Person.first, Person.last]).execute() Update queries -------------- :py:meth:`~Table.update` queries accept either keyword arguments or a dictionary mapping column to value, just like :py:meth:`~Table.insert`. Examples: .. code-block:: python # "Bob" changed his last name from "Foo" to "Baze". nrows = (Person .update(last='Baze') .where((Person.first == 'Bob') & (Person.last == 'Foo')) .execute()) # Use dictionary mapping column to value. 
nrows = (Person .update({Person.last: 'Baze'}) .where((Person.first == 'Bob') & (Person.last == 'Foo')) .execute()) You can also use expressions as the value to perform an atomic update. Imagine we have a *PageView* table and we need to atomically increment the page-view count for some URL: .. code-block:: python # Do an atomic update: (PageView .update({PageView.count: PageView.count + 1}) .where(PageView.url == some_url) .execute()) Delete queries -------------- :py:meth:`~Table.delete` queries are simplest of all, as they do not accept any arguments: .. code-block:: python # Delete all notes created before 2018, returning number deleted. n = Note.delete().where(Note.timestamp < datetime.date(2018, 1, 1)).execute() Because DELETE (and UPDATE) queries do not support joins, we can use subqueries to delete rows based on values in related tables. For example, here is how you would delete all notes by anyone whose last name is "Foo": .. code-block:: python # Get the id of all people whose last name is "Foo". foo_people = Person.select(Person.id).where(Person.last == 'Foo') # Delete all notes by any person whose ID is in the previous query. Note.delete().where(Note.person_id.in_(foo_people)).execute() Query Objects ------------- One of the fundamental limitations of the abstractions provided by Peewee 2.x was the absence of a class that represented a structured query with no relation to a given model class. An example of this might be computing aggregate values over a subquery. For example, the :py:meth:`~SelectBase.count` method, which returns the count of rows in an arbitrary query, is implemented by wrapping the query: .. code-block:: sql SELECT COUNT(1) FROM (...) To accomplish this with Peewee, the implementation is written in this way: .. code-block:: python def count(query): # Select([source1, ... sourcen], [column1, ...columnn]) wrapped = Select(from_list=[query], columns=[fn.COUNT(SQL('1'))]) curs = wrapped.tuples().execute(db) return curs[0][0] # Return first column from first row of result. We can actually express this more concisely using the :py:meth:`~SelectBase.scalar` method, which is suitable for returning values from aggregate queries: .. code-block:: python def count(query): wrapped = Select(from_list=[query], columns=[fn.COUNT(SQL('1'))]) return wrapped.scalar(db) The :ref:`query_examples` document has a more complex example, in which we write a query for a facility with the highest number of available slots booked: The SQL we wish to express is: .. code-block:: sql SELECT facid, total FROM ( SELECT facid, SUM(slots) AS total, rank() OVER (order by SUM(slots) DESC) AS rank FROM bookings GROUP BY facid ) AS ranked WHERE rank = 1 We can express this fairly elegantly by using a plain :py:class:`Select` for the outer query: .. code-block:: python # Store rank expression in variable for readability. rank_expr = fn.rank().over(order_by=[fn.SUM(Booking.slots).desc()]) subq = (Booking .select(Booking.facility, fn.SUM(Booking.slots).alias('total'), rank_expr.alias('rank')) .group_by(Booking.facility)) # Use a plain "Select" to create outer query. query = (Select(columns=[subq.c.facid, subq.c.total]) .from_(subq) .where(subq.c.rank == 1) .tuples()) # Iterate over the resulting facility ID(s) and total(s): for facid, total in query.execute(db): print(facid, total) For another example, let's create a recursive common table expression to calculate the first 10 fibonacci numbers: .. 
code-block:: python

    base = Select(columns=(
        Value(1).alias('n'),
        Value(0).alias('fib_n'),
        Value(1).alias('next_fib_n'))).cte('fibonacci', recursive=True)

    n = (base.c.n + 1).alias('n')
    recursive_term = Select(columns=(
        n,
        base.c.next_fib_n,
        base.c.fib_n + base.c.next_fib_n)).from_(base).where(n < 10)

    fibonacci = base.union_all(recursive_term)
    query = fibonacci.select_from(fibonacci.c.n, fibonacci.c.fib_n)

    results = list(query.execute(db))

    # Generates the following result list:
    [{'fib_n': 0, 'n': 1},
     {'fib_n': 1, 'n': 2},
     {'fib_n': 1, 'n': 3},
     {'fib_n': 2, 'n': 4},
     {'fib_n': 3, 'n': 5},
     {'fib_n': 5, 'n': 6},
     {'fib_n': 8, 'n': 7},
     {'fib_n': 13, 'n': 8},
     {'fib_n': 21, 'n': 9},
     {'fib_n': 34, 'n': 10}]

More
----

For a description of the various classes used to describe a SQL AST, see the
query-builder API documentation. If you're interested in learning more, you
can also check out the `project source code
<https://github.com/coleifer/peewee>`_.
peewee-3.17.7/docs/peewee/query_examples.rst000066400000000000000000001253351470346076600211060ustar00rootroot00000000000000
.. _query_examples:

Query Examples
==============

These query examples are taken from the site `PostgreSQL Exercises
<https://pgexercises.com/>`_. A sample data-set can be found on that site's
getting started page.

Here is a visual representation of the schema used in these examples:

.. image:: schema-horizontal.png

Model Definitions
-----------------

To begin working with the data, we'll define the model classes that
correspond to the tables in the diagram.

.. note::
    In some cases we explicitly specify column names for a particular field.
    This is so our models are compatible with the database schema used for
    the postgres exercises.

.. code-block:: python

    from functools import partial
    from peewee import *

    db = PostgresqlDatabase('peewee_test')

    class BaseModel(Model):
        class Meta:
            database = db

    class Member(BaseModel):
        memid = AutoField()  # Auto-incrementing primary key.
        surname = CharField()
        firstname = CharField()
        address = CharField(max_length=300)
        zipcode = IntegerField()
        telephone = CharField()
        recommendedby = ForeignKeyField('self', backref='recommended',
                                        column_name='recommendedby',
                                        null=True)
        joindate = DateTimeField()

        class Meta:
            table_name = 'members'

    # Conveniently declare decimal fields suitable for storing currency.
    MoneyField = partial(DecimalField, decimal_places=2)

    class Facility(BaseModel):
        facid = AutoField()
        name = CharField()
        membercost = MoneyField()
        guestcost = MoneyField()
        initialoutlay = MoneyField()
        monthlymaintenance = MoneyField()

        class Meta:
            table_name = 'facilities'

    class Booking(BaseModel):
        bookid = AutoField()
        facility = ForeignKeyField(Facility, column_name='facid')
        member = ForeignKeyField(Member, column_name='memid')
        starttime = DateTimeField()
        slots = IntegerField()

        class Meta:
            table_name = 'bookings'

Schema Creation
---------------

If you downloaded the SQL file from the PostgreSQL Exercises site, then you
can load the data into a PostgreSQL database using the following commands::

    createdb peewee_test
    psql -U postgres -f clubdata.sql -d peewee_test -x -q

To create the schema using Peewee, without loading the sample data, you can
run the following:

.. code-block:: python

    # Assumes you have created the database "peewee_test" already.
    db.create_tables([Member, Facility, Booking])

Basic Exercises
---------------

This category deals with the basics of SQL. It covers select and where
clauses, case expressions, unions, and a few other odds and ends.

Retrieve everything
^^^^^^^^^^^^^^^^^^^

Retrieve all information from the facilities table.

..
code-block:: sql SELECT * FROM facilities .. code-block:: python # By default, when no fields are explicitly passed to select(), all fields # will be selected. query = Facility.select() Retrieve specific columns from a table ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Retrieve names of facilities and cost to members. .. code-block:: sql SELECT name, membercost FROM facilities; .. code-block:: python query = Facility.select(Facility.name, Facility.membercost) # To iterate: for facility in query: print(facility.name) Control which rows are retrieved ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Retrieve list of facilities that have a cost to members. .. code-block:: sql SELECT * FROM facilities WHERE membercost > 0 .. code-block:: python query = Facility.select().where(Facility.membercost > 0) Control which rows are retrieved - part 2 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Retrieve list of facilities that have a cost to members, and that fee is less than 1/50th of the monthly maintenance cost. Return id, name, cost and monthly-maintenance. .. code-block:: sql SELECT facid, name, membercost, monthlymaintenance FROM facilities WHERE membercost > 0 AND membercost < (monthlymaintenance / 50) .. code-block:: python query = (Facility .select(Facility.facid, Facility.name, Facility.membercost, Facility.monthlymaintenance) .where( (Facility.membercost > 0) & (Facility.membercost < (Facility.monthlymaintenance / 50)))) Basic string searches ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ How can you produce a list of all facilities with the word 'Tennis' in their name? .. code-block:: sql SELECT * FROM facilities WHERE name ILIKE '%tennis%'; .. code-block:: python query = Facility.select().where(Facility.name.contains('tennis')) # OR use the exponent operator. Note: you must include wildcards here: query = Facility.select().where(Facility.name ** '%tennis%') Matching against multiple possible values ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ How can you retrieve the details of facilities with ID 1 and 5? Try to do it without using the OR operator. .. code-block:: sql SELECT * FROM facilities WHERE facid IN (1, 5); .. code-block:: python query = Facility.select().where(Facility.facid.in_([1, 5])) # OR: query = Facility.select().where((Facility.facid == 1) | (Facility.facid == 5)) Classify results into buckets ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ How can you produce a list of facilities, with each labelled as 'cheap' or 'expensive' depending on if their monthly maintenance cost is more than $100? Return the name and monthly maintenance of the facilities in question. .. code-block:: sql SELECT name, CASE WHEN monthlymaintenance > 100 THEN 'expensive' ELSE 'cheap' END FROM facilities; .. code-block:: python cost = Case(None, [(Facility.monthlymaintenance > 100, 'expensive')], 'cheap') query = Facility.select(Facility.name, cost.alias('cost')) .. note:: See documentation :py:class:`Case` for more examples. Working with dates ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ How can you produce a list of members who joined after the start of September 2012? Return the memid, surname, firstname, and joindate of the members in question. .. code-block:: sql SELECT memid, surname, firstname, joindate FROM members WHERE joindate >= '2012-09-01'; .. 
code-block:: python

    query = (Member
             .select(Member.memid, Member.surname, Member.firstname,
                     Member.joindate)
             .where(Member.joindate >= datetime.date(2012, 9, 1)))

Removing duplicates, and ordering results
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

How can you produce an ordered list of the first 10 surnames in the members
table? The list must not contain duplicates.

.. code-block:: sql

    SELECT DISTINCT surname FROM members ORDER BY surname LIMIT 10;

.. code-block:: python

    query = (Member
             .select(Member.surname)
             .order_by(Member.surname)
             .limit(10)
             .distinct())

Combining results from multiple queries
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You, for some reason, want a combined list of all surnames and all facility
names.

.. code-block:: sql

    SELECT surname FROM members UNION SELECT name FROM facilities;

.. code-block:: python

    lhs = Member.select(Member.surname)
    rhs = Facility.select(Facility.name)
    query = lhs | rhs

Queries can be composed using the following operators:

* ``|`` - ``UNION``
* ``+`` - ``UNION ALL``
* ``&`` - ``INTERSECT``
* ``-`` - ``EXCEPT``

Simple aggregation
^^^^^^^^^^^^^^^^^^

You'd like to get the signup date of your last member. How can you retrieve
this information?

.. code-block:: sql

    SELECT MAX(joindate) FROM members;

.. code-block:: python

    query = Member.select(fn.MAX(Member.joindate))

    # To conveniently obtain a single scalar value, use "scalar()":
    # max_join_date = query.scalar()

More aggregation
^^^^^^^^^^^^^^^^

You'd like to get the first and last name of the last member(s) who signed
up - not just the date.

.. code-block:: sql

    SELECT firstname, surname, joindate FROM members
    WHERE joindate = (SELECT MAX(joindate) FROM members);

.. code-block:: python

    # Use "alias()" to reference the same table multiple times in a query.
    MemberAlias = Member.alias()
    subq = MemberAlias.select(fn.MAX(MemberAlias.joindate))
    query = (Member
             .select(Member.firstname, Member.surname, Member.joindate)
             .where(Member.joindate == subq))

Joins and Subqueries
--------------------

This category deals primarily with a foundational concept in relational
database systems: joining. Joining allows you to combine related information
from multiple tables to answer a question. This isn't just beneficial for
ease of querying: a lack of join capability encourages denormalisation of
data, which increases the complexity of keeping your data internally
consistent.

This topic covers inner, outer, and self joins, as well as spending a little
time on subqueries (queries within queries).

Retrieve the start times of members' bookings
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

How can you produce a list of the start times for bookings by members named
'David Farrell'?

.. code-block:: sql

    SELECT starttime FROM bookings
    INNER JOIN members ON (bookings.memid = members.memid)
    WHERE surname = 'Farrell' AND firstname = 'David';

.. code-block:: python

    query = (Booking
             .select(Booking.starttime)
             .join(Member)
             .where((Member.surname == 'Farrell') &
                    (Member.firstname == 'David')))

Work out the start times of bookings for tennis courts
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

How can you produce a list of the start times for bookings for tennis
courts, for the date '2012-09-21'? Return a list of start time and facility
name pairings, ordered by the time.

..
code-block:: sql SELECT starttime, name FROM bookings INNER JOIN facilities ON (bookings.facid = facilities.facid) WHERE date_trunc('day', starttime) = '2012-09-21':: date AND name ILIKE 'tennis%' ORDER BY starttime, name; .. code-block:: python query = (Booking .select(Booking.starttime, Facility.name) .join(Facility) .where( (fn.date_trunc('day', Booking.starttime) == datetime.date(2012, 9, 21)) & Facility.name.startswith('Tennis')) .order_by(Booking.starttime, Facility.name)) # To retrieve the joined facility's name when iterating: for booking in query: print(booking.starttime, booking.facility.name) Produce a list of all members who have recommended another member ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ How can you output a list of all members who have recommended another member? Ensure that there are no duplicates in the list, and that results are ordered by (surname, firstname). .. code-block:: sql SELECT DISTINCT m.firstname, m.surname FROM members AS m2 INNER JOIN members AS m ON (m.memid = m2.recommendedby) ORDER BY m.surname, m.firstname; .. code-block:: python MA = Member.alias() query = (Member .select(Member.firstname, Member.surname) .join(MA, on=(MA.recommendedby == Member.memid)) .order_by(Member.surname, Member.firstname)) Produce a list of all members, along with their recommender ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ How can you output a list of all members, including the individual who recommended them (if any)? Ensure that results are ordered by (surname, firstname). .. code-block:: sql SELECT m.firstname, m.surname, r.firstname, r.surname FROM members AS m LEFT OUTER JOIN members AS r ON (m.recommendedby = r.memid) ORDER BY m.surname, m.firstname .. code-block:: python MA = Member.alias() query = (Member .select(Member.firstname, Member.surname, MA.firstname, MA.surname) .join(MA, JOIN.LEFT_OUTER, on=(Member.recommendedby == MA.memid)) .order_by(Member.surname, Member.firstname)) # To display the recommender's name when iterating: for m in query: print(m.firstname, m.surname) if m.recommendedby: print(' ', m.recommendedby.firstname, m.recommendedby.surname) Produce a list of all members who have used a tennis court ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ How can you produce a list of all members who have used a tennis court? Include in your output the name of the court, and the name of the member formatted as a single column. Ensure no duplicate data, and order by the member name. .. code-block:: sql SELECT DISTINCT m.firstname || ' ' || m.surname AS member, f.name AS facility FROM members AS m INNER JOIN bookings AS b ON (m.memid = b.memid) INNER JOIN facilities AS f ON (b.facid = f.facid) WHERE f.name LIKE 'Tennis%' ORDER BY member, facility; .. code-block:: python fullname = Member.firstname + ' ' + Member.surname query = (Member .select(fullname.alias('member'), Facility.name.alias('facility')) .join(Booking) .join(Facility) .where(Facility.name.startswith('Tennis')) .order_by(fullname, Facility.name) .distinct()) Produce a list of costly bookings ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ How can you produce a list of bookings on the day of 2012-09-14 which will cost the member (or guest) more than $30? Remember that guests have different costs to members (the listed costs are per half-hour 'slot'), and the guest user is always ID 0. Include in your output the name of the facility, the name of the member formatted as a single column, and the cost. 
Order by descending cost, and do not use any subqueries. .. code-block:: sql SELECT m.firstname || ' ' || m.surname AS member, f.name AS facility, (CASE WHEN m.memid = 0 THEN f.guestcost * b.slots ELSE f.membercost * b.slots END) AS cost FROM members AS m INNER JOIN bookings AS b ON (m.memid = b.memid) INNER JOIN facilities AS f ON (b.facid = f.facid) WHERE (date_trunc('day', b.starttime) = '2012-09-14') AND ((m.memid = 0 AND b.slots * f.guestcost > 30) OR (m.memid > 0 AND b.slots * f.membercost > 30)) ORDER BY cost DESC; .. code-block:: python cost = Case(Member.memid, ( (0, Booking.slots * Facility.guestcost), ), (Booking.slots * Facility.membercost)) fullname = Member.firstname + ' ' + Member.surname query = (Member .select(fullname.alias('member'), Facility.name.alias('facility'), cost.alias('cost')) .join(Booking) .join(Facility) .where( (fn.date_trunc('day', Booking.starttime) == datetime.date(2012, 9, 14)) & (cost > 30)) .order_by(SQL('cost').desc())) # To iterate over the results, it might be easiest to use namedtuples: for row in query.namedtuples(): print(row.member, row.facility, row.cost) Produce a list of all members, along with their recommender, using no joins. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ How can you output a list of all members, including the individual who recommended them (if any), without using any joins? Ensure that there are no duplicates in the list, and that each firstname + surname pairing is formatted as a column and ordered. .. code-block:: sql SELECT DISTINCT m.firstname || ' ' || m.surname AS member, (SELECT r.firstname || ' ' || r.surname FROM cd.members AS r WHERE m.recommendedby = r.memid) AS recommended FROM members AS m ORDER BY member; .. code-block:: python MA = Member.alias() subq = (MA .select(MA.firstname + ' ' + MA.surname) .where(Member.recommendedby == MA.memid)) query = (Member .select(fullname.alias('member'), subq.alias('recommended')) .order_by(fullname)) Produce a list of costly bookings, using a subquery ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The "Produce a list of costly bookings" exercise contained some messy logic: we had to calculate the booking cost in both the WHERE clause and the CASE statement. Try to simplify this calculation using subqueries. .. code-block:: sql SELECT member, facility, cost from ( SELECT m.firstname || ' ' || m.surname as member, f.name as facility, CASE WHEN m.memid = 0 THEN b.slots * f.guestcost ELSE b.slots * f.membercost END AS cost FROM members AS m INNER JOIN bookings AS b ON m.memid = b.memid INNER JOIN facilities AS f ON b.facid = f.facid WHERE date_trunc('day', b.starttime) = '2012-09-14' ) as bookings WHERE cost > 30 ORDER BY cost DESC; .. code-block:: python cost = Case(Member.memid, ( (0, Booking.slots * Facility.guestcost), ), (Booking.slots * Facility.membercost)) iq = (Member .select(fullname.alias('member'), Facility.name.alias('facility'), cost.alias('cost')) .join(Booking) .join(Facility) .where(fn.date_trunc('day', Booking.starttime) == datetime.date(2012, 9, 14))) query = (Member .select(iq.c.member, iq.c.facility, iq.c.cost) .from_(iq) .where(iq.c.cost > 30) .order_by(SQL('cost').desc())) # To iterate, try using dicts: for row in query.dicts(): print(row['member'], row['facility'], row['cost']) Modifying Data -------------- Querying data is all well and good, but at some point you're probably going to want to put data into your database! This section deals with inserting, updating, and deleting information. 
Operations that alter your data like this are collectively known as Data Manipulation Language, or DML. In previous sections, we showed you the results of the queries you performed. Since modifications like the ones we're making in this section don't return any query results, we instead show you the updated content of the table you're supposed to be working on.
Insert some data into a table ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The club is adding a new facility - a spa. We need to add it into the facilities table. Use the following values: facid: 9, Name: 'Spa', membercost: 20, guestcost: 30, initialoutlay: 100000, monthlymaintenance: 800
.. code-block:: sql INSERT INTO "facilities" ("facid", "name", "membercost", "guestcost", "initialoutlay", "monthlymaintenance") VALUES (9, 'Spa', 20, 30, 100000, 800)
.. code-block:: python res = Facility.insert({ Facility.facid: 9, Facility.name: 'Spa', Facility.membercost: 20, Facility.guestcost: 30, Facility.initialoutlay: 100000, Facility.monthlymaintenance: 800}).execute() # OR: res = (Facility .insert(facid=9, name='Spa', membercost=20, guestcost=30, initialoutlay=100000, monthlymaintenance=800) .execute())
Insert multiple rows of data into a table ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In the previous exercise, you learned how to add a facility. Now you're going to add multiple facilities in one command. Use the following values: facid: 9, Name: 'Spa', membercost: 20, guestcost: 30, initialoutlay: 100000, monthlymaintenance: 800. facid: 10, Name: 'Squash Court 2', membercost: 3.5, guestcost: 17.5, initialoutlay: 5000, monthlymaintenance: 80.
.. code-block:: sql -- see above --
.. code-block:: python data = [ {'facid': 9, 'name': 'Spa', 'membercost': 20, 'guestcost': 30, 'initialoutlay': 100000, 'monthlymaintenance': 800}, {'facid': 10, 'name': 'Squash Court 2', 'membercost': 3.5, 'guestcost': 17.5, 'initialoutlay': 5000, 'monthlymaintenance': 80}] res = Facility.insert_many(data).execute()
Insert calculated data into a table ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Let's try adding the spa to the facilities table again. This time, though, we want to automatically generate the value for the next facid, rather than specifying it as a constant. Use the following values for everything else: Name: 'Spa', membercost: 20, guestcost: 30, initialoutlay: 100000, monthlymaintenance: 800.
.. code-block:: sql INSERT INTO "facilities" ("facid", "name", "membercost", "guestcost", "initialoutlay", "monthlymaintenance") SELECT (SELECT (MAX("facid") + 1) FROM "facilities") AS _, 'Spa', 20, 30, 100000, 800;
.. code-block:: python maxq = Facility.select(fn.MAX(Facility.facid) + 1) subq = Select(columns=(maxq, 'Spa', 20, 30, 100000, 800)) res = Facility.insert_from(subq, Facility._meta.sorted_fields).execute()
Update some existing data ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We made a mistake when entering the data for the second tennis court. The initial outlay was 10000 rather than 8000: you need to alter the data to fix the error.
.. code-block:: sql UPDATE facilities SET initialoutlay = 10000 WHERE name = 'Tennis Court 2';
.. code-block:: python res = (Facility .update({Facility.initialoutlay: 10000}) .where(Facility.name == 'Tennis Court 2') .execute()) # OR: res = (Facility .update(initialoutlay=10000) .where(Facility.name == 'Tennis Court 2') .execute())
Update multiple rows and columns at the same time ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We want to increase the price of the tennis courts for both members and guests.
Update the costs to be 6 for members, and 30 for guests. .. code-block:: sql UPDATE facilities SET membercost=6, guestcost=30 WHERE name ILIKE 'Tennis%'; .. code-block:: python nrows = (Facility .update(membercost=6, guestcost=30) .where(Facility.name.startswith('Tennis')) .execute()) Update a row based on the contents of another row ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ We want to alter the price of the second tennis court so that it costs 10% more than the first one. Try to do this without using constant values for the prices, so that we can reuse the statement if we want to. .. code-block:: sql UPDATE facilities SET membercost = (SELECT membercost * 1.1 FROM facilities WHERE facid = 0), guestcost = (SELECT guestcost * 1.1 FROM facilities WHERE facid = 0) WHERE facid = 1; -- OR -- WITH new_prices (nmc, ngc) AS ( SELECT membercost * 1.1, guestcost * 1.1 FROM facilities WHERE name = 'Tennis Court 1') UPDATE facilities SET membercost = new_prices.nmc, guestcost = new_prices.ngc FROM new_prices WHERE name = 'Tennis Court 2' .. code-block:: python sq1 = Facility.select(Facility.membercost * 1.1).where(Facility.facid == 0) sq2 = Facility.select(Facility.guestcost * 1.1).where(Facility.facid == 0) res = (Facility .update(membercost=sq1, guestcost=sq2) .where(Facility.facid == 1) .execute()) # OR: cte = (Facility .select(Facility.membercost * 1.1, Facility.guestcost * 1.1) .where(Facility.name == 'Tennis Court 1') .cte('new_prices', columns=('nmc', 'ngc'))) res = (Facility .update(membercost=SQL('new_prices.nmc'), guestcost=SQL('new_prices.ngc')) .with_cte(cte) .from_(cte) .where(Facility.name == 'Tennis Court 2') .execute()) Delete all bookings ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ As part of a clearout of our database, we want to delete all bookings from the bookings table. .. code-block:: sql DELETE FROM bookings; .. code-block:: python nrows = Booking.delete().execute() Delete a member from the cd.members table ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ We want to remove member 37, who has never made a booking, from our database. .. code-block:: sql DELETE FROM members WHERE memid = 37; .. code-block:: python nrows = Member.delete().where(Member.memid == 37).execute() Delete based on a subquery ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ How can we make that more general, to delete all members who have never made a booking? .. code-block:: sql DELETE FROM members WHERE NOT EXISTS ( SELECT * FROM bookings WHERE bookings.memid = members.memid); .. code-block:: python subq = Booking.select().where(Booking.member == Member.memid) nrows = Member.delete().where(~fn.EXISTS(subq)).execute() Aggregation ----------- Aggregation is one of those capabilities that really make you appreciate the power of relational database systems. It allows you to move beyond merely persisting your data, into the realm of asking truly interesting questions that can be used to inform decision making. This category covers aggregation at length, making use of standard grouping as well as more recent window functions. Count the number of facilities ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ For our first foray into aggregates, we're going to stick to something simple. We want to know how many facilities exist - simply produce a total count. .. code-block:: sql SELECT COUNT(facid) FROM facilities; .. 
code-block:: python query = Facility.select(fn.COUNT(Facility.facid)) count = query.scalar() # OR: count = Facility.select().count()
Count the number of expensive facilities ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Produce a count of the number of facilities that have a cost to guests of 10 or more.
.. code-block:: sql SELECT COUNT(facid) FROM facilities WHERE guestcost >= 10
.. code-block:: python query = Facility.select(fn.COUNT(Facility.facid)).where(Facility.guestcost >= 10) count = query.scalar() # OR: # count = Facility.select().where(Facility.guestcost >= 10).count()
Count the number of recommendations each member makes. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Produce a count of the number of recommendations each member has made. Order by member ID.
.. code-block:: sql SELECT recommendedby, COUNT(memid) FROM members WHERE recommendedby IS NOT NULL GROUP BY recommendedby ORDER BY recommendedby
.. code-block:: python query = (Member .select(Member.recommendedby, fn.COUNT(Member.memid)) .where(Member.recommendedby.is_null(False)) .group_by(Member.recommendedby) .order_by(Member.recommendedby))
List the total slots booked per facility ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Produce a list of the total number of slots booked per facility. For now, just produce an output table consisting of facility id and slots, sorted by facility id.
.. code-block:: sql SELECT facid, SUM(slots) FROM bookings GROUP BY facid ORDER BY facid;
.. code-block:: python query = (Booking .select(Booking.facid, fn.SUM(Booking.slots)) .group_by(Booking.facid) .order_by(Booking.facid))
List the total slots booked per facility in a given month ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Produce a list of the total number of slots booked per facility in the month of September 2012. Produce an output table consisting of facility id and slots, sorted by the number of slots.
.. code-block:: sql SELECT facid, SUM(slots) FROM bookings WHERE (date_trunc('month', starttime) = '2012-09-01'::date) GROUP BY facid ORDER BY SUM(slots)
.. code-block:: python query = (Booking .select(Booking.facility, fn.SUM(Booking.slots)) .where(fn.date_trunc('month', Booking.starttime) == datetime.date(2012, 9, 1)) .group_by(Booking.facility) .order_by(fn.SUM(Booking.slots)))
List the total slots booked per facility per month ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Produce a list of the total number of slots booked per facility per month in the year of 2012. Produce an output table consisting of facility id and slots, sorted by the id and month.
.. code-block:: sql SELECT facid, date_part('month', starttime), SUM(slots) FROM bookings WHERE date_part('year', starttime) = 2012 GROUP BY facid, date_part('month', starttime) ORDER BY facid, date_part('month', starttime)
.. code-block:: python month = fn.date_part('month', Booking.starttime) query = (Booking .select(Booking.facility, month, fn.SUM(Booking.slots)) .where(fn.date_part('year', Booking.starttime) == 2012) .group_by(Booking.facility, month) .order_by(Booking.facility, month))
Find the count of members who have made at least one booking ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Find the total number of members who have made at least one booking.
.. code-block:: sql SELECT COUNT(DISTINCT memid) FROM bookings -- OR -- SELECT COUNT(1) FROM (SELECT DISTINCT memid FROM bookings) AS _ ..
code-block:: python query = Booking.select(fn.COUNT(Booking.member.distinct())) # OR: query = Booking.select(Booking.member).distinct() count = query.count() # count() wraps in SELECT COUNT(1) FROM (...)
List facilities with more than 1000 slots booked ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Produce a list of facilities with more than 1000 slots booked. Produce an output table consisting of facility id and total slots, sorted by facility id.
.. code-block:: sql SELECT facid, SUM(slots) FROM bookings GROUP BY facid HAVING SUM(slots) > 1000 ORDER BY facid;
.. code-block:: python query = (Booking .select(Booking.facility, fn.SUM(Booking.slots)) .group_by(Booking.facility) .having(fn.SUM(Booking.slots) > 1000) .order_by(Booking.facility))
Find the total revenue of each facility ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Produce a list of facilities along with their total revenue. The output table should consist of facility name and revenue, sorted by revenue. Remember that there's a different cost for guests and members!
.. code-block:: sql SELECT f.name, SUM(b.slots * ( CASE WHEN b.memid = 0 THEN f.guestcost ELSE f.membercost END)) AS revenue FROM bookings AS b INNER JOIN facilities AS f ON b.facid = f.facid GROUP BY f.name ORDER BY revenue;
.. code-block:: python revenue = fn.SUM(Booking.slots * Case(None, ( (Booking.member == 0, Facility.guestcost), ), Facility.membercost)) query = (Facility .select(Facility.name, revenue.alias('revenue')) .join(Booking) .group_by(Facility.name) .order_by(SQL('revenue')))
Find facilities with a total revenue less than 1000 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Produce a list of facilities with a total revenue less than 1000. Produce an output table consisting of facility name and revenue, sorted by revenue. Remember that there's a different cost for guests and members!
.. code-block:: sql SELECT f.name, SUM(b.slots * ( CASE WHEN b.memid = 0 THEN f.guestcost ELSE f.membercost END)) AS revenue FROM bookings AS b INNER JOIN facilities AS f ON b.facid = f.facid GROUP BY f.name HAVING SUM(b.slots * ...) < 1000 ORDER BY revenue;
.. code-block:: python # Same definition as previous example. revenue = fn.SUM(Booking.slots * Case(None, ( (Booking.member == 0, Facility.guestcost), ), Facility.membercost)) query = (Facility .select(Facility.name, revenue.alias('revenue')) .join(Booking) .group_by(Facility.name) .having(revenue < 1000) .order_by(SQL('revenue')))
Output the facility id that has the highest number of slots booked ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Output the facility id that has the highest number of slots booked.
.. code-block:: sql SELECT facid, SUM(slots) FROM bookings GROUP BY facid ORDER BY SUM(slots) DESC LIMIT 1
.. code-block:: python query = (Booking .select(Booking.facility, fn.SUM(Booking.slots)) .group_by(Booking.facility) .order_by(fn.SUM(Booking.slots).desc()) .limit(1)) # Retrieve multiple scalar values by calling scalar() with as_tuple=True. facid, nslots = query.scalar(as_tuple=True)
List the total slots booked per facility per month, part 2 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Produce a list of the total number of slots booked per facility per month in the year of 2012. In this version, include output rows containing totals for all months per facility, and a total for all months for all facilities. The output table should consist of facility id, month and slots, sorted by the id and month.
When calculating the aggregated values for all months and all facids, return null values in the month and facid columns. Postgres ONLY.
.. code-block:: sql SELECT facid, date_part('month', starttime), SUM(slots) FROM bookings WHERE date_part('year', starttime) = 2012 GROUP BY ROLLUP(facid, date_part('month', starttime)) ORDER BY facid, date_part('month', starttime)
.. code-block:: python month = fn.date_part('month', Booking.starttime) query = (Booking .select(Booking.facility, month.alias('month'), fn.SUM(Booking.slots)) .where(fn.date_part('year', Booking.starttime) == 2012) .group_by(fn.ROLLUP(Booking.facility, month)) .order_by(Booking.facility, month))
List the total hours booked per named facility ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Produce a list of the total number of hours booked per facility, remembering that a slot lasts half an hour. The output table should consist of the facility id, name, and hours booked, sorted by facility id.
.. code-block:: sql SELECT f.facid, f.name, SUM(b.slots) * .5 FROM facilities AS f INNER JOIN bookings AS b ON (f.facid = b.facid) GROUP BY f.facid, f.name ORDER BY f.facid
.. code-block:: python query = (Facility .select(Facility.facid, Facility.name, fn.SUM(Booking.slots) * .5) .join(Booking) .group_by(Facility.facid, Facility.name) .order_by(Facility.facid))
List each member's first booking after September 1st 2012 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Produce a list of each member name, id, and their first booking after September 1st 2012. Order by member ID.
.. code-block:: sql SELECT m.surname, m.firstname, m.memid, MIN(b.starttime) AS starttime FROM members AS m INNER JOIN bookings AS b ON b.memid = m.memid WHERE starttime >= '2012-09-01' GROUP BY m.surname, m.firstname, m.memid ORDER BY m.memid;
.. code-block:: python query = (Member .select(Member.surname, Member.firstname, Member.memid, fn.MIN(Booking.starttime).alias('starttime')) .join(Booking) .where(Booking.starttime >= datetime.date(2012, 9, 1)) .group_by(Member.surname, Member.firstname, Member.memid) .order_by(Member.memid))
Produce a list of member names, with each row containing the total member count ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Produce a list of member names, with each row containing the total member count. Order by join date. Postgres ONLY (as written).
.. code-block:: sql SELECT COUNT(*) OVER(), firstname, surname FROM members ORDER BY joindate
.. code-block:: python query = (Member .select(fn.COUNT(Member.memid).over(), Member.firstname, Member.surname) .order_by(Member.joindate))
Produce a numbered list of members ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Produce a monotonically increasing numbered list of members, ordered by their date of joining. Remember that member IDs are not guaranteed to be sequential. Postgres ONLY (as written).
.. code-block:: sql SELECT row_number() OVER (ORDER BY joindate), firstname, surname FROM members ORDER BY joindate;
.. code-block:: python query = (Member .select(fn.row_number().over(order_by=[Member.joindate]), Member.firstname, Member.surname) .order_by(Member.joindate))
Output the facility id that has the highest number of slots booked, again ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Output the facility id that has the highest number of slots booked. Ensure that in the event of a tie, all tying results get output. Postgres ONLY (as written). ..
code-block:: sql SELECT facid, total FROM ( SELECT facid, SUM(slots) AS total, rank() OVER (order by SUM(slots) DESC) AS rank FROM bookings GROUP BY facid ) AS ranked WHERE rank = 1 .. code-block:: python rank = fn.rank().over(order_by=[fn.SUM(Booking.slots).desc()]) subq = (Booking .select(Booking.facility, fn.SUM(Booking.slots).alias('total'), rank.alias('rank')) .group_by(Booking.facility)) # Here we use a plain Select() to create our query. query = (Select(columns=[subq.c.facid, subq.c.total]) .from_(subq) .where(subq.c.rank == 1) .bind(db)) # We must bind() it to the database. # To iterate over the query results: for facid, total in query.tuples(): print(facid, total) Rank members by (rounded) hours used ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Produce a list of members, along with the number of hours they've booked in facilities, rounded to the nearest ten hours. Rank them by this rounded figure, producing output of first name, surname, rounded hours, rank. Sort by rank, surname, and first name. Postgres ONLY (as written). .. code-block:: sql SELECT firstname, surname, ((SUM(bks.slots)+10)/20)*10 as hours, rank() over (order by ((sum(bks.slots)+10)/20)*10 desc) as rank FROM members AS mems INNER JOIN bookings AS bks ON mems.memid = bks.memid GROUP BY mems.memid ORDER BY rank, surname, firstname; .. code-block:: python hours = ((fn.SUM(Booking.slots) + 10) / 20) * 10 query = (Member .select(Member.firstname, Member.surname, hours.alias('hours'), fn.rank().over(order_by=[hours.desc()]).alias('rank')) .join(Booking) .group_by(Member.memid) .order_by(SQL('rank'), Member.surname, Member.firstname)) Find the top three revenue generating facilities ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Produce a list of the top three revenue generating facilities (including ties). Output facility name and rank, sorted by rank and facility name. Postgres ONLY (as written). .. code-block:: sql SELECT name, rank FROM ( SELECT f.name, RANK() OVER (ORDER BY SUM( CASE WHEN memid = 0 THEN slots * f.guestcost ELSE slots * f.membercost END) DESC) AS rank FROM bookings INNER JOIN facilities AS f ON bookings.facid = f.facid GROUP BY f.name) AS subq WHERE rank <= 3 ORDER BY rank; .. code-block:: python total_cost = fn.SUM(Case(None, ( (Booking.member == 0, Booking.slots * Facility.guestcost), ), (Booking.slots * Facility.membercost))) subq = (Facility .select(Facility.name, fn.RANK().over(order_by=[total_cost.desc()]).alias('rank')) .join(Booking) .group_by(Facility.name)) query = (Select(columns=[subq.c.name, subq.c.rank]) .from_(subq) .where(subq.c.rank <= 3) .order_by(subq.c.rank) .bind(db)) # Here again we used plain Select, and call bind(). Classify facilities by value ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Classify facilities into equally sized groups of high, average, and low based on their revenue. Order by classification and facility name. Postgres ONLY (as written). .. code-block:: sql SELECT name, CASE class WHEN 1 THEN 'high' WHEN 2 THEN 'average' ELSE 'low' END FROM ( SELECT f.name, ntile(3) OVER (ORDER BY SUM( CASE WHEN memid = 0 THEN slots * f.guestcost ELSE slots * f.membercost END) DESC) AS class FROM bookings INNER JOIN facilities AS f ON bookings.facid = f.facid GROUP BY f.name ) AS subq ORDER BY class, name; .. 
code-block:: python cost = fn.SUM(Case(None, ( (Booking.member == 0, Booking.slots * Facility.guestcost), ), (Booking.slots * Facility.membercost))) subq = (Facility .select(Facility.name, fn.NTILE(3).over(order_by=[cost.desc()]).alias('klass')) .join(Booking) .group_by(Facility.name)) klass_case = Case(subq.c.klass, [(1, 'high'), (2, 'average')], 'low') query = (Select(columns=[subq.c.name, klass_case]) .from_(subq) .order_by(subq.c.klass, subq.c.name) .bind(db))
Recursion --------- Common Table Expressions allow us to, effectively, create our own temporary tables for the duration of a query - they're largely a convenience to help us make more readable SQL. Using the WITH RECURSIVE modifier, however, it's possible for us to create recursive queries. This is enormously advantageous for working with tree and graph-structured data - imagine retrieving all of the relations of a graph node to a given depth, for example.
Find the upward recommendation chain for member ID 27 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Find the upward recommendation chain for member ID 27: that is, the member who recommended them, and the member who recommended that member, and so on. Return member ID, first name, and surname. Order by descending member id.
.. code-block:: sql WITH RECURSIVE recommenders(recommender) AS ( SELECT recommendedby FROM members WHERE memid = 27 UNION ALL SELECT mems.recommendedby FROM recommenders recs INNER JOIN members AS mems ON mems.memid = recs.recommender ) SELECT recs.recommender, mems.firstname, mems.surname FROM recommenders AS recs INNER JOIN members AS mems ON recs.recommender = mems.memid ORDER BY memid DESC;
.. code-block:: python # Base-case of recursive CTE. Get member recommender where memid=27. base = (Member .select(Member.recommendedby) .where(Member.memid == 27) .cte('recommenders', recursive=True, columns=('recommender',))) # Recursive term of CTE. Get recommender of previous recommender. MA = Member.alias() recursive = (MA .select(MA.recommendedby) .join(base, on=(MA.memid == base.c.recommender))) # Combine the base-case with the recursive term. cte = base.union_all(recursive) # Select from the recursive CTE, joining on member to get name info. query = (cte .select_from(cte.c.recommender, Member.firstname, Member.surname) .join(Member, on=(cte.c.recommender == Member.memid)) .order_by(Member.memid.desc())) peewee-3.17.7/docs/peewee/query_operators.rst000066400000000000000000000406511470346076600213050ustar00rootroot00000000000000.. _query-operators:
Query operators =============== The following types of comparisons are supported by peewee:
================ ======================================= Comparison Meaning ================ ======================================= ``==`` x equals y ``<`` x is less than y ``<=`` x is less than or equal to y ``>`` x is greater than y ``>=`` x is greater than or equal to y ``!=`` x is not equal to y ``<<`` x IN y, where y is a list or query ``>>`` x IS y, where y is None/NULL ``%`` x LIKE y where y may contain wildcards ``**`` x ILIKE y where y may contain wildcards ``^`` x XOR y ``~`` Unary negation (e.g., NOT x) ================ =======================================
Because I ran out of operators to override, there are some additional query operations available as methods:
======================= =============================================== Method Meaning ======================= =============================================== ``.in_(value)`` IN lookup (identical to ``<<``). ``.not_in(value)`` NOT IN lookup.
``.is_null(is_null)`` IS NULL or IS NOT NULL. Accepts boolean param. ``.contains(substr)`` Wild-card search for substring. ``.startswith(prefix)`` Search for values beginning with ``prefix``. ``.endswith(suffix)`` Search for values ending with ``suffix``. ``.between(low, high)`` Search where ``low <= value <= high``. ``.regexp(exp)`` Regular expression match (case-sensitive). ``.iregexp(exp)`` Regular expression match (case-insensitive). ``.bin_and(value)`` Binary AND. ``.bin_or(value)`` Binary OR. ``.concat(other)`` Concatenate two strings or objects using ``||``. ``.distinct()`` Mark column for DISTINCT selection. ``.collate(collation)`` Specify column with the given collation. ``.cast(type)`` Cast the value of the column to the given type. ======================= ===============================================
To combine clauses using logical operators, use:
================ ==================== ====================================================== Operator Meaning Example ================ ==================== ====================================================== ``&`` AND ``(User.is_active == True) & (User.is_admin == True)`` ``|`` (pipe) OR ``(User.is_admin) | (User.is_superuser)`` ``~`` NOT (unary negation) ``~(User.username.contains('admin'))`` ================ ==================== ======================================================
Here is how you might use some of these query operators:
.. code-block:: python # Find the user whose username is "charlie". User.select().where(User.username == 'charlie') # Find the users whose username is in [charlie, huey, mickey] User.select().where(User.username.in_(['charlie', 'huey', 'mickey'])) # Find users whose salary is between 50k and 60k (inclusive). Employee.select().where(Employee.salary.between(50000, 60000)) Employee.select().where(Employee.name.startswith('C')) Blog.select().where(Blog.title.contains(search_string))
Here is how you might combine expressions. Comparisons can be arbitrarily complex.
.. note:: Note that the actual comparisons are wrapped in parentheses; Python's operator precedence requires it.
.. code-block:: python # Find any users who are active administrators. User.select().where( (User.is_admin == True) & (User.is_active == True)) # Find any users who are either administrators or super-users. User.select().where( (User.is_admin == True) | (User.is_superuser == True)) # Alternatively, use the boolean values directly. Here we query users who # are admins and NOT superusers. User.select().where(User.is_admin & ~User.is_superuser) # Find any Tweets by users who are not admins (NOT IN). admins = User.select().where(User.is_admin == True) non_admin_tweets = Tweet.select().where(Tweet.user.not_in(admins)) # Find any users who are not my friends (strangers). friends = User.select().where(User.username.in_(['charlie', 'huey', 'mickey'])) strangers = User.select().where(User.id.not_in(friends))
.. warning:: Although you may be tempted to use python's ``in``, ``and``, ``or``, ``is``, and ``not`` operators in your query expressions, these **will not work.** The return value of an ``in`` expression is always coerced to a boolean value. Similarly, ``and``, ``or`` and ``not`` all treat their arguments as boolean values and cannot be overloaded. So just remember: * Use ``.in_()`` and ``.not_in()`` instead of ``in`` and ``not in`` * Use ``&`` instead of ``and`` * Use ``|`` instead of ``or`` * Use ``~`` instead of ``not`` * Use ``.is_null()`` instead of ``is None`` or ``== None``.
* Use ``==`` and ``!=`` for comparing against ``True`` and ``False``, or you may use the implicit value of the expression. * **Don't forget to wrap your comparisons in parentheses when using logical operators.** For more examples, see the :ref:`expressions` section. .. note:: **LIKE and ILIKE with SQLite** Because SQLite's ``LIKE`` operation is case-insensitive by default, peewee will use the SQLite ``GLOB`` operation for case-sensitive searches. The glob operation uses asterisks for wildcards as opposed to the usual percent-sign. If you are using SQLite and want case-sensitive partial string matching, remember to use asterisks for the wildcard. Three valued logic ------------------ Because of the way SQL handles ``NULL``, there are some special operations available for expressing: * ``IS NULL`` * ``IS NOT NULL`` * ``IN`` * ``NOT IN`` While it would be possible to use the ``IS NULL`` and ``IN`` operators with the negation operator (``~``), sometimes to get the correct semantics you will need to explicitly use ``IS NOT NULL`` and ``NOT IN``. The simplest way to use ``IS NULL`` and ``IN`` is to use the operator overloads: .. code-block:: python # Get all User objects whose last login is NULL. User.select().where(User.last_login >> None) # Get users whose username is in the given list. usernames = ['charlie', 'huey', 'mickey'] User.select().where(User.username << usernames) If you don't like operator overloads, you can call the Field methods instead: .. code-block:: python # Get all User objects whose last login is NULL. User.select().where(User.last_login.is_null(True)) # Get users whose username is in the given list. usernames = ['charlie', 'huey', 'mickey'] User.select().where(User.username.in_(usernames)) To negate the above queries, you can use unary negation, but for the correct semantics you may need to use the special ``IS NOT`` and ``NOT IN`` operators: .. code-block:: python # Get all User objects whose last login is *NOT* NULL. User.select().where(User.last_login.is_null(False)) # Using unary negation instead. User.select().where(~(User.last_login >> None)) # Get users whose username is *NOT* in the given list. usernames = ['charlie', 'huey', 'mickey'] User.select().where(User.username.not_in(usernames)) # Using unary negation instead. usernames = ['charlie', 'huey', 'mickey'] User.select().where(~(User.username << usernames)) .. _custom-operators: Adding user-defined operators ----------------------------- Because I ran out of python operators to overload, there are some missing operators in peewee, for instance ``modulo``. If you find that you need to support an operator that is not in the table above, it is very easy to add your own. Here is how you might add support for ``modulo`` in SQLite: .. code-block:: python from peewee import * from peewee import Expression # The building block for expressions. def mod(lhs, rhs): # Note: this works with Sqlite, but some drivers may use string- # formatting before sending the query to the database, so you may # need to use '%%' instead here. return Expression(lhs, '%', rhs) Now you can use these custom operators to build richer queries: .. code-block:: python # Users with even ids. User.select().where(mod(User.id, 2) == 0) For more examples check out the source to the ``playhouse.postgresql_ext`` module, as it contains numerous operators specific to postgresql's hstore. .. _expressions: Expressions ----------- Peewee is designed to provide a simple, expressive, and pythonic way of constructing SQL queries. 
This section will provide a quick overview of some common types of expressions. There are two primary types of objects that can be composed to create expressions: * :py:class:`Field` instances * SQL aggregations and functions using :py:class:`fn` We will assume a simple "User" model with fields for username and other things. It looks like this:
.. code-block:: python class User(Model): username = CharField() is_admin = BooleanField() is_active = BooleanField() last_login = DateTimeField() login_count = IntegerField() failed_logins = IntegerField()
Comparisons use the :ref:`query-operators`:
.. code-block:: python # username is equal to 'charlie' User.username == 'charlie' # user has logged in less than 5 times User.login_count < 5
Comparisons can be combined using **bitwise** *and* and *or*. Operator precedence is controlled by python and comparisons can be nested to an arbitrary depth:
.. code-block:: python # User is both an admin and has logged in today (User.is_admin == True) & (User.last_login >= today) # User's username is either charlie or charles (User.username == 'charlie') | (User.username == 'charles') # User is active and not a superuser. (User.is_active & ~User.is_superuser)
Comparisons can be used with functions as well:
.. code-block:: python # user's username starts with a 'g' or a 'G': fn.Lower(fn.Substr(User.username, 1, 1)) == 'g'
We can do some fairly interesting things, as expressions can be compared against other expressions. Expressions also support arithmetic operations:
.. code-block:: python # users who entered the incorrect password more than half the time and have # logged in at least 10 times (User.failed_logins > (User.login_count * .5)) & (User.login_count > 10)
Expressions allow us to do *atomic updates*:
.. code-block:: python # when a user logs in we want to increment their login count: User.update(login_count=User.login_count + 1).where(User.id == user_id)
Expressions can be used in all parts of a query, so experiment!
Row values ^^^^^^^^^^ Many databases support `row values <https://www.sqlite.org/rowvalue.html>`_, which are similar to Python `tuple` objects. In Peewee, it is possible to use row-values in expressions via :py:class:`Tuple`. For example,
.. code-block:: python # If for some reason your schema stores dates in separate columns ("year", # "month" and "day"), you can use row-values to find all rows that happened # in a given month: Tuple(Event.year, Event.month) == (2019, 1)
The more common use for row-values is to compare against multiple columns from a subquery in a single expression. There are other ways to express these types of queries, but row-values may offer a concise and readable approach. For example, assume we have a table "EventLog" which contains an event type, an event source, and some metadata. We also have an "IncidentLog", which has incident type, incident source, and metadata columns. We can use row-values to correlate incidents with certain events:
.. code-block:: python class EventLog(Model): event_type = TextField() source = TextField() data = TextField() timestamp = TimestampField() class IncidentLog(Model): incident_type = TextField() source = TextField() traceback = TextField() timestamp = TimestampField() # Get a list of all the incident types and sources that have occurred today. incidents = (IncidentLog .select(IncidentLog.incident_type, IncidentLog.source) .where(IncidentLog.timestamp >= datetime.date.today())) # Find all events that correlate with the type and source of the # incidents that occurred today.
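# Note: the Tuple(...) comparison in the query below should render roughly
# as a row-value IN expression: WHERE ((event_type, source) IN
# (SELECT incident_type, source FROM ...)). The subquery must select the
# same number of columns as the row-value it is compared against.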
events = (EventLog .select() .where(Tuple(EventLog.event_type, EventLog.source).in_(incidents)) .order_by(EventLog.timestamp))
Other ways to express this type of query would be to use a :ref:`join ` or to :ref:`join on a subquery `. The above example is there just to give you an idea of how :py:class:`Tuple` might be used. You can also use row-values to update multiple columns in a table, when the new data is derived from a subquery. For an example, see `here `_.
SQL Functions ------------- SQL functions, like ``COUNT()`` or ``SUM()``, can be expressed using the :py:func:`fn` helper:
.. code-block:: python # Get all users and the number of tweets they've authored. Sort the # results from most tweets -> fewest tweets. query = (User .select(User, fn.COUNT(Tweet.id).alias('tweet_count')) .join(Tweet, JOIN.LEFT_OUTER) .group_by(User) .order_by(fn.COUNT(Tweet.id).desc())) for user in query: print('%s -- %s tweets' % (user.username, user.tweet_count))
The ``fn`` helper exposes any SQL function as if it were a method. The parameters can be fields, values, subqueries, or even nested functions.
Nesting function calls ^^^^^^^^^^^^^^^^^^^^^^ Suppose you want to get a list of all users whose username begins with *a*. There are a couple of ways to do this, but one method might be to use some SQL functions like *LOWER* and *SUBSTR*. To use arbitrary SQL functions, use the special :py:func:`fn` object to construct queries:
.. code-block:: python # Select the user's id, username and the first letter of their username, lower-cased first_letter = fn.LOWER(fn.SUBSTR(User.username, 1, 1)) query = User.select(User, first_letter.alias('first_letter')) # Alternatively we could select only users whose username begins with 'a' a_users = User.select().where(first_letter == 'a') >>> for user in a_users: ... print(user.username)
SQL Helper ---------- There are times when you may want to simply pass in some arbitrary SQL. You can do this using the special :py:class:`SQL` class. One use-case is when referencing an alias:
.. code-block:: python # We'll query the user table and annotate it with a count of tweets for # the given user query = (User .select(User, fn.Count(Tweet.id).alias('ct')) .join(Tweet) .group_by(User)) # Now we will order by the count, which was aliased to "ct" query = query.order_by(SQL('ct')) # You could, of course, also write this as: query = query.order_by(fn.COUNT(Tweet.id))
There are two ways to execute hand-crafted SQL statements with peewee: 1. :py:meth:`Database.execute_sql` for executing any type of query 2. :py:class:`RawQuery` for executing ``SELECT`` queries and returning model instances.
Security and SQL Injection -------------------------- By default peewee will parameterize queries, so any parameters passed in by the user will be escaped. The only exception to this rule is if you are writing a raw SQL query or are passing in a ``SQL`` object which may contain untrusted data. To mitigate this, ensure that any user-defined data is passed in as a query parameter and not part of the actual SQL query:
.. code-block:: python # Bad! DO NOT DO THIS! query = MyModel.raw('SELECT * FROM my_table WHERE data = %s' % (user_data,)) # Good. `user_data` will be treated as a parameter to the query. query = MyModel.raw('SELECT * FROM my_table WHERE data = %s', user_data) # Bad! DO NOT DO THIS! query = MyModel.select().where(SQL('Some SQL expression %s' % user_data)) # Good. `user_data` will be treated as a parameter. query = MyModel.select().where(SQL('Some SQL expression %s', user_data)) ..
note:: MySQL and Postgresql use ``'%s'`` to denote parameters. SQLite, on the other hand, uses ``'?'``. Be sure to use the character appropriate to your database. You can also find this parameter by checking :py:attr:`Database.param`. peewee-3.17.7/docs/peewee/querying.rst000066400000000000000000002102601470346076600177000ustar00rootroot00000000000000.. _querying: Querying ======== This section will cover the basic CRUD operations commonly performed on a relational database: * :py:meth:`Model.create`, for executing *INSERT* queries. * :py:meth:`Model.save` and :py:meth:`Model.update`, for executing *UPDATE* queries. * :py:meth:`Model.delete_instance` and :py:meth:`Model.delete`, for executing *DELETE* queries. * :py:meth:`Model.select`, for executing *SELECT* queries. .. note:: There is also a large collection of example queries taken from the `Postgresql Exercises `_ website. Examples are listed on the :ref:`query examples ` document. Creating a new record --------------------- You can use :py:meth:`Model.create` to create a new model instance. This method accepts keyword arguments, where the keys correspond to the names of the model's fields. A new instance is returned and a row is added to the table. .. code-block:: pycon >>> User.create(username='Charlie') <__main__.User object at 0x2529350> This will *INSERT* a new row into the database. The primary key will automatically be retrieved and stored on the model instance. Alternatively, you can build up a model instance programmatically and then call :py:meth:`~Model.save`: .. code-block:: pycon >>> user = User(username='Charlie') >>> user.save() # save() returns the number of rows modified. 1 >>> user.id 1 >>> huey = User() >>> huey.username = 'Huey' >>> huey.save() 1 >>> huey.id 2 When a model has a foreign key, you can directly assign a model instance to the foreign key field when creating a new record. .. code-block:: pycon >>> tweet = Tweet.create(user=huey, message='Hello!') You can also use the value of the related object's primary key: .. code-block:: pycon >>> tweet = Tweet.create(user=2, message='Hello again!') If you simply wish to insert data and do not need to create a model instance, you can use :py:meth:`Model.insert`: .. code-block:: pycon >>> User.insert(username='Mickey').execute() 3 After executing the insert query, the primary key of the new row is returned. .. note:: There are several ways you can speed up bulk insert operations. Check out the :ref:`bulk_inserts` recipe section for more information. .. _bulk_inserts: Bulk inserts ------------ There are a couple of ways you can load lots of data quickly. The naive approach is to simply call :py:meth:`Model.create` in a loop: .. code-block:: python data_source = [ {'field1': 'val1-1', 'field2': 'val1-2'}, {'field1': 'val2-1', 'field2': 'val2-2'}, # ... ] for data_dict in data_source: MyModel.create(**data_dict) The above approach is slow for a couple of reasons: 1. If you are not wrapping the loop in a transaction then each call to :py:meth:`~Model.create` happens in its own transaction. That is going to be really slow! 2. There is a decent amount of Python logic getting in your way, and each :py:class:`InsertQuery` must be generated and parsed into SQL. 3. That's a lot of data (in terms of raw bytes of SQL) you are sending to your database to parse. 4. We are retrieving the *last insert id*, which causes an additional query to be executed in some cases. You can get a significant speedup by simply wrapping this in a transaction with :py:meth:`~Database.atomic`. .. 
code-block:: python # This is much faster. with db.atomic(): for data_dict in data_source: MyModel.create(**data_dict) The above code still suffers from points 2, 3 and 4. We can get another big boost by using :py:meth:`~Model.insert_many`. This method accepts a list of tuples or dictionaries, and inserts multiple rows in a single query: .. code-block:: python data_source = [ {'field1': 'val1-1', 'field2': 'val1-2'}, {'field1': 'val2-1', 'field2': 'val2-2'}, # ... ] # Fastest way to INSERT multiple rows. MyModel.insert_many(data_source).execute() The :py:meth:`~Model.insert_many` method also accepts a list of row-tuples, provided you also specify the corresponding fields: .. code-block:: python # We can INSERT tuples as well... data = [('val1-1', 'val1-2'), ('val2-1', 'val2-2'), ('val3-1', 'val3-2')] # But we need to indicate which fields the values correspond to. MyModel.insert_many(data, fields=[MyModel.field1, MyModel.field2]).execute() It is also a good practice to wrap the bulk insert in a transaction: .. code-block:: python # You can, of course, wrap this in a transaction as well: with db.atomic(): MyModel.insert_many(data, fields=fields).execute() .. note:: SQLite users should be aware of some caveats when using bulk inserts. Specifically, your SQLite3 version must be 3.7.11.0 or newer to take advantage of the bulk insert API. Additionally, by default SQLite limits the number of bound variables in a SQL query to ``999`` for SQLite versions prior to 3.32.0 (2020-05-22) and 32766 for SQLite versions after 3.32.0. Inserting rows in batches ^^^^^^^^^^^^^^^^^^^^^^^^^ Depending on the number of rows in your data source, you may need to break it up into chunks. SQLite in particular typically has a `limit of 999 or 32766 `_ variables-per-query (batch size would then be 999 // row length or 32766 // row length). You can write a loop to batch your data into chunks (in which case it is **strongly recommended** you use a transaction): .. code-block:: python # Insert rows 100 at a time. with db.atomic(): for idx in range(0, len(data_source), 100): MyModel.insert_many(data_source[idx:idx+100]).execute() Peewee comes with a :py:func:`chunked` helper function which you can use for *efficiently* chunking a generic iterable into a series of *batch*-sized iterables: .. code-block:: python from peewee import chunked # Insert rows 100 at a time. with db.atomic(): for batch in chunked(data_source, 100): MyModel.insert_many(batch).execute() Alternatives ^^^^^^^^^^^^ The :py:meth:`Model.bulk_create` method behaves much like :py:meth:`Model.insert_many`, but instead it accepts a list of unsaved model instances to insert, and it optionally accepts a batch-size parameter. To use the :py:meth:`~Model.bulk_create` API: .. code-block:: python # Read list of usernames from a file, for example. with open('user_list.txt') as fh: # Create a list of unsaved User instances. users = [User(username=line.strip()) for line in fh.readlines()] # Wrap the operation in a transaction and batch INSERT the users # 100 at a time. with db.atomic(): User.bulk_create(users, batch_size=100) .. note:: If you are using Postgresql (which supports the ``RETURNING`` clause), then the previously-unsaved model instances will have their new primary key values automatically populated. In addition, Peewee also offers :py:meth:`Model.bulk_update`, which can efficiently update one or more columns on a list of models. For example: .. code-block:: python # First, create 3 users with usernames u1, u2, u3. 
u1, u2, u3 = [User.create(username='u%s' % i) for i in (1, 2, 3)] # Now we'll modify the user instances. u1.username = 'u1-x' u2.username = 'u2-y' u3.username = 'u3-z' # Update all three users with a single UPDATE query. User.bulk_update([u1, u2, u3], fields=[User.username]) This will result in executing the following SQL: .. code-block:: sql UPDATE "users" SET "username" = CASE "users"."id" WHEN 1 THEN "u1-x" WHEN 2 THEN "u2-y" WHEN 3 THEN "u3-z" END WHERE "users"."id" IN (1, 2, 3); .. note:: For large lists of objects, you should specify a reasonable batch_size and wrap the call to :py:meth:`~Model.bulk_update` with :py:meth:`Database.atomic`: .. code-block:: python with database.atomic(): User.bulk_update(list_of_users, fields=['username'], batch_size=50) .. warning:: :py:meth:`Model.bulk_update` may not be the most efficient method for updating large numbers of records. This functionality is implemented such that we create a "mapping" of primary key to corresponding field values for all rows being updated using a SQL ``CASE`` statement. Alternatively, you can use the :py:meth:`Database.batch_commit` helper to process chunks of rows inside *batch*-sized transactions. This method also provides a workaround for databases besides Postgresql, when the primary-key of the newly-created rows must be obtained. .. code-block:: python # List of row data to insert. row_data = [{'username': 'u1'}, {'username': 'u2'}, ...] # Assume there are 789 items in row_data. The following code will result in # 8 total transactions (7x100 rows + 1x89 rows). for row in db.batch_commit(row_data, 100): User.create(**row) Bulk-loading from another table ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If the data you would like to bulk load is stored in another table, you can also create *INSERT* queries whose source is a *SELECT* query. Use the :py:meth:`Model.insert_from` method: .. code-block:: python res = (TweetArchive .insert_from( Tweet.select(Tweet.user, Tweet.message), fields=[TweetArchive.user, TweetArchive.message]) .execute()) The above query is equivalent to the following SQL: .. code-block:: sql INSERT INTO "tweet_archive" ("user_id", "message") SELECT "user_id", "message" FROM "tweet"; Updating existing records ------------------------- Once a model instance has a primary key, any subsequent call to :py:meth:`~Model.save` will result in an *UPDATE* rather than another *INSERT*. The model's primary key will not change: .. code-block:: pycon >>> user.save() # save() returns the number of rows modified. 1 >>> user.id 1 >>> user.save() >>> user.id 1 >>> huey.save() 1 >>> huey.id 2 If you want to update multiple records, issue an *UPDATE* query. The following example will update all ``Tweet`` objects, marking them as *published*, if they were created before today. :py:meth:`Model.update` accepts keyword arguments where the keys correspond to the model's field names: .. code-block:: pycon >>> today = datetime.today() >>> query = Tweet.update(is_published=True).where(Tweet.creation_date < today) >>> query.execute() # Returns the number of rows that were updated. 4 For more information, see the documentation on :py:meth:`Model.update`, :py:class:`Update` and :py:meth:`Model.bulk_update`. .. note:: If you would like more information on performing atomic updates (such as incrementing the value of a column), check out the :ref:`atomic update ` recipes. .. _atomic_updates: Atomic updates -------------- Peewee allows you to perform atomic updates. Let's suppose we need to update some counters. 
The naive approach would be to write something like this: .. code-block:: pycon >>> for stat in Stat.select().where(Stat.url == request.url): ... stat.counter += 1 ... stat.save() **Do not do this!** Not only is this slow, but it is also vulnerable to race conditions if multiple processes are updating the counter at the same time. Instead, you can update the counters atomically using :py:meth:`~Model.update`: .. code-block:: pycon >>> query = Stat.update(counter=Stat.counter + 1).where(Stat.url == request.url) >>> query.execute() You can make these update statements as complex as you like. Let's give all our employees a bonus equal to their previous bonus plus 10% of their salary: .. code-block:: pycon >>> query = Employee.update(bonus=(Employee.bonus + (Employee.salary * .1))) >>> query.execute() # Give everyone a bonus! We can even use a subquery to update the value of a column. Suppose we had a denormalized column on the ``User`` model that stored the number of tweets a user had made, and we updated this value periodically. Here is how you might write such a query: .. code-block:: pycon >>> subquery = Tweet.select(fn.COUNT(Tweet.id)).where(Tweet.user == User.id) >>> update = User.update(num_tweets=subquery) >>> update.execute() Upsert ^^^^^^ Peewee provides support for varying types of upsert functionality. With SQLite prior to 3.24.0 and MySQL, Peewee offers the :py:meth:`~Model.replace`, which allows you to insert a record or, in the event of a constraint violation, replace the existing record. For Sqlite 3.24+ and Postgres, peewee provides full support for ``ON CONFLICT`` queries. Example of using :py:meth:`~Model.replace` and :py:meth:`~Insert.on_conflict_replace`: .. code-block:: python class User(Model): username = TextField(unique=True) last_login = DateTimeField(null=True) # Insert or update the user. The "last_login" value will be updated # regardless of whether the user existed previously. user_id = (User .replace(username='the-user', last_login=datetime.now()) .execute()) # This query is equivalent: user_id = (User .insert(username='the-user', last_login=datetime.now()) .on_conflict_replace() .execute()) .. note:: In addition to *replace*, SQLite, MySQL and Postgresql provide an *ignore* action (see: :py:meth:`~Insert.on_conflict_ignore`) if you simply wish to insert and ignore any potential constraint violation. **MySQL** supports upsert via the *ON DUPLICATE KEY UPDATE* clause. For example: .. code-block:: python class User(Model): username = TextField(unique=True) last_login = DateTimeField(null=True) login_count = IntegerField() # Insert a new user. User.create(username='huey', login_count=0) # Simulate the user logging in. The login count and timestamp will be # either created or updated correctly. now = datetime.now() rowid = (User .insert(username='huey', last_login=now, login_count=1) .on_conflict( preserve=[User.last_login], # Use the value we would have inserted. update={User.login_count: User.login_count + 1}) .execute()) In the above example, we could safely invoke the upsert query as many times as we wanted. The login count will be incremented atomically, the last login column will be updated, and no duplicate rows will be created. **Postgresql and SQLite** (3.24.0 and newer) provide a different syntax that allows for more granular control over which constraint violation should trigger the conflict resolution, and what values should be updated or preserved. Example of using :py:meth:`~Insert.on_conflict` to perform a Postgresql-style upsert (or SQLite 3.24+): .. 
code-block:: python class User(Model): username = TextField(unique=True) last_login = DateTimeField(null=True) login_count = IntegerField() # Insert a new user. User.create(username='huey', login_count=0) # Simulate the user logging in. The login count and timestamp will be # either created or updated correctly. now = datetime.now() rowid = (User .insert(username='huey', last_login=now, login_count=1) .on_conflict( conflict_target=[User.username], # Which constraint? preserve=[User.last_login], # Use the value we would have inserted. update={User.login_count: User.login_count + 1}) .execute()) In the above example, we could safely invoke the upsert query as many times as we wanted. The login count will be incremented atomically, the last login column will be updated, and no duplicate rows will be created. .. note:: The main difference between MySQL and Postgresql/SQLite is that Postgresql and SQLite require that you specify a ``conflict_target``. Here is a more advanced (if contrived) example using the :py:class:`EXCLUDED` namespace. The :py:class:`EXCLUDED` helper allows us to reference values in the conflicting data. For our example, we'll assume a simple table mapping a unique key (string) to a value (integer): .. code-block:: python class KV(Model): key = CharField(unique=True) value = IntegerField() # Create one row. KV.create(key='k1', value=1) # Demonstrate usage of EXCLUDED. # Here we will attempt to insert a new value for a given key. If that # key already exists, then we will update its value with the *sum* of its # original value and the value we attempted to insert -- provided that # the new value is larger than the original value. query = (KV.insert(key='k1', value=10) .on_conflict(conflict_target=[KV.key], update={KV.value: KV.value + EXCLUDED.value}, where=(EXCLUDED.value > KV.value))) # Executing the above query will result in the following data being # present in the "kv" table: # (key='k1', value=11) query.execute() # If we attempted to execute the query *again*, then nothing would be # updated, as the new value (10) is now less than the value in the # original row (11). There are several important concepts to understand when using ``ON CONFLICT``: * ``conflict_target=``: which column(s) have the UNIQUE constraint. For a user table, this might be the user's email. * ``preserve=``: if a conflict occurs, this parameter is used to indicate which values from the **new** data we wish to update. * ``update=``: if a conflict occurs, this is a mapping of data to apply to the pre-existing row. * ``EXCLUDED``: this "magic" namespace allows you to reference the new data that would have been inserted if the constraint hadn't failed. Full example: .. code-block:: python class User(Model): email = CharField(unique=True) # Unique identifier for user. last_login = DateTimeField() login_count = IntegerField(default=0) ip_log = TextField(default='') # Demonstrates the above 4 concepts. def login(email, ip): rowid = (User .insert({User.email: email, User.last_login: datetime.now(), User.login_count: 1, User.ip_log: ip}) .on_conflict( # If the INSERT fails due to a constraint violation on the # user email, then perform an UPDATE instead. conflict_target=[User.email], # Set the "last_login" to the value we would have inserted # (our call to datetime.now()). preserve=[User.last_login], # Increment the user's login count and prepend the new IP # to the user's ip history. 
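# Note: EXCLUDED.ip_log below refers to the ip_log value from this
# INSERT attempt (the new ip), so the newest address ends up first.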
update={User.login_count: User.login_count + 1, User.ip_log: fn.CONCAT(EXCLUDED.ip_log, ',', User.ip_log)}) .execute()) return rowid # This will insert the initial row, returning the new row id (1). print(login('test@example.com', '127.1')) # Because test@example.com exists, this will trigger the UPSERT. The row id # from above is returned again (1). print(login('test@example.com', '127.2')) u = User.get() print(u.login_count, u.ip_log) # Prints "2 127.2,127.1" For more information, see :py:meth:`Insert.on_conflict` and :py:class:`OnConflict`. Deleting records ---------------- To delete a single model instance, you can use the :py:meth:`Model.delete_instance` shortcut. :py:meth:`~Model.delete_instance` will delete the given model instance and can optionally delete any dependent objects recursively (by specifying `recursive=True`). .. code-block:: pycon >>> user = User.get(User.id == 1) >>> user.delete_instance() # Returns the number of rows deleted. 1 >>> User.get(User.id == 1) UserDoesNotExist: instance matching query does not exist: SQL: SELECT t1."id", t1."username" FROM "user" AS t1 WHERE t1."id" = ? PARAMS: [1] To delete an arbitrary set of rows, you can issue a *DELETE* query. The following will delete all ``Tweet`` objects that are over one year old: .. code-block:: pycon >>> query = Tweet.delete().where(Tweet.creation_date < one_year_ago) >>> query.execute() # Returns the number of rows deleted. 7 For more information, see the documentation on: * :py:meth:`Model.delete_instance` * :py:meth:`Model.delete` * :py:class:`DeleteQuery` Selecting a single record ------------------------- You can use the :py:meth:`Model.get` method to retrieve a single instance matching the given query. For primary-key lookups, you can also use the shortcut method :py:meth:`Model.get_by_id`. This method is a shortcut that calls :py:meth:`Model.select` with the given query, but limits the result set to a single row. Additionally, if no model matches the given query, a ``DoesNotExist`` exception will be raised. .. code-block:: pycon >>> User.get(User.id == 1) <__main__.User object at 0x25294d0> >>> User.get_by_id(1) # Same as above. <__main__.User object at 0x252df10> >>> User[1] # Also same as above. <__main__.User object at 0x252dd10> >>> User.get(User.id == 1).username u'Charlie' >>> User.get(User.username == 'Charlie') <__main__.User object at 0x2529410> >>> User.get(User.username == 'nobody') UserDoesNotExist: instance matching query does not exist: SQL: SELECT t1."id", t1."username" FROM "user" AS t1 WHERE t1."username" = ? PARAMS: ['nobody'] For more advanced operations, you can use :py:meth:`SelectBase.get`. The following query retrieves the latest tweet from the user named *charlie*: .. code-block:: pycon >>> (Tweet ... .select() ... .join(User) ... .where(User.username == 'charlie') ... .order_by(Tweet.created_date.desc()) ... .get()) <__main__.Tweet object at 0x2623410> For more information, see the documentation on: * :py:meth:`Model.get` * :py:meth:`Model.get_by_id` * :py:meth:`Model.get_or_none` - if no matching row is found, return ``None``. * :py:meth:`Model.select` * :py:meth:`SelectBase.get` * :py:meth:`SelectBase.first` - return first record of result-set or ``None``. Create or get ------------- Peewee has one helper method for performing "get/create" type operations: :py:meth:`Model.get_or_create`, which first attempts to retrieve the matching row. Failing that, a new row will be created. 
For "create or get" type logic, typically one would rely on a *unique* constraint or primary key to prevent the creation of duplicate objects. As an example, let's say we wish to implement registering a new user account using the :ref:`example User model `. The *User* model has a *unique* constraint on the username field, so we will rely on the database's integrity guarantees to ensure we don't end up with duplicate usernames: .. code-block:: python try: with db.atomic(): return User.create(username=username) except peewee.IntegrityError: # `username` is a unique column, so this username already exists, # making it safe to call .get(). return User.get(User.username == username) You can easily encapsulate this type of logic as a ``classmethod`` on your own ``Model`` classes. The above example first attempts at creation, then falls back to retrieval, relying on the database to enforce a unique constraint. If you prefer to attempt to retrieve the record first, you can use :py:meth:`~Model.get_or_create`. This method is implemented along the same lines as the Django function of the same name. You can use the Django-style keyword argument filters to specify your ``WHERE`` conditions. The function returns a 2-tuple containing the instance and a boolean value indicating if the object was created. Here is how you might implement user account creation using :py:meth:`~Model.get_or_create`: .. code-block:: python user, created = User.get_or_create(username=username) Suppose we have a different model ``Person`` and would like to get or create a person object. The only conditions we care about when retrieving the ``Person`` are their first and last names, **but** if we end up needing to create a new record, we will also specify their date-of-birth and favorite color: .. code-block:: python person, created = Person.get_or_create( first_name=first_name, last_name=last_name, defaults={'dob': dob, 'favorite_color': 'green'}) Any keyword argument passed to :py:meth:`~Model.get_or_create` will be used in the ``get()`` portion of the logic, except for the ``defaults`` dictionary, which will be used to populate values on newly-created instances. For more details read the documentation for :py:meth:`Model.get_or_create`. Selecting multiple records -------------------------- We can use :py:meth:`Model.select` to retrieve rows from the table. When you construct a *SELECT* query, the database will return any rows that correspond to your query. Peewee allows you to iterate over these rows, as well as use indexing and slicing operations: .. code-block:: pycon >>> query = User.select() >>> [user.username for user in query] ['Charlie', 'Huey', 'Peewee'] >>> query[1] <__main__.User at 0x7f83e80f5550> >>> query[1].username 'Huey' >>> query[:2] [<__main__.User at 0x7f83e80f53a8>, <__main__.User at 0x7f83e80f5550>] :py:class:`Select` queries are smart, in that you can iterate, index and slice the query multiple times but the query is only executed once. In the following example, we will simply call :py:meth:`~Model.select` and iterate over the return value, which is an instance of :py:class:`Select`. This will return all the rows in the *User* table: .. code-block:: pycon >>> for user in User.select(): ... print(user.username) ... Charlie Huey Peewee .. note:: Subsequent iterations of the same query will not hit the database as the results are cached. To disable this behavior (to reduce memory usage), call :py:meth:`Select.iterator` when iterating. 
When iterating over a model that contains a foreign key, be careful with the way you access values on related models. Accidentally resolving a foreign key or iterating over a back-reference can cause :ref:`N+1 query behavior `. When you create a foreign key, such as ``Tweet.user``, you can use the *backref* to create a back-reference (``User.tweets``). Back-references are exposed as :py:class:`Select` instances: .. code-block:: pycon >>> tweet = Tweet.get() >>> tweet.user # Accessing a foreign key returns the related model. >>> user = User.get() >>> user.tweets # Accessing a back-reference returns a query. You can iterate over the ``user.tweets`` back-reference just like any other :py:class:`Select`: .. code-block:: pycon >>> for tweet in user.tweets: ... print(tweet.message) ... hello world this is fun look at this picture of my food In addition to returning model instances, :py:class:`Select` queries can return dictionaries, tuples and namedtuples. Depending on your use-case, you may find it easier to work with rows as dictionaries, for example: .. code-block:: pycon >>> query = User.select().dicts() >>> for row in query: ... print(row) {'id': 1, 'username': 'Charlie'} {'id': 2, 'username': 'Huey'} {'id': 3, 'username': 'Peewee'} See :py:meth:`~BaseQuery.namedtuples`, :py:meth:`~BaseQuery.tuples`, :py:meth:`~BaseQuery.dicts` for more information. Iterating over large result-sets ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ By default peewee will cache the rows returned when iterating over a :py:class:`Select` query. This is an optimization to allow multiple iterations as well as indexing and slicing without causing additional queries. This caching can be problematic, however, when you plan to iterate over a large number of rows. To reduce the amount of memory used by peewee when iterating over a query, use the :py:meth:`~BaseQuery.iterator` method. This method allows you to iterate without caching each model returned, using much less memory when iterating over large result sets. .. code-block:: python # Let's assume we've got 10 million stat objects to dump to a csv file. stats = Stat.select() # Our imaginary serializer class serializer = CSVSerializer() # Loop over all the stats and serialize. for stat in stats.iterator(): serializer.serialize_object(stat) For simple queries you can see further speed improvements by returning rows as dictionaries, namedtuples or tuples. The following methods can be used on any :py:class:`Select` query to change the result row type: * :py:meth:`~BaseQuery.dicts` * :py:meth:`~BaseQuery.namedtuples` * :py:meth:`~BaseQuery.tuples` Don't forget to append the :py:meth:`~BaseQuery.iterator` method call to also reduce memory consumption. For example, the above code might look like: .. code-block:: python # Let's assume we've got 10 million stat objects to dump to a csv file. stats = Stat.select() # Our imaginary serializer class serializer = CSVSerializer() # Loop over all the stats (rendered as tuples, without caching) and serialize. for stat_tuple in stats.tuples().iterator(): serializer.serialize_tuple(stat_tuple) When iterating over a large number of rows that contain columns from multiple tables, peewee will reconstruct the model graph for each row returned. This operation can be slow for complex graphs. For example, if we were selecting a list of tweets along with the username and avatar of the tweet's author, Peewee would have to create two objects for each row (a tweet and a user). 
In addition to the above row-types, there is a fourth method :py:meth:`~BaseQuery.objects` which will return the rows as model instances, but will not attempt to resolve the model graph. For example: .. code-block:: python query = (Tweet .select(Tweet, User) # Select tweet and user data. .join(User)) # Note that the user columns are stored in a separate User instance # accessible at tweet.user: for tweet in query: print(tweet.user.username, tweet.content) # Using ".objects()" will not create the tweet.user object; instead, all # user attributes are assigned directly to the tweet instance: for tweet in query.objects(): print(tweet.username, tweet.content) For maximum performance, you can execute queries and then iterate over the results using the underlying database cursor. :py:meth:`Database.execute` accepts a query object, executes the query, and returns a DB-API 2.0 ``Cursor`` object. The cursor will return the raw row-tuples: .. code-block:: python query = Tweet.select(Tweet.content, User.username).join(User) cursor = database.execute(query) for (content, username) in cursor: print(username, '->', content) Filtering records ----------------- You can filter for particular records using normal python operators. Peewee supports a wide variety of :ref:`query operators `. .. code-block:: pycon >>> user = User.get(User.username == 'Charlie') >>> for tweet in Tweet.select().where(Tweet.user == user, Tweet.is_published == True): ... print(tweet.user.username, '->', tweet.message) ... Charlie -> hello world Charlie -> this is fun >>> for tweet in Tweet.select().where(Tweet.created_date < datetime.datetime(2011, 1, 1)): ... print(tweet.message, tweet.created_date) ... Really old tweet 2010-01-01 00:00:00 You can also filter across joins: .. code-block:: pycon >>> for tweet in Tweet.select().join(User).where(User.username == 'Charlie'): ... print(tweet.message) hello world this is fun look at this picture of my food If you want to express a complex query, use parentheses and python's bitwise *or* and *and* operators: .. code-block:: pycon >>> Tweet.select().join(User).where( ... (User.username == 'Charlie') | ... (User.username == 'Peewee Herman')) .. note:: Note that Peewee uses **bitwise** operators (``&`` and ``|``) rather than logical operators (``and`` and ``or``). The reason for this is that Python coerces the return value of logical operations to a boolean value. This is also the reason why "IN" queries must be expressed using ``.in_()`` rather than the ``in`` operator. Check out :ref:`the table of query operations ` to see what types of queries are possible. .. note:: A lot of fun things can go in the where clause of a query, such as: * A field expression, e.g. ``User.username == 'Charlie'`` * A function expression, e.g. ``fn.Lower(fn.Substr(User.username, 1, 1)) == 'a'`` * A comparison of one column to another, e.g. ``Employee.salary < (Employee.tenure * 1000) + 40000`` You can also nest queries, for example tweets by users whose username starts with "a": .. code-block:: python # get users whose username starts with "a" a_users = User.select().where(fn.Lower(fn.Substr(User.username, 1, 1)) == 'a') # the ".in_()" method signifies an "IN" query a_user_tweets = Tweet.select().where(Tweet.user.in_(a_users)) More query examples ^^^^^^^^^^^^^^^^^^^ .. note:: For a wide range of example queries, see the :ref:`Query Examples ` document, which shows how to implement queries from the `PostgreSQL Exercises `_ website. Get active users: ..
code-block:: python User.select().where(User.active == True) Get users who are either staff or superusers: .. code-block:: python User.select().where( (User.is_staff == True) | (User.is_superuser == True)) Get tweets by a user named "charlie": .. code-block:: python Tweet.select().join(User).where(User.username == 'charlie') Get tweets by staff or superusers (assumes FK relationship): .. code-block:: python Tweet.select().join(User).where( (User.is_staff == True) | (User.is_superuser == True)) Get tweets by staff or superusers using a subquery: .. code-block:: python staff_super = User.select(User.id).where( (User.is_staff == True) | (User.is_superuser == True)) Tweet.select().where(Tweet.user.in_(staff_super)) Sorting records --------------- To return rows in order, use the :py:meth:`~Query.order_by` method: .. code-block:: pycon >>> for t in Tweet.select().order_by(Tweet.created_date): ... print(t.created_date) ... 2010-01-01 00:00:00 2011-06-07 14:08:48 2011-06-07 14:12:57 >>> for t in Tweet.select().order_by(Tweet.created_date.desc()): ... print(t.created_date) ... 2011-06-07 14:12:57 2011-06-07 14:08:48 2010-01-01 00:00:00 You can also use ``+`` and ``-`` prefix operators to indicate ordering: .. code-block:: python # The following queries are equivalent: Tweet.select().order_by(Tweet.created_date.desc()) Tweet.select().order_by(-Tweet.created_date) # Note the "-" prefix. # Similarly you can use "+" to indicate ascending order, though ascending # is the default when no ordering is otherwise specified. User.select().order_by(+User.username) You can also order across joins. Assuming you want to order tweets by the username of the author, then by created_date: .. code-block:: python query = (Tweet .select() .join(User) .order_by(User.username, Tweet.created_date.desc())) .. code-block:: sql SELECT t1."id", t1."user_id", t1."message", t1."is_published", t1."created_date" FROM "tweet" AS t1 INNER JOIN "user" AS t2 ON t1."user_id" = t2."id" ORDER BY t2."username", t1."created_date" DESC When sorting on a calculated value, you can either include the necessary SQL expressions, or reference the alias assigned to the value. Here are two examples illustrating these methods: .. code-block:: python # Let's start with our base query. We want to get all usernames and the number of # tweets they've made. We wish to sort this list from users with most tweets to # users with fewest tweets. query = (User .select(User.username, fn.COUNT(Tweet.id).alias('num_tweets')) .join(Tweet, JOIN.LEFT_OUTER) .group_by(User.username)) You can order using the same COUNT expression used in the ``select`` clause. In the example below we are ordering by the ``COUNT()`` of tweet ids descending: .. code-block:: python query = (User .select(User.username, fn.COUNT(Tweet.id).alias('num_tweets')) .join(Tweet, JOIN.LEFT_OUTER) .group_by(User.username) .order_by(fn.COUNT(Tweet.id).desc())) Alternatively, you can reference the alias assigned to the calculated value in the ``select`` clause. This method has the benefit of being a bit easier to read. Note that we are not referring to the named alias directly, but are wrapping it using the :py:class:`SQL` helper: .. code-block:: python query = (User .select(User.username, fn.COUNT(Tweet.id).alias('num_tweets')) .join(Tweet, JOIN.LEFT_OUTER) .group_by(User.username) .order_by(SQL('num_tweets').desc())) Or, to do things the "peewee" way: ..
code-block:: python ntweets = fn.COUNT(Tweet.id) query = (User .select(User.username, ntweets.alias('num_tweets')) .join(Tweet, JOIN.LEFT_OUTER) .group_by(User.username) .order_by(ntweets.desc())) Getting random records ---------------------- Occasionally you may want to pull a random record from the database. You can accomplish this by ordering by the *random* or *rand* function (depending on your database): Postgresql and Sqlite use the *Random* function: .. code-block:: python # Pick 5 lucky winners: LotteryNumber.select().order_by(fn.Random()).limit(5) MySQL uses *Rand*: .. code-block:: python # Pick 5 lucky winners: LotteryNumber.select().order_by(fn.Rand()).limit(5) Paginating records ------------------ The :py:meth:`~Query.paginate` method makes it easy to grab a *page* of records. :py:meth:`~Query.paginate` takes two parameters: ``page_number`` and ``items_per_page``. .. attention:: Page numbers are 1-based, so the first page of results will be page 1. .. code-block:: pycon >>> for tweet in Tweet.select().order_by(Tweet.id).paginate(2, 10): ... print(tweet.message) ... tweet 10 tweet 11 tweet 12 tweet 13 tweet 14 tweet 15 tweet 16 tweet 17 tweet 18 tweet 19 If you would like more granular control, you can always use :py:meth:`~Query.limit` and :py:meth:`~Query.offset`. Counting records ---------------- You can count the number of rows in any select query: .. code-block:: pycon >>> Tweet.select().count() 100 >>> Tweet.select().where(Tweet.id > 50).count() 50 Peewee will wrap your query in an outer query that performs a count, which results in SQL like: .. code-block:: sql SELECT COUNT(1) FROM ( ... your query ... ); Aggregating records ------------------- Suppose you have some users and want to get a list of them along with the count of tweets each has made. .. code-block:: python query = (User .select(User, fn.Count(Tweet.id).alias('count')) .join(Tweet, JOIN.LEFT_OUTER) .group_by(User)) The resulting query will return *User* objects with all their normal attributes plus an additional attribute *count* which will contain the count of tweets for each user. We use a left outer join to include users who have no tweets. Let's assume you have a tagging application and want to find tags that have a certain number of related objects. For this example we'll use some different models in a :ref:`many-to-many ` configuration: .. code-block:: python class Photo(Model): image = CharField() class Tag(Model): name = CharField() class PhotoTag(Model): photo = ForeignKeyField(Photo) tag = ForeignKeyField(Tag) Now say we want to find tags that have at least 5 photos associated with them: .. code-block:: python query = (Tag .select() .join(PhotoTag) .join(Photo) .group_by(Tag) .having(fn.Count(Photo.id) > 5)) This query is equivalent to the following SQL: .. code-block:: sql SELECT t1."id", t1."name" FROM "tag" AS t1 INNER JOIN "phototag" AS t2 ON t1."id" = t2."tag_id" INNER JOIN "photo" AS t3 ON t2."photo_id" = t3."id" GROUP BY t1."id", t1."name" HAVING Count(t3."id") > 5 Suppose we want to grab the associated count and store it on the tag: .. code-block:: python query = (Tag .select(Tag, fn.Count(Photo.id).alias('count')) .join(PhotoTag) .join(Photo) .group_by(Tag) .having(fn.Count(Photo.id) > 5)) Retrieving Scalar Values ------------------------ You can retrieve scalar values by calling :py:meth:`Query.scalar`. For instance: .. code-block:: pycon >>> PageView.select(fn.Count(fn.Distinct(PageView.url))).scalar() 100 You can retrieve multiple scalar values by passing ``as_tuple=True``: ..
code-block:: pycon >>> Employee.select( ... fn.Min(Employee.salary), fn.Max(Employee.salary) ... ).scalar(as_tuple=True) (30000, 50000) .. _window-functions: Window functions ---------------- A :py:class:`Window` function refers to an aggregate function that operates on a sliding window of data that is being processed as part of a ``SELECT`` query. Window functions make it possible to do things like: 1. Perform aggregations against subsets of a result-set. 2. Calculate a running total. 3. Rank results. 4. Compare a row value to a value in the preceding (or succeeding!) row(s). Peewee comes with support for SQL window functions, which can be created by calling :py:meth:`Function.over` and passing in your partitioning or ordering parameters. For the examples below, we'll use the following model and sample data: .. code-block:: python class Sample(Model): counter = IntegerField() value = FloatField() data = [(1, 10), (1, 20), (2, 1), (2, 3), (3, 100)] Sample.insert_many(data, fields=[Sample.counter, Sample.value]).execute() Our sample table now contains: === ======== ====== id counter value === ======== ====== 1 1 10.0 2 1 20.0 3 2 1.0 4 2 3.0 5 3 100.0 === ======== ====== Ordered Windows ^^^^^^^^^^^^^^^ Let's calculate a running sum of the ``value`` field. In order for it to be a "running" sum, we need it to be ordered, so we'll order with respect to the Sample's ``id`` field: .. code-block:: python query = Sample.select( Sample.counter, Sample.value, fn.SUM(Sample.value).over(order_by=[Sample.id]).alias('total')) for sample in query: print(sample.counter, sample.value, sample.total) # 1 10. 10. # 1 20. 30. # 2 1. 31. # 2 3. 34. # 3 100 134. For another example, we'll calculate the difference between the current value and the previous value, when ordered by the ``id``: .. code-block:: python difference = Sample.value - fn.LAG(Sample.value, 1).over(order_by=[Sample.id]) query = Sample.select( Sample.counter, Sample.value, difference.alias('diff')) for sample in query: print(sample.counter, sample.value, sample.diff) # 1 10. NULL # 1 20. 10. -- (20 - 10) # 2 1. -19. -- (1 - 20) # 2 3. 2. -- (3 - 1) # 3 100 97. -- (100 - 3) Partitioned Windows ^^^^^^^^^^^^^^^^^^^ Let's calculate the average ``value`` for each distinct "counter" value. Notice that there are three possible values for the ``counter`` field (1, 2, and 3). We can do this by calculating the ``AVG()`` of the ``value`` column over a window that is partitioned depending on the ``counter`` field: .. code-block:: python query = Sample.select( Sample.counter, Sample.value, fn.AVG(Sample.value).over(partition_by=[Sample.counter]).alias('cavg')) for sample in query: print(sample.counter, sample.value, sample.cavg) # 1 10. 15. # 1 20. 15. # 2 1. 2. # 2 3. 2. # 3 100 100. We can use ordering within partitions by specifying both the ``order_by`` and ``partition_by`` parameters. For an example, let's rank the samples by value within each distinct ``counter`` group. .. code-block:: python query = Sample.select( Sample.counter, Sample.value, fn.RANK().over( order_by=[Sample.value], partition_by=[Sample.counter]).alias('rank')) for sample in query: print(sample.counter, sample.value, sample.rank) # 1 10. 1 # 1 20. 2 # 2 1. 1 # 2 3. 2 # 3 100 1 Bounded windows ^^^^^^^^^^^^^^^ By default, window functions are evaluated using an *unbounded preceding* start for the window, and the *current row* as the end.
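As a rough sketch of what that default means, the running-sum example from above could be written with its boundaries spelled out explicitly. Note one caveat (described in the frame-type rules below): explicitly passing ``start``/``end`` causes Peewee to default to the ``ROWS`` frame-type, so the two expressions below only behave identically because the ordering term (``Sample.id``) contains no duplicates:

.. code-block:: python

    # Implicit default frame: unbounded preceding through the current row.
    implicit = fn.SUM(Sample.value).over(order_by=[Sample.id])

    # Roughly the same window with the boundaries made explicit. Calling
    # Window.preceding() with no argument means UNBOUNDED PRECEDING.
    explicit = fn.SUM(Sample.value).over(
        order_by=[Sample.id],
        start=Window.preceding(),
        end=Window.CURRENT_ROW)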
We can change the bounds of the window our aggregate functions operate on by specifying a ``start`` and/or ``end`` in the call to :py:meth:`Function.over`. Additionally, Peewee comes with helper-methods on the :py:class:`Window` object for generating the appropriate boundary references: * :py:attr:`Window.CURRENT_ROW` - attribute that references the current row. * :py:meth:`Window.preceding` - specify number of row(s) preceding, or omit number to indicate **all** preceding rows. * :py:meth:`Window.following` - specify number of row(s) following, or omit number to indicate **all** following rows. To examine how boundaries work, we'll calculate a running total of the ``value`` column, ordered with respect to ``id``, **but** we'll only look at the running total of the current row and its two preceding rows: .. code-block:: python query = Sample.select( Sample.counter, Sample.value, fn.SUM(Sample.value).over( order_by=[Sample.id], start=Window.preceding(2), end=Window.CURRENT_ROW).alias('rsum')) for sample in query: print(sample.counter, sample.value, sample.rsum) # 1 10. 10. # 1 20. 30. -- (20 + 10) # 2 1. 31. -- (1 + 20 + 10) # 2 3. 24. -- (3 + 1 + 20) # 3 100 104. -- (100 + 3 + 1) .. note:: Technically we did not need to specify the ``end=Window.CURRENT_ROW`` because that is the default. It was shown in the example for demonstration. Let's look at another example. In this example we will calculate the "opposite" of a running total, in which the total sum of all values is decreased by the value of the samples, ordered by ``id``. To accomplish this, we'll calculate the sum from the current row to the last row. .. code-block:: python query = Sample.select( Sample.counter, Sample.value, fn.SUM(Sample.value).over( order_by=[Sample.id], start=Window.CURRENT_ROW, end=Window.following()).alias('rsum')) for sample in query: print(sample.counter, sample.value, sample.rsum) # 1 10. 134. -- (10 + 20 + 1 + 3 + 100) # 1 20. 124. -- (20 + 1 + 3 + 100) # 2 1. 104. -- (1 + 3 + 100) # 2 3. 103. -- (3 + 100) # 3 100 100. -- (100) Filtered Aggregates ^^^^^^^^^^^^^^^^^^^ Aggregate functions may also accept a filter expression (Postgres and Sqlite 3.25+), which is translated into a ``FILTER (WHERE...)`` clause. Filter expressions are added to an aggregate function with the :py:meth:`Function.filter` method. For an example, we will calculate the running sum of the ``value`` field with respect to the ``id``, but we will filter out any samples whose ``counter=2``. .. code-block:: python query = Sample.select( Sample.counter, Sample.value, fn.SUM(Sample.value).filter(Sample.counter != 2).over( order_by=[Sample.id]).alias('csum')) for sample in query: print(sample.counter, sample.value, sample.csum) # 1 10. 10. # 1 20. 30. # 2 1. 30. # 2 3. 30. # 3 100 130. .. note:: The call to :py:meth:`~Function.filter` must precede the call to :py:meth:`~Function.over`. Reusing Window Definitions ^^^^^^^^^^^^^^^^^^^^^^^^^^ If you intend to use the same window definition for multiple aggregates, you can create a :py:class:`Window` object. The :py:class:`Window` object takes the same parameters as :py:meth:`Function.over`, and can be passed to the ``over()`` method in-place of the individual parameters. Here we'll declare a single window, ordered with respect to the sample ``id``, and call several window functions using that window definition: .. code-block:: python win = Window(order_by=[Sample.id]) query = Sample.select( Sample.counter, Sample.value, fn.LEAD(Sample.value).over(win), fn.LAG(Sample.value).over(win), fn.SUM(Sample.value).over(win) ).window(win) # Include our window definition in query.
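# Registering the Window object with the query via .window() is what causes
# the WINDOW clause to be rendered in the generated SQL; each over(win) call
# above then refers to that shared window definition.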
for row in query.tuples(): print(row) # counter value lead() lag() sum() # 1 10. 20. NULL 10. # 1 20. 1. 10. 30. # 2 1. 3. 20. 31. # 2 3. 100. 1. 34. # 3 100. NULL 3. 134. Multiple window definitions ^^^^^^^^^^^^^^^^^^^^^^^^^^^ In the previous example, we saw how to declare a :py:class:`Window` definition and re-use it for multiple different aggregations. You can include as many window definitions as you need in your queries, but it is necessary to ensure each window has a unique alias: .. code-block:: python w1 = Window(order_by=[Sample.id]).alias('w1') w2 = Window(partition_by=[Sample.counter]).alias('w2') query = Sample.select( Sample.counter, Sample.value, fn.SUM(Sample.value).over(w1).alias('rsum'), # Running total. fn.AVG(Sample.value).over(w2).alias('cavg') # Avg per counter. ).window(w1, w2) # Include our window definitions. for sample in query: print(sample.counter, sample.value, sample.rsum, sample.cavg) # counter value rsum cavg # 1 10. 10. 15. # 1 20. 30. 15. # 2 1. 31. 2. # 2 3. 34. 2. # 3 100 134. 100. Similarly, if you have multiple windows that share similar definitions, it is possible to extend a previously-defined window definition. For example, here we will be partitioning the data-set by the counter value, so we'll be doing our aggregations with respect to the counter. Then we'll define a second window that extends this partitioning, and adds an ordering clause: .. code-block:: python w1 = Window(partition_by=[Sample.counter]).alias('w1') # By extending w1, this window definition will also be partitioned # by "counter". w2 = Window(extends=w1, order_by=[Sample.value.desc()]).alias('w2') query = (Sample .select(Sample.counter, Sample.value, fn.SUM(Sample.value).over(w1).alias('group_sum'), fn.RANK().over(w2).alias('revrank')) .window(w1, w2) .order_by(Sample.id)) for sample in query: print(sample.counter, sample.value, sample.group_sum, sample.revrank) # counter value group_sum revrank # 1 10. 30. 2 # 1 20. 30. 1 # 2 1. 4. 2 # 2 3. 4. 1 # 3 100. 100. 1 .. _window-frame-types: Frame types: RANGE vs ROWS vs GROUPS ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Depending on the frame type, the database will process ordered groups differently. Let's create two additional ``Sample`` rows to visualize the difference: .. code-block:: pycon >>> Sample.create(counter=1, value=20.) >>> Sample.create(counter=2, value=1.) Our table now contains: === ======== ====== id counter value === ======== ====== 1 1 10.0 2 1 20.0 3 2 1.0 4 2 3.0 5 3 100.0 6 1 20.0 7 2 1.0 === ======== ====== Let's examine the difference by calculating a "running sum" of the samples, ordered with respect to the ``counter`` and ``value`` fields. To specify the frame type, we can use either: * :py:attr:`Window.RANGE` * :py:attr:`Window.ROWS` * :py:attr:`Window.GROUPS` The behavior of :py:attr:`~Window.RANGE`, when there are logical duplicates, may lead to unexpected results: .. code-block:: python query = Sample.select( Sample.counter, Sample.value, fn.SUM(Sample.value).over( order_by=[Sample.counter, Sample.value], frame_type=Window.RANGE).alias('rsum')) for sample in query.order_by(Sample.counter, Sample.value): print(sample.counter, sample.value, sample.rsum) # counter value rsum # 1 10. 10. # 1 20. 50. # 1 20. 50. # 2 1. 52. # 2 1. 52. # 2 3. 55. # 3 100 155. With the inclusion of the new rows we now have some rows that have duplicate ``counter`` and ``value`` values. The :py:attr:`~Window.RANGE` frame type causes these duplicates to be evaluated together rather than separately. Under ``RANGE``, rows that compare equal on the ordering terms are treated as *peers* and share the same window frame, which is why both rows with ``(1, 20.)`` report the same running sum.
The more expected result can be achieved by using :py:attr:`~Window.ROWS` as the frame-type: .. code-block:: python query = Sample.select( Sample.counter, Sample.value, fn.SUM(Sample.value).over( order_by=[Sample.counter, Sample.value], frame_type=Window.ROWS).alias('rsum')) for sample in query.order_by(Sample.counter, Sample.value): print(sample.counter, sample.value, sample.rsum) # counter value rsum # 1 10. 10. # 1 20. 30. # 1 20. 50. # 2 1. 51. # 2 1. 52. # 2 3. 55. # 3 100 155. Peewee uses these rules for determining what frame-type to use: * If the user specifies a ``frame_type``, that frame type will be used. * If ``start`` and/or ``end`` boundaries are specified Peewee will default to using ``ROWS``. * If the user did not specify frame type or start/end boundaries, Peewee will use the database default, which is ``RANGE``. The :py:attr:`Window.GROUPS` frame type looks at the window range specification in terms of groups of rows, based on the ordering term(s). Using ``GROUPS``, we can define the frame so it covers distinct groupings of rows. Let's look at an example: .. code-block:: python query = (Sample .select(Sample.counter, Sample.value, fn.SUM(Sample.value).over( order_by=[Sample.counter, Sample.value], frame_type=Window.GROUPS, start=Window.preceding(1)).alias('gsum')) .order_by(Sample.counter, Sample.value)) for sample in query: print(sample.counter, sample.value, sample.gsum) # counter value gsum # 1 10 10 # 1 20 50 # 1 20 50 (10) + (20+20) # 2 1 42 # 2 1 42 (20+20) + (1+1) # 2 3 5 (1+1) + 3 # 3 100 103 (3) + 100 As you can hopefully infer, the window is grouped by its ordering term, which is ``(counter, value)``. We are looking at a window that extends between one previous group and the current group. .. note:: For information about the window function APIs, see: * :py:meth:`Function.over` * :py:meth:`Function.filter` * :py:class:`Window` For general information on window functions, read the postgres `window functions tutorial `_. Additionally, the `postgres docs `_ and the `sqlite docs `_ contain a lot of good information. .. _rowtypes: Retrieving row tuples / dictionaries / namedtuples -------------------------------------------------- Sometimes you do not need the overhead of creating model instances and simply want to iterate over the row data without needing all the APIs provided by :py:class:`Model`. To do this, use: * :py:meth:`~BaseQuery.dicts` * :py:meth:`~BaseQuery.namedtuples` * :py:meth:`~BaseQuery.tuples` * :py:meth:`~BaseQuery.objects` -- accepts an arbitrary constructor function which is called with the row tuple. .. code-block:: python stats = (Stat .select(Stat.url, fn.Count(Stat.url)) .group_by(Stat.url) .tuples()) # iterate over a list of 2-tuples containing the url and count for stat_url, stat_count in stats: print(stat_url, stat_count) Similarly, you can return the rows from the cursor as dictionaries using :py:meth:`~BaseQuery.dicts`: .. code-block:: python stats = (Stat .select(Stat.url, fn.Count(Stat.url).alias('ct')) .group_by(Stat.url) .dicts()) # iterate over a list of dictionaries containing the url and count for stat in stats: print(stat['url'], stat['ct']) .. _returning-clause: Returning Clause ---------------- :py:class:`PostgresqlDatabase` supports a ``RETURNING`` clause on ``UPDATE``, ``INSERT`` and ``DELETE`` queries. Specifying a ``RETURNING`` clause allows you to iterate over the rows accessed by the query.
By default, the return values upon execution of the different queries are: * ``INSERT`` - auto-incrementing primary key value of the newly-inserted row. When not using an auto-incrementing primary key, Postgres will return the new row's primary key, but SQLite and MySQL will not. * ``UPDATE`` - number of rows modified * ``DELETE`` - number of rows deleted When a returning clause is used, the return value upon executing a query will be an iterable cursor object. Via the ``RETURNING`` clause, Postgresql allows you to return data from the rows inserted or modified by a query. For example, let's say you have an :py:class:`Update` query that deactivates all user accounts whose registration has expired. After deactivating them, you want to send each user an email letting them know their account was deactivated. Rather than writing two queries, a ``SELECT`` and an ``UPDATE``, you can do this in a single ``UPDATE`` query with a ``RETURNING`` clause: .. code-block:: python query = (User .update(is_active=False) .where(User.registration_expired == True) .returning(User)) # Send an email to every user that was deactivated. for deactivated_user in query.execute(): send_deactivation_email(deactivated_user.email) The ``RETURNING`` clause is also available on :py:class:`Insert` and :py:class:`Delete`. When used with ``INSERT``, the newly-created rows will be returned. When used with ``DELETE``, the deleted rows will be returned. The only limitation of the ``RETURNING`` clause is that it can only consist of columns from tables listed in the query's ``FROM`` clause. To select all columns from a particular table, you can simply pass in the :py:class:`Model` class. As another example, let's add a user and set their creation-date to the server-generated current timestamp. We'll create and retrieve the new user's ID, email and creation timestamp in a single query: .. code-block:: python query = (User .insert(email='foo@bar.com', created=fn.now()) .returning(User)) # Shorthand for all columns on User. # When using RETURNING, execute() returns a cursor. cursor = query.execute() # Get the user object we just inserted and log the data: user = cursor[0] logger.info('Created user %s (id=%s) at %s', user.email, user.id, user.created) By default the cursor will return :py:class:`Model` instances, but you can specify a different row type: .. code-block:: python data = [{'username': 'charlie'}, {'username': 'huey'}, {'username': 'mickey'}] query = (User .insert_many(data) .returning(User.id, User.username) .dicts()) for new_user in query.execute(): print('Added user "%s", id=%s' % (new_user['username'], new_user['id'])) Just as with :py:class:`Select` queries, you can specify various :ref:`result row types `. .. _cte: Common Table Expressions ------------------------ Peewee supports the inclusion of common table expressions (CTEs) in all types of queries. CTEs may be useful for: * Factoring out a common subquery. * Grouping or filtering by a column derived in the CTE's result set. * Writing recursive queries. To declare a :py:class:`Select` query for use as a CTE, use the :py:meth:`~SelectQuery.cte` method, which wraps the query in a :py:class:`CTE` object. To indicate that a :py:class:`CTE` should be included as part of a query, use the :py:meth:`Query.with_cte` method, passing a list of CTE objects. Simple Example ^^^^^^^^^^^^^^ For an example, let's say we have some data points that consist of a key and a floating-point value. Let's define our model and populate some test data: ..
code-block:: python class Sample(Model): key = TextField() value = FloatField() data = ( ('a', (1.25, 1.5, 1.75)), ('b', (2.1, 2.3, 2.5, 2.7, 2.9)), ('c', (3.5, 3.5))) # Populate data. for key, values in data: Sample.insert_many([(key, value) for value in values], fields=[Sample.key, Sample.value]).execute() Let's use a CTE to calculate, for each distinct key, which values were above-average for that key. .. code-block:: python # First we'll declare the query that will be used as a CTE. This query # simply determines the average value for each key. cte = (Sample .select(Sample.key, fn.AVG(Sample.value).alias('avg_value')) .group_by(Sample.key) .cte('key_avgs', columns=('key', 'avg_value'))) # Now we'll query the sample table, using our CTE to find rows whose value # exceeds the average for the given key. We'll calculate how far above the # average the given sample's value is, as well. query = (Sample .select(Sample.key, Sample.value) .join(cte, on=(Sample.key == cte.c.key)) .where(Sample.value > cte.c.avg_value) .order_by(Sample.value) .with_cte(cte)) We can iterate over the samples returned by the query to see which samples had above-average values for their given group: .. code-block:: pycon >>> for sample in query: ... print(sample.key, sample.value) # 'a', 1.75 # 'b', 2.7 # 'b', 2.9 Complex Example ^^^^^^^^^^^^^^^ For a more complete example, let's consider the following query which uses multiple CTEs to find per-product sales totals in only the top sales regions. Our model looks like this: .. code-block:: python class Order(Model): region = TextField() amount = FloatField() product = TextField() quantity = IntegerField() Here is how the query might be written in SQL. This example can be found in the `postgresql documentation `_. .. code-block:: sql WITH regional_sales AS ( SELECT region, SUM(amount) AS total_sales FROM orders GROUP BY region ), top_regions AS ( SELECT region FROM regional_sales WHERE total_sales > (SELECT SUM(total_sales) / 10 FROM regional_sales) ) SELECT region, product, SUM(quantity) AS product_units, SUM(amount) AS product_sales FROM orders WHERE region IN (SELECT region FROM top_regions) GROUP BY region, product; With Peewee, we would write: .. code-block:: python reg_sales = (Order .select(Order.region, fn.SUM(Order.amount).alias('total_sales')) .group_by(Order.region) .cte('regional_sales')) top_regions = (reg_sales .select(reg_sales.c.region) .where(reg_sales.c.total_sales > ( reg_sales.select(fn.SUM(reg_sales.c.total_sales) / 10))) .cte('top_regions')) query = (Order .select(Order.region, Order.product, fn.SUM(Order.quantity).alias('product_units'), fn.SUM(Order.amount).alias('product_sales')) .where(Order.region.in_(top_regions.select(top_regions.c.region))) .group_by(Order.region, Order.product) .with_cte(reg_sales, top_regions)) Recursive CTEs ^^^^^^^^^^^^^^ Peewee supports recursive CTEs. Recursive CTEs can be useful when, for example, you have a tree data-structure represented by a parent-link foreign key. Suppose, for example, that we have a hierarchy of categories for an online bookstore. We wish to generate a table showing all categories and their absolute depths, along with the path from the root to the category. We'll assume the following model definition, in which each category has a foreign-key to its immediate parent category: .. code-block:: python class Category(Model): name = TextField() parent = ForeignKeyField('self', backref='children', null=True) To list all categories along with their depth and parents, we can use a recursive CTE: .. 
code-block:: python # Define the base case of our recursive CTE. This will be categories that # have a null parent foreign-key. Base = Category.alias() level = Value(1).alias('level') path = Base.name.alias('path') base_case = (Base .select(Base.id, Base.name, Base.parent, level, path) .where(Base.parent.is_null()) .cte('base', recursive=True)) # Define the recursive terms. RTerm = Category.alias() rlevel = (base_case.c.level + 1).alias('level') rpath = base_case.c.path.concat('->').concat(RTerm.name).alias('path') recursive = (RTerm .select(RTerm.id, RTerm.name, RTerm.parent, rlevel, rpath) .join(base_case, on=(RTerm.parent == base_case.c.id))) # The recursive CTE is created by taking the base case and UNION ALL with # the recursive term. cte = base_case.union_all(recursive) # We will now query from the CTE to get the categories, their levels, and # their paths. query = (cte .select_from(cte.c.name, cte.c.level, cte.c.path) .order_by(cte.c.path)) # We can now iterate over a list of all categories and print their names, # absolute levels, and path from root -> category. for category in query: print(category.name, category.level, category.path) # Example output: # root, 1, root # p1, 2, root->p1 # c1-1, 3, root->p1->c1-1 # c1-2, 3, root->p1->c1-2 # p2, 2, root->p2 # c2-1, 3, root->p2->c2-1 Data-Modifying CTE ^^^^^^^^^^^^^^^^^^ Peewee supports data-modifying CTEs. Here is an example of using a data-modifying CTE to move data from one table to an archive table, using a single query: .. code-block:: python class Event(Model): name = CharField() timestamp = DateTimeField() class Archive(Model): name = CharField() timestamp = DateTimeField() # Move rows older than 24 hours from the Event table to the Archive. cte = (Event .delete() .where(Event.timestamp < (datetime.now() - timedelta(days=1))) .returning(Event) .cte('moved_rows')) # Create a simple SELECT to get the resulting rows from the CTE. src = Select((cte,), (cte.c.id, cte.c.name, cte.c.timestamp)) # Insert into the archive table whatever data was returned by the DELETE. res = (Archive .insert_from(src, (Archive.id, Archive.name, Archive.timestamp)) .with_cte(cte) .execute()) The above corresponds roughly to the following SQL: .. code-block:: sql WITH "moved_rows" AS ( DELETE FROM "event" WHERE ("timestamp" < XXXX-XX-XXTXX:XX:XX) RETURNING "id", "name", "timestamp") INSERT INTO "archive" ("id", "name", "timestamp") SELECT "moved_rows"."id", "moved_rows"."name", "moved_rows"."timestamp" FROM "moved_rows"; For additional examples, refer to the tests in ``models.py`` and ``sql.py``: * https://github.com/coleifer/peewee/blob/master/tests/models.py * https://github.com/coleifer/peewee/blob/master/tests/sql.py Foreign Keys and Joins ---------------------- This section has been moved into its own document: :ref:`relationships`.
peewee-3.17.7/docs/peewee/quickstart.rst
.. _quickstart: Quickstart ========== This document presents a brief, high-level overview of Peewee's primary features. This guide will cover: * :ref:`model-definition` * :ref:`storing-data` * :ref:`retrieving-data` .. note:: If you'd like something a bit more meaty, there is a thorough tutorial on :ref:`creating a "twitter"-style web app ` using peewee and the Flask framework. In the project's ``examples/`` folder you can find more self-contained Peewee examples, like a `blog app `_. I **strongly** recommend opening an interactive shell session and running the code.
That way you can get a feel for typing in queries. .. _model-definition: Model Definition ----------------- Model classes, fields and model instances all map to database concepts: ================= ================================= Object Corresponds to... ================= ================================= Model class Database table Field instance Column on a table Model instance Row in a database table ================= ================================= When starting a project with peewee, it's typically best to begin with your data model, by defining one or more :py:class:`Model` classes: .. code-block:: python from peewee import * db = SqliteDatabase('people.db') class Person(Model): name = CharField() birthday = DateField() class Meta: database = db # This model uses the "people.db" database. .. note:: Peewee will automatically infer the database table name from the name of the class. You can override the default name by specifying a ``table_name`` attribute in the inner "Meta" class (alongside the ``database`` attribute). To learn more about how Peewee generates table names, refer to the :ref:`table_names` section. Also note that we named our model ``Person`` instead of ``People``. This is a convention you should follow -- even though the table will contain multiple people, we always name the class using the singular form. There are lots of :ref:`field types ` suitable for storing various types of data. Peewee handles converting between *pythonic* values and those used by the database, so you can use Python types in your code without having to worry. Things get interesting when we set up relationships between models using :ref:`foreign key relationships `. This is simple with peewee: .. code-block:: python class Pet(Model): owner = ForeignKeyField(Person, backref='pets') name = CharField() animal_type = CharField() class Meta: database = db # this model uses the "people.db" database Now that we have our models, let's connect to the database. Although it's not necessary to open the connection explicitly, it is good practice since it will reveal any errors with your database connection immediately, as opposed to some arbitrary time later when the first query is executed. It is also good to close the connection when you are done -- for instance, a web app might open a connection when it receives a request, and close the connection when it sends the response. .. code-block:: python db.connect() We'll begin by creating the tables in the database that will store our data. This will create the tables with the appropriate columns, indexes, sequences, and foreign key constraints: .. code-block:: python db.create_tables([Person, Pet]) .. _storing-data: Storing data ------------ Let's begin by populating the database with some people. We will use the :py:meth:`~Model.save` and :py:meth:`~Model.create` methods to add and update people's records. .. code-block:: python from datetime import date uncle_bob = Person(name='Bob', birthday=date(1960, 1, 15)) uncle_bob.save() # bob is now stored in the database # Returns: 1 .. note:: When you call :py:meth:`~Model.save`, the number of rows modified is returned. You can also add a person by calling the :py:meth:`~Model.create` method, which returns a model instance: .. code-block:: python grandma = Person.create(name='Grandma', birthday=date(1935, 3, 1)) herb = Person.create(name='Herb', birthday=date(1950, 5, 5)) To update a row, modify the model instance and call :py:meth:`~Model.save` to persist the changes. 
Here we will change Grandma's name and then save the changes in the database: .. code-block:: python grandma.name = 'Grandma L.' grandma.save() # Update grandma's name in the database. # Returns: 1 Now we have stored 3 people in the database. Let's give them some pets. Grandma doesn't like animals in the house, so she won't have any, but Herb is an animal lover: .. code-block:: python bob_kitty = Pet.create(owner=uncle_bob, name='Kitty', animal_type='cat') herb_fido = Pet.create(owner=herb, name='Fido', animal_type='dog') herb_mittens = Pet.create(owner=herb, name='Mittens', animal_type='cat') herb_mittens_jr = Pet.create(owner=herb, name='Mittens Jr', animal_type='cat') After a long full life, Mittens sickens and dies. We need to remove him from the database: .. code-block:: python herb_mittens.delete_instance() # he had a great life # Returns: 1 .. note:: The return value of :py:meth:`~Model.delete_instance` is the number of rows removed from the database. Uncle Bob decides that too many animals have been dying at Herb's house, so he adopts Fido: .. code-block:: python herb_fido.owner = uncle_bob herb_fido.save() .. _retrieving-data: Retrieving Data --------------- The real strength of our database is in how it allows us to retrieve data through *queries*. Relational databases are excellent for making ad-hoc queries. Getting single records ^^^^^^^^^^^^^^^^^^^^^^ Let's retrieve Grandma's record from the database. To get a single record from the database, use :py:meth:`Select.get`: .. code-block:: python grandma = Person.select().where(Person.name == 'Grandma L.').get() We can also use the equivalent shorthand :py:meth:`Model.get`: .. code-block:: python grandma = Person.get(Person.name == 'Grandma L.') Lists of records ^^^^^^^^^^^^^^^^ Let's list all the people in the database: .. code-block:: python for person in Person.select(): print(person.name) # prints: # Bob # Grandma L. # Herb Let's list all the cats and their owner's name: .. code-block:: python query = Pet.select().where(Pet.animal_type == 'cat') for pet in query: print(pet.name, pet.owner.name) # prints: # Kitty Bob # Mittens Jr Herb .. attention:: There is a big problem with the previous query: because we are accessing ``pet.owner.name`` and we did not select this relation in our original query, peewee will have to perform an additional query to retrieve the pet's owner. This behavior is referred to as :ref:`N+1 ` and it should generally be avoided. For an in-depth guide to working with relationships and joins, refer to the :ref:`relationships` documentation. We can avoid the extra queries by selecting both *Pet* and *Person*, and adding a *join*. .. code-block:: python query = (Pet .select(Pet, Person) .join(Person) .where(Pet.animal_type == 'cat')) for pet in query: print(pet.name, pet.owner.name) # prints: # Kitty Bob # Mittens Jr Herb Let's get all the pets owned by Bob: .. code-block:: python for pet in Pet.select().join(Person).where(Person.name == 'Bob'): print(pet.name) # prints: # Kitty # Fido We can do another cool thing here to get bob's pets. Since we already have an object to represent Bob, we can do this instead: .. code-block:: python for pet in Pet.select().where(Pet.owner == uncle_bob): print(pet.name) Sorting ^^^^^^^ Let's make sure these are sorted alphabetically by adding an :py:meth:`~Select.order_by` clause: .. code-block:: python for pet in Pet.select().where(Pet.owner == uncle_bob).order_by(Pet.name): print(pet.name) # prints: # Fido # Kitty Let's list all the people now, youngest to oldest: .. 
code-block:: python for person in Person.select().order_by(Person.birthday.desc()): print(person.name, person.birthday) # prints: # Bob 1960-01-15 # Herb 1950-05-05 # Grandma L. 1935-03-01 Combining filter expressions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Peewee supports arbitrarily-nested expressions. Let's get all the people whose birthday was either: * before 1940 (grandma) * after 1959 (bob) .. code-block:: python d1940 = date(1940, 1, 1) d1960 = date(1960, 1, 1) query = (Person .select() .where((Person.birthday < d1940) | (Person.birthday > d1960))) for person in query: print(person.name, person.birthday) # prints: # Bob 1960-01-15 # Grandma L. 1935-03-01 Now let's do the opposite. People whose birthday is between 1940 and 1960 (inclusive of both years): .. code-block:: python query = (Person .select() .where(Person.birthday.between(d1940, d1960))) for person in query: print(person.name, person.birthday) # prints: # Herb 1950-05-05 Aggregates and Prefetch ^^^^^^^^^^^^^^^^^^^^^^^ Now let's list all the people *and* how many pets they have: .. code-block:: python for person in Person.select(): print(person.name, person.pets.count(), 'pets') # prints: # Bob 2 pets # Grandma L. 0 pets # Herb 1 pets Once again we've run into a classic example of :ref:`N+1 ` query behavior. In this case, we're executing an additional query for every ``Person`` returned by the original ``SELECT``! We can avoid this by performing a *JOIN* and using a SQL function to aggregate the results. .. code-block:: python query = (Person .select(Person, fn.COUNT(Pet.id).alias('pet_count')) .join(Pet, JOIN.LEFT_OUTER) # include people without pets. .group_by(Person) .order_by(Person.name)) for person in query: # "pet_count" becomes an attribute on the returned model instances. print(person.name, person.pet_count, 'pets') # prints: # Bob 2 pets # Grandma L. 0 pets # Herb 1 pets .. note:: Peewee provides a magical helper :py:func:`fn`, which can be used to call any SQL function. In the above example, ``fn.COUNT(Pet.id).alias('pet_count')`` would be translated into ``COUNT(pet.id) AS pet_count``. Now let's list all the people and the names of all their pets. As you may have guessed, this could easily turn into another :ref:`N+1 ` situation if we're not careful. Before diving into the code, consider how this example is different from the earlier example where we listed all the pets and their owner's name. A pet can only have one owner, so when we performed the join from ``Pet`` to ``Person``, there was always going to be a single match. The situation is different when we are joining from ``Person`` to ``Pet`` because a person may have zero pets or they may have several pets. Because we're using a relational database, if we were to do a join from ``Person`` to ``Pet`` then every person with multiple pets would be repeated, once for each pet. It would look like this: .. code-block:: python query = (Person .select(Person, Pet) .join(Pet, JOIN.LEFT_OUTER) .order_by(Person.name, Pet.name)) for person in query: # We need to check if they have a pet instance attached, since not all # people have pets. if hasattr(person, 'pet'): print(person.name, person.pet.name) else: print(person.name, 'no pets') # prints: # Bob Fido # Bob Kitty # Grandma L. no pets # Herb Mittens Jr Usually this type of duplication is undesirable. To accommodate the more common (and intuitive) workflow of listing a person and attaching **a list** of that person's pets, we can use a special method called :py:meth:`~ModelSelect.prefetch`: ..
code-block:: python query = Person.select().order_by(Person.name).prefetch(Pet) for person in query: print(person.name) for pet in person.pets: print(' *', pet.name) # prints: # Bob # * Kitty # * Fido # Grandma L. # Herb # * Mittens Jr SQL Functions ^^^^^^^^^^^^^ One last query. This will use a SQL function to find all people whose names start with either an upper or lower-case *G*: .. code-block:: python expression = fn.Lower(fn.Substr(Person.name, 1, 1)) == 'g' for person in Person.select().where(expression): print(person.name) # prints: # Grandma L. These are just the basics! You can make your queries as complex as you like. Check the documentation on :ref:`querying` for more info. Database -------- We're done with our database, so let's close the connection: .. code-block:: python db.close() In an actual application, there are some established patterns for how you would manage your database connection lifetime. For example, a web application will typically open a connection at the start of a request, and close the connection after generating the response. A :ref:`connection pool ` can help eliminate latency associated with startup costs. To learn about setting up your database, see the :ref:`database` documentation, which provides many examples. Peewee also supports :ref:`configuring the database at run-time ` as well as setting or changing the database at any time. Working with existing databases ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If you already have a database, you can autogenerate peewee models using :ref:`pwiz`. For instance, if I have a postgresql database named *charles_blog*, I might run: .. code-block:: console python -m pwiz -e postgresql charles_blog > blog_models.py What next? ---------- That's it for the quickstart. If you want to look at a full web-app, check out the :ref:`example-app`.
peewee-3.17.7/docs/peewee/relationships.rst
.. _relationships: Relationships and Joins ======================= In this document we'll cover how Peewee handles relationships between models. Model definitions ----------------- We'll use the following model definitions for our examples: .. code-block:: python import datetime from peewee import * db = SqliteDatabase(':memory:') class BaseModel(Model): class Meta: database = db class User(BaseModel): username = TextField() class Tweet(BaseModel): content = TextField() timestamp = DateTimeField(default=datetime.datetime.now) user = ForeignKeyField(User, backref='tweets') class Favorite(BaseModel): user = ForeignKeyField(User, backref='favorites') tweet = ForeignKeyField(Tweet, backref='favorites') Peewee uses :py:class:`ForeignKeyField` to define foreign-key relationships between models. Every foreign-key field has an implied back-reference, which is exposed as a pre-filtered :py:class:`Select` query using the provided ``backref`` attribute. Creating test data ^^^^^^^^^^^^^^^^^^ To follow along with the examples, let's populate this database with some test data: ..
code-block:: python def populate_test_data(): db.create_tables([User, Tweet, Favorite]) data = ( ('huey', ('meow', 'hiss', 'purr')), ('mickey', ('woof', 'whine')), ('zaizee', ())) for username, tweets in data: user = User.create(username=username) for tweet in tweets: Tweet.create(user=user, content=tweet) # Populate a few favorites for our users, such that: favorite_data = ( ('huey', ['whine']), ('mickey', ['purr']), ('zaizee', ['meow', 'purr'])) for username, favorites in favorite_data: user = User.get(User.username == username) for content in favorites: tweet = Tweet.get(Tweet.content == content) Favorite.create(user=user, tweet=tweet) This gives us the following: ========= ========== =========================== User Tweet Favorited by ========= ========== =========================== huey meow zaizee huey hiss huey purr mickey, zaizee mickey woof mickey whine huey ========= ========== =========================== .. attention:: In the following examples we will be executing a number of queries. If you are unsure how many queries are being executed, you can add the following code, which will log all queries to the console: .. code-block:: python import logging logger = logging.getLogger('peewee') logger.addHandler(logging.StreamHandler()) logger.setLevel(logging.DEBUG) .. note:: In SQLite, foreign keys are not enabled by default. Most things, including the Peewee foreign-key API, will work fine, but ON DELETE behaviour will be ignored, even if you explicitly specify ``on_delete`` in your :py:class:`ForeignKeyField`. In conjunction with the default :py:class:`AutoField` behaviour (where deleted record IDs can be reused), this can lead to subtle bugs. To avoid problems, I recommend that you enable foreign-key constraints when using SQLite, by setting ``pragmas={'foreign_keys': 1}`` when you instantiate :py:class:`SqliteDatabase`. .. code-block:: python # Ensure foreign-key constraints are enforced. db = SqliteDatabase('my_app.db', pragmas={'foreign_keys': 1}) Performing simple joins ----------------------- As an exercise in learning how to perform joins with Peewee, let's write a query to print out all the tweets by "huey". To do this we'll select from the ``Tweet`` model and join on the ``User`` model, so we can then filter on the ``User.username`` field: .. code-block:: pycon >>> query = Tweet.select().join(User).where(User.username == 'huey') >>> for tweet in query: ... print(tweet.content) ... meow hiss purr .. note:: We did not have to explicitly specify the join predicate (the "ON" clause), because Peewee inferred from the models that when we joined from Tweet to User, we were joining on the ``Tweet.user`` foreign-key. The following code is equivalent, but more explicit: .. code-block:: python query = (Tweet .select() .join(User, on=(Tweet.user == User.id)) .where(User.username == 'huey')) If we already had a reference to the ``User`` object for "huey", we could use the ``User.tweets`` back-reference to list all of huey's tweets: .. code-block:: pycon >>> huey = User.get(User.username == 'huey') >>> for tweet in huey.tweets: ... print(tweet.content) ... meow hiss purr Taking a closer look at ``huey.tweets``, we can see that it is just a simple pre-filtered ``SELECT`` query: .. 

.. code-block:: pycon

    >>> huey.tweets
    >>> huey.tweets.sql()
    ('SELECT "t1"."id", "t1"."content", "t1"."timestamp", "t1"."user_id" FROM "tweet" AS "t1" WHERE ("t1"."user_id" = ?)', [1])

Joining multiple tables
-----------------------

Let's take another look at joins by querying the list of users and counting
how many times their tweets have been favorited. This will require us to
join twice: from user to tweet, and from tweet to favorite. We'll add the
additional requirement that users should be included who have not created
any tweets, as well as users whose tweets have not been favorited. The
query, expressed in SQL, would be:

.. code-block:: sql

    SELECT user.username, COUNT(favorite.id)
    FROM user
    LEFT OUTER JOIN tweet ON tweet.user_id = user.id
    LEFT OUTER JOIN favorite ON favorite.tweet_id = tweet.id
    GROUP BY user.username

.. note::
    In the above query both joins are LEFT OUTER, since a user may not have
    any tweets or, if they have tweets, none of them may have been
    favorited.

Peewee has a concept of a *join context*, meaning that whenever we call the
:py:meth:`~ModelSelect.join` method, we are implicitly joining on the
previously-joined model (or if this is the first call, the model we are
selecting from). Since we are joining straight through, from user to tweet,
then from tweet to favorite, we can simply write:

.. code-block:: python

    query = (User
             .select(User.username, fn.COUNT(Favorite.id).alias('count'))
             .join(Tweet, JOIN.LEFT_OUTER)  # Joins user -> tweet.
             .join(Favorite, JOIN.LEFT_OUTER)  # Joins tweet -> favorite.
             .group_by(User.username))

Iterating over the results:

.. code-block:: pycon

    >>> for user in query:
    ...     print(user.username, user.count)
    ...
    huey 3
    mickey 1
    zaizee 0
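
If we only wanted to see users whose tweets had been favorited at least
once, we could filter on the aggregate with ``HAVING``. A quick sketch
building on the query above:

.. code-block:: python

    query = (User
             .select(User.username, fn.COUNT(Favorite.id).alias('count'))
             .join(Tweet, JOIN.LEFT_OUTER)
             .join(Favorite, JOIN.LEFT_OUTER)
             .group_by(User.username)
             .having(fn.COUNT(Favorite.id) > 0))  # Only users with favorites.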

For a more complicated example involving multiple joins and switching join
contexts, let's find all the tweets by Huey and the number of times they've
been favorited. To do this we'll need to perform two joins and we'll also
use an aggregate function to calculate the favorite count.

Here is how we would write this query in SQL:

.. code-block:: sql

    SELECT tweet.content, COUNT(favorite.id)
    FROM tweet
    INNER JOIN user ON tweet.user_id = user.id
    LEFT OUTER JOIN favorite ON favorite.tweet_id = tweet.id
    WHERE user.username = 'huey'
    GROUP BY tweet.content;

.. note::
    We use a LEFT OUTER join from tweet to favorite since a tweet may not
    have any favorites, yet we still wish to display its content (along with
    a count of zero) in the result set.

With Peewee, the resulting Python code looks very similar to what we would
write in SQL:

.. code-block:: python

    query = (Tweet
             .select(Tweet.content, fn.COUNT(Favorite.id).alias('count'))
             .join(User)  # Join from tweet -> user.
             .switch(Tweet)  # Move "join context" back to tweet.
             .join(Favorite, JOIN.LEFT_OUTER)  # Join from tweet -> favorite.
             .where(User.username == 'huey')
             .group_by(Tweet.content))

Note the call to :py:meth:`~ModelSelect.switch` - that instructs Peewee to
set the *join context* back to ``Tweet``. If we had omitted the explicit
call to switch, Peewee would have used ``User`` (the last model we joined)
as the join context and constructed the join from User to Favorite using the
``Favorite.user`` foreign-key, which would have given us incorrect results.

If we wanted to omit the join-context switching we could instead use the
:py:meth:`~ModelSelect.join_from` method. The following query is equivalent
to the previous one:

.. code-block:: python

    query = (Tweet
             .select(Tweet.content, fn.COUNT(Favorite.id).alias('count'))
             .join_from(Tweet, User)  # Join tweet -> user.
             .join_from(Tweet, Favorite, JOIN.LEFT_OUTER)  # Join tweet -> favorite.
             .where(User.username == 'huey')
             .group_by(Tweet.content))

We can iterate over the results of the above query to print the tweet's
content and the favorite count:

.. code-block:: pycon

    >>> for tweet in query:
    ...     print('%s favorited %d times' % (tweet.content, tweet.count))
    ...
    meow favorited 1 times
    hiss favorited 0 times
    purr favorited 2 times

.. _multiple-sources:

Selecting from multiple sources
-------------------------------

If we wished to list all the tweets in the database, along with the username
of their author, you might try writing this:

.. code-block:: pycon

    >>> for tweet in Tweet.select():
    ...     print(tweet.user.username, '->', tweet.content)
    ...
    huey -> meow
    huey -> hiss
    huey -> purr
    mickey -> woof
    mickey -> whine

There is a big problem with the above loop: it executes an additional query
for every tweet to look up the ``tweet.user`` foreign-key. For our small
table the performance penalty isn't obvious, but we would find the delays
grew as the number of rows increased.

If you're familiar with SQL, you might remember that it's possible to SELECT
from multiple tables, allowing us to get the tweet content *and* the
username in a single query:

.. code-block:: sql

    SELECT tweet.content, user.username
    FROM tweet
    INNER JOIN user ON tweet.user_id = user.id;

Peewee makes this quite easy. In fact, we only need to modify our query a
little bit. We tell Peewee we wish to select ``Tweet.content`` as well as
the ``User.username`` field, then we include a join from tweet to user. To
make it a bit more obvious that it's doing the correct thing, we can ask
Peewee to return the rows as dictionaries.

.. code-block:: pycon

    >>> for row in Tweet.select(Tweet.content, User.username).join(User).dicts():
    ...     print(row)
    ...
    {'content': 'meow', 'username': 'huey'}
    {'content': 'hiss', 'username': 'huey'}
    {'content': 'purr', 'username': 'huey'}
    {'content': 'woof', 'username': 'mickey'}
    {'content': 'whine', 'username': 'mickey'}

Now we'll leave off the call to ".dicts()" and return the rows as ``Tweet``
objects. Notice that Peewee assigns the ``username`` value to
``tweet.user.username`` -- NOT ``tweet.username``! Because there is a
foreign-key from tweet to user, and we have selected fields from both
models, Peewee will reconstruct the model-graph for us:

.. code-block:: pycon

    >>> for tweet in Tweet.select(Tweet.content, User.username).join(User):
    ...     print(tweet.user.username, '->', tweet.content)
    ...
    huey -> meow
    huey -> hiss
    huey -> purr
    mickey -> woof
    mickey -> whine

If we wish to, we can control where Peewee puts the joined ``User`` instance
in the above query, by specifying an ``attr`` in the ``join()`` method:

.. code-block:: pycon

    >>> query = Tweet.select(Tweet.content, User.username).join(User, attr='author')
    >>> for tweet in query:
    ...     print(tweet.author.username, '->', tweet.content)
    ...
    huey -> meow
    huey -> hiss
    huey -> purr
    mickey -> woof
    mickey -> whine

Conversely, if we simply wish *all* attributes we select to be attributes of
the ``Tweet`` instance, we can add a call to :py:meth:`~ModelSelect.objects`
at the end of our query (similar to how we called ``dicts()``):

.. code-block:: pycon

    >>> for tweet in query.objects():
    ...     print(tweet.username, '->', tweet.content)
    ...
    huey -> meow
    (etc)
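
Because ``objects()`` flattens everything onto a single instance, a column
from the joined model could shadow an attribute of the outer model if the
names collide. One way to control this is to alias the selected column, as
the alias determines the attribute name. A small sketch (the
``author_name`` alias is ours, not part of the schema):

.. code-block:: python

    query = (Tweet
             .select(Tweet.content, User.username.alias('author_name'))
             .join(User))

    for tweet in query.objects():
        # The aliased column is available as "author_name".
        print(tweet.author_name, '->', tweet.content)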

More complex example
^^^^^^^^^^^^^^^^^^^^

As a more complex example, we will write a single query that selects all the
favorites, along with the user who created each favorite, the tweet that was
favorited, and that tweet's author.

In SQL we would write:

.. code-block:: sql

    SELECT owner.username, tweet.content, author.username AS author
    FROM favorite
    INNER JOIN user AS owner ON (favorite.user_id = owner.id)
    INNER JOIN tweet ON (favorite.tweet_id = tweet.id)
    INNER JOIN user AS author ON (tweet.user_id = author.id);

Note that we are selecting from the user table twice - once in the context
of the user who created the favorite, and again as the author of the tweet.

With Peewee, we use :py:meth:`Model.alias` to alias a model class so it can
be referenced twice in a single query:

.. code-block:: python

    Owner = User.alias()
    query = (Favorite
             .select(Favorite, Tweet.content, User.username, Owner.username)
             .join(Owner)  # Join favorite -> user (owner of favorite).
             .switch(Favorite)
             .join(Tweet)  # Join favorite -> tweet.
             .join(User))  # Join tweet -> user.

We can iterate over the results and access the joined values in the
following way. Note how Peewee has resolved the fields from the various
models we selected and reconstructed the model graph:

.. code-block:: pycon

    >>> for fav in query:
    ...     print(fav.user.username, 'liked', fav.tweet.content, 'by', fav.tweet.user.username)
    ...
    huey liked whine by mickey
    mickey liked purr by huey
    zaizee liked meow by huey
    zaizee liked purr by huey

.. _join-subquery:

Subqueries
----------

Peewee allows you to join on any table-like object, including subqueries or
common table expressions (CTEs). To demonstrate joining on a subquery, let's
query for all users and their latest tweet.

Here is the SQL:

.. code-block:: sql

    SELECT tweet.*, user.*
    FROM tweet
    INNER JOIN (
        SELECT latest.user_id, MAX(latest.timestamp) AS max_ts
        FROM tweet AS latest
        GROUP BY latest.user_id) AS latest_query
    ON ((tweet.user_id = latest_query.user_id) AND
        (tweet.timestamp = latest_query.max_ts))
    INNER JOIN user ON (tweet.user_id = user.id)

We'll do this by creating a subquery which selects each user and the
timestamp of their latest tweet. Then we can query the tweets table in the
outer query and join on the user and timestamp combination from the
subquery.

.. code-block:: python

    # Define our subquery first. We'll use an alias of the Tweet model, since
    # we will be querying from the Tweet model directly in the outer query.
    Latest = Tweet.alias()
    latest_query = (Latest
                    .select(Latest.user, fn.MAX(Latest.timestamp).alias('max_ts'))
                    .group_by(Latest.user)
                    .alias('latest_query'))

    # Our join predicate will ensure that we match tweets based on their
    # timestamp *and* user_id.
    predicate = ((Tweet.user == latest_query.c.user_id) &
                 (Tweet.timestamp == latest_query.c.max_ts))

    # We put it all together, querying from tweet and joining on the subquery
    # using the above predicate.
    query = (Tweet
             .select(Tweet, User)  # Select all columns from tweet and user.
             .join(latest_query, on=predicate)  # Join tweet -> subquery.
             .join_from(Tweet, User))  # Join from tweet -> user.

Iterating over the query, we can see each user and their latest tweet.

.. code-block:: pycon

    >>> for tweet in query:
    ...     print(tweet.user.username, '->', tweet.content)
    ...
    huey -> purr
    mickey -> whine

There are a couple things you may not have seen before in the code we used
to create the query in this section:

* We used :py:meth:`~ModelSelect.join_from` to explicitly specify the join
  context. We wrote ``.join_from(Tweet, User)``, which is equivalent to
  ``.switch(Tweet).join(User)``.
* We referenced columns in the subquery using the magic ``.c`` attribute,
  for example ``latest_query.c.max_ts``. The ``.c`` attribute is used to
  dynamically create column references.
* Instead of passing individual fields to ``Tweet.select()``, we passed the
  ``Tweet`` and ``User`` models. This is shorthand for selecting all fields
  on the given model.

Common-table Expressions
^^^^^^^^^^^^^^^^^^^^^^^^

In the previous section we joined on a subquery, but we could just as easily
have used a :ref:`common-table expression (CTE) <cte>`. We will repeat the
same query as before, listing users and their latest tweets, but this time
we will do it using a CTE.

Here is the SQL:

.. code-block:: sql

    WITH latest AS (
        SELECT user_id, MAX(timestamp) AS max_ts
        FROM tweet
        GROUP BY user_id)
    SELECT tweet.*, user.*
    FROM tweet
    INNER JOIN latest ON ((latest.user_id = tweet.user_id) AND
                          (latest.max_ts = tweet.timestamp))
    INNER JOIN user ON (tweet.user_id = user.id)

This example looks very similar to the previous example with the subquery:

.. code-block:: python

    # Define our CTE first. We'll use an alias of the Tweet model, since
    # we will be querying from the Tweet model directly in the main query.
    Latest = Tweet.alias()
    cte = (Latest
           .select(Latest.user, fn.MAX(Latest.timestamp).alias('max_ts'))
           .group_by(Latest.user)
           .cte('latest'))

    # Our join predicate will ensure that we match tweets based on their
    # timestamp *and* user_id.
    predicate = ((Tweet.user == cte.c.user_id) &
                 (Tweet.timestamp == cte.c.max_ts))

    # We put it all together, querying from tweet and joining on the CTE
    # using the above predicate.
    query = (Tweet
             .select(Tweet, User)  # Select all columns from tweet and user.
             .join(cte, on=predicate)  # Join tweet -> CTE.
             .join_from(Tweet, User)  # Join from tweet -> user.
             .with_cte(cte))

We can iterate over the result-set, which consists of the latest tweets for
each user:

.. code-block:: pycon

    >>> for tweet in query:
    ...     print(tweet.user.username, '->', tweet.content)
    ...
    huey -> purr
    mickey -> whine

.. note::
    For more information about using CTEs, including information on writing
    recursive CTEs, see the :ref:`cte` section of the "Querying" document.

Multiple foreign-keys to the same Model
---------------------------------------

When there are multiple foreign keys to the same model, it is good practice
to explicitly specify which field you are joining on.

Referring back to the :ref:`example app's models `, consider the
*Relationship* model, which is used to denote when one user follows another.
Here is the model definition:

.. code-block:: python

    class Relationship(BaseModel):
        from_user = ForeignKeyField(User, backref='relationships')
        to_user = ForeignKeyField(User, backref='related_to')

        class Meta:
            indexes = (
                # Specify a unique multi-column index on from/to-user.
                (('from_user', 'to_user'), True),
            )

Since there are two foreign keys to *User*, we should always specify which
field we are using in a join.

For example, to determine which users I am following, I would write:

.. code-block:: python

    (User
     .select()
     .join(Relationship, on=Relationship.to_user)
     .where(Relationship.from_user == charlie))

On the other hand, if I wanted to determine which users are following me, I
would instead join on the *from_user* column and filter on the
relationship's *to_user*:

.. code-block:: python

    (User
     .select()
     .join(Relationship, on=Relationship.from_user)
     .where(Relationship.to_user == charlie))
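
Either query can also be written without a join, by filtering against a
subquery over *Relationship*. A minimal sketch of the "who am I following"
query from above, rewritten using an ``IN`` expression:

.. code-block:: python

    # Select the ids of the users that charlie follows...
    following_ids = (Relationship
                     .select(Relationship.to_user)
                     .where(Relationship.from_user == charlie))

    # ...and use the subquery in an IN clause.
    query = User.select().where(User.id.in_(following_ids))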

Joining on arbitrary fields
---------------------------

If a foreign key does not exist between two tables you can still perform a
join, but you must manually specify the join predicate.

In the following example, there is no explicit foreign-key between *User*
and *ActivityLog*, but there is an implied relationship between the
*ActivityLog.object_id* field and *User.id*. Rather than joining on a
specific :py:class:`Field`, we will join using an :py:class:`Expression`.

.. code-block:: python

    user_log = (User
                .select(User, ActivityLog)
                .join(ActivityLog, on=(User.id == ActivityLog.object_id), attr='log')
                .where(
                    (ActivityLog.activity_type == 'user_activity') &
                    (User.username == 'charlie')))

    for user in user_log:
        print(user.username, user.log.description)

    #### Print something like ####
    # charlie logged in
    # charlie posted a tweet
    # charlie retweeted
    # charlie posted a tweet
    # charlie logged out

.. note::
    Recall that we can control the attribute Peewee will assign the joined
    instance to by specifying the ``attr`` parameter in the ``join()``
    method. In the previous example, we used the following *join*:

    .. code-block:: python

        join(ActivityLog, on=(User.id == ActivityLog.object_id), attr='log')

    Then when iterating over the query, we were able to directly access the
    joined *ActivityLog* without incurring an additional query:

    .. code-block:: python

        for user in user_log:
            print(user.username, user.log.description)

Self-joins
----------

Peewee supports constructing queries containing a self-join.

Using model aliases
^^^^^^^^^^^^^^^^^^^

To join on the same model (table) twice, it is necessary to create a model
alias to represent the second instance of the table in a query. Consider the
following model:

.. code-block:: python

    class Category(Model):
        name = CharField()
        parent = ForeignKeyField('self', backref='children')

What if we wanted to query all categories whose parent category is
*Electronics*? One way would be to perform a self-join:

.. code-block:: python

    Parent = Category.alias()
    query = (Category
             .select()
             .join(Parent, on=(Category.parent == Parent.id))
             .where(Parent.name == 'Electronics'))

When performing a join that uses a :py:class:`ModelAlias`, it is necessary
to specify the join condition using the ``on`` keyword argument. In this
case we are joining the category with its parent category.

Using subqueries
^^^^^^^^^^^^^^^^

Another less common approach involves the use of subqueries. Here is another
way we might construct a query to get all the categories whose parent
category is *Electronics* using a subquery:

.. code-block:: python

    Parent = Category.alias()
    join_query = Parent.select().where(Parent.name == 'Electronics')

    # Subqueries used as JOINs need to have an alias.
    join_query = join_query.alias('jq')

    query = (Category
             .select()
             .join(join_query, on=(Category.parent == join_query.c.id)))

This will generate the following SQL query:

.. code-block:: sql

    SELECT t1."id", t1."name", t1."parent_id"
    FROM "category" AS t1
    INNER JOIN (
        SELECT t2."id"
        FROM "category" AS t2
        WHERE (t2."name" = ?)) AS jq
    ON (t1."parent_id" = "jq"."id")

To access the ``id`` value from the subquery, we use the ``.c`` magic lookup
which will generate the appropriate SQL expression:

.. code-block:: python

    Category.parent == join_query.c.id
    # Becomes: (t1."parent_id" = "jq"."id")

.. _manytomany:

Implementing Many to Many
-------------------------

Peewee provides a field for representing many-to-many relationships, much
like Django does. This feature was added due to many requests from users,
but I strongly advocate against using it, since it conflates the idea of a
field with a junction table and hidden joins. It's just a nasty hack to
provide convenient accessors.

To implement many-to-many **correctly** with peewee, you will therefore
create the intermediary table yourself and query through it:

.. code-block:: python

    class Student(Model):
        name = CharField()

    class Course(Model):
        name = CharField()

    class StudentCourse(Model):
        student = ForeignKeyField(Student)
        course = ForeignKeyField(Course)

To query, let's say we want to find students who are enrolled in math class:

.. code-block:: python

    query = (Student
             .select()
             .join(StudentCourse)
             .join(Course)
             .where(Course.name == 'math'))
    for student in query:
        print(student.name)

To query what classes a given student is enrolled in:

.. code-block:: python

    courses = (Course
               .select()
               .join(StudentCourse)
               .join(Student)
               .where(Student.name == 'da vinci'))

    for course in courses:
        print(course.name)

To efficiently iterate over a many-to-many relation, i.e., list all students
and their respective courses, we will query the *through* model
``StudentCourse`` and *precompute* the Student and Course:

.. code-block:: python

    query = (StudentCourse
             .select(StudentCourse, Student, Course)
             .join(Course)
             .switch(StudentCourse)
             .join(Student)
             .order_by(Student.name))

To print a list of students and their courses you might do the following:

.. code-block:: python

    for student_course in query:
        print(student_course.student.name, '->', student_course.course.name)

Since we selected all fields from ``Student`` and ``Course`` in the *select*
clause of the query, these foreign key traversals are "free" and we've done
the whole iteration with just 1 query.
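
Creating the enrollments in the first place is just an ordinary insert on
the through model. A minimal sketch, assuming ``da_vinci`` and ``newton``
are previously-saved ``Student`` rows and ``math`` and ``art`` are
previously-saved ``Course`` rows:

.. code-block:: python

    # Enroll a single student in a single course:
    StudentCourse.create(student=newton, course=math)

    # Or create several enrollments in one bulk insert:
    StudentCourse.insert_many([
        {'student': da_vinci, 'course': math},
        {'student': da_vinci, 'course': art}]).execute()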

ManyToManyField
^^^^^^^^^^^^^^^

The :py:class:`ManyToManyField` provides a *field-like* API over
many-to-many fields. For all but the simplest many-to-many situations,
you're better off using the standard peewee APIs. But, if your models are
very simple and your querying needs are not very complex,
:py:class:`ManyToManyField` may work.

Modeling students and courses using :py:class:`ManyToManyField`:

.. code-block:: python

    from peewee import *

    db = SqliteDatabase('school.db')

    class BaseModel(Model):
        class Meta:
            database = db

    class Student(BaseModel):
        name = CharField()

    class Course(BaseModel):
        name = CharField()
        students = ManyToManyField(Student, backref='courses')

    StudentCourse = Course.students.get_through_model()

    db.create_tables([
        Student,
        Course,
        StudentCourse])

    # Get all classes that "huey" is enrolled in:
    huey = Student.get(Student.name == 'Huey')
    for course in huey.courses.order_by(Course.name):
        print(course.name)

    # Get all students in "English 101":
    engl_101 = Course.get(Course.name == 'English 101')
    for student in engl_101.students:
        print(student.name)

    # When adding objects to a many-to-many relationship, we can pass
    # in either a single model instance, a list of models, or even a
    # query of models:
    huey.courses.add(Course.select().where(Course.name.contains('English')))

    engl_101.students.add(Student.get(Student.name == 'Mickey'))
    engl_101.students.add([
        Student.get(Student.name == 'Charlie'),
        Student.get(Student.name == 'Zaizee')])

    # The same rules apply for removing items from a many-to-many:
    huey.courses.remove(Course.select().where(Course.name.startswith('CS')))

    engl_101.students.remove(huey)

    # Calling .clear() will remove all associated objects:
    cs_150.students.clear()

.. attention::
    Before many-to-many relationships can be added, the objects being
    referenced will need to be saved first. In order to create relationships
    in the many-to-many through table, Peewee needs to know the primary keys
    of the models being referenced.

.. warning::
    It is **strongly recommended** that you do not attempt to subclass
    models containing :py:class:`ManyToManyField` instances.

    A :py:class:`ManyToManyField`, despite its name, is not a field in the
    usual sense. Instead of being a column on a table, the many-to-many
    field covers the fact that behind-the-scenes there's actually a separate
    table with two foreign-key pointers (the *through table*).

    Therefore, when a subclass is created that inherits a many-to-many
    field, what actually needs to be inherited is the *through table*.
    Because of the potential for subtle bugs, Peewee does not attempt to
    automatically subclass the through model and modify its foreign-key
    pointers. As a result, many-to-many fields typically will not work with
    inheritance.

For more examples, see:

* :py:meth:`ManyToManyField.add`
* :py:meth:`ManyToManyField.remove`
* :py:meth:`ManyToManyField.clear`
* :py:meth:`ManyToManyField.get_through_model`

.. _nplusone:

Avoiding the N+1 problem
------------------------

The *N+1 problem* refers to a situation where an application performs a
query, then for each row of the result set, the application performs at
least one other query (another way to conceptualize this is as a nested
loop). In many cases, these *n* queries can be avoided through the use of a
SQL join or subquery. The database itself may do a nested loop, but it will
usually be more performant than doing *n* queries in your application code,
which involves latency communicating with the database and may not take
advantage of indices or other optimizations employed by the database when
joining or executing a subquery.

Peewee provides several APIs for mitigating *N+1* query behavior.
Recollecting the models used throughout this document, *User* and *Tweet*,
this section will try to outline some common *N+1* scenarios, and how peewee
can help you avoid them.

.. attention::
    In some cases, N+1 queries will not result in a significant or
    measurable performance hit. It all depends on the data you are querying,
    the database you are using, and the latency involved in executing
    queries and retrieving results. As always when making optimizations,
    profile before and after to ensure the changes do what you expect them
    to.

List recent tweets
^^^^^^^^^^^^^^^^^^

The Twitter timeline displays a list of tweets from multiple users. In
addition to the tweet's content, the username of the tweet's author is also
displayed. The N+1 scenario here would be:

1. Fetch the 10 most recent tweets.
2. For each tweet, select the author (10 queries).

By selecting both tables and using a *join*, peewee makes it possible to
accomplish this in a single query:

.. code-block:: python

    query = (Tweet
             .select(Tweet, User)  # Note that we are selecting both models.
             .join(User)  # Use an INNER join because every tweet has an author.
             .order_by(Tweet.id.desc())  # Get the most recent tweets.
             .limit(10))

    for tweet in query:
        print(tweet.user.username, '-', tweet.content)

Without the join, accessing ``tweet.user.username`` would trigger a query to
resolve the foreign key ``tweet.user`` and retrieve the associated user. But
since we have selected and joined on ``User``, peewee will automatically
resolve the foreign-key for us.

.. note::
    This technique is discussed in more detail in :ref:`multiple-sources`.

List users and all their tweets
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Let's say you want to build a page that shows several users and all of their
tweets. The N+1 scenario would be:

1. Fetch some users.
2. For each user, fetch their tweets.

This situation is similar to the previous example, but there is one
important difference: when we selected tweets, they only have a single
associated user, so we could directly assign the foreign key. The reverse is
not true, however, as one user may have any number of tweets (or none at
all).

Peewee provides an approach to avoiding *O(n)* queries in this situation.
Fetch users first, then fetch all the tweets associated with those users.
Once peewee has the big list of tweets, it will assign them out, matching
them with the appropriate user. This method is usually faster but will
involve a query for each table being selected.

.. _prefetch:

Using prefetch
^^^^^^^^^^^^^^

peewee supports pre-fetching related data using sub-queries. This method
requires the use of a special API, :py:func:`prefetch`. Prefetch, as its
name implies, will eagerly load the appropriate tweets for the given users
using subqueries. This means instead of *O(n)* queries for *n* rows, we will
do *O(k)* queries for *k* tables.

Here is an example of how we might fetch several users and any tweets they
created within the past week.

.. code-block:: python

    week_ago = datetime.date.today() - datetime.timedelta(days=7)
    users = User.select()
    tweets = (Tweet
              .select()
              .where(Tweet.timestamp >= week_ago))

    # This will perform two queries.
    users_with_tweets = prefetch(users, tweets)

    for user in users_with_tweets:
        print(user.username)
        for tweet in user.tweets:
            print('  ', tweet.content)

.. note::
    Note that neither the ``User`` query, nor the ``Tweet`` query contained
    a JOIN clause. When using :py:func:`prefetch` you do not need to specify
    the join.

:py:func:`prefetch` can be used to query an arbitrary number of tables.
Check the API documentation for more examples.
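
For instance, the favorites could be pulled in as well by passing a third
query; :py:func:`prefetch` follows the foreign-keys between the queries it
is given. A minimal sketch using the models from this document:

.. code-block:: python

    # Three queries are issued in total, one per table.
    users_with_everything = prefetch(
        User.select(), Tweet.select(), Favorite.select())

    for user in users_with_everything:
        print(user.username)
        for tweet in user.tweets:
            print('  tweet:', tweet.content)
            for favorite in tweet.favorites:
                print('    favorite id:', favorite.id)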

Some things to consider when using :py:func:`prefetch`:

* Foreign keys must exist between the models being prefetched.
* ``LIMIT`` works as you'd expect on the outer-most query, but may be
  difficult to implement correctly if trying to limit the size of the
  sub-selects.
* The parameter ``prefetch_type`` may be used when ``LIMIT`` is not
  supported with the default query construction (e.g. with MySQL); see the
  sketch following this list.
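
A short sketch of that last point, re-using the ``users`` and ``tweets``
queries from the example above (``PREFETCH_TYPE`` is available in recent
peewee 3.x releases):

.. code-block:: python

    from peewee import PREFETCH_TYPE, prefetch

    # Resolve the sub-select with a JOIN rather than a WHERE ... IN list,
    # which is friendlier to databases such as MySQL when LIMIT is involved.
    users_with_tweets = prefetch(users, tweets,
                                 prefetch_type=PREFETCH_TYPE.JOIN)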

peewee-3.17.7/docs/peewee/schema-horizontal.png000066400000000000000000001632101470346076600214420ustar00rootroot00000000000000
à‘70žÅܹsooo¸»»#&&³fÍÒJrþ[&Nœˆ˜˜xyyÁÝݱ±±ÊLbDLîGu2¹ѳ$÷sqqù‘ŸâDD/®'%÷»ÿ¾$''‹F£ùÍree¥¤¤¤(õ«ªªäøñã¥$ùyó¦üòË/J¹¢¢Bk½Û·oKBB‚”••IZZšÜ»wOYvíÚ5%‰Ûùó祤¤D+Ñ›››r<µ Ûnß¾ýØrff¦Ü¹sGDD $**JNž<)jµZnܸ!W¯^µZ-ÉÉÉR]]-%%%’––¦Õ.©©©RZZ*""¥¥¥’˜˜(·nÝRþ¶ÄÄD‰ŽŽVöS›ì077Wëº.##C)Ÿ9sF¢¢¢”¿5%%EJKK%77W²³³•zeeeræÌ¥\SS#§N’={öhíODä×_•èèh9v옒ˆîeHîWVV¦$W¬uîÜ9弩©©‘¤¤$Ù»w¯”••Iff¦+çom[ük¹ªªJÞ§Z­–Ó§OËîÝ»åÚµkÊïóòò´ÎçÒÒR­sD£ÑHrr²DGGK~~>/ÿbÉýtžøGGG¿[;%Ño)**˜1cT?ýô“5[ƒê‚œœtïÞýŸW®\ÌÖ "zqÿ 6|Ñ«W¯æ˜ËÊÊpòäIÌ™3ï¼óÞ{ï=väKfúôéEC† ±ôôôdcP1|øð›?üðƒ€«ÃADDDDDÏAyy9&L˜'''±Aˆ¨Îà""""¢ç aÆʴjDD/Là÷î]§ààà"33³6=ŽZ­ÖIOO7yë­·Tl ª ***tïÞ½Û„-ADDDDô”ÀßÊÊ*{Íš5þãOOÂ1þT×üßÿë*ïEqŒ?"""""""bàODDDDDDDu ³úQ#"F·nÝBvv6ƒêŒ¢¢"ÝÜÜ\4hЀAuFIII½§Åö ü‰ˆˆˆ¨.^ÈÍž=;ÖÀÀ ”­AuEYYYؘ˜R==½ ¶ÕEEE""""z¡TWW¯ÈËË[Á– "ú÷qŒ?ÿG%%%¡U«VO­…7ÞxãwoÿôéÓpuu}j=µZeË–±'ë°7n`ûöíJ¿¶lÙò¿²ßªª*DFF²ˆˆˆˆˆˆÿñꫯbæÌ™O­×¾}{|òÉ'¿{ûjµÅÅÅO­wåÊ̘1ƒ=Y‡}ûí·ˆ‹‹899aÉ’%ÿ•ý¦¦¦bÁ‚ì"""""úK{ncü‹‹‹±uëVܹs^^^èÙ³' ºº%%%€S§NA__wïÞERRš6mŠ#F ^½z€¬¬,lÛ¶ 4€£££Öö¯^½ŠØØXäääÀÞÞcÇŽ…¡¡!¶oߎšš¬^½Æ ƒ¹¹9Ξ=‹ƒÂÈÈýû÷GÓ¦MÙÓ’Ë—/#55¥¥¥Ø»w/¼¼¼”:¸sç ..ÎÎÎ=z4Ž;†ØØX¸¹¹aÈ!ÐÑÑ€Çökyy9¶lÙ‚ÜÜ\´lÙ}ûö…žžvïÞŠŠ ¬^½ãÆƒŽŽbbb‘‘òòrôéÓ>>>€ï¿ÿ;wÆîÝ»¡R©0xð`¼úê«Ø°a 1lØ0¼öÚkÈÉÉAFF5j„ÀÖÖ#FŒ€‰‰ ;›ˆˆˆˆˆê¤?üÄ?++KyÒž““ƒV­ZáäÉ“011ÁôéÓHIIÁÂ… »ví Aƒ‰ššÌž=S§NUêyyy¡°°7oÞDPP²¯ŒŒ xzzâ×_…­­-Ö¯_JJJ "(,,„F£Á¦M›Ð¯_?h4äææÂËË )))ìé?Éýû÷QYY‰ªª*”––";;ááá€Ã‡côèÑX´hŒŒŒ0kÖ,¼õÖ[ˆˆˆLœ8kÖ¬lÚ´ ½{÷FMM òòòàíí­ôkŸ>}¬_¿”””@£Ñ °°„ˆˆÔ¯_•••èÒ¥ 3gÎDÏž=‘——‡«W¯Âßß @nn.RSSáï œ={AAA˜2e ìíí±sçNôèцMDDDDD/ggçõ.\ßrôèQqpp‘÷Þ{OFŒ¡,ËÏÏSSSIMM•;vˆ§§§ˆˆÌ˜1CÚµk§ÔÛ¾}»8;;‹ˆÈàÁƒeêԩʲ¯¿þZìììDDdÏž=²råJeYrr²ˆˆÈ… ÄÈÈHDDª««ÅÚÚZ•ºŸþ¹ôêÕKè?§°°P ·|Ö¬Y"""Ç[[[‰ŒŒ©®®‘Ï>ûLš5k&jµZ9_F¥ôk||ü#ýªV«ÅÀÀ@éóÂÂBYµj•ÔÔÔȉ'ÄÆÆF97&L˜ yyyÊ6eÞ¼y""Ò´iSY¼x±ˆˆh4iРDFFŠˆ(ûHKK“]»v‰±±±]¡ìTÁIDATܾ}[DD***ÄÎÎN8À¡¹zõª¸¸¸üÈOq""""¢çôªJJŠò„6lwwwœ={fffZuÛ¶m«üìàà€ÒÒS³ž?C‡U–(O~{÷îcÇŽaÞ¼y8þ<Μ9ƒû÷ïC­VkmûÚµkP©Tøê«¯”„ùùù¸|ù2{ºŽrrr‚žÞƒÓÐÂÂ-Z´P†~XYYá—_~Qúõ›o¾ÁŠ+´úµ^½z Åë¯¿Ž¶mÛ¢ÿþ5jtuµ_fÑÓÓCDD¶mÛ†sçÎ!55ç΃»»»RÇÅÅ ££sss¸¹¹êÕ« TVVÜÝÝѨQ#€‘‘¼¼¼œœŒîÝ»³C‰ˆˆˆˆèå üMMMQSS£õ»êêê߬khh¨ü\;v»6¸zxn+V¬À‚ 0yòdL˜0xýõ×!"l_GGGÿÿpÐGu“±±±V¹6èÿ×>ÕÑÑÁ¸qã~³_—,Y‚É“'c÷îÝØ¶m>ÿüs\¼xQk¥¥¥ðôô„‡‡ˆððpLŸ>]ëz–cù×sx0œA__ŸIDDDDDu’îóØˆ‡‡öîÝ«”¯^½ŠŒŒ xyy=ó6|}}±gÏ¥¼ÿ~åçíÛ·cêÔ©˜6m”'øjµ†††Ê GGGXZZ¢ªª ]»vE×®]qñâEe*9úsèëë?rcè÷hÚ´),--QYY©ôë¥K—°eËܼyÞÞÞ¨_¿>&NœˆøøxãòåËZûMIIA^^¶mÛ†áÇÃÖÖ§OŸ~ä­‘g‘žžŽÛ·oT*’’’àççÇŽ&""""¢:é¹< Ÿ1cºwïŽ=z ]»vزe &OžŒV­ZáÒ¥KÏ´¹sç¢S§Nxûí·Ñ¨Q#$&&*ËÞxã ,[¶ P©TÈÈÈ€žž aooCCCtïÞ«V­ÂªU«‚ÇC­VcëÖ­øé§ŸØÓ"GGGDFFÂÚÚýû÷ÿý'©žV­Z… & >>555øþûï±k×.ØÙÙÁÞÞèÕ«RSSagg???£¤¤½{÷Æ7ß|333Œ9mÚ´Á¡C‡`ee…»wïþîã©W¯zõê…nݺ!** ƒF§NØÑDDDDDôâyRr¿#GŽH“&M”ree¥ìÞ½[–/_.)))Êï¯_¿.‡‘sçÎIrr²²¬  @¢¢¢”²J¥’uëÖÉöíÛ%??_kYll¬¬X±B>,FöîÝ+¹¹¹""’™™)[·n•[·n‰ˆHNNެ_¿^Ö®]«Ô¡?/¹_MM‡¯~¾%øŸYñÂè^Ót¹ ¹Ó"¾‚KÒèbˆù©960rG-BçU±†Ê`+™.cpFA NG¥|¸£D²ømâ=+ZÒg>4ŽìÈÓIlÍ".äËq…_¼O%»æ¶5ë+¨çð¥ª\}“A]Þ4»›OûlK°ç|GŒœ¯?ON>“´½µÔ-’æÊæ›wû²Ã uo¡á¾Ìëÿc]÷þÉYŸ4û $×äÒõ¹uK+‰ã¦µœ(ä6ï-Kƒòƒ€Ú:öÓøYÿ3¯ýwßû%zQ@Q@Q@ðKþI…ÿoúQ%zxÂßxÊÇáÆ“m¤øûRÅ<ï.óû^<ÌÌäüŒ20IõÆ{×aÿ oÄ?ú&ù_·ÿ ô óÍsáÅ׋.¼Iáß]h7בˆî¼¸d@$`ð=yæŸÿ oÄ?ú&ù_·ÿ ?á-ø‡ÿDÃÿ+öÿá@t‡káÜèº.³sms(žMLÆ®æL©'iã.0}MUðÿÃ9´ïk:èÕ/’‚1„v¨ç%‚¸àŸÏØRÿÂ[ñþ‰‡þWíÿÂøK~!ÿÑ0ÿÊý¿øP[…ú¿‡ÅÍŸ†|mw¥éÊeû²ŽfŒž¡$c‘ù~¼ÖÆ™à&Ó¾ Ëâ¶Õ¤¸i4ä±0ÉÌJ„Ëî䙯;ÕøK~!ÿÑ0ÿÊý¿øQÿ oÄ?ú&ù_·ÿ ô +Ïÿá-ø‡ÿDÃÿ+öÿáGü%¿ÿè˜å~ßü( ñßü“ÏÿØ*ëÿE5ÿ’yá¯ûZÿ襮Åž'ñÝǃuÈo>}’ÖM>á&¸þÛ‚O) lö–ÀÉÀëŠî< ÿ$óÃ_ö µÿÑK@Q@Q@p~<Õ5ñâ øsAÕ£Ò_W{¦–ðÚ­Ã(†0ÁB±ÆN{ð9êy^âßù+ß¿î'ÿ¤ë@ü"_ÿè§ÿåßühÿ„Kâýÿü [ÿdüHñ‰í> xoÃÚ¶º\z”l$‘­£˜ÜyÂ{v"¤øwã=nãZñN‘âmBÖö î:œq¬k´nÝ»o>Ø=hKþ/ˆôSÿòoþ4Â%ñþŠþP-ÿÆ’Ûã‡.'¶/i«Ûé÷Syú¥Å™KY8À|ç±ê;ã›eâmboŽº†Þï:L:`¸ŽßËN$ýß;±»øÇ4ÿøD¾!ÿÑOÿÊ¿øÑÿ—Ä?ú)ÿù@·ÿ[ß‹ZN™.íGCñ›åhO§•·Îq۳¹ûÿ‹•—Å9ô„ÑukÍ&+s‹{K$ò°?ëSæù¢=oÿÂ%ñþŠþP-ÿÆøD¾!ÿÑOÿÊ¿øÓìÕ^ããW…àžr–úµÆŸo7“6§™{XÛ¦ ç?ç¶h_øD¾!ÿÑOÿÊ¿øÑÿ—Ä?ú)ÿù@·ÿ»¯|Nд WKÓåŠþíõ;´ZÉe•dS wØÀÀ=GJ†Ë⿆îô _W•o¬×HuŽòÚê ³ÆÌv¨Ú 
äG^0sŠƒþ/ˆôSÿòoþ4Â%ñþŠþP-ÿƧ‡âÕôÝHZèšö—qŸ5Ôê6>\mµr œO à×%àïvpøSK“ŨÜ]Ï3Åq¨GfÞ&ÞÛU˜mÙƒ…Šé¿áø‡ÿE?ÿ(ÿãGü"_ÿè§ÿåßüj]wÄ—–_|7¡Å©Ï­ì<–‹i¤¤ ™ Ü¿tpéïTão‡¤³»»Jפ‚Îc̑نX{f €¤ççƒÅC®è¿ôOjz·ü,Ÿ;ì6’Üù_ØVë¿bÛœœgÎ w¾¸Ôü¡ßÞIæ]]iöóLû@Üí–8 ’zVwŠõ ]Wá^»¨YL³ZÜè×2Å"ôe0±gÀŸòO<5ÿ`«_ý´Ïüÿ’C¡ÛÇþ”I^^ð·áoƒ|GðãIÕµmíÓùÞd¿j™7m™Ôp®àÀ®Ãþ—ÃÏú¿òvãÿŽP Q^ÿ Káçý ßù;qÿÇ(ÿ…%ðóþ…ïü¸ÿã”èWŸÿÂ’øyÿB÷þNÜñÊ?áI|<ÿ¡{ÿ'n?øåzçÿð¤¾нÿ“·ür©jŸ ~èÐG5î…"¬²£X§»•ÝÈ$*¢1bp àt€4µ/…z^±s)ÔuÏ]YK1™´é5m²[vã ÐgŠíà‚+[x­àc†$ˆ£T =±^oÂo†ßÇbº §·ûH‰®n”ùy'/ÁÉÆ>Õsþ—ÃÏú¿òvãÿŽP WŸü,ÿ™×þÆ»ïý’øR_?è^ÿÉÛþ9UþØÛéšw‹,,ãòím|Ky )¸¨¢0£'“€ZôŠ*½ýž§gå…Üv²gdÐH$FÁ á‡øUŠ(¢Š(¢Šò¯†^ °ðçÀ½&úúêڱݴQÏ:Åç2Í)ؤõ'³]’xÆÂæÓI¸Óå¶½Kû´´sor® fœäŒä¸ÇkšøAe£ðOI³¸]ÐÏÔn;ᦔzpk²Ô ³Ÿ>Uýý·™`štÞKG™aMÛA,„©ùÛ•ÛרbX¼§Ãqm:ÍtZßìÛeÁòÕ3ò÷søc…sñ:Ñ!±h!²I.4ص'KýEm¶¤€•D%N÷ù[ŽL‘‘]¦}©¦Zjû¼‹¨Rx÷ ¬¡†Xpx2ÞÆ $ÓuMFÂ[[8ìLÐ4E¦Š<ìIn@æ5ÑDžT)ö}ªsœ–Çsï@^;ÿ’yâ_û]覣ÀŸòO<5ÿ`«_ý´xïþIç‰ìuÿ¢š²ô}r |Ñu›‹[»˜-4{G’;HÃÉ·Ë@H€âsÀö ÒЧ¥j¶:æ—o©é—1ÜÙÜ&ø¥NŒ?˜ äypE\ Š( ¼ÿÅ¿òW¾ÜOÿIÖºOx·HðªYNi<ÛÛ„·¶·‚3,²³0ª/$.A8ö$¨<_ÄoNðçÄêÚµÇÙì`þÑó%ØÏ·t(£…žH â/€o|_ñÃWi¿kÐàFKæóÖ= ’q÷ƒßv©hß5Í÷Å>¶…„µ{i>ËxeBÖÒ²à\ï#·CœS]ü.߇Ÿô0ÿä•Çÿ£þoÃÏúòJãÿОi õK}> 'Rø{k}Ò¼O¤è «¢hñÙOoöØàhÜ.ÌÜÝ3Þºø]¿?èaÿÉ+þ7Gü.߇Ÿô0ÿä•Çÿ mü+«ŸÚ¦¿5¡]çJËp%L—ýÞFÜîtóŒq\\>ñö‘à­SÀ6š5Ý…åÉtÕâ D%OÍù³òŽ=ëºÿ…Ûðóþ†ü’¸ÿãtÂíøyÿCþI\ñºÈÕì<à9íàûN™¢éÆÚæèȃÀ;IÜrHè3Umü­Åª|DšïÃj6š½Ä/im5ÚÆ·*$bß2’P€r Ç8®‡þoÃÏúòJãÿÑÿ ·áçý ?ù%qÿÆè’ð_k©êM™u¢è²is[2ãW[Á4¬Œoe䎸Æ<Ô¿éQÉ©ZÙÇ#™môɣ竱Œž4zãA | ÔtYeËk¡Ü¬Ž½ œ¶=²N=«wÀŸòO<5ÿ`«_ýµÃø³âÿ5?ë–zï™uu§ÜC }’q¹Ú6 2S$޵ÜxþI熿ìkÿ¢–€8/Zj׿³õ…¾“-²Ë%½ê2MHdYFÕîÒ}N~•ÒÞÙë6Ú'‡¦Ô‚]µŽ¡Ò-…œ£‹Êdå7;1¹ÛÛ·_à—ü’ þÞ?ô¢Jô ò»8^ØOs{eyfêâ{­kȦV)’°z©8ÁÏz±­XêW¶>š÷JŠ :+7Z{i²_Å Ä'—˜#`Äóm'‘ÎG¦Q@>…-Ç…¼-§Ì.¯¢TW”½³ÄD?½ÀdbYP«†=1ž¸®‡PÓ4{t‹dÑeh#Ôíã°ˆª[ÊCþñ ¶s‘–Vµî¡k§GÝK嬳$v“—v £Ü3Ò¬Ð\—ŒQïŠmÇcÑõk{X=H‘æ^Ýùð;å Ã’`ûU¨¼io<7pj:„z„m3½à14“voÜ õhÕ â¬Aáe²Ñ-4­?XԬⶑ˜ä I!·ÆÀõë€}è oÅo£éú]Ã[ÙfûïM=ï—kÉ»æ˜#pz)Ûϵ:ÛÆ6‚õ­5/&ÎBÖÉ $ÆT˜Ì…— q”‘AèvçŒâ¬¯†’ÛF±ÒôÝNûN‚Î?)L6.¸ÆÌFò—?Ã} {xá wŧ >3£*¡·,€x¤œƒqâ€"‹Ç7·¶qM§èk355;ˆä»òöBå¼°§aÜìŽÐ=kBïÅ2™´(´­=o±Ï K?’±ªª°-…cÈnÀó^àë+ÆSíõ‡úØËöGAç@¹Ú¹[ÜØ+†5û Íot»˜ÃÅý› Ão°#êZ…½ö­{¨ë>ƒQ¸76Ú&‹zÐ-²‘Ö@AÈåvãq:OøUŸõ>øçÿý…zçÿð«?ê}ñÏþ?û ?áVÔûãŸüöèP¾sðãâ §ˆo×Rñ6“v‰c¡tÏu{¦;ûÈìÄeW?1QÉÄ'‹ä¯|:ÿ¸Ÿþ“­zyÿ‹ä¯|:ÿ¸Ÿþ“­wúØêM§‰?Ò–9iû„•=:ƒ\üÞ0'Æ)áû]6y6¢½ÅİÎ3(Ûˆ˜0ùOÌYTöcÎ?ÛÇâ¦Ö>ß©•òUDgR¸?8rÜüø)Èù:uãšÒ‹K¯éwÚM¾§jÒ]ÛIòåW#r‘œÎ3@Fµ§#í*ì¿lù”€!Î7Fó¨tÿiš´×6òÌ–ðÆ%yn-¥6NàdU ¸ädV.™ ëk©›ÍJ=3jéOXc•äWpÙÜÙEùO§8÷ª¶ž ÔfÒu½.îd±Óïì~É ¤²Ý¬,C"™UJŒ-oZø·E¼·ºž+©[@ndóm匘pO˜¡”^+‘V´½rÃZRö<‘…WYÚHÑÔô(Ì 8÷Rk—¶ðûYêKsmoäÚlÖ0Ü^êï;ÀÏË(Äk•S¸ñ××°Ó­šÏL´µm»¡…#;zd8ö‥ê–ZÖ›£§Î'´wG R7 ã¡õˆ5¸<;¢]j—O,vñ³ì‚œ)nvƒ´q÷Žîj}/ûGû2íqj/öþøZ1g?ûœcjiòêÞÕtÛvEšîÎh#i Ш'œdúPM¿‹nGÓ¯®´Ï%ïo£´ò·Ê»ÿï"Bqé·±­Ý;R´Õ­~Óc/öA RÈ8%I0ÿhdƱ¼Ká“âNÓ&òšn ’áYÙCÆŸyAäþ{ú}î“¥:ît¸ŽÙÌv²‚w´ ‚N>øåIÎÐz’_ÿÉ<ñ/ý‚®¿ôSQàOù'žÿ°U¯þŠZ£Q\ÜÅgi5ÕÃì†i$l…$àsÐPžj>{û«“¨Yͨ¼^Ž(î'·9{€däq.px;†zóSëz5­Ïü":¶¯¡ý¸Â_±°7€mÜè±B8ÁÁ9®öÞxî­¢¸…·E*FÁR25Æ¡kkwik4»f¼vHi;ÙT¹åRyô  ¡Ø ÊM‘íWnÜ p1ÛéO¢Š+ÏþÌëÿc]÷þÉ^^ð³þg_ûï¿öJô (¢€ (¢€ (¢€<«á—ˆ,<9ð/I¾¾º¶„¬wmsαy̳Jv)=IÇl×QñH¶Ñ,µ K½>é¯'[dÿNE†) "IyƒØžƒÖ' (5‚zMÂî†xî£qß 4 ãÓƒ]÷‡í/lm-ÖI­^ÍÖKk‹r‘8R¹«0 ‚'Šäág¼¨ßbÒ­®ÚËrÐê£I~í >rvô ò1ZÒxÆdÑ ¿{}.ÖSs=´Ëª"GŠFŒ…ÆKä©#åuÅZŸÁÖ÷Ÿ5ö§¨ÝÌbòŒ²´aŠù‰'E@1¯AОüÒÿÂiÄ6·×¶×0Ëu"ÍŽ.%ód\2ØÁÆ@z䆣âË«Ÿ[ø‡JQ&ØdÝ8.íù_œŽÃæ¯ëW7Öž*ðØ‚þeµ½¹’Þ{M‘˜Ü y¤ ’»ÃnEèØÀéÖ¢›Á6ÒxbßÃð꺕½Œ9Üc13Ëóï™ãn‡Óù«×Þþг±ŽmRø]ØÌg‚ùB`åYNFÍ„v\léïÍlÑME)©vr›>çS¨¢Š(Ÿñßü“ÏÿØ*ëÿE5ÿ’yá¯ûZÿ襣ÇòOß©•òUDgR¸?8rÜüø)Èù:uãš´º^£mâûÍFµ–ÆþÞ¦ß3$˜Ìœ¨Cä8êWï@éþ)ѵ[ággvÏ+hËBè“8c²…yÚMlW¢øgSÓBRšÁ4ïA"C&ÒµÆ_ìÙ§K,¿e•b88 HT.AíœõãŠÌð߇®´ýOí·ö¬énmÒeÕ®®ËÊH 0ÂTOkKÂÚLÚ†l4ˆ‰¦·k˜‰*NIã zúPÍ?T²Õᬧ‹k‡¶— FÙá—‘Øþ5ÝÔvV¯q*Ìȃ$C Êýq ,PéÿÚ;n?´…¨o´?‘öbØòsònÝüxëŽ=*åqV=–÷Á÷~#m!¢†%G†&yTȬ@—‰Gà.=몳Ôìõ ®c´›Í6ÒyR²©Úº†Æ ðN¹§ðìŸ bð³KÚÖÎ;v;ò¤g ãƒÎ+SÚ$Þ[­>9#m!_}Šn&HCd¼g=T7*rNƒ÷A ”QEsþ;ÿ’yâ_û]覣ÀŸòO<5ÿ`«_ý´xïþIç‰ìuÿ¢šÉ<ð×ý‚­ôRÐà 
MZ÷ö~°·Òe¶Yd·½FI i €Ë(Ú¸uÚO©ÏÒµ|Emy‡ü/ˆ#†ýWU€4–Ž?v!“*P»—# uc×Á3üCðw„,tøWlû/™ûÿí«x÷n‘ŸîóŒnÇ^Õ¯w®øêùí^çá^öµ˜O ÿ„†µÂ•Ï xŠÒÍoµMSNŒ..(¨©vfT<î‘@0sØ n©â«¹¼ »¦Û·2¬Ë$Ád„‰0_‘ÕÈ`G8ãœÕ‹O cBðü2_\ÙjZU’[-Õ™Lãb«®YJ’Šy©/<ksáË} BþÊÆ!†Ù¥;ƒešDcÃ9É'4cYÖï,5}7L±Ó⺞ù&pÒܘR?,)ä„cÎîÃÿ­oAÕ—\Ðí5%…¡óÓq‰ŽJ08#=ðA昚"}»N½¸½º¹º±ŽXÒIv “nKP26Œ`|Ô:u¹ðý„Z]žŸsovJ^›$±êÊz’:”›âê'U³Ñìâ{ 8Þ\Ló„òƒ ±J°výÛ£Ž¼Ô:ŸŒ¦ÐôK ˦IæX­Ä†ïR[Y%m¹"4ØC7â£3Z:Ÿ† ×Z{—¸Ô4Ùo->Çt<`Ë[ ÙßH?1æ™uà»;‚Æ;ûûo2Á4é¼–2»¶‚Y Só·+·¯°À=__Öÿµü5ýohöZ‘f"âäÆÒ~áÜ+b'ÚÈ$’1ŒsUµ‰6šF§{k*XìÓŒkx$Ô9‹2«&"¹(aÉ+ž@ɽuá›{‹-*¯.íeÒñök˜J˜ÎC+)ʱÎWéŠlžþkË]_Q³k’­t¶Ü:¨]͹Ö*ª B¹À xïþIç‰ìuÿ¢šÉ<ð×ý‚­ôRÑã¿ù'ž%ÿ°U×þŠj< ÿ$óÃ_ö µÿÑK@Q@Q@qþ2ðn£â=cDÕ´ûûIóü¹~Æ·¼ÕU<38r^خŠóÿøD¾!ÿÑOÿÊ¿øÑÿ—Ä?ú)ÿù@·ÿô (Ïÿáø‡ÿE?ÿ(ÿãGü"_ÿè§ÿåßükÐ( ?ÿ„Kâýÿü [ÿð‰|Cÿ¢Ÿÿ” ñ¯@¢€<ÿþ/ˆôSÿòoþ4Â%ñþŠþP-ÿƽŠóÿøD¾!ÿÑOÿÊ¿øÑÿ—Ä?ú)ÿù@·ÿô (Ïÿáø‡ÿE?ÿ(ÿãGü"_ÿè§ÿåßükÐ( ?ÿ„Kâýÿü [ÿð‰|Cÿ¢Ÿÿ” ñ¯@¢€<ÞÿÀž;Ôôë› Ï‰~e­ÔO Éý…ÜŒaÙô®ãBÓ?±<=¦i>wöH­¼Ý»wì@»±“Œã8É­ (¢Š(¢Š(¢Š(¢Š(¢Š(¢Š(¢Š(¢Š(¢Š(ÏÿáI|<ÿ¡{ÿ'n?øåð¤¾нÿ“·ürŠ(ÿ…%ðóþ…ïü¸ÿã”Â’øyÿB÷þNÜñÊ( þ—ÃÏú¿òvãÿŽQÿ Káçý ßù;qÿÇ(¢€øR_?è^ÿÉÛþ9Gü)/‡Ÿô/äíÇÿ¢Š?áI|<ÿ¡{ÿ'n?øåð¤¾нÿ“·ürŠ(ÿ…%ðóþ…ïü¸ÿã•ÜXXÛéšuµ…œ~]­¬I )¸¨ <œ:ÑEX¢Š(¢Š(¢Š(¢Š(¢Š(¢Š(¢Š(¢Š(¢Š(¢Š(¢Š(ÿÙpeewee-3.17.7/docs/peewee/sqlite_ext.rst000066400000000000000000002337641470346076600202340ustar00rootroot00000000000000.. _sqlite_ext: SQLite Extensions ================= The default :py:class:`SqliteDatabase` already includes many SQLite-specific features: * :ref:`General notes on using SQLite `. * :ref:`Configuring SQLite using PRAGMA statements `. * :ref:`User-defined functions, aggregate and collations `. * :ref:`Locking modes for transactions `. The ``playhouse.sqlite_ext`` includes even more SQLite features, including: * :ref:`Full-text search ` * :ref:`JSON extension integration ` * :ref:`Closure table extension support ` * :ref:`LSM1 extension support ` * :ref:`User-defined table functions ` * Support for online backups using backup API: :py:meth:`~CSqliteExtDatabase.backup_to_file` * :ref:`BLOB API support, for efficient binary data storage `. * :ref:`Additional helpers `, including bloom filter, more. Getting started --------------- To get started with the features described in this document, you will want to use the :py:class:`SqliteExtDatabase` class from the ``playhouse.sqlite_ext`` module. Furthermore, some features require the ``playhouse._sqlite_ext`` C extension -- these features will be noted in the documentation. Instantiating a :py:class:`SqliteExtDatabase`: .. code-block:: python from playhouse.sqlite_ext import SqliteExtDatabase db = SqliteExtDatabase('my_app.db', pragmas=( ('cache_size', -1024 * 64), # 64MB page-cache. ('journal_mode', 'wal'), # Use WAL-mode (you should always use this!). ('foreign_keys', 1))) # Enforce foreign-key constraints. APIs ---- .. py:class:: SqliteExtDatabase(database[, pragmas=None[, timeout=5[, c_extensions=None[, rank_functions=True[, hash_functions=False[, regexp_function=False[, bloomfilter=False]]]]]]]) :param list pragmas: A list of 2-tuples containing pragma key and value to set every time a connection is opened. :param timeout: Set the busy-timeout on the SQLite driver (in seconds). :param bool c_extensions: Declare that C extension speedups must/must-not be used. If set to ``True`` and the extension module is not available, will raise an :py:class:`ImproperlyConfigured` exception. :param bool rank_functions: Make search result ranking functions available. :param bool hash_functions: Make hashing functions available (md5, sha1, etc). :param bool regexp_function: Make the REGEXP function available. 
.. py:class:: CSqliteExtDatabase(database[, pragmas=None[, timeout=5[, c_extensions=None[, rank_functions=True[, hash_functions=False[, regexp_function=False[, bloomfilter=False[, replace_busy_handler=False]]]]]]]])

    :param list pragmas: A list of 2-tuples containing pragma key and value to
        set every time a connection is opened.
    :param timeout: Set the busy-timeout on the SQLite driver (in seconds).
    :param bool c_extensions: Declare that C extension speedups must/must-not
        be used. If set to ``True`` and the extension module is not available,
        will raise an :py:class:`ImproperlyConfigured` exception.
    :param bool rank_functions: Make search result ranking functions available.
    :param bool hash_functions: Make hashing functions available (md5, sha1, etc).
    :param bool regexp_function: Make the REGEXP function available.
    :param bool bloomfilter: Make the :ref:`bloom filter ` available.
    :param bool replace_busy_handler: Use a smarter busy-handler implementation.

    Extends :py:class:`SqliteExtDatabase` and requires that the
    ``playhouse._sqlite_ext`` extension module be available.

    .. py:method:: on_commit(fn)

        Register a callback to be executed whenever a transaction is committed
        on the current connection. The callback accepts no parameters and the
        return value is ignored.

        However, if the callback raises a :py:class:`ValueError`, the
        transaction will be aborted and rolled-back.

        Example:

        .. code-block:: python

            db = CSqliteExtDatabase(':memory:')

            @db.on_commit
            def on_commit():
                logger.info('COMMITing changes')

    .. py:method:: on_rollback(fn)

        Register a callback to be executed whenever a transaction is rolled
        back on the current connection. The callback accepts no parameters and
        the return value is ignored.

        Example:

        .. code-block:: python

            @db.on_rollback
            def on_rollback():
                logger.info('Rolling back changes')

    .. py:method:: on_update(fn)

        Register a callback to be executed whenever the database is written to
        (via an *UPDATE*, *INSERT* or *DELETE* query). The callback should
        accept the following parameters:

        * ``query`` - the type of query, either *INSERT*, *UPDATE* or *DELETE*.
        * database name - the default database is named *main*.
        * table name - name of table being modified.
        * rowid - the rowid of the row being modified.

        The callback's return value is ignored.

        Example:

        .. code-block:: python

            db = CSqliteExtDatabase(':memory:')

            @db.on_update
            def on_update(query_type, db, table, rowid):
                # e.g. INSERT row 3 into table users.
                logger.info('%s row %s into table %s', query_type, rowid, table)

    .. py:method:: changes()

        Return the number of rows modified in the currently-open transaction.
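        A minimal sketch of how this might be used (the table and data here
        are hypothetical):

        .. code-block:: python

            db = CSqliteExtDatabase(':memory:')
            db.execute_sql('CREATE TABLE register (value TEXT)')

            with db.atomic():
                db.execute_sql('INSERT INTO register (value) VALUES (?), (?)',
                               ('v1', 'v2'))
                print(db.changes())  # Prints 2.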
    .. py:attribute:: autocommit

        Property which returns a boolean indicating if autocommit is enabled.
        By default, this value will be ``True`` except when inside a
        transaction (or :py:meth:`~Database.atomic` block).

        Example:

        .. code-block:: pycon

            >>> db = CSqliteExtDatabase(':memory:')
            >>> db.autocommit
            True
            >>> with db.atomic():
            ...     print(db.autocommit)
            ...
            False
            >>> db.autocommit
            True

    .. py:method:: backup(destination[, pages=None, name=None, progress=None])

        :param SqliteDatabase destination: Database object to serve as
            destination for the backup.
        :param int pages: Number of pages per iteration. Default value of -1
            indicates all pages should be backed-up in a single step.
        :param str name: Name of source database (may differ if you used
            ATTACH DATABASE to load multiple databases). Defaults to "main".
        :param progress: Progress callback, called with three parameters: the
            number of pages remaining, the total page count, and whether the
            backup is complete.

        Example:

        .. code-block:: python

            master = CSqliteExtDatabase('master.db')
            replica = CSqliteExtDatabase('replica.db')

            # Backup the contents of master to replica.
            master.backup(replica)
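        The ``progress`` callback can be used to monitor a long-running
        backup. A sketch, following the parameter description above:

        .. code-block:: python

            def progress(remaining, total, is_complete):
                # Called after each step of the backup.
                print('%s of %s pages copied.' % (total - remaining, total))

            master = CSqliteExtDatabase('master.db')
            replica = CSqliteExtDatabase('replica.db')

            # Copy up to 100 pages per step so progress is reported
            # incrementally, rather than in a single step.
            master.backup(replica, pages=100, progress=progress)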
    .. py:method:: backup_to_file(filename[, pages, name, progress])

        :param filename: Filename to store the database backup.
        :param int pages: Number of pages per iteration. Default value of -1
            indicates all pages should be backed-up in a single step.
        :param str name: Name of source database (may differ if you used
            ATTACH DATABASE to load multiple databases). Defaults to "main".
        :param progress: Progress callback, called with three parameters: the
            number of pages remaining, the total page count, and whether the
            backup is complete.

        Backup the current database to a file. The backed-up data is not a
        database dump, but an actual SQLite database file.

        Example:

        .. code-block:: python

            db = CSqliteExtDatabase('app.db')

            def nightly_backup():
                filename = 'backup-%s.db' % (datetime.date.today())
                db.backup_to_file(filename)

    .. py:method:: blob_open(table, column, rowid[, read_only=False])

        :param str table: Name of table containing data.
        :param str column: Name of column containing data.
        :param int rowid: ID of row to retrieve.
        :param bool read_only: Open the blob for reading only.
        :returns: :py:class:`Blob` instance which provides efficient access to
            the underlying binary data.
        :rtype: Blob

        See :py:class:`Blob` and :py:class:`ZeroBlob` for more information.

        Example:

        .. code-block:: python

            class Image(Model):
                filename = TextField()
                data = BlobField()

            buf_size = 1024 * 1024 * 8  # Allocate 8MB for storing file.
            rowid = Image.insert({Image.filename: 'thefile.jpg',
                                  Image.data: ZeroBlob(buf_size)}).execute()

            # Open the blob, returning a file-like object.
            blob = db.blob_open('image', 'data', rowid)

            # Write some data to the blob.
            blob.write(image_data)
            img_size = blob.tell()

            # Read the data back out of the blob.
            blob.seek(0)
            image_data = blob.read(img_size)

.. py:class:: RowIDField()

    Primary-key field that corresponds to the SQLite ``rowid`` field. For more
    information, see the SQLite documentation on `rowid tables `_.

    Example:

    .. code-block:: python

        class Note(Model):
            rowid = RowIDField()  # Will be primary key.
            content = TextField()
            timestamp = TimestampField()

.. py:class:: DocIDField()

    Subclass of :py:class:`RowIDField` for use on virtual tables that
    specifically use the convention of ``docid`` for the primary key. As far
    as I know this only pertains to tables using the FTS3 and FTS4 full-text
    search extensions.

    .. attention::
        In FTS3 and FTS4, "docid" is simply an alias for "rowid". To reduce
        confusion, it's probably best to just always use
        :py:class:`RowIDField` and never use :py:class:`DocIDField`.

    .. code-block:: python

        class NoteIndex(FTSModel):
            docid = DocIDField()  # "docid" is used as an alias for "rowid".
            content = SearchField()

            class Meta:
                database = db

.. py:class:: AutoIncrementField()

    SQLite, by default, may reuse primary key values after rows are deleted.
    To ensure that the primary key is *always* monotonically increasing,
    regardless of deletions, you should use :py:class:`AutoIncrementField`.
    There is a small performance cost for this feature. For more information,
    see the SQLite docs on `autoincrement `_.
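    A minimal sketch, assuming a hypothetical ``Event`` model:

    .. code-block:: python

        class Event(Model):
            # The primary key is guaranteed to keep increasing, even after
            # rows have been deleted.
            id = AutoIncrementField()
            payload = TextField()

            class Meta:
                database = db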
.. _sqlite-json1:

.. py:class:: JSONField(json_dumps=None, json_loads=None, ...)

    Field class suitable for storing JSON data, with special methods designed
    to work with the `json1 extension `_.

    SQLite 3.9.0 added `JSON support `_ in the form of an extension library.
    The SQLite json1 extension provides a number of helper functions for
    working with JSON data. These APIs are exposed as methods of a special
    field-type, :py:class:`JSONField`.

    To access or modify specific object keys or array indexes in a JSON
    structure, you can treat the :py:class:`JSONField` as if it were a
    dictionary/list.

    :param json_dumps: (optional) function for serializing data to JSON
        strings. If not provided, will use the stdlib ``json.dumps``.
    :param json_loads: (optional) function for de-serializing JSON to Python
        objects. If not provided, will use the stdlib ``json.loads``.

    .. note::
        To customize the JSON serialization or de-serialization, you can
        specify custom ``json_dumps`` and ``json_loads`` callables. These
        functions should accept a single parameter: the object to serialize,
        and the JSON string, respectively. To modify the parameters of the
        stdlib JSON functions, you can use ``functools.partial``:

        .. code-block:: python

            # Do not escape unicode code-points.
            my_json_dumps = functools.partial(json.dumps, ensure_ascii=False)

            class SomeModel(Model):
                # Specify our custom serialization function.
                json_data = JSONField(json_dumps=my_json_dumps)

    Let's look at some examples of using the SQLite json1 extension with
    Peewee. Here we'll prepare a database and a simple model for testing the
    `json1 extension `_:

    .. code-block:: pycon

        >>> from playhouse.sqlite_ext import *
        >>> db = SqliteExtDatabase(':memory:')
        >>> class KV(Model):
        ...     key = TextField()
        ...     value = JSONField()
        ...     class Meta:
        ...         database = db
        ...
        >>> KV.create_table()

    Storing data works as you might expect. There's no need to serialize
    dictionaries or lists as JSON, as this is done automatically by Peewee:

    .. code-block:: pycon

        >>> KV.create(key='a', value={'k1': 'v1'})
        >>> KV.get(KV.key == 'a').value
        {'k1': 'v1'}

    We can access specific parts of the JSON data using dictionary lookups:

    .. code-block:: pycon

        >>> KV.get(KV.value['k1'] == 'v1').key
        'a'

    It's possible to update a JSON value in-place using the
    :py:meth:`~JSONField.update` method. Note that "k1=v1" is preserved:

    .. code-block:: pycon

        >>> KV.update(value=KV.value.update({'k2': 'v2', 'k3': 'v3'})).execute()
        1
        >>> KV.get(KV.key == 'a').value
        {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}

    We can also update existing data atomically, or remove keys by setting
    their value to ``None``. In the following example, we'll update the value
    of "k1" and remove "k3" ("k2" will not be modified):

    .. code-block:: pycon

        >>> KV.update(value=KV.value.update({'k1': 'v1-x', 'k3': None})).execute()
        1
        >>> KV.get(KV.key == 'a').value
        {'k1': 'v1-x', 'k2': 'v2'}

    We can also set individual parts of the JSON data using the
    :py:meth:`~JSONField.set` method:

    .. code-block:: pycon

        >>> KV.update(value=KV.value['k1'].set('v1')).execute()
        1
        >>> KV.get(KV.key == 'a').value
        {'k1': 'v1', 'k2': 'v2'}

    The :py:meth:`~JSONField.set` method can also be used with objects, in
    addition to scalar values:

    .. code-block:: pycon

        >>> KV.update(value=KV.value['k2'].set({'x2': 'y2'})).execute()
        1
        >>> KV.get(KV.key == 'a').value
        {'k1': 'v1', 'k2': {'x2': 'y2'}}

    Individual parts of the JSON data can be removed atomically as well, using
    :py:meth:`~JSONField.remove`:

    .. code-block:: pycon

        >>> KV.update(value=KV.value['k2'].remove()).execute()
        1
        >>> KV.get(KV.key == 'a').value
        {'k1': 'v1'}

    We can also get the type of value stored at a specific location in the
    JSON data using the :py:meth:`~JSONField.json_type` method:

    .. code-block:: pycon

        >>> KV.select(KV.value.json_type(), KV.value['k1'].json_type()).tuples()[:]
        [('object', 'text')]

    Let's add a nested value and then see how to iterate through its contents
    recursively using the :py:meth:`~JSONField.tree` method:

    .. code-block:: pycon

        >>> KV.create(key='b', value={'x1': {'y1': 'z1', 'y2': 'z2'}, 'x2': [1, 2]})
        >>> tree = KV.value.tree().alias('tree')
        >>> query = KV.select(KV.key, tree.c.fullkey, tree.c.value).from_(KV, tree)
        >>> query.tuples()[:]
        [('a', '$', {'k1': 'v1'}),
         ('a', '$.k1', 'v1'),
         ('b', '$', {'x1': {'y1': 'z1', 'y2': 'z2'}, 'x2': [1, 2]}),
         ('b', '$.x2', [1, 2]),
         ('b', '$.x2[0]', 1),
         ('b', '$.x2[1]', 2),
         ('b', '$.x1', {'y1': 'z1', 'y2': 'z2'}),
         ('b', '$.x1.y1', 'z1'),
         ('b', '$.x1.y2', 'z2')]

    The :py:meth:`~JSONField.tree` and :py:meth:`~JSONField.children` methods
    are powerful. For more information on how to utilize them, see the `json1
    extension documentation `_.

    Also note that :py:class:`JSONField` lookups can be chained:

    .. code-block:: pycon

        >>> query = KV.select().where(KV.value['x1']['y1'] == 'z1')
        >>> for obj in query:
        ...     print(obj.key, obj.value)
        ...
        'b', {'x1': {'y1': 'z1', 'y2': 'z2'}, 'x2': [1, 2]}

    For more information, refer to the `sqlite json1 documentation `_.

    .. py:method:: __getitem__(item)

        :param item: Access a specific key or array index in the JSON data.
        :return: a special object exposing access to the JSON data.
        :rtype: JSONPath

        Access a specific key or array index in the JSON data. Returns a
        :py:class:`JSONPath` object, which exposes convenient methods for
        reading or modifying a particular part of a JSON object.

        Example:

        .. code-block:: python

            # If metadata contains {"tags": ["list", "of", "tags"]}, we can
            # extract the first tag in this way:
            Post.select(Post, Post.metadata['tags'][0].alias('first_tag'))

        For more examples see the :py:class:`JSONPath` API documentation.

    .. py:method:: extract(*paths)

        :param paths: One or more JSON paths to extract.

        Extract the value(s) at the specified JSON paths. If multiple paths
        are provided, then Sqlite will return the values as a ``list``.

    .. py:method:: extract_json(path)

        :param str path: JSON path

        Extract the value at the specified path as a JSON data-type. This
        corresponds to the ``->`` operator added in Sqlite 3.38.

    .. py:method:: extract_text(path)

        :param str path: JSON path

        Extract the value at the specified path as a SQL data-type. This
        corresponds to the ``->>`` operator added in Sqlite 3.38.
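    A short sketch of the extract methods, reusing the ``KV`` model from the
    examples above (the row keyed "c" is hypothetical):

    .. code-block:: python

        KV.create(key='c', value={'k1': 'v1', 'k2': [0, 1]})

        query = (KV
                 .select(KV.value.extract('$.k1').alias('k1'),
                         KV.value.extract_text('$.k2[0]').alias('first'))
                 .where(KV.key == 'c'))
        row = query.get()
        # row.k1 == 'v1' and row.first == 0.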
    .. py:method:: set(value[, as_json=None])

        :param value: a scalar value, list, or dictionary.
        :param bool as_json: force the value to be treated as JSON, in which
            case it will be serialized as JSON in Python beforehand. By
            default, lists and dictionaries are treated as JSON to be
            serialized, while strings and integers are passed as-is.

        Set the value stored in a :py:class:`JSONField`.

        Uses the `json_set() `_ function from the json1 extension.

    .. py:method:: replace(value[, as_json=None])

        :param value: a scalar value, list, or dictionary.
        :param bool as_json: force the value to be treated as JSON, in which
            case it will be serialized as JSON in Python beforehand. By
            default, lists and dictionaries are treated as JSON to be
            serialized, while strings and integers are passed as-is.

        Replace the existing value stored in a :py:class:`JSONField`.

        Uses the `json_replace() `_ function from the json1 extension.

    .. py:method:: insert(value[, as_json=None])

        :param value: a scalar value, list, or dictionary.
        :param bool as_json: force the value to be treated as JSON, in which
            case it will be serialized as JSON in Python beforehand. By
            default, lists and dictionaries are treated as JSON to be
            serialized, while strings and integers are passed as-is.

        Insert value into :py:class:`JSONField`.

        Uses the `json_insert() `_ function from the json1 extension.

    .. py:method:: append(value[, as_json=None])

        :param value: a scalar value, list, or dictionary.
        :param bool as_json: force the value to be treated as JSON, in which
            case it will be serialized as JSON in Python beforehand. By
            default, lists and dictionaries are treated as JSON to be
            serialized, while strings and integers are passed as-is.

        Append to the array stored in a :py:class:`JSONField`.

        Uses the `json_set() `_ function from the json1 extension.

    .. py:method:: update(data)

        :param data: a scalar value, list or dictionary to merge with the data
            currently stored in a :py:class:`JSONField`. To remove a
            particular key, set that key to ``None`` in the updated data.

        Merge new data into the JSON value using the RFC-7396 MergePatch
        algorithm to apply a patch (``data`` parameter) against the column
        data. MergePatch can add, modify, or delete elements of a JSON object,
        which means :py:meth:`~JSONField.update` is a generalized replacement
        for both :py:meth:`~JSONField.set` and :py:meth:`~JSONField.remove`.
        MergePatch treats JSON array objects as atomic, so ``update()`` cannot
        append to an array, nor modify individual elements of an array.

        For more information as well as examples, see the SQLite `json_patch()
        `_ function documentation.

    .. py:method:: remove()

        Remove the data stored in the :py:class:`JSONField`.

        Uses the `json_remove `_ function from the json1 extension.

    .. py:method:: json_type()

        Return a string identifying the type of value stored in the column.

        The type returned will be one of:

        * object
        * array
        * integer
        * real
        * true
        * false
        * text
        * null  <-- the string "null" means an actual NULL value
        * NULL  <-- an actual NULL value means the path was not found

        Uses the `json_type `_ function from the json1 extension.

    .. py:method:: length()

        Return the length of the array stored in the column.

        Uses the `json_array_length `_ function from the json1 extension.
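    A sketch combining :py:meth:`~JSONField.append` and
    :py:meth:`~JSONField.length`, again reusing the ``KV`` model (the "tags"
    row is hypothetical):

    .. code-block:: python

        KV.create(key='tags', value=['red', 'green'])

        # Append an element to the stored JSON array.
        KV.update(value=KV.value.append('blue')).execute()

        # Read back the array length -- 3 after the append.
        n = KV.select(KV.value.length()).where(KV.key == 'tags').scalar()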
    .. py:method:: children()

        The ``children`` function corresponds to ``json_each``, a table-valued
        function that walks the JSON value provided and returns the immediate
        children of the top-level array or object. If a path is specified,
        then that path is treated as the top-most element.

        The rows returned by calls to ``children()`` have the following
        attributes:

        * ``key``: the key of the current element relative to its parent.
        * ``value``: the value of the current element.
        * ``type``: one of the data-types (see :py:meth:`~JSONField.json_type`).
        * ``atom``: the scalar value for primitive types, ``NULL`` for arrays
          and objects.
        * ``id``: a unique ID referencing the current node in the tree.
        * ``parent``: the ID of the containing node.
        * ``fullkey``: the full path describing the current element.
        * ``path``: the path to the container of the current row.

        Internally this method uses the `json_each `_ (documentation link)
        function from the json1 extension.

        Example usage (compare to :py:meth:`~JSONField.tree` method):

        .. code-block:: python

            class KeyData(Model):
                key = TextField()
                data = JSONField()

            KeyData.create(key='a', data={'k1': 'v1', 'x1': {'y1': 'z1'}})
            KeyData.create(key='b', data={'x1': {'y1': 'z1', 'y2': 'z2'}})

            # We will query the KeyData model for the key and all the
            # top-level keys and values in its data field.
            kd = KeyData.data.children().alias('children')
            query = (KeyData
                     .select(kd.c.key, kd.c.value, kd.c.fullkey)
                     .from_(KeyData, kd)
                     .order_by(kd.c.key)
                     .tuples())
            print(query[:])

            # PRINTS:
            [('a', 'k1', 'v1', '$.k1'),
             ('a', 'x1', '{"y1":"z1"}', '$.x1'),
             ('b', 'x1', '{"y1":"z1","y2":"z2"}', '$.x1')]

    .. py:method:: tree()

        The ``tree`` function corresponds to ``json_tree``, a table-valued
        function that recursively walks the JSON value provided and returns
        information about the keys at each level. If a path is specified, then
        that path is treated as the top-most element.

        The rows returned by calls to ``tree()`` have the same attributes as
        rows returned by calls to :py:meth:`~JSONField.children`:

        * ``key``: the key of the current element relative to its parent.
        * ``value``: the value of the current element.
        * ``type``: one of the data-types (see :py:meth:`~JSONField.json_type`).
        * ``atom``: the scalar value for primitive types, ``NULL`` for arrays
          and objects.
        * ``id``: a unique ID referencing the current node in the tree.
        * ``parent``: the ID of the containing node.
        * ``fullkey``: the full path describing the current element.
        * ``path``: the path to the container of the current row.

        Internally this method uses the `json_tree `_ (documentation link)
        function from the json1 extension.

        Example usage:

        .. code-block:: python

            class KeyData(Model):
                key = TextField()
                data = JSONField()

            KeyData.create(key='a', data={'k1': 'v1', 'x1': {'y1': 'z1'}})
            KeyData.create(key='b', data={'x1': {'y1': 'z1', 'y2': 'z2'}})

            # We will query the KeyData model for the key and all the
            # keys and values in its data field, recursively.
            kd = KeyData.data.tree().alias('tree')
            query = (KeyData
                     .select(kd.c.key, kd.c.value, kd.c.fullkey)
                     .from_(KeyData, kd)
                     .order_by(kd.c.key)
                     .tuples())
            print(query[:])

            # PRINTS:
            [('a', None, '{"k1":"v1","x1":{"y1":"z1"}}', '$'),
             ('b', None, '{"x1":{"y1":"z1","y2":"z2"}}', '$'),
             ('a', 'k1', 'v1', '$.k1'),
             ('a', 'x1', '{"y1":"z1"}', '$.x1'),
             ('b', 'x1', '{"y1":"z1","y2":"z2"}', '$.x1'),
             ('a', 'y1', 'z1', '$.x1.y1'),
             ('b', 'y1', 'z1', '$.x1.y1'),
             ('b', 'y2', 'z2', '$.x1.y2')]
.. py:class:: JSONPath(field[, path=None])

    :param JSONField field: the field object we intend to access.
    :param tuple path: Components comprising the JSON path.

    A convenient, Pythonic way of representing JSON paths for use with
    :py:class:`JSONField`.

    The ``JSONPath`` object implements ``__getitem__``, accumulating path
    components, which it can turn into the corresponding json-path expression.

    .. py:method:: __getitem__(item)

        :param item: Access a sub-key or array index.
        :return: a :py:class:`JSONPath` representing the new path.

        Access a sub-key or array index in the JSON data. Returns a
        :py:class:`JSONPath` object, which exposes convenient methods for
        reading or modifying a particular part of a JSON object.

        Example:

        .. code-block:: python

            # If metadata contains {"tags": ["list", "of", "tags"]}, we can
            # extract the first tag in this way:
            first_tag = Post.metadata['tags'][0]
            query = (Post
                     .select(Post, first_tag.alias('first_tag'))
                     .order_by(first_tag))

    .. py:method:: set(value[, as_json=None])

        :param value: a scalar value, list, or dictionary.
        :param bool as_json: force the value to be treated as JSON, in which
            case it will be serialized as JSON in Python beforehand. By
            default, lists and dictionaries are treated as JSON to be
            serialized, while strings and integers are passed as-is.

        Set the value at the given location in the JSON data.

        Uses the `json_set() `_ function from the json1 extension.

    .. py:method:: replace(value[, as_json=None])

        :param value: a scalar value, list, or dictionary.
        :param bool as_json: force the value to be treated as JSON, in which
            case it will be serialized as JSON in Python beforehand. By
            default, lists and dictionaries are treated as JSON to be
            serialized, while strings and integers are passed as-is.

        Replace the existing value at the given location in the JSON data.

        Uses the `json_replace() `_ function from the json1 extension.

    .. py:method:: insert(value[, as_json=None])

        :param value: a scalar value, list, or dictionary.
        :param bool as_json: force the value to be treated as JSON, in which
            case it will be serialized as JSON in Python beforehand. By
            default, lists and dictionaries are treated as JSON to be
            serialized, while strings and integers are passed as-is.

        Insert a new value at the given location in the JSON data.

        Uses the `json_insert() `_ function from the json1 extension.

    .. py:method:: append(value[, as_json=None])

        :param value: a scalar value, list, or dictionary.
        :param bool as_json: force the value to be treated as JSON, in which
            case it will be serialized as JSON in Python beforehand. By
            default, lists and dictionaries are treated as JSON to be
            serialized, while strings and integers are passed as-is.

        Append to the array stored at the given location in the JSON data.

        Uses the `json_set() `_ function from the json1 extension.

    .. py:method:: update(data)

        :param data: a scalar value, list or dictionary to merge with the data
            at the given location in the JSON data. To remove a particular
            key, set that key to ``None`` in the updated data.

        Merge new data into the JSON value using the RFC-7396 MergePatch
        algorithm to apply a patch (``data`` parameter) against the column
        data. MergePatch can add, modify, or delete elements of a JSON object,
        which means :py:meth:`~JSONPath.update` is a generalized replacement
        for both :py:meth:`~JSONPath.set` and :py:meth:`~JSONPath.remove`.
        MergePatch treats JSON array objects as atomic, so ``update()`` cannot
        append to an array, nor modify individual elements of an array.

        For more information as well as examples, see the SQLite `json_patch()
        `_ function documentation.

    .. py:method:: remove()

        Remove the data stored at the given location in the JSON data.

        Uses the `json_remove `_ function from the json1 extension.

    .. py:method:: json_type()

        Return a string identifying the type of value stored at the given
        location in the JSON data.

        The type returned will be one of:

        * object
        * array
        * integer
        * real
        * true
        * false
        * text
        * null  <-- the string "null" means an actual NULL value
        * NULL  <-- an actual NULL value means the path was not found

        Uses the `json_type `_ function from the json1 extension.

    .. py:method:: length()

        Return the length of the array stored at the given location in the
        JSON data.

        Uses the `json_array_length `_ function from the json1 extension.

    .. py:method:: children()

        Table-valued function that exposes the direct descendants of a JSON
        object at the given location. See also :py:meth:`JSONField.children`.

    .. py:method:: tree()

        Table-valued function that exposes all descendants, recursively, of a
        JSON object at the given location. See also :py:meth:`JSONField.tree`.
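    A sketch showing a nested :py:class:`JSONPath` modification, reusing the
    ``KV`` model (the "d" row is hypothetical):

    .. code-block:: python

        KV.create(key='d', value={'items': []})

        # Append an object to the nested "items" array.
        KV.update(value=KV.value['items'].append({'id': 1})).execute()

        # Read a value back out of the nested structure.
        item_id = (KV
                   .select(KV.value['items'][0]['id'])
                   .where(KV.key == 'd')
                   .scalar())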
.. py:class:: JSONBField(json_dumps=None, json_loads=None, ...)

    Field-class suitable for use with data stored on-disk in ``jsonb`` format
    (available starting Sqlite 3.45.0). This field-class should be used with
    care, as the data may be returned in its encoded format depending on how
    you query it. For example:

    .. code-block:: pycon

        >>> KV.create(key='a', value={'k1': 'v1'})
        >>> KV.get(KV.key == 'a').value
        b"l'k1'v1"

    To get the JSON value, it is necessary to use ``fn.json()`` or the helper
    :py:meth:`JSONBField.json` method:

    .. code-block:: pycon

        >>> kv = KV.select(KV.value.json()).get()
        >>> kv.value
        {'k1': 'v1'}

.. py:class:: JSONBPath(field[, path=None])

    Subclass of :py:class:`JSONPath` for working with ``jsonb`` data.

.. py:class:: SearchField([unindexed=False[, column_name=None]])

    Field-class to be used for columns on models representing full-text
    search virtual tables. The full-text search extensions prohibit the
    specification of any typing or constraints on columns. This behavior is
    enforced by the :py:class:`SearchField`, which raises an exception if any
    configuration is attempted that would be incompatible with the full-text
    search extensions.

    Example model for document search index (timestamp is stored in the table
    but its data is not searchable):

    .. code-block:: python

        class DocumentIndex(FTSModel):
            title = SearchField()
            content = SearchField()
            tags = SearchField()
            timestamp = SearchField(unindexed=True)

    .. py:method:: match(term)

        :param str term: full-text search query/terms
        :return: a :py:class:`Expression` corresponding to the ``MATCH``
            operator.

        Sqlite's full-text search supports searching either the full table,
        including all indexed columns, **or** searching individual columns.
        The :py:meth:`~SearchField.match` method can be used to restrict
        search to a single column:

        .. code-block:: python

            class SearchIndex(FTSModel):
                title = SearchField()
                body = SearchField()

            # Search *only* the title field and return results ordered by
            # relevance, using bm25.
            query = (SearchIndex
                     .select(SearchIndex, SearchIndex.bm25().alias('score'))
                     .where(SearchIndex.title.match('python'))
                     .order_by(SearchIndex.bm25()))

        To instead search *all* indexed columns, use the
        :py:meth:`FTSModel.match` method:

        .. code-block:: python

            # Searches *both* the title and body and return results ordered by
            # relevance, using bm25.
            query = (SearchIndex
                     .select(SearchIndex, SearchIndex.bm25().alias('score'))
                     .where(SearchIndex.match('python'))
                     .order_by(SearchIndex.bm25()))

    .. py:method:: highlight(left, right)

        :param str left: opening tag for highlight, e.g. ``''``
        :param str right: closing tag for highlight, e.g. ``''``

        When performing a search using the ``MATCH`` operator, FTS5 can return
        text highlighting matches in a given column.

        .. code-block:: python

            # Search for items matching string 'python' and return the title
            # highlighted with square brackets.
            query = (SearchIndex
                     .search('python')
                     .select(SearchIndex.title.highlight('[', ']').alias('hi')))

            for result in query:
                print(result.hi)

            # For example, might print:
            # Learn [python] the hard way

    .. py:method:: snippet(left, right, over_length='...', max_tokens=16)

        :param str left: opening tag for highlight, e.g. ``''``
        :param str right: closing tag for highlight, e.g. ``''``
        :param str over_length: text to prepend or append when snippet exceeds
            the maximum number of tokens.
        :param int max_tokens: max tokens returned, **must be 1 - 64**.

        When performing a search using the ``MATCH`` operator, FTS5 can return
        text with a snippet containing the highlighted match in a given
        column.

        .. code-block:: python

            # Search for items matching string 'python' and return the title
            # highlighted with square brackets.
            query = (SearchIndex
                     .search('python')
                     .select(SearchIndex.title.snippet('[', ']').alias('snip')))

            for result in query:
                print(result.snip)
.. py:class:: VirtualModel()

    Model class designed to be used to represent virtual tables. The default
    metadata settings are slightly different, to match those frequently used
    by virtual tables.

    Metadata options:

    * ``arguments`` - arguments passed to the virtual table constructor.
    * ``extension_module`` - name of extension to use for virtual table.
    * ``options`` - a dictionary of settings to apply in virtual table
      constructor.
    * ``primary_key`` - defaults to ``False``, indicating no primary key.

    These all are combined in the following way:

    .. code-block:: sql

        CREATE VIRTUAL TABLE <table_name>
        USING <extension_module>
        ([prefix_arguments, ...] fields, ... [arguments, ...], [options...])
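    A schematic sketch of how these options might be declared (the extension
    name and options below are placeholders, not a real extension):

    .. code-block:: python

        class VirtualData(VirtualModel):
            value = BareField()

            class Meta:
                database = db
                extension_module = 'some_extension'  # Placeholder name.
                options = {'option': 'value'}  # Rendered into constructor.
                primary_key = False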
.. _sqlite-fts:

.. py:class:: FTSModel()

    Subclass of :py:class:`VirtualModel` to be used with the `FTS3 and FTS4 `_
    full-text search extensions.

    FTSModel subclasses should be defined normally, however there are a
    couple caveats:

    * Unique constraints, not null constraints, check constraints and foreign
      keys are not supported.
    * Indexes on fields and multi-column indexes are ignored completely
    * Sqlite will treat all column types as ``TEXT`` (although you can store
      other data types, Sqlite will treat them as text).
    * FTS models contain a ``rowid`` field which is automatically created and
      managed by SQLite (unless you choose to explicitly set it during model
      creation). Lookups on this column **are fast and efficient**.

    Given these constraints, it is strongly recommended that all fields
    declared on an ``FTSModel`` subclass be instances of
    :py:class:`SearchField` (though an exception is made for explicitly
    declaring a :py:class:`RowIDField`). Using :py:class:`SearchField` will
    help prevent you accidentally creating invalid column constraints. If you
    wish to store metadata in the index but would not like it to be included
    in the full-text index, then specify ``unindexed=True`` when instantiating
    the :py:class:`SearchField`.

    The only exception to the above is for the ``rowid`` primary key, which
    can be declared using :py:class:`RowIDField`. Lookups on the ``rowid`` are
    very efficient. If you are using FTS4 you can also use
    :py:class:`DocIDField`, which is an alias for the rowid (though there is
    no benefit to doing so).

    Because of the lack of secondary indexes, it usually makes sense to use
    the ``rowid`` primary key as a pointer to a row in a regular table. For
    example:

    .. code-block:: python

        class Document(Model):
            # Canonical source of data, stored in a regular table.
            author = ForeignKeyField(User, backref='documents')
            title = TextField(null=False, unique=True)
            content = TextField(null=False)
            timestamp = DateTimeField()

            class Meta:
                database = db

        class DocumentIndex(FTSModel):
            # Full-text search index.
            rowid = RowIDField()
            title = SearchField()
            content = SearchField()

            class Meta:
                database = db
                # Use the porter stemming algorithm to tokenize content.
                options = {'tokenize': 'porter'}

    To store a document in the document index, we will ``INSERT`` a row into
    the ``DocumentIndex`` table, manually setting the ``rowid`` so that it
    matches the primary-key of the corresponding ``Document``:

    .. code-block:: python

        def store_document(document):
            DocumentIndex.insert({
                DocumentIndex.rowid: document.id,
                DocumentIndex.title: document.title,
                DocumentIndex.content: document.content}).execute()

    To perform a search and return ranked results, we can query the
    ``Document`` table and join on the ``DocumentIndex``. This join will be
    efficient because lookups on an FTSModel's ``rowid`` field are fast:

    .. code-block:: python

        def search(phrase):
            # Query the search index and join the corresponding Document
            # object on each search result.
            return (Document
                    .select()
                    .join(
                        DocumentIndex,
                        on=(Document.id == DocumentIndex.rowid))
                    .where(DocumentIndex.match(phrase))
                    .order_by(DocumentIndex.bm25()))

    .. warning::
        All SQL queries on ``FTSModel`` classes will be full-table scans
        **except** full-text searches and ``rowid`` lookups.

    If the primary source of the content you are indexing exists in a
    separate table, you can save some disk space by instructing SQLite to not
    store an additional copy of the search index content. SQLite will still
    create the metadata and data-structures needed to perform searches on the
    content, but the content itself will not be stored in the search index.

    To accomplish this, you can specify a table or column using the
    ``content`` option. The `FTS4 documentation `_ has more information.

    Here is a short example illustrating how to implement this with peewee:

    .. code-block:: python

        class Blog(Model):
            title = TextField()
            pub_date = DateTimeField(default=datetime.datetime.now)
            content = TextField()  # We want to search this.

            class Meta:
                database = db

        class BlogIndex(FTSModel):
            content = SearchField()

            class Meta:
                database = db
                options = {'content': Blog.content}  # <-- specify data source.

        db.create_tables([Blog, BlogIndex])

        # Now, we can manage content in the BlogIndex. To populate the
        # search index:
        BlogIndex.rebuild()

        # Optimize the index.
        BlogIndex.optimize()

    The ``content`` option accepts either a single :py:class:`Field` or a
    :py:class:`Model` and can reduce the amount of storage used by the
    database file. However, content will need to be manually moved to/from
    the associated ``FTSModel``.

    .. py:classmethod:: match(term)

        :param term: Search term or expression.

        Generate a SQL expression representing a search for the given term or
        expression in the table. SQLite uses the ``MATCH`` operator to
        indicate a full-text search.

        Example:

        .. code-block:: python

            # Search index for "search phrase" and return results ranked
            # by relevancy using the BM25 algorithm.
            query = (DocumentIndex
                     .select()
                     .where(DocumentIndex.match('search phrase'))
                     .order_by(DocumentIndex.bm25()))
            for result in query:
                print('Result: %s' % result.title)
    .. py:classmethod:: search(term[, weights=None[, with_score=False[, score_alias='score'[, explicit_ordering=False]]]])

        :param str term: Search term to use.
        :param weights: A list of weights for the columns, ordered with
            respect to the column's position in the table. **Or**, a
            dictionary keyed by the field or field name and mapped to a value.
        :param with_score: Whether the score should be returned as part of the
            ``SELECT`` statement.
        :param str score_alias: Alias to use for the calculated rank score.
            This is the attribute you will use to access the score if
            ``with_score=True``.
        :param bool explicit_ordering: Order using full SQL function to
            calculate rank, as opposed to simply referencing the score alias
            in the ORDER BY clause.

        Shorthand way of searching for a term and sorting results by the
        quality of the match.

        .. note::
            This method uses a simplified algorithm for determining the
            relevance rank of results. For more sophisticated result ranking,
            use the :py:meth:`~FTSModel.search_bm25` method.

        .. code-block:: python

            # Simple search.
            docs = DocumentIndex.search('search term')
            for result in docs:
                print(result.title)

            # More complete example.
            docs = DocumentIndex.search(
                'search term',
                weights={'title': 2.0, 'content': 1.0},
                with_score=True,
                score_alias='search_score')
            for result in docs:
                print(result.title, result.search_score)

    .. py:classmethod:: search_bm25(term[, weights=None[, with_score=False[, score_alias='score'[, explicit_ordering=False]]]])

        :param str term: Search term to use.
        :param weights: A list of weights for the columns, ordered with
            respect to the column's position in the table. **Or**, a
            dictionary keyed by the field or field name and mapped to a value.
        :param with_score: Whether the score should be returned as part of the
            ``SELECT`` statement.
        :param str score_alias: Alias to use for the calculated rank score.
            This is the attribute you will use to access the score if
            ``with_score=True``.
        :param bool explicit_ordering: Order using full SQL function to
            calculate rank, as opposed to simply referencing the score alias
            in the ORDER BY clause.

        Shorthand way of searching for a term and sorting results by the
        quality of the match using the BM25 algorithm.

        .. attention::
            The BM25 ranking algorithm is only available for FTS4. If you are
            using FTS3, use the :py:meth:`~FTSModel.search` method instead.

    .. py:classmethod:: search_bm25f(term[, weights=None[, with_score=False[, score_alias='score'[, explicit_ordering=False]]]])

        Same as :py:meth:`FTSModel.search_bm25`, but using the BM25f variant
        of the BM25 ranking algorithm.

    .. py:classmethod:: search_lucene(term[, weights=None[, with_score=False[, score_alias='score'[, explicit_ordering=False]]]])

        Same as :py:meth:`FTSModel.search_bm25`, but using the result ranking
        algorithm from the Lucene search engine.

    .. py:classmethod:: rank([col1_weight, col2_weight...coln_weight])

        :param float col_weight: (Optional) weight to give to the *ith* column
            of the model. By default all columns have a weight of ``1.0``.

        Generate an expression that will calculate and return the quality of
        the search match. This ``rank`` can be used to sort the search
        results. A higher rank score indicates a better match.

        The ``rank`` function accepts optional parameters that allow you to
        specify weights for the various columns. If no weights are specified,
        all columns are considered of equal importance.

        .. note::
            The algorithm used by :py:meth:`~FTSModel.rank` is simple and
            relatively quick. For more sophisticated result ranking, use:

            * :py:meth:`~FTSModel.bm25`
            * :py:meth:`~FTSModel.bm25f`
            * :py:meth:`~FTSModel.lucene`

        .. code-block:: python

            query = (DocumentIndex
                     .select(
                         DocumentIndex,
                         DocumentIndex.rank().alias('score'))
                     .where(DocumentIndex.match('search phrase'))
                     .order_by(DocumentIndex.rank()))

            for search_result in query:
                print(search_result.title, search_result.score)
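        To weight the columns differently, positional weights can be passed
        in column order. A sketch giving the title twice the weight of the
        content:

        .. code-block:: python

            # Weight the title column 2x relative to the content column.
            query = (DocumentIndex
                     .select(
                         DocumentIndex,
                         DocumentIndex.rank(2.0, 1.0).alias('score'))
                     .where(DocumentIndex.match('search phrase'))
                     .order_by(DocumentIndex.rank(2.0, 1.0)))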
    .. py:classmethod:: bm25([col1_weight, col2_weight...coln_weight])

        :param float col_weight: (Optional) weight to give to the *ith* column
            of the model. By default all columns have a weight of ``1.0``.

        Generate an expression that will calculate and return the quality of
        the search match using the `BM25 algorithm `_. This value can be used
        to sort the search results, with higher scores corresponding to better
        matches.

        Like :py:meth:`~FTSModel.rank`, the ``bm25`` function accepts optional
        parameters that allow you to specify weights for the various columns.
        If no weights are specified, all columns are considered of equal
        importance.

        .. attention::
            The BM25 result ranking algorithm requires FTS4. If you are using
            FTS3, use :py:meth:`~FTSModel.rank` instead.

        .. code-block:: python

            query = (DocumentIndex
                     .select(
                         DocumentIndex,
                         DocumentIndex.bm25().alias('score'))
                     .where(DocumentIndex.match('search phrase'))
                     .order_by(DocumentIndex.bm25()))

            for search_result in query:
                print(search_result.title, search_result.score)

        .. note::
            The above code example is equivalent to calling the
            :py:meth:`~FTSModel.search_bm25` method:

            .. code-block:: python

                query = DocumentIndex.search_bm25('search phrase', with_score=True)
                for search_result in query:
                    print(search_result.title, search_result.score)

    .. py:classmethod:: bm25f([col1_weight, col2_weight...coln_weight])

        Identical to :py:meth:`~FTSModel.bm25`, except that it uses the BM25f
        variant of the BM25 ranking algorithm.

    .. py:classmethod:: lucene([col1_weight, col2_weight...coln_weight])

        Identical to :py:meth:`~FTSModel.bm25`, except that it uses the Lucene
        search result ranking algorithm.

    .. py:classmethod:: rebuild()

        Rebuild the search index -- this only works when the ``content``
        option was specified during table creation.

    .. py:classmethod:: optimize()

        Optimize the search index.

.. py:class:: FTS5Model()

    Subclass of :py:class:`VirtualModel` to be used with the `FTS5 `_
    full-text search extensions.

    FTS5Model subclasses should be defined normally, however there are a
    couple caveats:

    * FTS5 explicitly disallows specification of any constraints, data-type
      or indexes on columns. For that reason, all columns **must** be
      instances of :py:class:`SearchField`.
    * FTS5 models contain a ``rowid`` field which is automatically created
      and managed by SQLite (unless you choose to explicitly set it during
      model creation). Lookups on this column **are fast and efficient**.
    * Indexes on fields and multi-column indexes are not supported.

    The ``FTS5`` extension comes with a built-in implementation of the BM25
    ranking function. Therefore, the ``search`` and ``search_bm25`` methods
    have been overridden to use the builtin ranking functions rather than
    user-defined functions.
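    For example, a minimal FTS5 index definition consistent with these
    caveats might look like:

    .. code-block:: python

        class DocumentIndex(FTS5Model):
            # Every column must be a SearchField; constraints, data-types
            # and indexes are not permitted.
            title = SearchField()
            content = SearchField()
            timestamp = SearchField(unindexed=True)

            class Meta:
                database = db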
.. py:classmethod:: fts5_installed() Return a boolean indicating whether the FTS5 extension is installed. If it is not installed, an attempt will be made to load the extension.
.. py:classmethod:: search(term[, weights=None[, with_score=False[, score_alias='score'[, explicit_ordering=False]]]]) :param str term: Search term to use. :param weights: A list of weights for the columns, ordered with respect to the column's position in the table. **Or**, a dictionary keyed by the field or field name and mapped to a value. :param bool with_score: Whether the score should be returned as part of the ``SELECT`` statement. :param str score_alias: Alias to use for the calculated rank score. This is the attribute you will use to access the score if ``with_score=True``. :param bool explicit_ordering: Order using the full SQL function to calculate rank, as opposed to simply referencing the score alias in the ORDER BY clause. Shorthand way of searching for a term and sorting results by the quality of the match. The ``FTS5`` extension provides a built-in implementation of the BM25 algorithm, which is used to rank the results by relevance. Higher scores correspond to better matches. .. code-block:: python # Simple search. docs = DocumentIndex.search('search term') for result in docs: print(result.title) # More complete example. docs = DocumentIndex.search( 'search term', weights={'title': 2.0, 'content': 1.0}, with_score=True, score_alias='search_score') for result in docs: print(result.title, result.search_score)
.. py:classmethod:: search_bm25(term[, weights=None[, with_score=False[, score_alias='score'[, explicit_ordering=False]]]]) With FTS5, :py:meth:`~FTS5Model.search_bm25` is identical to the :py:meth:`~FTS5Model.search` method.
.. py:classmethod:: rank([col1_weight, col2_weight...coln_weight]) :param float col_weight: (Optional) weight to give to the *ith* column of the model. By default all columns have a weight of ``1.0``. Generate an expression that will calculate and return the quality of the search match using the `BM25 algorithm <https://en.wikipedia.org/wiki/Okapi_BM25>`_. This value can be used to sort the search results, with higher scores corresponding to better matches. The :py:meth:`~FTS5Model.rank` function accepts optional parameters that allow you to specify weights for the various columns. If no weights are specified, all columns are considered of equal importance. .. code-block:: python query = (DocumentIndex .select( DocumentIndex, DocumentIndex.rank().alias('score')) .where(DocumentIndex.match('search phrase')) .order_by(DocumentIndex.rank())) for search_result in query: print(search_result.title, search_result.score) .. note:: The above code example is equivalent to calling the :py:meth:`~FTS5Model.search` method: .. code-block:: python query = DocumentIndex.search('search phrase', with_score=True) for search_result in query: print(search_result.title, search_result.score)
.. py:classmethod:: bm25([col1_weight, col2_weight...coln_weight]) Because FTS5 provides built-in support for BM25, the :py:meth:`~FTS5Model.bm25` method is identical to the :py:meth:`~FTS5Model.rank` method.
.. py:classmethod:: VocabModel([table_type='row'|'col'|'instance'[, table_name=None]]) :param str table_type: Either 'row', 'col' or 'instance'. :param table_name: Name for the vocab table. If not specified, will be "fts5tablename_v". Generate a model class suitable for accessing the `vocab table <https://www.sqlite.org/fts5.html>`_ corresponding to the FTS5 search index.
.. _sqlite-vtfunc:
.. py:class:: TableFunction() Implement a user-defined table-valued function. Unlike a simple :ref:`scalar or aggregate ` function, which returns a single scalar value, a table-valued function can return any number of rows of tabular data. Simple example: .. code-block:: python from playhouse.sqlite_ext import TableFunction class Series(TableFunction): # Name of columns in each row of generated data. columns = ['value'] # Name of parameters the function may be called with. params = ['start', 'stop', 'step'] def initialize(self, start=0, stop=None, step=1): """ Table-functions declare an initialize() method, which is called with whatever arguments the user has called the function with. """ self.start = self.current = start self.stop = stop or float('Inf') self.step = step def iterate(self, idx): """ Iterate is called repeatedly by the SQLite database engine until the required number of rows has been read **or** the function raises a `StopIteration` signalling no more rows are available. """ if self.current > self.stop: raise StopIteration ret, self.current = self.current, self.current + self.step return (ret,) # Register the table-function with our database, which ensures it # is declared whenever a connection is opened.
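# Note that table_function('series') returns a decorator, so the call on the # next line is equivalent to decorating the Series class with # @db.table_function('series').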
db.table_function('series')(Series) # Usage: cursor = db.execute_sql('SELECT * FROM series(?, ?, ?)', (0, 5, 2)) for value, in cursor: print(value) .. note:: A :py:class:`TableFunction` must be registered with a database connection before it can be used. To ensure the table function is always available, you can use the :py:meth:`SqliteDatabase.table_function` decorator to register the function with the database. :py:class:`TableFunction` implementations must provide two attributes and implement two methods, described below.
.. py:attribute:: columns A list containing the names of the columns for the data returned by the function. For example, a function that is used to split a string on a delimiter might specify 3 columns: ``[substring, start_idx, end_idx]``.
.. py:attribute:: params The names of the parameters the function may be called with. All parameters, including optional parameters, should be listed. For example, a function that is used to split a string on a delimiter might specify 2 params: ``[string, delimiter]``.
.. py:attribute:: name *Optional* - specify the name for the table function. If not provided, the name will be taken from the class name.
.. py:attribute:: print_tracebacks = True Print a full traceback for any errors that occur in the table-function's callback methods. When set to False, only the generic OperationalError will be visible.
.. py:method:: initialize(**parameter_values) :param parameter_values: Parameters the function was called with. :returns: No return value. The ``initialize`` method is called to initialize the table function with the parameters the user specified when calling the function.
.. py:method:: iterate(idx) :param int idx: current iteration step :returns: A tuple of row data corresponding to the columns named in the :py:attr:`~TableFunction.columns` attribute. :raises StopIteration: To signal that no more rows are available. This function is called repeatedly and returns successive rows of data. The function may terminate before all rows are consumed (especially if the user specified a ``LIMIT`` on the results). Alternatively, the function can signal that no more data is available by raising a ``StopIteration`` exception.
.. py:classmethod:: register(conn) :param conn: A ``sqlite3.Connection`` object. Register the table function with a DB-API 2.0 ``sqlite3.Connection`` object. Table-valued functions **must** be registered before they can be used in a query. Example: .. code-block:: python class MyTableFunction(TableFunction): name = 'my_func' # ... other attributes and methods ... db = SqliteDatabase(':memory:') db.connect() MyTableFunction.register(db.connection()) To ensure the :py:class:`TableFunction` is registered every time a connection is opened, use the :py:meth:`~SqliteDatabase.table_function` decorator.
.. _sqlite-closure-table:
.. py:function:: ClosureTable(model_class[, foreign_key=None[, referencing_class=None[, referencing_key=None]]]) :param model_class: The model class containing the nodes in the tree. :param foreign_key: The self-referential parent-node field on the model class. If not provided, peewee will introspect the model to find a suitable key. :param referencing_class: Intermediate table for a many-to-many relationship. :param referencing_key: For a many-to-many relationship, the originating side of the relation. :return: Returns a :py:class:`VirtualModel` for working with a closure table. Factory function for creating a model class suitable for working with a `transitive closure <https://en.wikipedia.org/wiki/Transitive_closure>`_ table.
Closure tables are :py:class:`VirtualModel` subclasses that work with the transitive closure SQLite extension. These special tables are designed to make it easy to efficiently query hierarchical data. The SQLite extension manages an AVL tree behind-the-scenes, transparently updating the tree when your table changes and making it easy to perform common queries on hierarchical data. To use the closure table extension in your project, you need: 1. A copy of the SQLite extension. The source code can be found in the SQLite code repository (in ``ext/misc/closure.c``) or by cloning `this gist <https://gist.github.com/coleifer/7f3593c5c2a645913b92>`_: .. code-block:: console $ git clone https://gist.github.com/coleifer/7f3593c5c2a645913b92 closure $ cd closure/ 2. Compile the extension as a shared library, e.g. .. code-block:: console $ gcc -g -fPIC -shared closure.c -o closure.so 3. Create a model for your hierarchical data. The only requirement here is that the model has an integer primary key and a self-referential foreign key. Any additional fields are fine. .. code-block:: python class Category(Model): name = CharField() metadata = TextField() parent = ForeignKeyField('self', index=True, null=True) # Required. # Generate a model for the closure virtual table. CategoryClosure = ClosureTable(Category) The self-referentiality can also be achieved via an intermediate table (for a many-to-many relation). .. code-block:: python class User(Model): name = CharField() class UserRelations(Model): user = ForeignKeyField(User) knows = ForeignKeyField(User, backref='_known_by') class Meta: primary_key = CompositeKey('user', 'knows') # Alternatively, a unique index on both columns. # Generate a model for the closure virtual table, specifying UserRelations as the referencing table. UserClosure = ClosureTable( User, referencing_class=UserRelations, foreign_key=UserRelations.knows, referencing_key=UserRelations.user) 4. In your application code, make sure you load the extension when you instantiate your :py:class:`Database` object. This is done by passing the path to the shared library to the :py:meth:`~SqliteExtDatabase.load_extension` method. .. code-block:: python db = SqliteExtDatabase('my_database.db') db.load_extension('/path/to/closure') .. warning:: There are two caveats you should be aware of when using the ``transitive_closure`` extension. First, it requires that your *source model* have an integer primary key. Second, it is strongly recommended that you create an index on the self-referential foreign key. Example: .. code-block:: python class Category(Model): name = CharField() metadata = TextField() parent = ForeignKeyField('self', index=True, null=True) # Required. # Generate a model for the closure virtual table. CategoryClosure = ClosureTable(Category) # Create the tables if they do not exist. db.create_tables([Category, CategoryClosure], safe=True)
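The closure table is maintained automatically as rows are inserted into the source table. The hierarchy queried below, for example, could be built up with ordinary inserts (a sketch; ``metadata`` is given an empty value for brevity): .. code-block:: python root = Category.create(name='All products', metadata='', parent=None) electronics = Category.create(name='Electronics', metadata='', parent=root) computers = Category.create(name='Computers', metadata='', parent=electronics) hardware = Category.create(name='Computer Hardware', metadata='', parent=computers) for name in ('Laptops', 'Desktops', 'Hard-drives'): Category.create(name=name, metadata='', parent=hardware) monitors = Category.create(name='Monitors', metadata='', parent=hardware) for name in ('LCD Monitors', 'LED Monitors'): Category.create(name=name, metadata='', parent=monitors)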
It is now possible to perform interesting queries using the data from the closure table: .. code-block:: python # Get all ancestors for a particular node. laptops = Category.get(Category.name == 'Laptops') for parent in CategoryClosure.ancestors(laptops): print(parent.name) # Computer Hardware # Computers # Electronics # All products # Get all descendants for a particular node. hardware = Category.get(Category.name == 'Computer Hardware') for node in CategoryClosure.descendants(hardware): print(node.name) # Laptops # Desktops # Hard-drives # Monitors # LCD Monitors # LED Monitors API of the :py:class:`VirtualModel` returned by :py:func:`ClosureTable`.
.. py:class:: BaseClosureTable()
.. py:attribute:: id A field for the primary key of the given node.
.. py:attribute:: depth A field representing the relative depth of the given node.
.. py:attribute:: root A field representing the relative root node.
.. py:method:: descendants(node[, depth=None[, include_node=False]]) Retrieve all descendants of the given node. If a depth is specified, only nodes at that depth (relative to the given node) will be returned. .. code-block:: python node = Category.get(Category.name == 'Electronics') # Direct child categories. children = CategoryClosure.descendants(node, depth=1) # Grand-child categories. children = CategoryClosure.descendants(node, depth=2) # Descendants at all depths. all_descendants = CategoryClosure.descendants(node)
.. py:method:: ancestors(node[, depth=None[, include_node=False]]) Retrieve all ancestors of the given node. If a depth is specified, only nodes at that depth (relative to the given node) will be returned. .. code-block:: python node = Category.get(Category.name == 'Laptops') # All ancestors. all_ancestors = CategoryClosure.ancestors(node) # Grand-parent category. grandparent = CategoryClosure.ancestors(node, depth=2)
.. py:method:: siblings(node[, include_node=False]) Retrieve all nodes that are children of the specified node's parent.
.. note:: For an in-depth discussion of the SQLite transitive closure extension, check out this blog post, `Querying Tree Structures in SQLite using Python and the Transitive Closure Extension `_.
.. _sqlite-lsm1:
.. py:class:: LSMTable() :py:class:`VirtualModel` subclass suitable for working with the `lsm1 extension `_. The *lsm1* extension is a virtual table that provides a SQL interface to the `lsm key/value storage engine from SQLite4 `_. .. note:: The LSM1 extension has not been released yet (SQLite version 3.22 at time of writing), so consider this feature experimental with potential to change in subsequent releases. LSM tables define one primary key column and an arbitrary number of additional value columns (which are serialized and stored in a single value field in the storage engine). The primary key must use one of the following field types: * :py:class:`IntegerField` * :py:class:`TextField` * :py:class:`BlobField` Since the LSM storage engine is a key/value store, primary keys (including integers) must be specified by the application. .. attention:: Secondary indexes are not supported by the LSM engine, so the only efficient queries will be lookups (or range queries) on the primary key. Other fields can be queried and filtered on, but may result in a full table-scan. Example model declaration: .. code-block:: python db = SqliteExtDatabase('my_app.db') db.load_extension('lsm.so') # Load shared library. class EventLog(LSMTable): timestamp = IntegerField(primary_key=True) action = TextField() sender = TextField() target = TextField() class Meta: database = db filename = 'eventlog.ldb' # LSM data is stored in separate db. # Declare virtual table. EventLog.create_table() Example queries: .. code-block:: python # Use dictionary operators to get, set and delete rows from the LSM # table. Slices may be passed to represent a range of key values. def get_timestamp(): # Return time as integer expressing time in microseconds. return int(time.time() * 1000000) # Create a new row, at current timestamp. ts = get_timestamp() EventLog[ts] = ('pageview', 'search', '/blog/some-post/') # Retrieve row from event log.
log = EventLog[ts] print(log.action, log.sender, log.target) # Prints ("pageview", "search", "/blog/some-post/") # Delete the row. del EventLog[ts] # We can also use the "create()" method. EventLog.create( timestamp=get_timestamp(), action='signup', sender='newsletter', target='sqlite-news') Simple key/value model declaration: .. code-block:: python class KV(LSMTable): key = TextField(primary_key=True) value = TextField() class Meta: database = db filename = 'kv.ldb' db.create_tables([KV]) For tables consisting of a single value field, Peewee will return the value directly when getting a single item. You can also request slices of rows, in which case Peewee returns a corresponding :py:class:`Select` query, which can be iterated over. Below are some examples: .. code-block:: pycon >>> KV['k0'] = 'v0' >>> print(KV['k0']) 'v0' >>> data = [{'key': 'k%d' % i, 'value': 'v%d' % i} for i in range(20)] >>> KV.insert_many(data).execute() >>> KV.select().count() 20 >>> KV['k8'] 'v8' >>> list(KV['k4.1':'k7.x']) [Row(key='k5', value='v5'), Row(key='k6', value='v6'), Row(key='k7', value='v7')] >>> list(KV['k6xxx':]) [Row(key='k7', value='v7'), Row(key='k8', value='v8'), Row(key='k9', value='v9')] You can also index the :py:class:`LSMTable` using expressions: .. code-block:: pycon >>> list(KV[KV.key > 'k6']) [Row(key='k7', value='v7'), Row(key='k8', value='v8'), Row(key='k9', value='v9')] >>> list(KV[(KV.key > 'k6') & (KV.value != 'v8')]) [Row(key='k7', value='v7'), Row(key='k9', value='v9')] You can delete single rows using ``del`` or multiple rows using slices or expressions: .. code-block:: pycon >>> del KV['k1'] >>> del KV['k3x':'k8'] >>> del KV[KV.key.between('k10', 'k18')] >>> list(KV[:]) [Row(key='k0', value='v0'), Row(key='k19', value='v19'), Row(key='k2', value='v2'), Row(key='k3', value='v3'), Row(key='k9', value='v9')] Attempting to get a single non-existent key will result in a ``DoesNotExist``, but slices will not raise an exception: .. code-block:: pycon >>> KV['k1'] ... KV.DoesNotExist: instance matching query does not exist: ... >>> list(KV['k1':'k1']) []
.. _sqlite-blob:
.. py:class:: ZeroBlob(length) :param int length: Size of blob in bytes. :py:class:`ZeroBlob` is used solely to reserve space for storing a BLOB that supports incremental I/O. To use the `SQLite BLOB-store <https://www.sqlite.org/c3ref/blob_open.html>`_ it is necessary to first insert a ZeroBlob of the desired size into the row you wish to use with incremental I/O. For example, see :py:class:`Blob`.
.. py:class:: Blob(database, table, column, rowid[, read_only=False]) :param database: :py:class:`SqliteExtDatabase` instance. :param str table: Name of table being accessed. :param str column: Name of column being accessed. :param int rowid: Primary-key of row being accessed. :param bool read_only: Prevent any modifications to the blob data. Open a blob, stored in the given table/column/row, for incremental I/O. To allocate storage for new data, you can use the :py:class:`ZeroBlob`, which is very efficient. .. code-block:: python class RawData(Model): data = BlobField() # Allocate 100MB of space for writing a large file incrementally: query = RawData.insert({'data': ZeroBlob(1024 * 1024 * 100)}) rowid = query.execute() # Now we can open the row for incremental I/O: blob = Blob(db, 'rawdata', 'data', rowid) # Read from the file and write to the blob in chunks of 4096 bytes. while True: data = file_handle.read(4096) if not data: break blob.write(data) bytes_written = blob.tell() blob.close()
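Reading the data back incrementally works the same way; a minimal sketch, reusing the table and ``rowid`` from the example above (``process()`` is a stand-in for application logic): .. code-block:: python blob = Blob(db, 'rawdata', 'data', rowid, read_only=True) while True: chunk = blob.read(4096) # Read up to 4KB at a time. if not chunk: break process(chunk) blob.close()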
.. py:method:: read([n=None]) :param int n: Only read up to *n* bytes from current position in file. Read up to *n* bytes from the current position in the blob file. If *n* is not specified, the entire blob will be read.
.. py:method:: seek(offset[, whence=0]) :param int offset: Seek to the given offset in the file. :param int whence: Seek relative to the specified frame of reference. Values for ``whence``: * ``0``: beginning of file * ``1``: current position * ``2``: end of file
.. py:method:: tell() Return current offset within the file.
.. py:method:: write(data) :param bytes data: Data to be written. Writes the given data, starting at the current position in the file.
.. py:method:: close() Close the file and free associated resources.
.. py:method:: reopen(rowid) :param int rowid: Primary key of row to open. If a blob has already been opened for a given table/column, you can use the :py:meth:`~Blob.reopen` method to re-use the same :py:class:`Blob` object for accessing multiple rows in the table.
.. _sqlite-extras:
Additional Features ------------------- The :py:class:`SqliteExtDatabase` accepts an initialization option to register support for a simple `bloom filter <https://en.wikipedia.org/wiki/Bloom_filter>`_. The bloom filter, once initialized, can then be used for efficient membership queries on a large set of data. Here's an example: .. code-block:: python db = CSqliteExtDatabase(':memory:', bloomfilter=True) # Create and define a table to store some data. db.execute_sql('CREATE TABLE "register" ("data" TEXT)') Register = Table('register', ('data',)).bind(db) # Populate the database with a bunch of text. with db.atomic(): for i in 'abcdefghijklmnopqrstuvwxyz': keys = [i * j for j in range(1, 10)] # a, aa, aaa, ... aaaaaaaaa Register.insert([{'data': key} for key in keys]).execute() # Collect data into a 16KB bloomfilter. query = Register.select(fn.bloomfilter(Register.data, 16 * 1024).alias('buf')) row = query.get() buf = row['buf'] # Use bloomfilter buf to test whether other keys are members. test_keys = ( ('aaaa', True), ('abc', False), ('zzzzzzz', True), ('zyxwvut', False)) for key, is_present in test_keys: query = Register.select(fn.bloomfilter_contains(key, buf).alias('is_member')) answer = query.get()['is_member'] assert answer == is_present The :py:class:`SqliteExtDatabase` can also register other useful functions: * ``rank_functions`` (enabled by default): registers functions for ranking search results, such as *bm25* and *lucene*. * ``hash_functions``: registers md5, sha1, sha256, adler32, crc32 and murmurhash functions. * ``regexp_function``: registers a regexp function. Examples: .. code-block:: python def create_new_user(username, password): # DO NOT DO THIS IN REAL LIFE. PLEASE. query = User.insert({'username': username, 'password': fn.sha1(password)}) new_user_id = query.execute()
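The ``regexp_function`` option enables regular-expression matching in SQL; a sketch (it assumes a ``User`` model bound to a database created with ``regexp_function=True``): .. code-block:: python db = SqliteExtDatabase('my_app.db', regexp_function=True) # Match usernames beginning with "huey". query = User.select().where(User.username.regexp('^huey'))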
You can use the *murmurhash* function to hash bytes to an integer for compact storage: .. code-block:: pycon >>> db = SqliteExtDatabase(':memory:', hash_functions=True) >>> db.execute_sql('SELECT murmurhash(?)', ('abcdefg',)).fetchone() (4188131059,)
peewee-3.17.7/docs/peewee/tweepee.jpg [binary JPEG image data omitted]
peewee-3.17.7/docs/requirements.txt000066400000000000000000000000371470346076600173140ustar00rootroot00000000000000docutils<0.18 sphinx-rtd-theme
peewee-3.17.7/docs/sqlite.png [binary PNG image data omitted]
peewee-3.17.7/peewee.py000066400000000000000000010460121470346076600147300ustar00rootroot00000000000000from bisect import bisect_left from bisect import bisect_right from contextlib import contextmanager from copy import deepcopy from functools import wraps from inspect import isclass import calendar import collections import datetime import decimal import hashlib import itertools import logging import operator import re import socket import struct import sys import threading import time import uuid import warnings try: from collections.abc import Mapping except ImportError: from collections import Mapping try: from pysqlite3 import dbapi2 as pysq3 except ImportError: try: from pysqlite2 import dbapi2 as pysq3 except ImportError: pysq3 = None try: import sqlite3 except ImportError: sqlite3 = pysq3 else: if pysq3 and pysq3.sqlite_version_info >= sqlite3.sqlite_version_info: sqlite3 = pysq3 try: from psycopg2cffi import compat compat.register() except ImportError: pass try: import psycopg2 from psycopg2 import extensions as pg_extensions try: from psycopg2 import errors as pg_errors except ImportError: pg_errors = None except ImportError: psycopg2 = pg_errors = None try: from psycopg2.extras import register_uuid as pg_register_uuid pg_register_uuid() except Exception: pass try: from psycopg import errors as pg3_errors except ImportError: pg3_errors = None mysql_passwd = False try: import pymysql as mysql except ImportError: try: import MySQLdb as mysql mysql_passwd = True except ImportError: mysql = None __version__ = '3.17.7' __all__ = [ 'AnyField', 'AsIs', 'AutoField', 'BareField', 'BigAutoField', 'BigBitField', 'BigIntegerField', 'BinaryUUIDField', 'BitField', 'BlobField', 'BooleanField', 'Case', 'Cast', 'CharField', 'Check', 'chunked', 'Column', 'CompositeKey', 'Context', 'Database', 'DatabaseError', 'DatabaseProxy', 'DataError', 'DateField', 'DateTimeField', 'DecimalField', 'DeferredForeignKey', 'DeferredThroughModel', 'DJANGO_MAP', 'DoesNotExist', 'DoubleField', 'DQ', 'EXCLUDED', 'Field', 'FixedCharField', 'FloatField', 'fn', 'ForeignKeyField', 'IdentityField', 'ImproperlyConfigured', 'Index', 'IntegerField', 'IntegrityError', 'InterfaceError', 'InternalError', 'IPField', 'JOIN', 'ManyToManyField', 'Model', 'ModelIndex', 'MySQLDatabase', 'NotSupportedError', 'OP', 'OperationalError', 'PostgresqlDatabase', 'PrimaryKeyField', # XXX: Deprecated, change to AutoField.
'prefetch', 'PREFETCH_TYPE', 'ProgrammingError', 'Proxy', 'QualifiedNames', 'SchemaManager', 'SmallIntegerField', 'Select', 'SQL', 'SqliteDatabase', 'Table', 'TextField', 'TimeField', 'TimestampField', 'Tuple', 'UUIDField', 'Value', 'ValuesList', 'Window', ] try: # Python 2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass logger = logging.getLogger('peewee') logger.addHandler(NullHandler()) if sys.version_info[0] == 2: text_type = unicode bytes_type = str buffer_type = buffer izip_longest = itertools.izip_longest callable_ = callable multi_types = (list, tuple, frozenset, set) exec('def reraise(tp, value, tb=None): raise tp, value, tb') def print_(s): sys.stdout.write(s) sys.stdout.write('\n') else: import builtins try: from collections.abc import Callable except ImportError: from collections import Callable from functools import reduce callable_ = lambda c: isinstance(c, Callable) text_type = str bytes_type = bytes buffer_type = memoryview basestring = str long = int multi_types = (list, tuple, frozenset, set, range) print_ = getattr(builtins, 'print') izip_longest = itertools.zip_longest def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value # Other compat issues. if sys.version_info < (3, 12): utcfromtimestamp = datetime.datetime.utcfromtimestamp utcnow = datetime.datetime.utcnow else: def utcfromtimestamp(ts): return (datetime.datetime .fromtimestamp(ts, tz=datetime.timezone.utc) .replace(tzinfo=None)) def utcnow(): return (datetime.datetime .now(datetime.timezone.utc) .replace(tzinfo=None)) if sqlite3: sqlite3.register_adapter(decimal.Decimal, str) sqlite3.register_adapter(datetime.date, str) sqlite3.register_adapter(datetime.time, str) if sys.version_info >= (3, 12): # We need to register datetime adapters as these are deprecated. def datetime_adapter(d): return d.isoformat(' ') def convert_date(d): return datetime.date(*map(int, d.split(b'-'))) def convert_timestamp(t): date, time = t.split(b' ') y, m, d = map(int, date.split(b'-')) t_full = time.split(b'.') hour, minute, second = map(int, t_full[0].split(b':')) if len(t_full) == 2: usec = int('{:0<6.6}'.format(t_full[1].decode())) else: usec = 0 return datetime.datetime(y, m, d, hour, minute, second, usec) sqlite3.register_adapter(datetime.datetime, datetime_adapter) sqlite3.register_converter('date', convert_date) sqlite3.register_converter('timestamp', convert_timestamp) __sqlite_version__ = sqlite3.sqlite_version_info else: __sqlite_version__ = (0, 0, 0) __date_parts__ = set(('year', 'month', 'day', 'hour', 'minute', 'second')) # Sqlite does not support the `date_part` SQL function, so we will define an # implementation in python. 
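# The helpers below (_sqlite_date_part and _sqlite_date_trunc) parse the incoming # datetime string by trying each entry in __sqlite_datetime_formats__ in order # (via format_date_time), then extract or truncate the requested component.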
__sqlite_datetime_formats__ = ( '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d', '%H:%M:%S', '%H:%M:%S.%f', '%H:%M') __sqlite_date_trunc__ = { 'year': '%Y-01-01 00:00:00', 'month': '%Y-%m-01 00:00:00', 'day': '%Y-%m-%d 00:00:00', 'hour': '%Y-%m-%d %H:00:00', 'minute': '%Y-%m-%d %H:%M:00', 'second': '%Y-%m-%d %H:%M:%S'} __mysql_date_trunc__ = __sqlite_date_trunc__.copy() __mysql_date_trunc__['minute'] = '%Y-%m-%d %H:%i:00' __mysql_date_trunc__['second'] = '%Y-%m-%d %H:%i:%S' def _sqlite_date_part(lookup_type, datetime_string): assert lookup_type in __date_parts__ if not datetime_string: return dt = format_date_time(datetime_string, __sqlite_datetime_formats__) return getattr(dt, lookup_type) def _sqlite_date_trunc(lookup_type, datetime_string): assert lookup_type in __sqlite_date_trunc__ if not datetime_string: return dt = format_date_time(datetime_string, __sqlite_datetime_formats__) return dt.strftime(__sqlite_date_trunc__[lookup_type]) def __deprecated__(s): warnings.warn(s, DeprecationWarning) class attrdict(dict): def __getattr__(self, attr): try: return self[attr] except KeyError: raise AttributeError(attr) def __setattr__(self, attr, value): self[attr] = value def __iadd__(self, rhs): self.update(rhs); return self def __add__(self, rhs): d = attrdict(self); d.update(rhs); return d SENTINEL = object() #: Operations for use in SQL expressions. OP = attrdict( AND='AND', OR='OR', ADD='+', SUB='-', MUL='*', DIV='/', BIN_AND='&', BIN_OR='|', XOR='#', MOD='%', EQ='=', LT='<', LTE='<=', GT='>', GTE='>=', NE='!=', IN='IN', NOT_IN='NOT IN', IS='IS', IS_NOT='IS NOT', LIKE='LIKE', ILIKE='ILIKE', BETWEEN='BETWEEN', REGEXP='REGEXP', IREGEXP='IREGEXP', CONCAT='||', BITWISE_NEGATION='~') # To support "django-style" double-underscore filters, create a mapping between # operation name and operation code, e.g. "__eq" == OP.EQ. DJANGO_MAP = attrdict({ 'eq': operator.eq, 'lt': operator.lt, 'lte': operator.le, 'gt': operator.gt, 'gte': operator.ge, 'ne': operator.ne, 'in': operator.lshift, 'is': lambda l, r: Expression(l, OP.IS, r), 'like': lambda l, r: Expression(l, OP.LIKE, r), 'ilike': lambda l, r: Expression(l, OP.ILIKE, r), 'regexp': lambda l, r: Expression(l, OP.REGEXP, r), }) #: Mapping of field type to the data-type supported by the database. Databases #: may override or add to this list. FIELD = attrdict( AUTO='INTEGER', BIGAUTO='BIGINT', BIGINT='BIGINT', BLOB='BLOB', BOOL='SMALLINT', CHAR='CHAR', DATE='DATE', DATETIME='DATETIME', DECIMAL='DECIMAL', DEFAULT='', DOUBLE='REAL', FLOAT='REAL', INT='INTEGER', SMALLINT='SMALLINT', TEXT='TEXT', TIME='TIME', UUID='TEXT', UUIDB='BLOB', VARCHAR='VARCHAR') #: Join helpers (for convenience) -- all join types are supported, this object #: is just to help avoid introducing errors by using strings everywhere. JOIN = attrdict( INNER='INNER JOIN', LEFT_OUTER='LEFT OUTER JOIN', RIGHT_OUTER='RIGHT OUTER JOIN', FULL='FULL JOIN', FULL_OUTER='FULL OUTER JOIN', CROSS='CROSS JOIN', NATURAL='NATURAL JOIN', LATERAL='LATERAL', LEFT_LATERAL='LEFT JOIN LATERAL') # Row representations. ROW = attrdict( TUPLE=1, DICT=2, NAMED_TUPLE=3, CONSTRUCTOR=4, MODEL=5) # Query type to use with prefetch PREFETCH_TYPE = attrdict( WHERE=1, JOIN=2) SCOPE_NORMAL = 1 SCOPE_SOURCE = 2 SCOPE_VALUES = 4 SCOPE_CTE = 8 SCOPE_COLUMN = 16 # Rules for parentheses around subqueries in compound select. CSQ_PARENTHESES_NEVER = 0 CSQ_PARENTHESES_ALWAYS = 1 CSQ_PARENTHESES_UNNESTED = 2 # Regular expressions used to convert class names to snake-case table names. 
# First regex handles acronym followed by word or initial lower-word followed # by a capitalized word. e.g. APIResponse -> API_Response / fooBar -> foo_Bar. # Second regex handles the normal case of two title-cased words. SNAKE_CASE_STEP1 = re.compile('(.)_*([A-Z][a-z]+)') SNAKE_CASE_STEP2 = re.compile('([a-z0-9])_*([A-Z])') # Helper functions that are used in various parts of the codebase. MODEL_BASE = '_metaclass_helper_' def with_metaclass(meta, base=object): return meta(MODEL_BASE, (base,), {}) def merge_dict(source, overrides): merged = source.copy() if overrides: merged.update(overrides) return merged def quote(path, quote_chars): if len(path) == 1: return path[0].join(quote_chars) return '.'.join([part.join(quote_chars) for part in path]) is_model = lambda o: isclass(o) and issubclass(o, Model) def ensure_tuple(value): if value is not None: return value if isinstance(value, (list, tuple)) else (value,) def ensure_entity(value): if value is not None: return value if isinstance(value, Node) else Entity(value) def make_snake_case(s): first = SNAKE_CASE_STEP1.sub(r'\1_\2', s) return SNAKE_CASE_STEP2.sub(r'\1_\2', first).lower() def chunked(it, n): marker = object() for group in (list(g) for g in izip_longest(*[iter(it)] * n, fillvalue=marker)): if group[-1] is marker: del group[group.index(marker):] yield group class _callable_context_manager(object): def __call__(self, fn): @wraps(fn) def inner(*args, **kwargs): with self: return fn(*args, **kwargs) return inner class Proxy(object): """ Create a proxy or placeholder for another object. """ __slots__ = ('obj', '_callbacks') def __init__(self): self._callbacks = [] self.initialize(None) def initialize(self, obj): self.obj = obj for callback in self._callbacks: callback(obj) def attach_callback(self, callback): self._callbacks.append(callback) return callback def passthrough(method): def inner(self, *args, **kwargs): if self.obj is None: raise AttributeError('Cannot use uninitialized Proxy.') return getattr(self.obj, method)(*args, **kwargs) return inner # Allow proxy to be used as a context-manager. __enter__ = passthrough('__enter__') __exit__ = passthrough('__exit__') def __getattr__(self, attr): if self.obj is None: raise AttributeError('Cannot use uninitialized Proxy.') return getattr(self.obj, attr) def __setattr__(self, attr, value): if attr not in self.__slots__: raise AttributeError('Cannot set attribute on proxy.') return super(Proxy, self).__setattr__(attr, value) class DatabaseProxy(Proxy): """ Proxy implementation specifically for proxying `Database` objects. """ __slots__ = ('obj', '_callbacks', '_Model') def connection_context(self): return ConnectionContext(self) def atomic(self, *args, **kwargs): return _atomic(self, *args, **kwargs) def manual_commit(self): return _manual(self) def transaction(self, *args, **kwargs): return _transaction(self, *args, **kwargs) def savepoint(self): return _savepoint(self) @property def Model(self): if not hasattr(self, '_Model'): class Meta: database = self self._Model = type('BaseModel', (Model,), {'Meta': Meta}) return self._Model class ModelDescriptor(object): pass # SQL Generation. class AliasManager(object): __slots__ = ('_counter', '_current_index', '_mapping') def __init__(self): # A list of dictionaries containing mappings at various depths. 
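# push() opens a new nested scope and pop() restores the enclosing one; aliases # are handed out as t1, t2, ... in the order sources are first encountered.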
self._counter = 0 self._current_index = 0 self._mapping = [] self.push() @property def mapping(self): return self._mapping[self._current_index - 1] def add(self, source): if source not in self.mapping: self._counter += 1 self[source] = 't%d' % self._counter return self.mapping[source] def get(self, source, any_depth=False): if any_depth: for idx in reversed(range(self._current_index)): if source in self._mapping[idx]: return self._mapping[idx][source] return self.add(source) def __getitem__(self, source): return self.get(source) def __setitem__(self, source, alias): self.mapping[source] = alias def push(self): self._current_index += 1 if self._current_index > len(self._mapping): self._mapping.append({}) def pop(self): if self._current_index == 1: raise ValueError('Cannot pop() from empty alias manager.') self._current_index -= 1 class State(collections.namedtuple('_State', ('scope', 'parentheses', 'settings'))): def __new__(cls, scope=SCOPE_NORMAL, parentheses=False, **kwargs): return super(State, cls).__new__(cls, scope, parentheses, kwargs) def __call__(self, scope=None, parentheses=None, **kwargs): # Scope and settings are "inherited" (parentheses is not, however). scope = self.scope if scope is None else scope # Try to avoid unnecessary dict copying. if kwargs and self.settings: settings = self.settings.copy() # Copy original settings dict. settings.update(kwargs) # Update copy with overrides. elif kwargs: settings = kwargs else: settings = self.settings return State(scope, parentheses, **settings) def __getattr__(self, attr_name): return self.settings.get(attr_name) def __scope_context__(scope): @contextmanager def inner(self, **kwargs): with self(scope=scope, **kwargs): yield self return inner class Context(object): __slots__ = ('stack', '_sql', '_values', 'alias_manager', 'state') def __init__(self, **settings): self.stack = [] self._sql = [] self._values = [] self.alias_manager = AliasManager() self.state = State(**settings) def as_new(self): return Context(**self.state.settings) def column_sort_key(self, item): return item[0].get_sort_key(self) @property def scope(self): return self.state.scope @property def parentheses(self): return self.state.parentheses @property def subquery(self): return self.state.subquery def __call__(self, **overrides): if overrides and overrides.get('scope') == self.scope: del overrides['scope'] self.stack.append(self.state) self.state = self.state(**overrides) return self scope_normal = __scope_context__(SCOPE_NORMAL) scope_source = __scope_context__(SCOPE_SOURCE) scope_values = __scope_context__(SCOPE_VALUES) scope_cte = __scope_context__(SCOPE_CTE) scope_column = __scope_context__(SCOPE_COLUMN) def __enter__(self): if self.parentheses: self.literal('(') return self def __exit__(self, exc_type, exc_val, exc_tb): if self.parentheses: self.literal(')') self.state = self.stack.pop() @contextmanager def push_alias(self): self.alias_manager.push() yield self.alias_manager.pop() def sql(self, obj): if isinstance(obj, (Node, Context)): return obj.__sql__(self) elif is_model(obj): return obj._meta.table.__sql__(self) else: return self.sql(Value(obj)) def literal(self, keyword): self._sql.append(keyword) return self def value(self, value, converter=None, add_param=True): if converter: value = converter(value) elif converter is None and self.state.converter: # Explicitly check for None so that "False" can be used to signify # that no conversion should be applied. 
value = self.state.converter(value) if isinstance(value, Node): with self(converter=None): return self.sql(value) elif is_model(value): # Under certain circumstances, we could end-up treating a model- # class itself as a value. This check ensures that we drop the # table alias into the query instead of trying to parameterize a # model (for instance, passing a model as a function argument). with self.scope_column(): return self.sql(value) if self.state.value_literals: return self.literal(_query_val_transform(value)) self._values.append(value) return self.literal(self.state.param or '?') if add_param else self def __sql__(self, ctx): ctx._sql.extend(self._sql) ctx._values.extend(self._values) return ctx def parse(self, node): return self.sql(node).query() def query(self): return ''.join(self._sql), self._values def query_to_string(query): # NOTE: this function is not exported by default as it might be misused -- # and this misuse could lead to sql injection vulnerabilities. This # function is intended for debugging or logging purposes ONLY. db = getattr(query, '_database', None) if db is not None: ctx = db.get_sql_context() else: ctx = Context() sql, params = ctx.sql(query).query() if not params: return sql param = ctx.state.param or '?' if param == '?': sql = sql.replace('?', '%s') return sql % tuple(map(_query_val_transform, params)) def _query_val_transform(v): # Interpolate parameters. if isinstance(v, (text_type, datetime.datetime, datetime.date, datetime.time)): v = "'%s'" % v elif isinstance(v, bytes_type): try: v = v.decode('utf8') except UnicodeDecodeError: v = v.decode('raw_unicode_escape') v = "'%s'" % v elif isinstance(v, int): v = '%s' % int(v) # Also handles booleans -> 1 or 0. elif v is None: v = 'NULL' else: v = str(v) return v # AST. class Node(object): _coerce = True __isabstractmethod__ = False # Avoid issue w/abc and __getattr__, eg fn.X def clone(self): obj = self.__class__.__new__(self.__class__) obj.__dict__ = self.__dict__.copy() return obj def __sql__(self, ctx): raise NotImplementedError @staticmethod def copy(method): def inner(self, *args, **kwargs): clone = self.clone() method(clone, *args, **kwargs) return clone return inner def coerce(self, _coerce=True): if _coerce != self._coerce: clone = self.clone() clone._coerce = _coerce return clone return self def is_alias(self): return False def unwrap(self): return self class ColumnFactory(object): __slots__ = ('node',) def __init__(self, node): self.node = node def __getattr__(self, attr): return Column(self.node, attr) __getitem__ = __getattr__ class _DynamicColumn(object): __slots__ = () def __get__(self, instance, instance_type=None): if instance is not None: return ColumnFactory(instance) # Implements __getattr__(). return self class _ExplicitColumn(object): __slots__ = () def __get__(self, instance, instance_type=None): if instance is not None: raise AttributeError( '%s specifies columns explicitly, and does not support ' 'dynamic column lookups.' 
% instance) return self class Star(Node): def __init__(self, source): self.source = source def __sql__(self, ctx): return ctx.sql(QualifiedNames(self.source)).literal('.*') class Source(Node): c = _DynamicColumn() def __init__(self, alias=None): super(Source, self).__init__() self._alias = alias @Node.copy def alias(self, name): self._alias = name def select(self, *columns): if not columns: columns = (SQL('*'),) return Select((self,), columns) @property def __star__(self): return Star(self) def join(self, dest, join_type=JOIN.INNER, on=None): return Join(self, dest, join_type, on) def left_outer_join(self, dest, on=None): return Join(self, dest, JOIN.LEFT_OUTER, on) def cte(self, name, recursive=False, columns=None, materialized=None): return CTE(name, self, recursive=recursive, columns=columns, materialized=materialized) def get_sort_key(self, ctx): if self._alias: return (self._alias,) return (ctx.alias_manager[self],) def apply_alias(self, ctx): # If we are defining the source, include the "AS alias" declaration. An # alias is created for the source if one is not already defined. if ctx.scope == SCOPE_SOURCE: if self._alias: ctx.alias_manager[self] = self._alias ctx.literal(' AS ').sql(Entity(ctx.alias_manager[self])) return ctx def apply_column(self, ctx): if self._alias: ctx.alias_manager[self] = self._alias return ctx.sql(Entity(ctx.alias_manager[self])) class _HashableSource(object): def __init__(self, *args, **kwargs): super(_HashableSource, self).__init__(*args, **kwargs) self._update_hash() @Node.copy def alias(self, name): self._alias = name self._update_hash() def _update_hash(self): self._hash = self._get_hash() def _get_hash(self): return hash((self.__class__, self._path, self._alias)) def __hash__(self): return self._hash def __eq__(self, other): if isinstance(other, _HashableSource): return self._hash == other._hash return Expression(self, OP.EQ, other) def __ne__(self, other): if isinstance(other, _HashableSource): return self._hash != other._hash return Expression(self, OP.NE, other) def _e(op): def inner(self, rhs): return Expression(self, op, rhs) return inner __lt__ = _e(OP.LT) __le__ = _e(OP.LTE) __gt__ = _e(OP.GT) __ge__ = _e(OP.GTE) def __bind_database__(meth): @wraps(meth) def inner(self, *args, **kwargs): result = meth(self, *args, **kwargs) if self._database: return result.bind(self._database) return result return inner def __join__(join_type=JOIN.INNER, inverted=False): def method(self, other): if inverted: self, other = other, self return Join(self, other, join_type=join_type) return method class BaseTable(Source): __and__ = __join__(JOIN.INNER) __add__ = __join__(JOIN.LEFT_OUTER) __sub__ = __join__(JOIN.RIGHT_OUTER) __or__ = __join__(JOIN.FULL_OUTER) __mul__ = __join__(JOIN.CROSS) __rand__ = __join__(JOIN.INNER, inverted=True) __radd__ = __join__(JOIN.LEFT_OUTER, inverted=True) __rsub__ = __join__(JOIN.RIGHT_OUTER, inverted=True) __ror__ = __join__(JOIN.FULL_OUTER, inverted=True) __rmul__ = __join__(JOIN.CROSS, inverted=True) class _BoundTableContext(object): def __init__(self, table, database): self.table = table self.database = database def __call__(self, fn): @wraps(fn) def inner(*args, **kwargs): with _BoundTableContext(self.table, self.database): return fn(*args, **kwargs) return inner def __enter__(self): self._orig_database = self.table._database self.table.bind(self.database) if self.table._model is not None: self.table._model.bind(self.database) return self.table def __exit__(self, exc_type, exc_val, exc_tb): self.table.bind(self._orig_database) if 
self.table._model is not None: self.table._model.bind(self._orig_database) class Table(_HashableSource, BaseTable): def __init__(self, name, columns=None, primary_key=None, schema=None, alias=None, _model=None, _database=None): self.__name__ = name self._columns = columns self._primary_key = primary_key self._schema = schema self._path = (schema, name) if schema else (name,) self._model = _model self._database = _database super(Table, self).__init__(alias=alias) # Allow tables to restrict what columns are available. if columns is not None: self.c = _ExplicitColumn() for column in columns: setattr(self, column, Column(self, column)) if primary_key: col_src = self if self._columns else self.c self.primary_key = getattr(col_src, primary_key) else: self.primary_key = None def clone(self): # Ensure a deep copy of the column instances. return Table( self.__name__, columns=self._columns, primary_key=self._primary_key, schema=self._schema, alias=self._alias, _model=self._model, _database=self._database) def bind(self, database=None): self._database = database return self def bind_ctx(self, database=None): return _BoundTableContext(self, database) def _get_hash(self): return hash((self.__class__, self._path, self._alias, self._model)) @__bind_database__ def select(self, *columns): if not columns and self._columns: columns = [Column(self, column) for column in self._columns] return Select((self,), columns) @__bind_database__ def insert(self, insert=None, columns=None, **kwargs): if kwargs: insert = {} if insert is None else insert src = self if self._columns else self.c for key, value in kwargs.items(): insert[getattr(src, key)] = value return Insert(self, insert=insert, columns=columns) @__bind_database__ def replace(self, insert=None, columns=None, **kwargs): return (self .insert(insert=insert, columns=columns) .on_conflict('REPLACE')) @__bind_database__ def update(self, update=None, **kwargs): if kwargs: update = {} if update is None else update for key, value in kwargs.items(): src = self if self._columns else self.c update[getattr(src, key)] = value return Update(self, update=update) @__bind_database__ def delete(self): return Delete(self) def __sql__(self, ctx): if ctx.scope == SCOPE_VALUES: # Return the quoted table name. return ctx.sql(Entity(*self._path)) if self._alias: ctx.alias_manager[self] = self._alias if ctx.scope == SCOPE_SOURCE: # Define the table and its alias. return self.apply_alias(ctx.sql(Entity(*self._path))) else: # Refer to the table using the alias. 
return self.apply_column(ctx) class Join(BaseTable): def __init__(self, lhs, rhs, join_type=JOIN.INNER, on=None, alias=None): super(Join, self).__init__(alias=alias) self.lhs = lhs self.rhs = rhs self.join_type = join_type self._on = on def on(self, predicate): self._on = predicate return self def __sql__(self, ctx): (ctx .sql(self.lhs) .literal(' %s ' % self.join_type) .sql(self.rhs)) if self._on is not None: ctx.literal(' ON ').sql(self._on) return ctx class ValuesList(_HashableSource, BaseTable): def __init__(self, values, columns=None, alias=None): self._values = values self._columns = columns super(ValuesList, self).__init__(alias=alias) def _get_hash(self): return hash((self.__class__, id(self._values), self._alias)) @Node.copy def columns(self, *names): self._columns = names def __sql__(self, ctx): if self._alias: ctx.alias_manager[self] = self._alias if ctx.scope == SCOPE_SOURCE or ctx.scope == SCOPE_NORMAL: with ctx(parentheses=not ctx.parentheses): ctx = (ctx .literal('VALUES ') .sql(CommaNodeList([ EnclosedNodeList(row) for row in self._values]))) if ctx.scope == SCOPE_SOURCE: ctx.literal(' AS ').sql(Entity(ctx.alias_manager[self])) if self._columns: entities = [Entity(c) for c in self._columns] ctx.sql(EnclosedNodeList(entities)) else: ctx.sql(Entity(ctx.alias_manager[self])) return ctx class CTE(_HashableSource, Source): def __init__(self, name, query, recursive=False, columns=None, materialized=None): self._alias = name self._query = query self._recursive = recursive self._materialized = materialized if columns is not None: columns = [Entity(c) if isinstance(c, basestring) else c for c in columns] self._columns = columns query._cte_list = () super(CTE, self).__init__(alias=name) def select_from(self, *columns): if not columns: raise ValueError('select_from() must specify one or more columns ' 'from the CTE to select.') query = (Select((self,), columns) .with_cte(self) .bind(self._query._database)) try: query = query.objects(self._query.model) except AttributeError: pass return query def _get_hash(self): return hash((self.__class__, self._alias, id(self._query))) def union_all(self, rhs): clone = self._query.clone() return CTE(self._alias, clone + rhs, self._recursive, self._columns) __add__ = union_all def union(self, rhs): clone = self._query.clone() return CTE(self._alias, clone | rhs, self._recursive, self._columns) __or__ = union def __sql__(self, ctx): if ctx.scope != SCOPE_CTE: return ctx.sql(Entity(self._alias)) with ctx.push_alias(): ctx.alias_manager[self] = self._alias ctx.sql(Entity(self._alias)) if self._columns: ctx.literal(' ').sql(EnclosedNodeList(self._columns)) ctx.literal(' AS ') if self._materialized: ctx.literal('MATERIALIZED ') elif self._materialized is False: ctx.literal('NOT MATERIALIZED ') with ctx.scope_normal(parentheses=True): ctx.sql(self._query) return ctx
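# Illustrative sketch (not part of the original module; `Tweet` is a
# hypothetical model): a SELECT may be wrapped as a CTE and then queried via
# select_from():
#
#     cte = (Tweet
#            .select(Tweet.user, fn.COUNT(Tweet.id).alias('ct'))
#            .group_by(Tweet.user)
#            .cte('tweet_counts', columns=('user_id', 'ct')))
#     query = cte.select_from(cte.c.user_id, cte.c.ct)  # WITH ... SELECT ...

class ColumnBase(Node): _converter = None @Node.copy def converter(self, converter=None): self._converter = converter def alias(self, alias): if alias: return Alias(self, alias) return self def unalias(self): return self def bind_to(self, dest): return BindTo(self, dest) def cast(self, as_type): return Cast(self, as_type) def asc(self, collation=None, nulls=None): return Asc(self, collation=collation, nulls=nulls) __pos__ = asc def desc(self, collation=None, nulls=None): return Desc(self, collation=collation, nulls=nulls) __neg__ = desc def __invert__(self): return Negated(self) def _e(op, inv=False): """ Lightweight factory which returns a method that builds an Expression consisting of the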
left-hand and right-hand operands, using `op`. """ def inner(self, rhs): if inv: return Expression(rhs, op, self) return Expression(self, op, rhs) return inner __and__ = _e(OP.AND) __or__ = _e(OP.OR) __add__ = _e(OP.ADD) __sub__ = _e(OP.SUB) __mul__ = _e(OP.MUL) __div__ = __truediv__ = _e(OP.DIV) __xor__ = _e(OP.XOR) __radd__ = _e(OP.ADD, inv=True) __rsub__ = _e(OP.SUB, inv=True) __rmul__ = _e(OP.MUL, inv=True) __rdiv__ = __rtruediv__ = _e(OP.DIV, inv=True) __rand__ = _e(OP.AND, inv=True) __ror__ = _e(OP.OR, inv=True) __rxor__ = _e(OP.XOR, inv=True) def __eq__(self, rhs): op = OP.IS if rhs is None else OP.EQ return Expression(self, op, rhs) def __ne__(self, rhs): op = OP.IS_NOT if rhs is None else OP.NE return Expression(self, op, rhs) __lt__ = _e(OP.LT) __le__ = _e(OP.LTE) __gt__ = _e(OP.GT) __ge__ = _e(OP.GTE) __lshift__ = _e(OP.IN) __rshift__ = _e(OP.IS) __mod__ = _e(OP.LIKE) __pow__ = _e(OP.ILIKE) like = _e(OP.LIKE) ilike = _e(OP.ILIKE) bin_and = _e(OP.BIN_AND) bin_or = _e(OP.BIN_OR) in_ = _e(OP.IN) not_in = _e(OP.NOT_IN) regexp = _e(OP.REGEXP) iregexp = _e(OP.IREGEXP) # Special expressions. def is_null(self, is_null=True): op = OP.IS if is_null else OP.IS_NOT return Expression(self, op, None) def _escape_like_expr(self, s, template): if s.find('_') >= 0 or s.find('%') >= 0 or s.find('\\') >= 0: s = s.replace('\\', '\\\\').replace('_', '\\_').replace('%', '\\%') # Pass the expression and escape string as unconverted values, to # avoid (e.g.) a Json field converter turning the escaped LIKE # pattern into a Json-quoted string. return NodeList(( Value(template % s, converter=False), SQL('ESCAPE'), Value('\\', converter=False))) return template % s def contains(self, rhs): if isinstance(rhs, Node): rhs = Expression('%', OP.CONCAT, Expression(rhs, OP.CONCAT, '%')) else: rhs = self._escape_like_expr(rhs, '%%%s%%') return Expression(self, OP.ILIKE, rhs) def startswith(self, rhs): if isinstance(rhs, Node): rhs = Expression(rhs, OP.CONCAT, '%') else: rhs = self._escape_like_expr(rhs, '%s%%') return Expression(self, OP.ILIKE, rhs) def endswith(self, rhs): if isinstance(rhs, Node): rhs = Expression('%', OP.CONCAT, rhs) else: rhs = self._escape_like_expr(rhs, '%%%s') return Expression(self, OP.ILIKE, rhs) def between(self, lo, hi): return Expression(self, OP.BETWEEN, NodeList((lo, SQL('AND'), hi))) def concat(self, rhs): return StringExpression(self, OP.CONCAT, rhs) def __getitem__(self, item): if isinstance(item, slice): if item.start is None or item.stop is None: raise ValueError('BETWEEN range must have both a start- and ' 'end-point.') return self.between(item.start, item.stop) return self == item __iter__ = None # Prevent infinite loop. 
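# Illustrative sketch (not part of the original module; `Employee` is a
# hypothetical model): the overloads above build Expression nodes rather than
# evaluating anything:
#
#     Employee.name.contains('ann')          # ILIKE '%ann%' (with escaping)
#     Employee.salary.between(50000, 90000)  # BETWEEN 50000 AND 90000
#     Employee.salary[50000:90000]           # the same BETWEEN, via a slice
#     Employee.dept << ('eng', 'ops')        # IN, via __lshift__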
def distinct(self): return NodeList((SQL('DISTINCT'), self)) def collate(self, collation): return NodeList((self, SQL('COLLATE %s' % collation))) def get_sort_key(self, ctx): return () class Column(ColumnBase): def __init__(self, source, name): self.source = source self.name = name def get_sort_key(self, ctx): if ctx.scope == SCOPE_VALUES: return (self.name,) else: return self.source.get_sort_key(ctx) + (self.name,) def __hash__(self): return hash((self.source, self.name)) def __sql__(self, ctx): if ctx.scope == SCOPE_VALUES: return ctx.sql(Entity(self.name)) else: with ctx.scope_column(): return ctx.sql(self.source).literal('.').sql(Entity(self.name)) class WrappedNode(ColumnBase): def __init__(self, node): self.node = node self._coerce = getattr(node, '_coerce', True) self._converter = getattr(node, '_converter', None) def is_alias(self): return self.node.is_alias() def unwrap(self): return self.node.unwrap() class EntityFactory(object): __slots__ = ('node',) def __init__(self, node): self.node = node def __getattr__(self, attr): return Entity(self.node, attr) class _DynamicEntity(object): __slots__ = () def __get__(self, instance, instance_type=None): if instance is not None: return EntityFactory(instance._alias) # Implements __getattr__(). return self class Alias(WrappedNode): c = _DynamicEntity() def __init__(self, node, alias): super(Alias, self).__init__(node) self._alias = alias def __hash__(self): return hash(self._alias) @property def name(self): return self._alias @name.setter def name(self, value): self._alias = value def alias(self, alias=None): if alias is None: return self.node else: return Alias(self.node, alias) def unalias(self): return self.node def is_alias(self): return True def __sql__(self, ctx): if ctx.scope == SCOPE_SOURCE: return (ctx .sql(self.node) .literal(' AS ') .sql(Entity(self._alias))) else: return ctx.sql(Entity(self._alias)) class BindTo(WrappedNode): def __init__(self, node, dest): super(BindTo, self).__init__(node) self.dest = dest def __sql__(self, ctx): return ctx.sql(self.node) class Negated(WrappedNode): def __invert__(self): return self.node def __sql__(self, ctx): return ctx.literal('NOT ').sql(self.node) class BitwiseMixin(object): def __and__(self, other): return self.bin_and(other) def __or__(self, other): return self.bin_or(other) def __sub__(self, other): return self.bin_and(other.bin_negated()) def __invert__(self): return BitwiseNegated(self) class BitwiseNegated(BitwiseMixin, WrappedNode): def __invert__(self): return self.node def __sql__(self, ctx): if ctx.state.operations: op_sql = ctx.state.operations.get(self.op, self.op) else: op_sql = self.op return ctx.literal(op_sql).sql(self.node) class Value(ColumnBase): def __init__(self, value, converter=None, unpack=True): self.value = value self.converter = converter self.multi = unpack and isinstance(self.value, multi_types) if self.multi: self.values = [] for item in self.value: if isinstance(item, Node): self.values.append(item) else: self.values.append(Value(item, self.converter)) def __sql__(self, ctx): if self.multi: # For multi-part values (e.g. lists of IDs). 
return ctx.sql(EnclosedNodeList(self.values)) return ctx.value(self.value, self.converter) class ValueLiterals(WrappedNode): def __sql__(self, ctx): with ctx(value_literals=True): return ctx.sql(self.node) def AsIs(value): return Value(value, unpack=False) class Cast(WrappedNode): def __init__(self, node, cast): super(Cast, self).__init__(node) self._cast = cast self._coerce = False def __sql__(self, ctx): return (ctx .literal('CAST(') .sql(self.node) .literal(' AS %s)' % self._cast)) class Ordering(WrappedNode): def __init__(self, node, direction, collation=None, nulls=None): super(Ordering, self).__init__(node) self.direction = direction self.collation = collation self.nulls = nulls if nulls and nulls.lower() not in ('first', 'last'): raise ValueError('Ordering nulls= parameter must be "first" or ' '"last", got: %s' % nulls) def collate(self, collation=None): return Ordering(self.node, self.direction, collation) def _null_ordering_case(self, nulls): if nulls.lower() == 'last': ifnull, notnull = 1, 0 elif nulls.lower() == 'first': ifnull, notnull = 0, 1 else: raise ValueError('unsupported value for nulls= ordering.') return Case(None, ((self.node.is_null(), ifnull),), notnull) def __sql__(self, ctx): if self.nulls and not ctx.state.nulls_ordering: ctx.sql(self._null_ordering_case(self.nulls)).literal(', ') ctx.sql(self.node).literal(' %s' % self.direction) if self.collation: ctx.literal(' COLLATE %s' % self.collation) if self.nulls and ctx.state.nulls_ordering: ctx.literal(' NULLS %s' % self.nulls) return ctx def Asc(node, collation=None, nulls=None): return Ordering(node, 'ASC', collation, nulls) def Desc(node, collation=None, nulls=None): return Ordering(node, 'DESC', collation, nulls) class Expression(ColumnBase): def __init__(self, lhs, op, rhs, flat=False): self.lhs = lhs self.op = op self.rhs = rhs self.flat = flat def __sql__(self, ctx): overrides = {'parentheses': not self.flat, 'in_expr': True} # First attempt to unwrap the node on the left-hand-side, so that we # can get at the underlying Field if one is present. node = raw_node = self.lhs if isinstance(raw_node, WrappedNode): node = raw_node.unwrap() # Set up the appropriate converter if we have a field on the left side. if isinstance(node, Field) and raw_node._coerce: overrides['converter'] = node.db_value overrides['is_fk_expr'] = isinstance(node, ForeignKeyField) else: overrides['converter'] = None if ctx.state.operations: op_sql = ctx.state.operations.get(self.op, self.op) else: op_sql = self.op with ctx(**overrides): # Postgresql reports an error for IN/NOT IN (), so convert to # the equivalent boolean expression. 
op_in = self.op == OP.IN or self.op == OP.NOT_IN if op_in and ctx.as_new().parse(self.rhs)[0] == '()': return ctx.literal('0 = 1' if self.op == OP.IN else '1 = 1') rhs = self.rhs if rhs is None and (self.op == OP.IS or self.op == OP.IS_NOT): rhs = SQL('NULL') return (ctx .sql(self.lhs) .literal(' %s ' % op_sql) .sql(rhs)) class StringExpression(Expression): def __add__(self, rhs): return self.concat(rhs) def __radd__(self, lhs): return StringExpression(lhs, OP.CONCAT, self) class Entity(ColumnBase): def __init__(self, *path): self._path = [part.replace('"', '""') for part in path if part] def __getattr__(self, attr): return Entity(*self._path + [attr]) def get_sort_key(self, ctx): return tuple(self._path) def __hash__(self): return hash((self.__class__.__name__, tuple(self._path))) def __sql__(self, ctx): return ctx.literal(quote(self._path, ctx.state.quote or '""')) class SQL(ColumnBase): def __init__(self, sql, params=None): self.sql = sql self.params = params def __sql__(self, ctx): ctx.literal(self.sql) if self.params: for param in self.params: ctx.value(param, False, add_param=False) return ctx def Check(constraint, name=None): check = SQL('CHECK (%s)' % constraint) if not name: return check return NodeList((SQL('CONSTRAINT'), Entity(name), check)) class Function(ColumnBase): no_coerce_functions = set(('sum', 'count', 'avg', 'cast', 'array_agg')) def __init__(self, name, arguments, coerce=True, python_value=None): self.name = name self.arguments = arguments self._filter = None self._order_by = None self._python_value = python_value if name and name.lower() in self.no_coerce_functions: self._coerce = False else: self._coerce = coerce def __getattr__(self, attr): def decorator(*args, **kwargs): return Function(attr, args, **kwargs) return decorator @Node.copy def filter(self, where=None): self._filter = where @Node.copy def order_by(self, *ordering): self._order_by = ordering @Node.copy def python_value(self, func=None): self._python_value = func def over(self, partition_by=None, order_by=None, start=None, end=None, frame_type=None, window=None, exclude=None): if isinstance(partition_by, Window) and window is None: window = partition_by if window is not None: node = WindowAlias(window) else: node = Window(partition_by=partition_by, order_by=order_by, start=start, end=end, frame_type=frame_type, exclude=exclude, _inline=True) return NodeList((self, SQL('OVER'), node)) def __sql__(self, ctx): ctx.literal(self.name) if not len(self.arguments): ctx.literal('()') else: args = self.arguments # If this is an ordered aggregate, then we will modify the last # argument to append the ORDER BY ... clause. We do this to avoid # double-wrapping any expression args in parentheses, as NodeList # has a special check (hack) in place to work around this. if self._order_by: args = list(args) args[-1] = NodeList((args[-1], SQL('ORDER BY'), CommaNodeList(self._order_by))) with ctx(in_function=True, function_arg_count=len(self.arguments)): ctx.sql(EnclosedNodeList([ (arg if isinstance(arg, Node) else Value(arg, False)) for arg in args])) if self._filter: ctx.literal(' FILTER (WHERE ').sql(self._filter).literal(')') return ctx fn = Function(None, None)
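# Illustrative sketch (not part of the original module; `Sale` is a
# hypothetical model): `fn` builds arbitrary SQL functions via attribute
# access, including filtered aggregates and window functions:
#
#     fn.COUNT(Sale.id).filter(Sale.amount > 100)
#     fn.SUM(Sale.amount).over(partition_by=[Sale.region],
#                              order_by=[Sale.sold_at])

class Window(Node): # Frame start/end and frame exclusion. CURRENT_ROW = SQL('CURRENT ROW') GROUP = SQL('GROUP') TIES = SQL('TIES') NO_OTHERS = SQL('NO OTHERS') # Frame types.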
GROUPS = 'GROUPS' RANGE = 'RANGE' ROWS = 'ROWS' def __init__(self, partition_by=None, order_by=None, start=None, end=None, frame_type=None, extends=None, exclude=None, alias=None, _inline=False): super(Window, self).__init__() if start is not None and not isinstance(start, SQL): start = SQL(start) if end is not None and not isinstance(end, SQL): end = SQL(end) self.partition_by = ensure_tuple(partition_by) self.order_by = ensure_tuple(order_by) self.start = start self.end = end if self.start is None and self.end is not None: raise ValueError('Cannot specify WINDOW end without start.') self._alias = alias or 'w' self._inline = _inline self.frame_type = frame_type self._extends = extends self._exclude = exclude def alias(self, alias=None): self._alias = alias or 'w' return self @Node.copy def as_range(self): self.frame_type = Window.RANGE @Node.copy def as_rows(self): self.frame_type = Window.ROWS @Node.copy def as_groups(self): self.frame_type = Window.GROUPS @Node.copy def extends(self, window=None): self._extends = window @Node.copy def exclude(self, frame_exclusion=None): if isinstance(frame_exclusion, basestring): frame_exclusion = SQL(frame_exclusion) self._exclude = frame_exclusion @staticmethod def following(value=None): if value is None: return SQL('UNBOUNDED FOLLOWING') return SQL('%d FOLLOWING' % value) @staticmethod def preceding(value=None): if value is None: return SQL('UNBOUNDED PRECEDING') return SQL('%d PRECEDING' % value) def __sql__(self, ctx): if ctx.scope != SCOPE_SOURCE and not self._inline: ctx.literal(self._alias) ctx.literal(' AS ') with ctx(parentheses=True): parts = [] if self._extends is not None: ext = self._extends if isinstance(ext, Window): ext = SQL(ext._alias) elif isinstance(ext, basestring): ext = SQL(ext) parts.append(ext) if self.partition_by: parts.extend(( SQL('PARTITION BY'), CommaNodeList(self.partition_by))) if self.order_by: parts.extend(( SQL('ORDER BY'), CommaNodeList(self.order_by))) if self.start is not None and self.end is not None: frame = self.frame_type or 'ROWS' parts.extend(( SQL('%s BETWEEN' % frame), self.start, SQL('AND'), self.end)) elif self.start is not None: parts.extend((SQL(self.frame_type or 'ROWS'), self.start)) elif self.frame_type is not None: parts.append(SQL('%s UNBOUNDED PRECEDING' % self.frame_type)) if self._exclude is not None: parts.extend((SQL('EXCLUDE'), self._exclude)) ctx.sql(NodeList(parts)) return ctx class WindowAlias(Node): def __init__(self, window): self.window = window def alias(self, window_alias): self.window._alias = window_alias return self def __sql__(self, ctx): return ctx.literal(self.window._alias or 'w') class _InFunction(Node): def __init__(self, node, in_function=True): self.node = node self.in_function = in_function def __sql__(self, ctx): with ctx(in_function=self.in_function): return ctx.sql(self.node) class Case(ColumnBase): def __init__(self, predicate, expression_tuples, default=None): self.predicate = predicate self.expression_tuples = expression_tuples self.default = default def __sql__(self, ctx): clauses = [SQL('CASE')] if self.predicate is not None: clauses.append(self.predicate) for expr, value in self.expression_tuples: clauses.extend((SQL('WHEN'), expr, SQL('THEN'), _InFunction(value))) if self.default is not None: clauses.extend((SQL('ELSE'), _InFunction(self.default))) clauses.append(SQL('END')) with ctx(in_function=False): return ctx.sql(NodeList(clauses)) class ForUpdate(Node): def __init__(self, expr, of=None, nowait=None): expr = 'FOR UPDATE' if expr is True else expr if 
expr.lower().endswith('nowait'): expr = expr[:-7] # Strip off the "nowait" bit. nowait = True self._expr = expr if of is not None and not isinstance(of, (list, set, tuple)): of = (of,) self._of = of self._nowait = nowait def __sql__(self, ctx): ctx.literal(self._expr) if self._of is not None: ctx.literal(' OF ').sql(CommaNodeList(self._of)) if self._nowait: ctx.literal(' NOWAIT') return ctx class NodeList(ColumnBase): def __init__(self, nodes, glue=' ', parens=False): self.nodes = nodes self.glue = glue self.parens = parens if parens and len(self.nodes) == 1 and \ isinstance(self.nodes[0], Expression) and \ not self.nodes[0].flat: # Hack to avoid double-parentheses. self.nodes = (self.nodes[0].clone(),) self.nodes[0].flat = True def __sql__(self, ctx): n_nodes = len(self.nodes) if n_nodes == 0: return ctx.literal('()') if self.parens else ctx with ctx(parentheses=self.parens): for i in range(n_nodes - 1): ctx.sql(self.nodes[i]) ctx.literal(self.glue) ctx.sql(self.nodes[n_nodes - 1]) return ctx def CommaNodeList(nodes): return NodeList(nodes, ', ') def EnclosedNodeList(nodes): return NodeList(nodes, ', ', True) class _Namespace(Node): __slots__ = ('_name',) def __init__(self, name): self._name = name def __getattr__(self, attr): return NamespaceAttribute(self, attr) __getitem__ = __getattr__ class NamespaceAttribute(ColumnBase): def __init__(self, namespace, attribute): self._namespace = namespace self._attribute = attribute def __sql__(self, ctx): return (ctx .literal(self._namespace._name + '.') .sql(Entity(self._attribute))) EXCLUDED = _Namespace('EXCLUDED') class DQ(ColumnBase): def __init__(self, **query): super(DQ, self).__init__() self.query = query self._negated = False @Node.copy def __invert__(self): self._negated = not self._negated def clone(self): node = DQ(**self.query) node._negated = self._negated return node #: Represent a row tuple. Tuple = lambda *a: EnclosedNodeList(a) class QualifiedNames(WrappedNode): def __sql__(self, ctx): with ctx.scope_column(): return ctx.sql(self.node) def qualify_names(node): # Search a node hierarchy to ensure that any column-like objects are # referenced using fully-qualified names.
if isinstance(node, Expression): return node.__class__(qualify_names(node.lhs), node.op, qualify_names(node.rhs), node.flat) elif isinstance(node, ColumnBase): return QualifiedNames(node) return node class OnConflict(Node): def __init__(self, action=None, update=None, preserve=None, where=None, conflict_target=None, conflict_where=None, conflict_constraint=None): self._action = action self._update = update self._preserve = ensure_tuple(preserve) self._where = where if conflict_target is not None and conflict_constraint is not None: raise ValueError('only one of "conflict_target" and ' '"conflict_constraint" may be specified.') self._conflict_target = ensure_tuple(conflict_target) self._conflict_where = conflict_where self._conflict_constraint = conflict_constraint def get_conflict_statement(self, ctx, query): return ctx.state.conflict_statement(self, query) def get_conflict_update(self, ctx, query): return ctx.state.conflict_update(self, query) @Node.copy def preserve(self, *columns): self._preserve = columns @Node.copy def update(self, _data=None, **kwargs): if _data and kwargs and not isinstance(_data, dict): raise ValueError('Cannot mix data with keyword arguments in the ' 'OnConflict update method.') _data = _data or {} if kwargs: _data.update(kwargs) self._update = _data @Node.copy def where(self, *expressions): if self._where is not None: expressions = (self._where,) + expressions self._where = reduce(operator.and_, expressions) @Node.copy def conflict_target(self, *constraints): self._conflict_constraint = None self._conflict_target = constraints @Node.copy def conflict_where(self, *expressions): if self._conflict_where is not None: expressions = (self._conflict_where,) + expressions self._conflict_where = reduce(operator.and_, expressions) @Node.copy def conflict_constraint(self, constraint): self._conflict_constraint = constraint self._conflict_target = None def database_required(method): @wraps(method) def inner(self, database=None, *args, **kwargs): database = self._database if database is None else database if not database: raise InterfaceError('Query must be bound to a database in order ' 'to call "%s".' % method.__name__) return method(self, database, *args, **kwargs) return inner # BASE QUERY INTERFACE. 
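# Illustrative sketch (not part of the original module; `User` and
# `SomeRowClass` are hypothetical): the query classes below support
# interchangeable row representations, selected with chainable methods:
#
#     query = User.select()
#     query.dicts()                # rows come back as dicts
#     query.tuples()               # ... as plain tuples
#     query.namedtuples()          # ... as named tuples
#     query.objects(SomeRowClass)  # ... passed to a constructor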
class BaseQuery(Node): default_row_type = ROW.DICT def __init__(self, _database=None, **kwargs): self._database = _database self._cursor_wrapper = None self._row_type = None self._constructor = None super(BaseQuery, self).__init__(**kwargs) def bind(self, database=None): self._database = database return self def clone(self): query = super(BaseQuery, self).clone() query._cursor_wrapper = None return query @Node.copy def dicts(self, as_dict=True): self._row_type = ROW.DICT if as_dict else None return self @Node.copy def tuples(self, as_tuple=True): self._row_type = ROW.TUPLE if as_tuple else None return self @Node.copy def namedtuples(self, as_namedtuple=True): self._row_type = ROW.NAMED_TUPLE if as_namedtuple else None return self @Node.copy def objects(self, constructor=None): self._row_type = ROW.CONSTRUCTOR if constructor else None self._constructor = constructor return self def _get_cursor_wrapper(self, cursor): row_type = self._row_type or self.default_row_type if row_type == ROW.DICT: return DictCursorWrapper(cursor) elif row_type == ROW.TUPLE: return CursorWrapper(cursor) elif row_type == ROW.NAMED_TUPLE: return NamedTupleCursorWrapper(cursor) elif row_type == ROW.CONSTRUCTOR: return ObjectCursorWrapper(cursor, self._constructor) else: raise ValueError('Unrecognized row type: "%s".' % row_type) def __sql__(self, ctx): raise NotImplementedError def sql(self): if self._database: context = self._database.get_sql_context() else: context = Context() return context.parse(self) @database_required def execute(self, database): return self._execute(database) def _execute(self, database): raise NotImplementedError def iterator(self, database=None): return iter(self.execute(database).iterator()) def _ensure_execution(self): if self._cursor_wrapper is None: if not self._database: raise ValueError('Query has not been executed.') self.execute() def __iter__(self): self._ensure_execution() return iter(self._cursor_wrapper) def __getitem__(self, value): self._ensure_execution() if isinstance(value, slice): index = value.stop else: index = value if index is not None: index = index + 1 if index >= 0 else 0 self._cursor_wrapper.fill_cache(index) return self._cursor_wrapper.row_cache[value] def __len__(self): self._ensure_execution() return len(self._cursor_wrapper) def __str__(self): return query_to_string(self) class RawQuery(BaseQuery): def __init__(self, sql=None, params=None, **kwargs): super(RawQuery, self).__init__(**kwargs) self._sql = sql self._params = params def __sql__(self, ctx): ctx.literal(self._sql) if self._params: for param in self._params: ctx.value(param, add_param=False) return ctx def _execute(self, database): if self._cursor_wrapper is None: cursor = database.execute(self) self._cursor_wrapper = self._get_cursor_wrapper(cursor) return self._cursor_wrapper class Query(BaseQuery): def __init__(self, where=None, order_by=None, limit=None, offset=None, **kwargs): super(Query, self).__init__(**kwargs) self._where = where self._order_by = order_by self._limit = limit self._offset = offset self._cte_list = None @Node.copy def with_cte(self, *cte_list): self._cte_list = cte_list @Node.copy def where(self, *expressions): if self._where is not None: expressions = (self._where,) + expressions self._where = reduce(operator.and_, expressions) @Node.copy def orwhere(self, *expressions): if self._where is not None: expressions = (self._where,) + expressions self._where = reduce(operator.or_, expressions) @Node.copy def order_by(self, *values): self._order_by = values @Node.copy def 
order_by_extend(self, *values): self._order_by = ((self._order_by or ()) + values) or None @Node.copy def limit(self, value=None): self._limit = value @Node.copy def offset(self, value=None): self._offset = value @Node.copy def paginate(self, page, paginate_by=20): if page > 0: page -= 1 self._limit = paginate_by self._offset = page * paginate_by def _apply_ordering(self, ctx): if self._order_by: (ctx .literal(' ORDER BY ') .sql(CommaNodeList(self._order_by))) if self._limit is not None or (self._offset is not None and ctx.state.limit_max): limit = ctx.state.limit_max if self._limit is None else self._limit ctx.literal(' LIMIT ').sql(limit) if self._offset is not None: ctx.literal(' OFFSET ').sql(self._offset) return ctx def __sql__(self, ctx): if self._cte_list: # The CTE scope is only used at the very beginning of the query, # when we are describing the various CTEs we will be using. recursive = any(cte._recursive for cte in self._cte_list) # Explicitly disable the "subquery" flag here, so as to avoid # unnecessary parentheses around subsequent selects. with ctx.scope_cte(subquery=False): (ctx .literal('WITH RECURSIVE ' if recursive else 'WITH ') .sql(CommaNodeList(self._cte_list)) .literal(' ')) return ctx def __compound_select__(operation, inverted=False): @__bind_database__ def method(self, other): if inverted: self, other = other, self return CompoundSelectQuery(self, operation, other) return method class SelectQuery(Query): union_all = __add__ = __compound_select__('UNION ALL') union = __or__ = __compound_select__('UNION') intersect = __and__ = __compound_select__('INTERSECT') except_ = __sub__ = __compound_select__('EXCEPT') __radd__ = __compound_select__('UNION ALL', inverted=True) __ror__ = __compound_select__('UNION', inverted=True) __rand__ = __compound_select__('INTERSECT', inverted=True) __rsub__ = __compound_select__('EXCEPT', inverted=True) def select_from(self, *columns): if not columns: raise ValueError('select_from() must specify one or more columns.') query = (Select((self,), columns) .bind(self._database)) if getattr(self, 'model', None) is not None: # Bind to the sub-select's model type, if defined. 
query = query.objects(self.model) return query class SelectBase(_HashableSource, Source, SelectQuery): def _get_hash(self): return hash((self.__class__, self._alias or id(self))) def _execute(self, database): if self._cursor_wrapper is None: cursor = database.execute(self) self._cursor_wrapper = self._get_cursor_wrapper(cursor) return self._cursor_wrapper @database_required def peek(self, database, n=1): rows = self.execute(database)[:n] if rows: return rows[0] if n == 1 else rows @database_required def first(self, database, n=1): if self._limit != n: self._limit = n self._cursor_wrapper = None return self.peek(database, n=n) @database_required def scalar(self, database, as_tuple=False, as_dict=False): if as_dict: return self.dicts().peek(database) row = self.tuples().peek(database) return row[0] if row and not as_tuple else row @database_required def scalars(self, database): for row in self.tuples().execute(database): yield row[0] @database_required def count(self, database, clear_limit=False): clone = self.order_by().alias('_wrapped') if clear_limit: clone._limit = clone._offset = None try: if clone._having is None and clone._group_by is None and \ clone._windows is None and clone._distinct is None and \ clone._simple_distinct is not True: clone = clone.select(SQL('1')) except AttributeError: pass return Select([clone], [fn.COUNT(SQL('1'))]).scalar(database) @database_required def exists(self, database): clone = self.columns(SQL('1')) clone._limit = 1 clone._offset = None return bool(clone.scalar()) @database_required def get(self, database): self._cursor_wrapper = None try: return self.execute(database)[0] except IndexError: pass # QUERY IMPLEMENTATIONS. class CompoundSelectQuery(SelectBase): def __init__(self, lhs, op, rhs): super(CompoundSelectQuery, self).__init__() self.lhs = lhs self.op = op self.rhs = rhs @property def _returning(self): return self.lhs._returning @database_required def exists(self, database): query = Select((self.limit(1),), (SQL('1'),)).bind(database) return bool(query.scalar()) def _get_query_key(self): return (self.lhs.get_query_key(), self.rhs.get_query_key()) def _wrap_parens(self, ctx, subq): csq_setting = ctx.state.compound_select_parentheses if not csq_setting or csq_setting == CSQ_PARENTHESES_NEVER: return False elif csq_setting == CSQ_PARENTHESES_ALWAYS: return True elif csq_setting == CSQ_PARENTHESES_UNNESTED: if ctx.state.in_expr or ctx.state.in_function: # If this compound select query is being used inside an # expression, e.g., an IN or EXISTS(). return False # If the query on the left or right is itself a compound select # query, then we do not apply parentheses. However, if it is a # regular SELECT query, we will apply parentheses. return not isinstance(subq, CompoundSelectQuery) def __sql__(self, ctx): if ctx.scope == SCOPE_COLUMN: return self.apply_column(ctx) # Call parent method to handle any CTEs. super(CompoundSelectQuery, self).__sql__(ctx) outer_parens = ctx.subquery or (ctx.scope == SCOPE_SOURCE) with ctx(parentheses=outer_parens): # Should the left-hand query be wrapped in parentheses? lhs_parens = self._wrap_parens(ctx, self.lhs) with ctx.scope_normal(parentheses=lhs_parens, subquery=False): ctx.sql(self.lhs) ctx.literal(' %s ' % self.op) with ctx.push_alias(): # Should the right-hand query be wrapped in parentheses? rhs_parens = self._wrap_parens(ctx, self.rhs) with ctx.scope_normal(parentheses=rhs_parens, subquery=False): ctx.sql(self.rhs) # Apply ORDER BY, LIMIT, OFFSET. 
We use the "values" scope so that # entity names are not fully-qualified. This is a bit of a hack, as # we're relying on the logic in Column.__sql__() to not fully # qualify column names. with ctx.scope_values(): self._apply_ordering(ctx) return self.apply_alias(ctx) class Select(SelectBase): def __init__(self, from_list=None, columns=None, group_by=None, having=None, distinct=None, windows=None, for_update=None, for_update_of=None, nowait=None, lateral=None, **kwargs): super(Select, self).__init__(**kwargs) self._from_list = (list(from_list) if isinstance(from_list, tuple) else from_list) or [] self._returning = columns self._group_by = group_by self._having = having self._windows = None self._for_update = for_update # XXX: consider reorganizing. self._for_update_of = for_update_of self._for_update_nowait = nowait self._lateral = lateral self._distinct = self._simple_distinct = None if distinct: if isinstance(distinct, bool): self._simple_distinct = distinct else: self._distinct = distinct self._cursor_wrapper = None def clone(self): clone = super(Select, self).clone() if clone._from_list: clone._from_list = list(clone._from_list) return clone @Node.copy def columns(self, *columns, **kwargs): self._returning = columns select = columns @Node.copy def select_extend(self, *columns): self._returning = tuple(self._returning) + columns @property def selected_columns(self): return self._returning @selected_columns.setter def selected_columns(self, value): self._returning = value @Node.copy def from_(self, *sources): self._from_list = list(sources) @Node.copy def join(self, dest, join_type=JOIN.INNER, on=None): if not self._from_list: raise ValueError('No sources to join on.') item = self._from_list.pop() self._from_list.append(Join(item, dest, join_type, on)) def left_outer_join(self, dest, on=None): return self.join(dest, JOIN.LEFT_OUTER, on) @Node.copy def group_by(self, *columns): grouping = [] for column in columns: if isinstance(column, Table): if not column._columns: raise ValueError('Cannot pass a table to group_by() that ' 'does not have columns explicitly ' 'declared.') grouping.extend([getattr(column, col_name) for col_name in column._columns]) else: grouping.append(column) self._group_by = grouping def group_by_extend(self, *values): """@Node.copy used from group_by() call""" group_by = tuple(self._group_by or ()) + values return self.group_by(*group_by) @Node.copy def having(self, *expressions): if self._having is not None: expressions = (self._having,) + expressions self._having = reduce(operator.and_, expressions) @Node.copy def distinct(self, *columns): if len(columns) == 1 and (columns[0] is True or columns[0] is False): self._simple_distinct = columns[0] else: self._simple_distinct = False self._distinct = columns @Node.copy def window(self, *windows): self._windows = windows if windows else None @Node.copy def for_update(self, for_update=True, of=None, nowait=None): if not for_update and (of is not None or nowait): for_update = True self._for_update = for_update self._for_update_of = of self._for_update_nowait = nowait @Node.copy def lateral(self, lateral=True): self._lateral = lateral def _get_query_key(self): return self._alias def __sql_selection__(self, ctx, is_subquery=False): return ctx.sql(CommaNodeList(self._returning)) def __sql__(self, ctx): if ctx.scope == SCOPE_COLUMN: return self.apply_column(ctx) if self._lateral and ctx.scope == SCOPE_SOURCE: ctx.literal('LATERAL ') is_subquery = ctx.subquery state = { 'converter': None, 'in_function': False, 'parentheses': 
is_subquery or (ctx.scope == SCOPE_SOURCE), 'subquery': True, } if ctx.state.in_function and ctx.state.function_arg_count == 1: state['parentheses'] = False with ctx.scope_normal(**state): # Defer calling parent SQL until here. This ensures that any CTEs # for this query will be properly nested if this query is a # sub-select or is used in an expression. See GH#1809 for example. super(Select, self).__sql__(ctx) ctx.literal('SELECT ') if self._simple_distinct or self._distinct is not None: ctx.literal('DISTINCT ') if self._distinct: (ctx .literal('ON ') .sql(EnclosedNodeList(self._distinct)) .literal(' ')) with ctx.scope_source(): ctx = self.__sql_selection__(ctx, is_subquery) if self._from_list: with ctx.scope_source(parentheses=False): ctx.literal(' FROM ').sql(CommaNodeList(self._from_list)) if self._where is not None: ctx.literal(' WHERE ').sql(self._where) if self._group_by: ctx.literal(' GROUP BY ').sql(CommaNodeList(self._group_by)) if self._having is not None: ctx.literal(' HAVING ').sql(self._having) if self._windows is not None: ctx.literal(' WINDOW ') ctx.sql(CommaNodeList(self._windows)) # Apply ORDER BY, LIMIT, OFFSET. self._apply_ordering(ctx) if self._for_update: if not ctx.state.for_update: raise ValueError('FOR UPDATE specified but not supported ' 'by database.') ctx.literal(' ') ctx.sql(ForUpdate(self._for_update, self._for_update_of, self._for_update_nowait)) # If the subquery is inside a function -or- we are evaluating a # subquery on either side of an expression w/o an explicit alias, do # not generate an alias + AS clause. if ctx.state.in_function or (ctx.state.in_expr and self._alias is None): return ctx return self.apply_alias(ctx) class _WriteQuery(Query): def __init__(self, table, returning=None, **kwargs): self.table = table self._returning = returning self._return_cursor = True if returning else False super(_WriteQuery, self).__init__(**kwargs) def cte(self, name, recursive=False, columns=None, materialized=None): return CTE(name, self, recursive=recursive, columns=columns, materialized=materialized) @Node.copy def returning(self, *returning): self._returning = returning self._return_cursor = True if returning else False def apply_returning(self, ctx): if self._returning: with ctx.scope_source(): ctx.literal(' RETURNING ').sql(CommaNodeList(self._returning)) return ctx def _execute(self, database): if self._returning: cursor = self.execute_returning(database) else: cursor = database.execute(self) return self.handle_result(database, cursor) def execute_returning(self, database): if self._cursor_wrapper is None: cursor = database.execute(self) self._cursor_wrapper = self._get_cursor_wrapper(cursor) return self._cursor_wrapper def handle_result(self, database, cursor): if self._return_cursor: return cursor return database.rows_affected(cursor) def _set_table_alias(self, ctx): ctx.alias_manager[self.table] = self.table.__name__ def __sql__(self, ctx): super(_WriteQuery, self).__sql__(ctx) # We explicitly set the table alias to the table's name, which ensures # that if a sub-select references a column on the outer table, we won't # assign it a new alias (e.g. t2) but will refer to it as table.column. 
self._set_table_alias(ctx) return ctx class Update(_WriteQuery): def __init__(self, table, update=None, **kwargs): super(Update, self).__init__(table, **kwargs) self._update = update self._from = None @Node.copy def from_(self, *sources): self._from = sources def __sql__(self, ctx): super(Update, self).__sql__(ctx) with ctx.scope_values(subquery=True): ctx.literal('UPDATE ') expressions = [] for k, v in sorted(self._update.items(), key=ctx.column_sort_key): if not isinstance(v, Node): if isinstance(k, Field): v = k.to_value(v) else: v = Value(v, unpack=False) elif isinstance(v, Model) and isinstance(k, ForeignKeyField): # NB: we want to ensure that when passed a model instance # in the context of a foreign-key, we apply the fk-specific # adaptation of the model. v = k.to_value(v) if not isinstance(v, Value): v = qualify_names(v) expressions.append(NodeList((k, SQL('='), v))) (ctx .sql(self.table) .literal(' SET ') .sql(CommaNodeList(expressions))) if self._from: with ctx.scope_source(parentheses=False): ctx.literal(' FROM ').sql(CommaNodeList(self._from)) if self._where: with ctx.scope_normal(): ctx.literal(' WHERE ').sql(self._where) self._apply_ordering(ctx) return self.apply_returning(ctx)
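# Illustrative sketch (not part of the original module; `User` is a
# hypothetical model with a unique `username` column): the OnConflict
# machinery surfaces on Insert via on_conflict() and friends, e.g. a
# Postgres-style upsert:
#
#     (User
#      .insert(username='huey', login_count=1)
#      .on_conflict(conflict_target=[User.username],
#                   update={User.login_count: User.login_count + 1})
#      .execute())

class Insert(_WriteQuery): SIMPLE = 0 QUERY = 1 MULTI = 2 class DefaultValuesException(Exception): pass def __init__(self, table, insert=None, columns=None, on_conflict=None, **kwargs): super(Insert, self).__init__(table, **kwargs) self._insert = insert self._columns = columns self._on_conflict = on_conflict self._query_type = None self._as_rowcount = False def where(self, *expressions): raise NotImplementedError('INSERT queries cannot have a WHERE clause.') @Node.copy def as_rowcount(self, _as_rowcount=True): self._as_rowcount = _as_rowcount @Node.copy def on_conflict_ignore(self, ignore=True): self._on_conflict = OnConflict('IGNORE') if ignore else None @Node.copy def on_conflict_replace(self, replace=True): self._on_conflict = OnConflict('REPLACE') if replace else None @Node.copy def on_conflict(self, *args, **kwargs): self._on_conflict = (OnConflict(*args, **kwargs) if (args or kwargs) else None) def _simple_insert(self, ctx): if not self._insert: raise self.DefaultValuesException('Error: no data to insert.') return self._generate_insert((self._insert,), ctx) def get_default_data(self): return {} def get_default_columns(self): if self.table._columns: return [getattr(self.table, col) for col in self.table._columns if col != self.table._primary_key] def _generate_insert(self, insert, ctx): rows_iter = iter(insert) columns = self._columns # Load and organize column defaults (if provided). defaults = self.get_default_data() # First figure out what columns are being inserted (if they weren't # specified explicitly). Resulting columns are normalized and ordered. if not columns: try: row = next(rows_iter) except StopIteration: raise self.DefaultValuesException('Error: no rows to insert.') if not isinstance(row, Mapping): columns = self.get_default_columns() if columns is None: raise ValueError('Bulk insert must specify columns.') else: # Infer column names from the dict of data being inserted. accum = [] for column in row: if isinstance(column, basestring): column = getattr(self.table, column) accum.append(column) # Add any columns present in the default data that are not # accounted for by the dictionary of row data.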
column_set = set(accum) for col in (set(defaults) - column_set): accum.append(col) columns = sorted(accum, key=lambda obj: obj.get_sort_key(ctx)) rows_iter = itertools.chain(iter((row,)), rows_iter) else: clean_columns = [] seen = set() for column in columns: if isinstance(column, basestring): column_obj = getattr(self.table, column) else: column_obj = column clean_columns.append(column_obj) seen.add(column_obj) columns = clean_columns for col in sorted(defaults, key=lambda obj: obj.get_sort_key(ctx)): if col not in seen: columns.append(col) fk_fields = set() nullable_columns = set() value_lookups = {} for column in columns: lookups = [column, column.name] if isinstance(column, Field): if column.name != column.column_name: lookups.append(column.column_name) if column.null: nullable_columns.add(column) if isinstance(column, ForeignKeyField): fk_fields.add(column) value_lookups[column] = lookups ctx.sql(EnclosedNodeList(columns)).literal(' VALUES ') columns_converters = [ (column, column.db_value if isinstance(column, Field) else None) for column in columns] all_values = [] for row in rows_iter: values = [] is_dict = isinstance(row, Mapping) for i, (column, converter) in enumerate(columns_converters): try: if is_dict: # The logic is a bit convoluted, but in order to be # flexible in what we accept (dict keyed by # column/field, field name, or underlying column name), # we try accessing the row data dict using each # possible key. If no match is found, throw an error. for lookup in value_lookups[column]: try: val = row[lookup] except KeyError: pass else: break else: raise KeyError else: val = row[i] except (KeyError, IndexError): if column in defaults: val = defaults[column] if callable_(val): val = val() elif column in nullable_columns: val = None else: raise ValueError('Missing value for %s.' 
% column.name) if not isinstance(val, Node) or (isinstance(val, Model) and column in fk_fields): val = Value(val, converter=converter, unpack=False) values.append(val) all_values.append(EnclosedNodeList(values)) if not all_values: raise self.DefaultValuesException('Error: no data to insert.') with ctx.scope_values(subquery=True): return ctx.sql(CommaNodeList(all_values)) def _query_insert(self, ctx): return (ctx .sql(EnclosedNodeList(self._columns)) .literal(' ') .sql(self._insert)) def _default_values(self, ctx): if not self._database: return ctx.literal('DEFAULT VALUES') return self._database.default_values_insert(ctx) def __sql__(self, ctx): super(Insert, self).__sql__(ctx) with ctx.scope_values(): stmt = None if self._on_conflict is not None: stmt = self._on_conflict.get_conflict_statement(ctx, self) (ctx .sql(stmt or SQL('INSERT')) .literal(' INTO ') .sql(self.table) .literal(' ')) if isinstance(self._insert, Mapping) and not self._columns: try: self._simple_insert(ctx) except self.DefaultValuesException: self._default_values(ctx) self._query_type = Insert.SIMPLE elif isinstance(self._insert, (SelectQuery, SQL)): self._query_insert(ctx) self._query_type = Insert.QUERY else: self._generate_insert(self._insert, ctx) self._query_type = Insert.MULTI if self._on_conflict is not None: update = self._on_conflict.get_conflict_update(ctx, self) if update is not None: ctx.literal(' ').sql(update) return self.apply_returning(ctx) def _execute(self, database): if self._returning is None and database.returning_clause \ and self.table._primary_key: self._returning = (self.table._primary_key,) try: return super(Insert, self)._execute(database) except self.DefaultValuesException: pass def handle_result(self, database, cursor): if self._return_cursor: return cursor if self._as_rowcount: return database.rows_affected(cursor) return database.last_insert_id(cursor, self._query_type) class Delete(_WriteQuery): def __sql__(self, ctx): super(Delete, self).__sql__(ctx) with ctx.scope_values(subquery=True): ctx.literal('DELETE FROM ').sql(self.table) if self._where is not None: with ctx.scope_normal(): ctx.literal(' WHERE ').sql(self._where) self._apply_ordering(ctx) return self.apply_returning(ctx) class Index(Node): def __init__(self, name, table, expressions, unique=False, safe=False, where=None, using=None): self._name = name self._table = Entity(table) if not isinstance(table, Table) else table self._expressions = expressions self._where = where self._unique = unique self._safe = safe self._using = using @Node.copy def safe(self, _safe=True): self._safe = _safe @Node.copy def where(self, *expressions): if self._where is not None: expressions = (self._where,) + expressions self._where = reduce(operator.and_, expressions) @Node.copy def using(self, _using=None): self._using = _using def __sql__(self, ctx): statement = 'CREATE UNIQUE INDEX ' if self._unique else 'CREATE INDEX ' with ctx.scope_values(subquery=True): ctx.literal(statement) if self._safe: ctx.literal('IF NOT EXISTS ') # Sqlite uses CREATE INDEX <schema>.<name> ON <table>, whereas most # others use: CREATE INDEX <name> ON <schema>.<table>.
if ctx.state.index_schema_prefix and \ isinstance(self._table, Table) and self._table._schema: index_name = Entity(self._table._schema, self._name) table_name = Entity(self._table.__name__) else: index_name = Entity(self._name) table_name = self._table ctx.sql(index_name) if self._using is not None and \ ctx.state.index_using_precedes_table: ctx.literal(' USING %s' % self._using) # MySQL style. (ctx .literal(' ON ') .sql(table_name) .literal(' ')) if self._using is not None and not \ ctx.state.index_using_precedes_table: ctx.literal('USING %s ' % self._using) # Postgres/default. ctx.sql(EnclosedNodeList([ SQL(expr) if isinstance(expr, basestring) else expr for expr in self._expressions])) if self._where is not None: ctx.literal(' WHERE ').sql(self._where) return ctx class ModelIndex(Index): def __init__(self, model, fields, unique=False, safe=True, where=None, using=None, name=None): self._model = model if name is None: name = self._generate_name_from_fields(model, fields) if using is None: for field in fields: if isinstance(field, Field) and hasattr(field, 'index_type'): using = field.index_type super(ModelIndex, self).__init__( name=name, table=model._meta.table, expressions=fields, unique=unique, safe=safe, where=where, using=using) def _generate_name_from_fields(self, model, fields): accum = [] for field in fields: if isinstance(field, basestring): accum.append(field.split()[0]) else: if isinstance(field, Node) and not isinstance(field, Field): field = field.unwrap() if isinstance(field, Field): accum.append(field.column_name) if not accum: raise ValueError('Unable to generate a name for the index, please ' 'explicitly specify a name.') clean_field_names = re.sub(r'[^\w]+', '', '_'.join(accum)) meta = model._meta prefix = meta.name if meta.legacy_table_names else meta.table_name return _truncate_constraint_name('_'.join((prefix, clean_field_names))) def _truncate_constraint_name(constraint, maxlen=64): if len(constraint) > maxlen: name_hash = hashlib.md5(constraint.encode('utf-8')).hexdigest() constraint = '%s_%s' % (constraint[:(maxlen - 8)], name_hash[:7]) return constraint # DB-API 2.0 EXCEPTIONS. class PeeweeException(Exception): def __init__(self, *args): if args and isinstance(args[0], Exception): self.orig, args = args[0], args[1:] super(PeeweeException, self).__init__(*args) class ImproperlyConfigured(PeeweeException): pass class DatabaseError(PeeweeException): pass class DataError(DatabaseError): pass class IntegrityError(DatabaseError): pass class InterfaceError(PeeweeException): pass class InternalError(DatabaseError): pass class NotSupportedError(DatabaseError): pass class OperationalError(DatabaseError): pass class ProgrammingError(DatabaseError): pass class ExceptionWrapper(object): __slots__ = ('exceptions',) def __init__(self, exceptions): self.exceptions = exceptions def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: return # psycopg shits out a million cute error types. Try to catch em all.
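# For example, psycopg2's errors.UniqueViolation appears by name in the
# EXCEPTIONS map below and is re-raised as peewee's IntegrityError, while an
# unlisted subclass such as errors.NumericValueOutOfRange is first collapsed
# to its base class (DataError) so it can still be translated.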
if pg_errors is not None and exc_type.__name__ not in self.exceptions \ and issubclass(exc_type, pg_errors.Error): exc_type = exc_type.__bases__[0] elif pg3_errors is not None and \ exc_type.__name__ not in self.exceptions \ and issubclass(exc_type, pg3_errors.Error): exc_type = exc_type.__bases__[0] if exc_type.__name__ in self.exceptions: new_type = self.exceptions[exc_type.__name__] exc_args = exc_value.args reraise(new_type, new_type(exc_value, *exc_args), traceback) EXCEPTIONS = { 'ConstraintError': IntegrityError, 'DatabaseError': DatabaseError, 'DataError': DataError, 'IntegrityError': IntegrityError, 'InterfaceError': InterfaceError, 'InternalError': InternalError, 'NotSupportedError': NotSupportedError, 'OperationalError': OperationalError, 'ProgrammingError': ProgrammingError, 'TransactionRollbackError': OperationalError, 'UndefinedFunction': ProgrammingError, 'UniqueViolation': IntegrityError} __exception_wrapper__ = ExceptionWrapper(EXCEPTIONS) # DATABASE INTERFACE AND CONNECTION MANAGEMENT. IndexMetadata = collections.namedtuple( 'IndexMetadata', ('name', 'sql', 'columns', 'unique', 'table')) ColumnMetadata = collections.namedtuple( 'ColumnMetadata', ('name', 'data_type', 'null', 'primary_key', 'table', 'default')) ForeignKeyMetadata = collections.namedtuple( 'ForeignKeyMetadata', ('column', 'dest_table', 'dest_column', 'table')) ViewMetadata = collections.namedtuple('ViewMetadata', ('name', 'sql')) class _ConnectionState(object): def __init__(self, **kwargs): super(_ConnectionState, self).__init__(**kwargs) self.reset() def reset(self): self.closed = True self.conn = None self.ctx = [] self.transactions = [] def set_connection(self, conn): self.conn = conn self.closed = False self.ctx = [] self.transactions = [] class _ConnectionLocal(_ConnectionState, threading.local): pass class _NoopLock(object): __slots__ = () def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass class ConnectionContext(object): __slots__ = ('db',) def __init__(self, db): self.db = db def __enter__(self): if self.db.is_closed(): self.db.connect() def __exit__(self, exc_type, exc_val, exc_tb): self.db.close() def __call__(self, fn): @wraps(fn) def inner(*args, **kwargs): with ConnectionContext(self.db): return fn(*args, **kwargs) return inner class Database(_callable_context_manager): context_class = Context field_types = {} operations = {} param = '?' quote = '""' server_version = None # Feature toggles. compound_select_parentheses = CSQ_PARENTHESES_NEVER for_update = False index_schema_prefix = False index_using_precedes_table = False limit_max = None nulls_ordering = False returning_clause = False safe_create_index = True safe_drop_index = True sequences = False truncate_table = True def __init__(self, database, thread_safe=True, autorollback=False, field_types=None, operations=None, autocommit=None, autoconnect=True, **kwargs): self._field_types = merge_dict(FIELD, self.field_types) self._operations = merge_dict(OP, self.operations) if field_types: self._field_types.update(field_types) if operations: self._operations.update(operations) self.autoconnect = autoconnect self.thread_safe = thread_safe if thread_safe: self._state = _ConnectionLocal() self._lock = threading.Lock() else: self._state = _ConnectionState() self._lock = _NoopLock() if autorollback: __deprecated__('Peewee no longer uses the "autorollback" option, ' 'as we always run in autocommit-mode now. 
This ' 'changes psycopg2\'s semantics so that the conn ' 'is not left in a transaction-aborted state.') if autocommit is not None: __deprecated__('Peewee no longer uses the "autocommit" option, as ' 'the semantics now require it to always be True. ' 'Because some database-drivers also use the ' '"autocommit" parameter, you are receiving a ' 'warning so you may update your code and remove ' 'the parameter, as in the future, specifying ' 'autocommit could impact the behavior of the ' 'database driver you are using.') self.connect_params = {} self.init(database, **kwargs) def init(self, database, **kwargs): if not self.is_closed(): self.close() self.database = database self.connect_params.update(kwargs) self.deferred = not bool(database) def __enter__(self): if self.is_closed(): self.connect() ctx = self.atomic() self._state.ctx.append(ctx) ctx.__enter__() return self def __exit__(self, exc_type, exc_val, exc_tb): ctx = self._state.ctx.pop() try: ctx.__exit__(exc_type, exc_val, exc_tb) finally: if not self._state.ctx: self.close() def connection_context(self): return ConnectionContext(self) def _connect(self): raise NotImplementedError def connect(self, reuse_if_open=False): with self._lock: if self.deferred: raise InterfaceError('Error, database must be initialized ' 'before opening a connection.') if not self._state.closed: if reuse_if_open: return False raise OperationalError('Connection already opened.') self._state.reset() with __exception_wrapper__: self._state.set_connection(self._connect()) if self.server_version is None: self._set_server_version(self._state.conn) self._initialize_connection(self._state.conn) return True def _initialize_connection(self, conn): pass def _set_server_version(self, conn): self.server_version = 0 def close(self): with self._lock: if self.deferred: raise InterfaceError('Error, database must be initialized ' 'before opening a connection.') if self.in_transaction(): raise OperationalError('Attempting to close database while ' 'transaction is open.') is_open = not self._state.closed try: if is_open: with __exception_wrapper__: self._close(self._state.conn) finally: self._state.reset() return is_open def _close(self, conn): conn.close() def is_closed(self): return self._state.closed def is_connection_usable(self): return not self._state.closed def connection(self): if self.is_closed(): self.connect() return self._state.conn def cursor(self, commit=None, named_cursor=None): if commit is not None: __deprecated__('"commit" has been deprecated and is a no-op.') if self.is_closed(): if self.autoconnect: self.connect() else: raise InterfaceError('Error, database connection not opened.') return self._state.conn.cursor() def execute_sql(self, sql, params=None, commit=None): if commit is not None: __deprecated__('"commit" has been deprecated and is a no-op.') logger.debug((sql, params)) with __exception_wrapper__: cursor = self.cursor() cursor.execute(sql, params or ()) return cursor def execute(self, query, commit=None, **context_options): if commit is not None: __deprecated__('"commit" has been deprecated and is a no-op.') ctx = self.get_sql_context(**context_options) sql, params = ctx.sql(query).query() return self.execute_sql(sql, params) def get_context_options(self): return { 'field_types': self._field_types, 'operations': self._operations, 'param': self.param, 'quote': self.quote, 'compound_select_parentheses': self.compound_select_parentheses, 'conflict_statement': self.conflict_statement, 'conflict_update': self.conflict_update, 'for_update': self.for_update, 
'index_schema_prefix': self.index_schema_prefix, 'index_using_precedes_table': self.index_using_precedes_table, 'limit_max': self.limit_max, 'nulls_ordering': self.nulls_ordering, } def get_sql_context(self, **context_options): context = self.get_context_options() if context_options: context.update(context_options) return self.context_class(**context) def conflict_statement(self, on_conflict, query): raise NotImplementedError def conflict_update(self, on_conflict, query): raise NotImplementedError def _build_on_conflict_update(self, on_conflict, query): if on_conflict._conflict_target: stmt = SQL('ON CONFLICT') target = EnclosedNodeList([ Entity(col) if isinstance(col, basestring) else col for col in on_conflict._conflict_target]) if on_conflict._conflict_where is not None: target = NodeList([target, SQL('WHERE'), on_conflict._conflict_where]) else: stmt = SQL('ON CONFLICT ON CONSTRAINT') target = on_conflict._conflict_constraint if isinstance(target, basestring): target = Entity(target) updates = [] if on_conflict._preserve: for column in on_conflict._preserve: excluded = NodeList((SQL('EXCLUDED'), ensure_entity(column)), glue='.') expression = NodeList((ensure_entity(column), SQL('='), excluded)) updates.append(expression) if on_conflict._update: for k, v in on_conflict._update.items(): if not isinstance(v, Node): # Attempt to resolve string field-names to their respective # field object, to apply data-type conversions. if isinstance(k, basestring): k = getattr(query.table, k) if isinstance(k, Field): v = k.to_value(v) else: v = Value(v, unpack=False) else: v = QualifiedNames(v) updates.append(NodeList((ensure_entity(k), SQL('='), v))) parts = [stmt, target, SQL('DO UPDATE SET'), CommaNodeList(updates)] if on_conflict._where: parts.extend((SQL('WHERE'), QualifiedNames(on_conflict._where))) return NodeList(parts) def last_insert_id(self, cursor, query_type=None): return cursor.lastrowid def rows_affected(self, cursor): return cursor.rowcount def default_values_insert(self, ctx): return ctx.literal('DEFAULT VALUES') def session_start(self): return self.transaction().__enter__() def session_commit(self): try: txn = self.pop_transaction() except IndexError: return False txn.commit(begin=self.in_transaction()) return True def session_rollback(self): try: txn = self.pop_transaction() except IndexError: return False txn.rollback(begin=self.in_transaction()) return True def in_transaction(self): return bool(self._state.transactions) def push_transaction(self, transaction): self._state.transactions.append(transaction) def pop_transaction(self): return self._state.transactions.pop() def transaction_depth(self): return len(self._state.transactions) def top_transaction(self): if self._state.transactions: return self._state.transactions[-1] def atomic(self, *args, **kwargs): return _atomic(self, *args, **kwargs) def manual_commit(self): return _manual(self) def transaction(self, *args, **kwargs): return _transaction(self, *args, **kwargs) def savepoint(self): return _savepoint(self) def begin(self): if self.is_closed(): self.connect() with __exception_wrapper__: self.cursor().execute('BEGIN') def rollback(self): with __exception_wrapper__: self.cursor().execute('ROLLBACK') def commit(self): with __exception_wrapper__: self.cursor().execute('COMMIT') def batch_commit(self, it, n): for group in chunked(it, n): with self.atomic(): for obj in group: yield obj def table_exists(self, table_name, schema=None): if is_model(table_name): model = table_name table_name = model._meta.table_name schema = 
model._meta.schema return table_name in self.get_tables(schema=schema) def get_tables(self, schema=None): raise NotImplementedError def get_indexes(self, table, schema=None): raise NotImplementedError def get_columns(self, table, schema=None): raise NotImplementedError def get_primary_keys(self, table, schema=None): raise NotImplementedError def get_foreign_keys(self, table, schema=None): raise NotImplementedError def sequence_exists(self, seq): raise NotImplementedError def create_tables(self, models, **options): for model in sort_models(models): model.create_table(**options) def drop_tables(self, models, **kwargs): for model in reversed(sort_models(models)): model.drop_table(**kwargs) def extract_date(self, date_part, date_field): raise NotImplementedError def truncate_date(self, date_part, date_field): raise NotImplementedError def to_timestamp(self, date_field): raise NotImplementedError def from_timestamp(self, date_field): raise NotImplementedError def random(self): return fn.random() def bind(self, models, bind_refs=True, bind_backrefs=True): for model in models: model.bind(self, bind_refs=bind_refs, bind_backrefs=bind_backrefs) def bind_ctx(self, models, bind_refs=True, bind_backrefs=True): return _BoundModelsContext(models, self, bind_refs, bind_backrefs) def get_noop_select(self, ctx): return ctx.sql(Select().columns(SQL('0')).where(SQL('0'))) @property def Model(self): if not hasattr(self, '_Model'): class Meta: database = self self._Model = type('BaseModel', (Model,), {'Meta': Meta}) return self._Model def __pragma__(name): def __get__(self): return self.pragma(name) def __set__(self, value): return self.pragma(name, value) return property(__get__, __set__) class SqliteDatabase(Database): field_types = { 'BIGAUTO': FIELD.AUTO, 'BIGINT': FIELD.INT, 'BOOL': FIELD.INT, 'DOUBLE': FIELD.FLOAT, 'SMALLINT': FIELD.INT, 'UUID': FIELD.TEXT} operations = { 'LIKE': 'GLOB', 'ILIKE': 'LIKE'} index_schema_prefix = True limit_max = -1 server_version = __sqlite_version__ truncate_table = False def __init__(self, database, *args, **kwargs): self._pragmas = kwargs.pop('pragmas', ()) super(SqliteDatabase, self).__init__(database, *args, **kwargs) self._aggregates = {} self._collations = {} self._functions = {} self._window_functions = {} self._table_functions = [] self._extensions = set() self._attached = {} self.register_function(_sqlite_date_part, 'date_part', 2) self.register_function(_sqlite_date_trunc, 'date_trunc', 2) self.nulls_ordering = self.server_version >= (3, 30, 0) def init(self, database, pragmas=None, timeout=5, returning_clause=None, **kwargs): if pragmas is not None: self._pragmas = pragmas if isinstance(self._pragmas, dict): self._pragmas = list(self._pragmas.items()) if returning_clause is not None: if __sqlite_version__ < (3, 35, 0): warnings.warn('RETURNING clause requires Sqlite 3.35 or newer') self.returning_clause = returning_clause self._timeout = timeout super(SqliteDatabase, self).init(database, **kwargs) def _set_server_version(self, conn): pass def _connect(self): if sqlite3 is None: raise ImproperlyConfigured('SQLite driver not installed!') conn = sqlite3.connect(self.database, timeout=self._timeout, isolation_level=None, **self.connect_params) try: self._add_conn_hooks(conn) except: conn.close() raise return conn def _add_conn_hooks(self, conn): if self._attached: self._attach_databases(conn) if self._pragmas: self._set_pragmas(conn) self._load_aggregates(conn) self._load_collations(conn) self._load_functions(conn) if self.server_version >= (3, 25, 0): 
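# Application-defined window functions require SQLite 3.25.0 or newer, so
# they are only registered when the runtime library supports them.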
self._load_window_functions(conn) if self._table_functions: for table_function in self._table_functions: table_function.register(conn) if self._extensions: self._load_extensions(conn) def _set_pragmas(self, conn): cursor = conn.cursor() for pragma, value in self._pragmas: cursor.execute('PRAGMA %s = %s;' % (pragma, value)) cursor.close() def _attach_databases(self, conn): cursor = conn.cursor() for name, db in self._attached.items(): cursor.execute('ATTACH DATABASE "%s" AS "%s"' % (db, name)) cursor.close() def pragma(self, key, value=SENTINEL, permanent=False, schema=None): if schema is not None: key = '"%s".%s' % (schema, key) sql = 'PRAGMA %s' % key if value is not SENTINEL: sql += ' = %s' % (value or 0) if permanent: pragmas = dict(self._pragmas or ()) pragmas[key] = value self._pragmas = list(pragmas.items()) elif permanent: raise ValueError('Cannot specify a permanent pragma without value') row = self.execute_sql(sql).fetchone() if row: return row[0] cache_size = __pragma__('cache_size') foreign_keys = __pragma__('foreign_keys') journal_mode = __pragma__('journal_mode') journal_size_limit = __pragma__('journal_size_limit') mmap_size = __pragma__('mmap_size') page_size = __pragma__('page_size') read_uncommitted = __pragma__('read_uncommitted') synchronous = __pragma__('synchronous') wal_autocheckpoint = __pragma__('wal_autocheckpoint') application_id = __pragma__('application_id') user_version = __pragma__('user_version') data_version = __pragma__('data_version') @property def timeout(self): return self._timeout @timeout.setter def timeout(self, seconds): if self._timeout == seconds: return self._timeout = seconds if not self.is_closed(): # PySQLite multiplies user timeout by 1000, but the unit of the # timeout PRAGMA is actually milliseconds. 
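# In other words, "db.timeout = 5" issues PRAGMA busy_timeout=5000 against
# the open connection, mirroring the seconds-based timeout= argument that
# sqlite3.connect() accepts at connection time.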
self.execute_sql('PRAGMA busy_timeout=%d;' % (seconds * 1000)) def _load_aggregates(self, conn): for name, (klass, num_params) in self._aggregates.items(): conn.create_aggregate(name, num_params, klass) def _load_collations(self, conn): for name, fn in self._collations.items(): conn.create_collation(name, fn) def _load_functions(self, conn): for name, (fn, n_params, deterministic) in self._functions.items(): kwargs = {'deterministic': deterministic} if deterministic else {} conn.create_function(name, n_params, fn, **kwargs) def _load_window_functions(self, conn): for name, (klass, num_params) in self._window_functions.items(): conn.create_window_function(name, num_params, klass) def register_aggregate(self, klass, name=None, num_params=-1): self._aggregates[name or klass.__name__.lower()] = (klass, num_params) if not self.is_closed(): self._load_aggregates(self.connection()) def aggregate(self, name=None, num_params=-1): def decorator(klass): self.register_aggregate(klass, name, num_params) return klass return decorator def register_collation(self, fn, name=None): name = name or fn.__name__ def _collation(*args): expressions = args + (SQL('collate %s' % name),) return NodeList(expressions) fn.collation = _collation self._collations[name] = fn if not self.is_closed(): self._load_collations(self.connection()) def collation(self, name=None): def decorator(fn): self.register_collation(fn, name) return fn return decorator def register_function(self, fn, name=None, num_params=-1, deterministic=None): self._functions[name or fn.__name__] = (fn, num_params, deterministic) if not self.is_closed(): self._load_functions(self.connection()) def func(self, name=None, num_params=-1, deterministic=None): def decorator(fn): self.register_function(fn, name, num_params, deterministic) return fn return decorator def register_window_function(self, klass, name=None, num_params=-1): name = name or klass.__name__.lower() self._window_functions[name] = (klass, num_params) if not self.is_closed(): self._load_window_functions(self.connection()) def window_function(self, name=None, num_params=-1): def decorator(klass): self.register_window_function(klass, name, num_params) return klass return decorator def register_table_function(self, klass, name=None): if name is not None: klass.name = name self._table_functions.append(klass) if not self.is_closed(): klass.register(self.connection()) def table_function(self, name=None): def decorator(klass): self.register_table_function(klass, name) return klass return decorator def unregister_aggregate(self, name): del(self._aggregates[name]) def unregister_collation(self, name): del(self._collations[name]) def unregister_function(self, name): del(self._functions[name]) def unregister_window_function(self, name): del(self._window_functions[name]) def unregister_table_function(self, name): for idx, klass in enumerate(self._table_functions): if klass.name == name: break else: return False self._table_functions.pop(idx) return True def _load_extensions(self, conn): conn.enable_load_extension(True) for extension in self._extensions: conn.load_extension(extension) def load_extension(self, extension): self._extensions.add(extension) if not self.is_closed(): conn = self.connection() conn.enable_load_extension(True) conn.load_extension(extension) def unload_extension(self, extension): self._extensions.remove(extension) def attach(self, filename, name): if name in self._attached: if self._attached[name] == filename: return False raise OperationalError('schema "%s" already attached.' 
% name) self._attached[name] = filename if not self.is_closed(): self.execute_sql('ATTACH DATABASE "%s" AS "%s"' % (filename, name)) return True def detach(self, name): if name not in self._attached: return False del self._attached[name] if not self.is_closed(): self.execute_sql('DETACH DATABASE "%s"' % name) return True def last_insert_id(self, cursor, query_type=None): if not self.returning_clause: return cursor.lastrowid elif query_type == Insert.SIMPLE: try: return cursor[0][0] except (IndexError, KeyError, TypeError): pass return cursor def rows_affected(self, cursor): try: return cursor.rowcount except AttributeError: return cursor.cursor.rowcount # This was a RETURNING query. def begin(self, lock_type=None): statement = 'BEGIN %s' % lock_type if lock_type else 'BEGIN' self.execute_sql(statement) def commit(self): with __exception_wrapper__: return self._state.conn.commit() def rollback(self): with __exception_wrapper__: return self._state.conn.rollback() def get_tables(self, schema=None): schema = schema or 'main' cursor = self.execute_sql('SELECT name FROM "%s".sqlite_master WHERE ' 'type=? ORDER BY name' % schema, ('table',)) return [row for row, in cursor.fetchall()] def get_views(self, schema=None): sql = ('SELECT name, sql FROM "%s".sqlite_master WHERE type=? ' 'ORDER BY name') % (schema or 'main') return [ViewMetadata(*row) for row in self.execute_sql(sql, ('view',))] def get_indexes(self, table, schema=None): schema = schema or 'main' query = ('SELECT name, sql FROM "%s".sqlite_master ' 'WHERE tbl_name = ? AND type = ? ORDER BY name') % schema cursor = self.execute_sql(query, (table, 'index')) index_to_sql = dict(cursor.fetchall()) # Determine which indexes have a unique constraint. unique_indexes = set() cursor = self.execute_sql('PRAGMA "%s".index_list("%s")' % (schema, table)) for row in cursor.fetchall(): name = row[1] is_unique = int(row[2]) == 1 if is_unique: unique_indexes.add(name) # Retrieve the indexed columns. index_columns = {} for index_name in sorted(index_to_sql): cursor = self.execute_sql('PRAGMA "%s".index_info("%s")' % (schema, index_name)) index_columns[index_name] = [row[2] for row in cursor.fetchall()] return [ IndexMetadata( name, index_to_sql[name], index_columns[name], name in unique_indexes, table) for name in sorted(index_to_sql)] def get_columns(self, table, schema=None): cursor = self.execute_sql('PRAGMA "%s".table_info("%s")' % (schema or 'main', table)) return [ColumnMetadata(r[1], r[2], not r[3], bool(r[5]), table, r[4]) for r in cursor.fetchall()] def get_primary_keys(self, table, schema=None): cursor = self.execute_sql('PRAGMA "%s".table_info("%s")' % (schema or 'main', table)) return [row[1] for row in filter(lambda r: r[-1], cursor.fetchall())] def get_foreign_keys(self, table, schema=None): cursor = self.execute_sql('PRAGMA "%s".foreign_key_list("%s")' % (schema or 'main', table)) return [ForeignKeyMetadata(row[3], row[2], row[4], table) for row in cursor.fetchall()] def get_binary_type(self): return sqlite3.Binary def conflict_statement(self, on_conflict, query): action = on_conflict._action.lower() if on_conflict._action else '' if action and action not in ('nothing', 'update'): return SQL('INSERT OR %s' % on_conflict._action.upper()) def conflict_update(self, oc, query): # Sqlite prior to 3.24.0 does not support Postgres-style upsert. 
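# For illustration, the kind of upsert this method renders on SQLite 3.24+
# (using a hypothetical "KV" model with a unique "key" column):
#
#   (KV.insert(key='k1', value=1)
#      .on_conflict(conflict_target=[KV.key],
#                   update={KV.value: KV.value + 1})
#      .execute())
#
#   # -> INSERT INTO ... ON CONFLICT ("key") DO UPDATE SET "value" = ...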
if self.server_version < (3, 24, 0) and \ any((oc._preserve, oc._update, oc._where, oc._conflict_target, oc._conflict_constraint)): raise ValueError('SQLite does not support specifying which values ' 'to preserve or update.') action = oc._action.lower() if oc._action else '' if action and action not in ('nothing', 'update', ''): return if action == 'nothing': return SQL('ON CONFLICT DO NOTHING') elif not oc._update and not oc._preserve: raise ValueError('If you are not performing any updates (or ' 'preserving any INSERTed values), then the ' 'conflict resolution action should be set to ' '"NOTHING".') elif oc._conflict_constraint: raise ValueError('SQLite does not support specifying named ' 'constraints for conflict resolution.') elif not oc._conflict_target: raise ValueError('SQLite requires that a conflict target be ' 'specified when doing an upsert.') return self._build_on_conflict_update(oc, query) def extract_date(self, date_part, date_field): return fn.date_part(date_part, date_field, python_value=int) def truncate_date(self, date_part, date_field): return fn.date_trunc(date_part, date_field, python_value=simple_date_time) def to_timestamp(self, date_field): return fn.strftime('%s', date_field).cast('integer') def from_timestamp(self, date_field): return fn.datetime(date_field, 'unixepoch') class PostgresqlDatabase(Database): field_types = { 'AUTO': 'SERIAL', 'BIGAUTO': 'BIGSERIAL', 'BLOB': 'BYTEA', 'BOOL': 'BOOLEAN', 'DATETIME': 'TIMESTAMP', 'DECIMAL': 'NUMERIC', 'DOUBLE': 'DOUBLE PRECISION', 'UUID': 'UUID', 'UUIDB': 'BYTEA'} operations = {'REGEXP': '~', 'IREGEXP': '~*'} param = '%s' compound_select_parentheses = CSQ_PARENTHESES_ALWAYS for_update = True nulls_ordering = True returning_clause = True safe_create_index = False sequences = True def init(self, database, register_unicode=True, encoding=None, isolation_level=None, **kwargs): self._register_unicode = register_unicode self._encoding = encoding self._isolation_level = isolation_level super(PostgresqlDatabase, self).init(database, **kwargs) def _connect(self): if psycopg2 is None: raise ImproperlyConfigured('Postgres driver not installed!') # Handle connection-strings nicely, since psycopg2 will accept them, # and they may be easier when lots of parameters are specified. params = self.connect_params.copy() if self.database.startswith('postgresql://'): params.setdefault('dsn', self.database) else: params.setdefault('dbname', self.database) conn = psycopg2.connect(**params) if self._register_unicode: pg_extensions.register_type(pg_extensions.UNICODE, conn) pg_extensions.register_type(pg_extensions.UNICODEARRAY, conn) if self._encoding: conn.set_client_encoding(self._encoding) if self._isolation_level: conn.set_isolation_level(self._isolation_level) conn.autocommit = True return conn def _set_server_version(self, conn): self.server_version = conn.server_version if self.server_version >= 90600: self.safe_create_index = True def is_connection_usable(self): if self._state.closed: return False # Returns True if we are idle, running a command, or in an active # connection. If the connection is in an error state or the connection # is otherwise unusable, return False. 
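# psycopg2 orders the status constants such that a simple comparison works:
# TRANSACTION_STATUS_IDLE (0), ACTIVE (1) and INTRANS (2) are usable, while
# INERROR (3) and UNKNOWN (4, e.g. a broken connection) are not.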
txn_status = self._state.conn.get_transaction_status() return txn_status < pg_extensions.TRANSACTION_STATUS_INERROR def last_insert_id(self, cursor, query_type=None): try: return cursor if query_type != Insert.SIMPLE else cursor[0][0] except (IndexError, KeyError, TypeError): pass def rows_affected(self, cursor): try: return cursor.rowcount except AttributeError: return cursor.cursor.rowcount def begin(self, isolation_level=None): if self.is_closed(): self.connect() if isolation_level: stmt = 'BEGIN TRANSACTION ISOLATION LEVEL %s' % isolation_level else: stmt = 'BEGIN' with __exception_wrapper__: self.cursor().execute(stmt) def get_tables(self, schema=None): query = ('SELECT tablename FROM pg_catalog.pg_tables ' 'WHERE schemaname = %s ORDER BY tablename') cursor = self.execute_sql(query, (schema or 'public',)) return [table for table, in cursor.fetchall()] def get_views(self, schema=None): query = ('SELECT viewname, definition FROM pg_catalog.pg_views ' 'WHERE schemaname = %s ORDER BY viewname') cursor = self.execute_sql(query, (schema or 'public',)) return [ViewMetadata(view_name, sql.strip(' \t;')) for (view_name, sql) in cursor.fetchall()] def get_indexes(self, table, schema=None): query = """ SELECT i.relname, idxs.indexdef, idx.indisunique, array_to_string(ARRAY( SELECT pg_get_indexdef(idx.indexrelid, k + 1, TRUE) FROM generate_subscripts(idx.indkey, 1) AS k ORDER BY k), ',') FROM pg_catalog.pg_class AS t INNER JOIN pg_catalog.pg_index AS idx ON t.oid = idx.indrelid INNER JOIN pg_catalog.pg_class AS i ON idx.indexrelid = i.oid INNER JOIN pg_catalog.pg_indexes AS idxs ON (idxs.tablename = t.relname AND idxs.indexname = i.relname) WHERE t.relname = %s AND t.relkind = %s AND idxs.schemaname = %s ORDER BY idx.indisunique DESC, i.relname;""" cursor = self.execute_sql(query, (table, 'r', schema or 'public')) return [IndexMetadata(name, sql.rstrip(' ;'), columns.split(','), is_unique, table) for name, sql, is_unique, columns in cursor.fetchall()] def get_columns(self, table, schema=None): query = """ SELECT column_name, is_nullable, data_type, column_default FROM information_schema.columns WHERE table_name = %s AND table_schema = %s ORDER BY ordinal_position""" cursor = self.execute_sql(query, (table, schema or 'public')) pks = set(self.get_primary_keys(table, schema)) return [ColumnMetadata(name, dt, null == 'YES', name in pks, table, df) for name, null, dt, df in cursor.fetchall()] def get_primary_keys(self, table, schema=None): query = """ SELECT kc.column_name FROM information_schema.table_constraints AS tc INNER JOIN information_schema.key_column_usage AS kc ON ( tc.table_name = kc.table_name AND tc.table_schema = kc.table_schema AND tc.constraint_name = kc.constraint_name) WHERE tc.constraint_type = %s AND tc.table_name = %s AND tc.table_schema = %s""" ctype = 'PRIMARY KEY' cursor = self.execute_sql(query, (ctype, table, schema or 'public')) return [pk for pk, in cursor.fetchall()] def get_foreign_keys(self, table, schema=None): sql = """ SELECT DISTINCT kcu.column_name, ccu.table_name, ccu.column_name FROM information_schema.table_constraints AS tc JOIN information_schema.key_column_usage AS kcu ON (tc.constraint_name = kcu.constraint_name AND tc.constraint_schema = kcu.constraint_schema AND tc.table_name = kcu.table_name AND tc.table_schema = kcu.table_schema) JOIN information_schema.constraint_column_usage AS ccu ON (ccu.constraint_name = tc.constraint_name AND ccu.constraint_schema = tc.constraint_schema) WHERE tc.constraint_type = 'FOREIGN KEY' AND tc.table_name = %s AND 
tc.table_schema = %s""" cursor = self.execute_sql(sql, (table, schema or 'public')) return [ForeignKeyMetadata(row[0], row[1], row[2], table) for row in cursor.fetchall()] def sequence_exists(self, sequence): res = self.execute_sql(""" SELECT COUNT(*) FROM pg_class, pg_namespace WHERE relkind='S' AND pg_class.relnamespace = pg_namespace.oid AND relname=%s""", (sequence,)) return bool(res.fetchone()[0]) def get_binary_type(self): return psycopg2.Binary def conflict_statement(self, on_conflict, query): return def conflict_update(self, oc, query): action = oc._action.lower() if oc._action else '' if action in ('ignore', 'nothing'): parts = [SQL('ON CONFLICT')] if oc._conflict_target: parts.append(EnclosedNodeList([ Entity(col) if isinstance(col, basestring) else col for col in oc._conflict_target])) parts.append(SQL('DO NOTHING')) return NodeList(parts) elif action and action != 'update': raise ValueError('The only supported actions for conflict ' 'resolution with Postgresql are "ignore" or ' '"update".') elif not oc._update and not oc._preserve: raise ValueError('If you are not performing any updates (or ' 'preserving any INSERTed values), then the ' 'conflict resolution action should be set to ' '"IGNORE".') elif not (oc._conflict_target or oc._conflict_constraint): raise ValueError('Postgres requires that a conflict target be ' 'specified when doing an upsert.') return self._build_on_conflict_update(oc, query) def extract_date(self, date_part, date_field): return fn.EXTRACT(NodeList((date_part, SQL('FROM'), date_field))) def truncate_date(self, date_part, date_field): return fn.DATE_TRUNC(date_part, date_field) def to_timestamp(self, date_field): return self.extract_date('EPOCH', date_field) def from_timestamp(self, date_field): # Ironically, here, Postgres means "to the Postgresql timestamp type". 
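# e.g. SELECT to_timestamp(1700000000) yields a timestamptz; the method is
# named for the direction of the conversion (FROM a unix epoch), the server
# function for the type it produces.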
return fn.to_timestamp(date_field) def get_noop_select(self, ctx): return ctx.sql(Select().columns(SQL('0')).where(SQL('false'))) def set_time_zone(self, timezone): self.execute_sql('set time zone "%s";' % timezone) class MySQLDatabase(Database): field_types = { 'AUTO': 'INTEGER AUTO_INCREMENT', 'BIGAUTO': 'BIGINT AUTO_INCREMENT', 'BOOL': 'BOOL', 'DECIMAL': 'NUMERIC', 'DOUBLE': 'DOUBLE PRECISION', 'FLOAT': 'FLOAT', 'UUID': 'VARCHAR(40)', 'UUIDB': 'VARBINARY(16)'} operations = { 'LIKE': 'LIKE BINARY', 'ILIKE': 'LIKE', 'REGEXP': 'REGEXP BINARY', 'IREGEXP': 'REGEXP', 'XOR': 'XOR'} param = '%s' quote = '``' compound_select_parentheses = CSQ_PARENTHESES_UNNESTED for_update = True index_using_precedes_table = True limit_max = 2 ** 64 - 1 safe_create_index = False safe_drop_index = False sql_mode = 'PIPES_AS_CONCAT' def init(self, database, **kwargs): params = { 'charset': 'utf8', 'sql_mode': self.sql_mode, 'use_unicode': True} params.update(kwargs) if 'password' in params and mysql_passwd: params['passwd'] = params.pop('password') super(MySQLDatabase, self).init(database, **params) def _connect(self): if mysql is None: raise ImproperlyConfigured('MySQL driver not installed!') conn = mysql.connect(db=self.database, autocommit=True, **self.connect_params) return conn def _set_server_version(self, conn): try: version_raw = conn.server_version except AttributeError: version_raw = conn.get_server_info() self.server_version = self._extract_server_version(version_raw) def _extract_server_version(self, version): version = version.lower() if 'maria' in version: match_obj = re.search(r'(1\d\.\d+\.\d+)', version) else: match_obj = re.search(r'(\d\.\d+\.\d+)', version) if match_obj is not None: return tuple(int(num) for num in match_obj.groups()[0].split('.')) warnings.warn('Unable to determine MySQL version: "%s"' % version) return (0, 0, 0) # Unable to determine version! 
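# Illustrative inputs for _extract_server_version above:
#   '5.7.42-log'                -> (5, 7, 42)
#   '10.6.12-MariaDB-1:10.6.12' -> (10, 6, 12)  # "maria" branch.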
def is_connection_usable(self): if self._state.closed: return False conn = self._state.conn if hasattr(conn, 'ping'): if self.server_version[0] == 8: args = () else: args = (False,) try: conn.ping(*args) except Exception: return False return True def default_values_insert(self, ctx): return ctx.literal('() VALUES ()') def begin(self, isolation_level=None): if self.is_closed(): self.connect() with __exception_wrapper__: curs = self.cursor() if isolation_level: curs.execute('SET TRANSACTION ISOLATION LEVEL %s' % isolation_level) curs.execute('BEGIN') def get_tables(self, schema=None): query = ('SELECT table_name FROM information_schema.tables ' 'WHERE table_schema = DATABASE() AND table_type != %s ' 'ORDER BY table_name') return [table for table, in self.execute_sql(query, ('VIEW',))] def get_views(self, schema=None): query = ('SELECT table_name, view_definition ' 'FROM information_schema.views ' 'WHERE table_schema = DATABASE() ORDER BY table_name') cursor = self.execute_sql(query) return [ViewMetadata(*row) for row in cursor.fetchall()] def get_indexes(self, table, schema=None): cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table) unique = set() indexes = {} for row in cursor.fetchall(): if not row[1]: unique.add(row[2]) indexes.setdefault(row[2], []) indexes[row[2]].append(row[4]) return [IndexMetadata(name, None, indexes[name], name in unique, table) for name in indexes] def get_columns(self, table, schema=None): sql = """ SELECT column_name, is_nullable, data_type, column_default FROM information_schema.columns WHERE table_name = %s AND table_schema = DATABASE() ORDER BY ordinal_position""" cursor = self.execute_sql(sql, (table,)) pks = set(self.get_primary_keys(table)) return [ColumnMetadata(name, dt, null == 'YES', name in pks, table, df) for name, null, dt, df in cursor.fetchall()] def get_primary_keys(self, table, schema=None): cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table) return [row[4] for row in filter(lambda row: row[2] == 'PRIMARY', cursor.fetchall())] def get_foreign_keys(self, table, schema=None): query = """ SELECT column_name, referenced_table_name, referenced_column_name FROM information_schema.key_column_usage WHERE table_name = %s AND table_schema = DATABASE() AND referenced_table_name IS NOT NULL AND referenced_column_name IS NOT NULL""" cursor = self.execute_sql(query, (table,)) return [ ForeignKeyMetadata(column, dest_table, dest_column, table) for column, dest_table, dest_column in cursor.fetchall()] def get_binary_type(self): return mysql.Binary def conflict_statement(self, on_conflict, query): if not on_conflict._action: return action = on_conflict._action.lower() if action == 'replace': return SQL('REPLACE') elif action == 'ignore': return SQL('INSERT IGNORE') elif action != 'update': raise ValueError('Un-supported action for conflict resolution. ' 'MySQL supports REPLACE, IGNORE and UPDATE.') def conflict_update(self, on_conflict, query): if on_conflict._where or on_conflict._conflict_target or \ on_conflict._conflict_constraint: raise ValueError('MySQL does not support the specification of ' 'where clauses or conflict targets for conflict ' 'resolution.') updates = [] if on_conflict._preserve: # Here we need to determine which function to use, which varies # depending on the MySQL server version. MySQL and MariaDB prior to # 10.3.3 use "VALUES", while MariaDB 10.3.3+ use "VALUE". 
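# Concretely, a preserved column renders as, e.g.:
#   MySQL / MariaDB < 10.3.3:  ON DUPLICATE KEY UPDATE `name` = VALUES(`name`)
#   MariaDB >= 10.3.3:         ON DUPLICATE KEY UPDATE `name` = VALUE(`name`)
# MariaDB renamed the function when VALUES() became a table value constructor.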
version = self.server_version or (0,) if version[0] == 10 and version >= (10, 3, 3): VALUE_FN = fn.VALUE else: VALUE_FN = fn.VALUES for column in on_conflict._preserve: entity = ensure_entity(column) expression = NodeList(( ensure_entity(column), SQL('='), VALUE_FN(entity))) updates.append(expression) if on_conflict._update: for k, v in on_conflict._update.items(): if not isinstance(v, Node): # Attempt to resolve string field-names to their respective # field object, to apply data-type conversions. if isinstance(k, basestring): k = getattr(query.table, k) if isinstance(k, Field): v = k.to_value(v) else: v = Value(v, unpack=False) updates.append(NodeList((ensure_entity(k), SQL('='), v))) if updates: return NodeList((SQL('ON DUPLICATE KEY UPDATE'), CommaNodeList(updates))) def extract_date(self, date_part, date_field): return fn.EXTRACT(NodeList((SQL(date_part), SQL('FROM'), date_field))) def truncate_date(self, date_part, date_field): return fn.DATE_FORMAT(date_field, __mysql_date_trunc__[date_part], python_value=simple_date_time) def to_timestamp(self, date_field): return fn.UNIX_TIMESTAMP(date_field) def from_timestamp(self, date_field): return fn.FROM_UNIXTIME(date_field) def random(self): return fn.rand() def get_noop_select(self, ctx): return ctx.literal('DO 0') # TRANSACTION CONTROL. class _manual(object): def __init__(self, db): self.db = db def __call__(self, fn): @wraps(fn) def inner(*args, **kwargs): with _manual(self.db): return fn(*args, **kwargs) return inner def __enter__(self): top = self.db.top_transaction() if top is not None and not isinstance(top, _manual): raise ValueError('Cannot enter manual commit block while a ' 'transaction is active.') self.db.push_transaction(self) def __exit__(self, exc_type, exc_val, exc_tb): if self.db.pop_transaction() is not self: raise ValueError('Transaction stack corrupted while exiting ' 'manual commit block.') class _atomic(object): def __init__(self, db, *args, **kwargs): self.db = db self._transaction_args = (args, kwargs) def __call__(self, fn): @wraps(fn) def inner(*args, **kwargs): a, k = self._transaction_args with _atomic(self.db, *a, **k): return fn(*args, **kwargs) return inner def __enter__(self): if self.db.transaction_depth() == 0: args, kwargs = self._transaction_args self._helper = self.db.transaction(*args, **kwargs) elif isinstance(self.db.top_transaction(), _manual): raise ValueError('Cannot enter atomic commit block while in ' 'manual commit mode.') else: self._helper = self.db.savepoint() return self._helper.__enter__() def __exit__(self, exc_type, exc_val, exc_tb): return self._helper.__exit__(exc_type, exc_val, exc_tb) class _transaction(object): def __init__(self, db, *args, **kwargs): self.db = db self._begin_args = (args, kwargs) def __call__(self, fn): @wraps(fn) def inner(*args, **kwargs): a, k = self._begin_args with _transaction(self.db, *a, **k): return fn(*args, **kwargs) return inner def _begin(self): args, kwargs = self._begin_args self.db.begin(*args, **kwargs) def commit(self, begin=True): self.db.commit() if begin: self._begin() def rollback(self, begin=True): self.db.rollback() if begin: self._begin() def __enter__(self): if self.db.transaction_depth() == 0: self._begin() self.db.push_transaction(self) return self def __exit__(self, exc_type, exc_val, exc_tb): depth = self.db.transaction_depth() try: if exc_type and depth == 1: self.rollback(False) elif depth == 1: try: self.commit(False) except: self.rollback(False) raise finally: self.db.pop_transaction() class _savepoint(object): def __init__(self, 
db, sid=None): self.db = db self.sid = sid or 's' + uuid.uuid4().hex self.quoted_sid = self.sid.join(self.db.quote) def __call__(self, fn): @wraps(fn) def inner(*args, **kwargs): with _savepoint(self.db): return fn(*args, **kwargs) return inner def _begin(self): self.db.execute_sql('SAVEPOINT %s;' % self.quoted_sid) def commit(self, begin=True): self.db.execute_sql('RELEASE SAVEPOINT %s;' % self.quoted_sid) if begin: self._begin() def rollback(self): self.db.execute_sql('ROLLBACK TO SAVEPOINT %s;' % self.quoted_sid) def __enter__(self): self._begin() return self def __exit__(self, exc_type, exc_val, exc_tb): if exc_type: self.rollback() else: try: self.commit(begin=False) except: self.rollback() raise # CURSOR REPRESENTATIONS. class CursorWrapper(object): def __init__(self, cursor): self.cursor = cursor self.count = 0 self.index = 0 self.initialized = False self.populated = False self.row_cache = [] def __iter__(self): if self.populated: return iter(self.row_cache) return ResultIterator(self) def __getitem__(self, item): if isinstance(item, slice): stop = item.stop if stop is None or stop < 0: self.fill_cache() else: self.fill_cache(stop) return self.row_cache[item] elif isinstance(item, int): self.fill_cache(item if item > 0 else 0) return self.row_cache[item] else: raise ValueError('CursorWrapper only supports integer and slice ' 'indexes.') def __len__(self): self.fill_cache() return self.count def initialize(self): pass def iterate(self, cache=True): row = self.cursor.fetchone() if row is None: self.populated = True self.cursor.close() raise StopIteration elif not self.initialized: self.initialize() # Lazy initialization. self.initialized = True self.count += 1 result = self.process_row(row) if cache: self.row_cache.append(result) return result def process_row(self, row): return row def iterator(self): """Efficient one-pass iteration over the result set.""" while True: try: yield self.iterate(False) except StopIteration: return def fill_cache(self, n=0): n = n or float('Inf') if n < 0: raise ValueError('Negative values are not supported.') iterator = ResultIterator(self) iterator.index = self.count while not self.populated and (n > self.count): try: iterator.next() except StopIteration: break class DictCursorWrapper(CursorWrapper): def _initialize_columns(self): description = self.cursor.description self.columns = [t[0][t[0].rfind('.') + 1:].strip('()"`') for t in description] self.ncols = len(description) initialize = _initialize_columns def _row_to_dict(self, row): result = {} for i in range(self.ncols): result.setdefault(self.columns[i], row[i]) # Do not overwrite. 
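# Using setdefault() means that when a query selects two columns with the
# same name (e.g. "id" from both sides of a join), the dict keeps the first
# occurrence instead of silently overwriting it with the last.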
return result process_row = _row_to_dict class NamedTupleCursorWrapper(CursorWrapper): def initialize(self): description = self.cursor.description self.tuple_class = collections.namedtuple('Row', [ t[0][t[0].rfind('.') + 1:].strip('()"`') for t in description]) def process_row(self, row): return self.tuple_class(*row) class ObjectCursorWrapper(DictCursorWrapper): def __init__(self, cursor, constructor): super(ObjectCursorWrapper, self).__init__(cursor) self.constructor = constructor def process_row(self, row): row_dict = self._row_to_dict(row) return self.constructor(**row_dict) class ResultIterator(object): def __init__(self, cursor_wrapper): self.cursor_wrapper = cursor_wrapper self.index = 0 def __iter__(self): return self def next(self): if self.index < self.cursor_wrapper.count: obj = self.cursor_wrapper.row_cache[self.index] elif not self.cursor_wrapper.populated: self.cursor_wrapper.iterate() obj = self.cursor_wrapper.row_cache[self.index] else: raise StopIteration self.index += 1 return obj __next__ = next # FIELDS class FieldAccessor(object): def __init__(self, model, field, name): self.model = model self.field = field self.name = name def __get__(self, instance, instance_type=None): if instance is not None: return instance.__data__.get(self.name) return self.field def __set__(self, instance, value): instance.__data__[self.name] = value instance._dirty.add(self.name) class ForeignKeyAccessor(FieldAccessor): def __init__(self, model, field, name): super(ForeignKeyAccessor, self).__init__(model, field, name) self.rel_model = field.rel_model def get_rel_instance(self, instance): value = instance.__data__.get(self.name) if value is not None or self.name in instance.__rel__: if self.name not in instance.__rel__ and self.field.lazy_load: obj = self.rel_model.get(self.field.rel_field == value) instance.__rel__[self.name] = obj return instance.__rel__.get(self.name, value) elif not self.field.null and self.field.lazy_load: raise self.rel_model.DoesNotExist return value def __get__(self, instance, instance_type=None): if instance is not None: return self.get_rel_instance(instance) return self.field def __set__(self, instance, obj): if isinstance(obj, self.rel_model): instance.__data__[self.name] = getattr(obj, self.field.rel_field.name) instance.__rel__[self.name] = obj else: fk_value = instance.__data__.get(self.name) instance.__data__[self.name] = obj if (obj != fk_value or obj is None) and \ self.name in instance.__rel__: del instance.__rel__[self.name] instance._dirty.add(self.name) class BackrefAccessor(object): def __init__(self, field): self.field = field self.model = field.rel_model self.rel_model = field.model def __get__(self, instance, instance_type=None): if instance is not None: dest = self.field.rel_field.name return (self.rel_model .select() .where(self.field == getattr(instance, dest))) return self class ObjectIdAccessor(object): """Gives direct access to the underlying id""" def __init__(self, field): self.field = field def __get__(self, instance, instance_type=None): if instance is not None: value = instance.__data__.get(self.field.name) # Pull the object-id from the related object if it is not set. 
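# This accessor backs the "<field>_id"-style attribute, so with a
# hypothetical Tweet model having a "user" foreign key, tweet.user_id
# returns the raw key value, falling back to the related instance's
# primary key when only the object has been assigned.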
if value is None and self.field.name in instance.__rel__: rel_obj = instance.__rel__[self.field.name] value = getattr(rel_obj, self.field.rel_field.name) return value return self.field def __set__(self, instance, value): setattr(instance, self.field.name, value) class Field(ColumnBase): _field_counter = 0 _order = 0 accessor_class = FieldAccessor auto_increment = False default_index_type = None field_type = 'DEFAULT' unpack = True def __init__(self, null=False, index=False, unique=False, column_name=None, default=None, primary_key=False, constraints=None, sequence=None, collation=None, unindexed=False, choices=None, help_text=None, verbose_name=None, index_type=None, db_column=None, _hidden=False): if db_column is not None: __deprecated__('"db_column" has been deprecated in favor of ' '"column_name" for Field objects.') column_name = db_column self.null = null self.index = index self.unique = unique self.column_name = column_name self.default = default self.primary_key = primary_key self.constraints = constraints # List of column constraints. self.sequence = sequence # Name of sequence, e.g. foo_id_seq. self.collation = collation self.unindexed = unindexed self.choices = choices self.help_text = help_text self.verbose_name = verbose_name self.index_type = index_type or self.default_index_type self._hidden = _hidden # Used internally for recovering the order in which Fields were defined # on the Model class. Field._field_counter += 1 self._order = Field._field_counter self._sort_key = (self.primary_key and 1 or 2), self._order def __hash__(self): return hash(self.name + '.' + self.model.__name__) def __repr__(self): if hasattr(self, 'model') and getattr(self, 'name', None): return '<%s: %s.%s>' % (type(self).__name__, self.model.__name__, self.name) return '<%s: (unbound)>' % type(self).__name__ def bind(self, model, name, set_attribute=True): self.model = model self.name = self.safe_name = name self.column_name = self.column_name or name if set_attribute: setattr(model, name, self.accessor_class(model, self, name)) @property def column(self): return Column(self.model._meta.table, self.column_name) def adapt(self, value): return value def db_value(self, value): return value if value is None else self.adapt(value) def python_value(self, value): return value if value is None else self.adapt(value) def to_value(self, value): return Value(value, self.db_value, unpack=False) def get_sort_key(self, ctx): return self._sort_key def __sql__(self, ctx): return ctx.sql(self.column) def get_modifiers(self): pass def ddl_datatype(self, ctx): if ctx and ctx.state.field_types: column_type = ctx.state.field_types.get(self.field_type, self.field_type) else: column_type = self.field_type modifiers = self.get_modifiers() if column_type and modifiers: modifier_literal = ', '.join([str(m) for m in modifiers]) return SQL('%s(%s)' % (column_type, modifier_literal)) else: return SQL(column_type) def ddl(self, ctx): accum = [Entity(self.column_name)] data_type = self.ddl_datatype(ctx) if data_type: accum.append(data_type) if self.unindexed: accum.append(SQL('UNINDEXED')) if not self.null: accum.append(SQL('NOT NULL')) if self.primary_key: accum.append(SQL('PRIMARY KEY')) if self.sequence: accum.append(SQL("DEFAULT NEXTVAL('%s')" % self.sequence)) if self.constraints: accum.extend(self.constraints) if self.collation: accum.append(SQL('COLLATE %s' % self.collation)) return NodeList(accum) class AnyField(Field): field_type = 'ANY' class IntegerField(Field): field_type = 'INT' def adapt(self, value): try: return 
int(value) except ValueError: return value class BigIntegerField(IntegerField): field_type = 'BIGINT' class SmallIntegerField(IntegerField): field_type = 'SMALLINT' class AutoField(IntegerField): auto_increment = True field_type = 'AUTO' def __init__(self, *args, **kwargs): if kwargs.get('primary_key') is False: raise ValueError('%s must always be a primary key.' % type(self)) kwargs['primary_key'] = True super(AutoField, self).__init__(*args, **kwargs) class BigAutoField(AutoField): field_type = 'BIGAUTO' class IdentityField(AutoField): field_type = 'INT GENERATED BY DEFAULT AS IDENTITY' def __init__(self, generate_always=False, **kwargs): if generate_always: self.field_type = 'INT GENERATED ALWAYS AS IDENTITY' super(IdentityField, self).__init__(**kwargs) class PrimaryKeyField(AutoField): def __init__(self, *args, **kwargs): __deprecated__('"PrimaryKeyField" has been renamed to "AutoField". ' 'Please update your code accordingly as this will be ' 'completely removed in a subsequent release.') super(PrimaryKeyField, self).__init__(*args, **kwargs) class FloatField(Field): field_type = 'FLOAT' def adapt(self, value): try: return float(value) except ValueError: return value class DoubleField(FloatField): field_type = 'DOUBLE' class DecimalField(Field): field_type = 'DECIMAL' def __init__(self, max_digits=10, decimal_places=5, auto_round=False, rounding=None, *args, **kwargs): self.max_digits = max_digits self.decimal_places = decimal_places self.auto_round = auto_round self.rounding = rounding or decimal.DefaultContext.rounding self._exp = decimal.Decimal(10) ** (-self.decimal_places) super(DecimalField, self).__init__(*args, **kwargs) def get_modifiers(self): return [self.max_digits, self.decimal_places] def db_value(self, value): D = decimal.Decimal if not value: return value if value is None else D(0) if self.auto_round: decimal_value = D(text_type(value)) return decimal_value.quantize(self._exp, rounding=self.rounding) return value def python_value(self, value): if value is not None: if isinstance(value, decimal.Decimal): return value return decimal.Decimal(text_type(value)) class _StringField(Field): def adapt(self, value): if isinstance(value, text_type): return value elif isinstance(value, bytes_type): return value.decode('utf-8') return text_type(value) def __add__(self, other): return StringExpression(self, OP.CONCAT, other) def __radd__(self, other): return StringExpression(other, OP.CONCAT, self) class CharField(_StringField): field_type = 'VARCHAR' def __init__(self, max_length=255, *args, **kwargs): self.max_length = max_length super(CharField, self).__init__(*args, **kwargs) def get_modifiers(self): return self.max_length and [self.max_length] or None class FixedCharField(CharField): field_type = 'CHAR' def python_value(self, value): value = super(FixedCharField, self).python_value(value) if value: value = value.strip() return value class TextField(_StringField): field_type = 'TEXT' class BlobField(Field): field_type = 'BLOB' def _db_hook(self, database): if database is None: self._constructor = bytearray else: self._constructor = database.get_binary_type() def bind(self, model, name, set_attribute=True): self._constructor = bytearray if model._meta.database: if isinstance(model._meta.database, Proxy): model._meta.database.attach_callback(self._db_hook) else: self._db_hook(model._meta.database) # Attach a hook to the model metadata; in the event the database is # changed or set at run-time, we will be sure to apply our callback and # use the proper data-type for our database 
driver. model._meta._db_hooks.append(self._db_hook) return super(BlobField, self).bind(model, name, set_attribute) def db_value(self, value): if isinstance(value, text_type): value = value.encode('raw_unicode_escape') if isinstance(value, bytes_type): return self._constructor(value) return value class BitField(BitwiseMixin, BigIntegerField): def __init__(self, *args, **kwargs): kwargs.setdefault('default', 0) super(BitField, self).__init__(*args, **kwargs) self.__current_flag = 1 def flag(self, value=None): if value is None: value = self.__current_flag self.__current_flag <<= 1 else: self.__current_flag = value << 1 class FlagDescriptor(ColumnBase): def __init__(self, field, value): self._field = field self._value = value super(FlagDescriptor, self).__init__() def clear(self): return self._field.bin_and(~self._value) def set(self): return self._field.bin_or(self._value) def __get__(self, instance, instance_type=None): if instance is None: return self value = getattr(instance, self._field.name) or 0 return (value & self._value) != 0 def __set__(self, instance, is_set): if is_set not in (True, False): raise ValueError('Value must be either True or False') value = getattr(instance, self._field.name) or 0 if is_set: value |= self._value else: value &= ~self._value setattr(instance, self._field.name, value) def __sql__(self, ctx): return ctx.sql(self._field.bin_and(self._value) != 0) return FlagDescriptor(self, value) class BigBitFieldData(object): def __init__(self, instance, name): self.instance = instance self.name = name value = self.instance.__data__.get(self.name) if not value: value = bytearray() elif not isinstance(value, bytearray): value = bytearray(value) self._buffer = self.instance.__data__[self.name] = value def clear(self): self._buffer.clear() def _ensure_length(self, idx): byte_num, byte_offset = divmod(idx, 8) cur_size = len(self._buffer) if cur_size <= byte_num: self._buffer.extend(b'\x00' * ((byte_num + 1) - cur_size)) return byte_num, byte_offset def set_bit(self, idx): byte_num, byte_offset = self._ensure_length(idx) self._buffer[byte_num] |= (1 << byte_offset) def clear_bit(self, idx): byte_num, byte_offset = self._ensure_length(idx) self._buffer[byte_num] &= ~(1 << byte_offset) def toggle_bit(self, idx): byte_num, byte_offset = self._ensure_length(idx) self._buffer[byte_num] ^= (1 << byte_offset) return bool(self._buffer[byte_num] & (1 << byte_offset)) def is_set(self, idx): byte_num, byte_offset = divmod(idx, 8) cur_size = len(self._buffer) if cur_size <= byte_num: return False return bool(self._buffer[byte_num] & (1 << byte_offset)) __getitem__ = is_set def __setitem__(self, item, value): self.set_bit(item) if value else self.clear_bit(item) __delitem__ = clear_bit def __len__(self): return len(self._buffer) def _get_compatible_data(self, other): if isinstance(other, BigBitFieldData): data = other._buffer elif isinstance(other, (bytes, bytearray, memoryview)): data = other else: raise ValueError('Incompatible data-type') diff = len(data) - len(self) if diff > 0: self._buffer.extend(b'\x00' * diff) return data def _bitwise_op(self, other, op): if isinstance(other, BigBitFieldData): data = other._buffer elif isinstance(other, (bytes, bytearray, memoryview)): data = other else: raise ValueError('Incompatible data-type') buf = bytearray(b'\x00' * max(len(self), len(other))) it = itertools.zip_longest(self._buffer, data, fillvalue=0) for i, (a, b) in enumerate(it): buf[i] = op(a, b) return buf def __and__(self, other): return self._bitwise_op(other, operator.and_) def 
__or__(self, other): return self._bitwise_op(other, operator.or_) def __xor__(self, other): return self._bitwise_op(other, operator.xor) def __iter__(self): for b in self._buffer: for j in range(8): yield 1 if (b & (1 << j)) else 0 def __repr__(self): return repr(self._buffer) if sys.version_info[0] < 3: def __str__(self): return bytes_type(self._buffer) else: def __bytes__(self): return bytes_type(self._buffer) class BigBitFieldAccessor(FieldAccessor): def __get__(self, instance, instance_type=None): if instance is None: return self.field return BigBitFieldData(instance, self.name) def __set__(self, instance, value): if isinstance(value, memoryview): value = value.tobytes() elif isinstance(value, buffer_type): value = bytes(value) elif isinstance(value, bytearray): value = bytes_type(value) elif isinstance(value, BigBitFieldData): value = bytes_type(value._buffer) elif isinstance(value, text_type): value = value.encode('utf-8') elif not isinstance(value, bytes_type): raise ValueError('Value must be either a bytes, memoryview or ' 'BigBitFieldData instance.') super(BigBitFieldAccessor, self).__set__(instance, value) class BigBitField(BlobField): accessor_class = BigBitFieldAccessor def __init__(self, *args, **kwargs): kwargs.setdefault('default', bytes_type) super(BigBitField, self).__init__(*args, **kwargs) def db_value(self, value): return bytes_type(value) if value is not None else value class UUIDField(Field): field_type = 'UUID' def db_value(self, value): if isinstance(value, basestring) and len(value) == 32: # Hex string. No transformation is necessary. return value elif isinstance(value, bytes) and len(value) == 16: # Allow raw binary representation. value = uuid.UUID(bytes=value) if isinstance(value, uuid.UUID): return value.hex try: return uuid.UUID(value).hex except: return value def python_value(self, value): if isinstance(value, uuid.UUID): return value return uuid.UUID(value) if value is not None else None class BinaryUUIDField(BlobField): field_type = 'UUIDB' def db_value(self, value): if isinstance(value, bytes) and len(value) == 16: # Raw binary value. No transformation is necessary. return self._constructor(value) elif isinstance(value, basestring) and len(value) == 32: # Allow hex string representation. 
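# Illustrative usage sketch (not part of this module): how the BitField and
# BigBitField classes above are typically used. The "Post" model, "db"
# database, and flag names are hypothetical.
#
#     db = SqliteDatabase(':memory:')
#
#     class Post(Model):
#         flags = BitField()             # Backed by a BIGINT column.
#         is_sticky = flags.flag(1)      # FlagDescriptor for bit 1.
#         is_favorite = flags.flag(2)    # FlagDescriptor for bit 2.
#         data = BigBitField()           # Arbitrary-length bitmap in a BLOB.
#
#         class Meta:
#             database = db
#
#     db.create_tables([Post])
#     post = Post.create()
#     post.is_sticky = True       # FlagDescriptor.__set__ ORs in bit 1.
#     post.data.set_bit(63)       # BigBitFieldData grows the buffer lazily.
#     post.save()
#     sticky = Post.select().where(Post.is_sticky == True)  # flags & 1 != 0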
value = uuid.UUID(hex=value) if isinstance(value, uuid.UUID): return self._constructor(value.bytes) elif value is not None: raise ValueError('value for binary UUID field must be UUID(), ' 'a hexadecimal string, or a bytes object.') def python_value(self, value): if isinstance(value, uuid.UUID): return value elif isinstance(value, memoryview): value = value.tobytes() elif value and not isinstance(value, bytes): value = bytes(value) return uuid.UUID(bytes=value) if value is not None else None def _date_part(date_part): def dec(self): return self.model._meta.database.extract_date(date_part, self) return dec def format_date_time(value, formats, post_process=None): post_process = post_process or (lambda x: x) for fmt in formats: try: return post_process(datetime.datetime.strptime(value, fmt)) except ValueError: pass return value def simple_date_time(value): try: return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S') except (TypeError, ValueError): return value class _BaseFormattedField(Field): formats = None def __init__(self, formats=None, *args, **kwargs): if formats is not None: self.formats = formats super(_BaseFormattedField, self).__init__(*args, **kwargs) class DateTimeField(_BaseFormattedField): field_type = 'DATETIME' formats = [ '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d', ] def adapt(self, value): if value and isinstance(value, basestring): return format_date_time(value, self.formats) return value def to_timestamp(self): return self.model._meta.database.to_timestamp(self) def truncate(self, part): return self.model._meta.database.truncate_date(part, self) year = property(_date_part('year')) month = property(_date_part('month')) day = property(_date_part('day')) hour = property(_date_part('hour')) minute = property(_date_part('minute')) second = property(_date_part('second')) class DateField(_BaseFormattedField): field_type = 'DATE' formats = [ '%Y-%m-%d', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', ] def adapt(self, value): if value and isinstance(value, basestring): pp = lambda x: x.date() return format_date_time(value, self.formats, pp) elif value and isinstance(value, datetime.datetime): return value.date() return value def to_timestamp(self): return self.model._meta.database.to_timestamp(self) def truncate(self, part): return self.model._meta.database.truncate_date(part, self) year = property(_date_part('year')) month = property(_date_part('month')) day = property(_date_part('day')) class TimeField(_BaseFormattedField): field_type = 'TIME' formats = [ '%H:%M:%S.%f', '%H:%M:%S', '%H:%M', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S', ] def adapt(self, value): if value: if isinstance(value, basestring): pp = lambda x: x.time() return format_date_time(value, self.formats, pp) elif isinstance(value, datetime.datetime): return value.time() if value is not None and isinstance(value, datetime.timedelta): return (datetime.datetime.min + value).time() return value hour = property(_date_part('hour')) minute = property(_date_part('minute')) second = property(_date_part('second')) def _timestamp_date_part(date_part): def dec(self): db = self.model._meta.database expr = ((self / Value(self.resolution, converter=False)) if self.resolution > 1 else self) return db.extract_date(date_part, db.from_timestamp(expr)) return dec class TimestampField(BigIntegerField): # Support second -> microsecond resolution. 
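# Illustrative sketch (not part of this module): querying with the date-part
# properties defined on the date/time fields above. "Event" and "db" are
# hypothetical names.
#
#     class Event(Model):
#         created = DateTimeField(default=datetime.datetime.now)
#
#         class Meta:
#             database = db
#
#     # year/month/day/... delegate to the database's date-part extraction
#     # via _date_part(), so this filters in SQL, not in Python:
#     june = Event.select().where((Event.created.year == 2024) &
#                                 (Event.created.month == 6))
#
#     # truncate() groups timestamps at a coarser resolution:
#     per_day = (Event
#                .select(Event.created.truncate('day'), fn.COUNT(Event.id))
#                .group_by(Event.created.truncate('day')))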
valid_resolutions = [10**i for i in range(7)] def __init__(self, *args, **kwargs): self.resolution = kwargs.pop('resolution', None) if not self.resolution: self.resolution = 1 elif self.resolution in range(2, 7): self.resolution = 10 ** self.resolution elif self.resolution not in self.valid_resolutions: raise ValueError('TimestampField resolution must be one of: %s' % ', '.join(str(i) for i in self.valid_resolutions)) self.ticks_to_microsecond = 1000000 // self.resolution self.utc = kwargs.pop('utc', False) or False dflt = utcnow if self.utc else datetime.datetime.now kwargs.setdefault('default', dflt) super(TimestampField, self).__init__(*args, **kwargs) def local_to_utc(self, dt): # Convert naive local datetime into naive UTC, e.g.: # 2019-03-01T12:00:00 (local=US/Central) -> 2019-03-01T18:00:00. # 2019-05-01T12:00:00 (local=US/Central) -> 2019-05-01T17:00:00. # 2019-03-01T12:00:00 (local=UTC) -> 2019-03-01T12:00:00. return datetime.datetime(*time.gmtime(time.mktime(dt.timetuple()))[:6]) def utc_to_local(self, dt): # Convert a naive UTC datetime into local time, e.g.: # 2019-03-01T18:00:00 (local=US/Central) -> 2019-03-01T12:00:00. # 2019-05-01T17:00:00 (local=US/Central) -> 2019-05-01T12:00:00. # 2019-03-01T12:00:00 (local=UTC) -> 2019-03-01T12:00:00. ts = calendar.timegm(dt.utctimetuple()) return datetime.datetime.fromtimestamp(ts) def get_timestamp(self, value): if self.utc: # If utc-mode is on, then we assume all naive datetimes are in UTC. return calendar.timegm(value.utctimetuple()) else: return time.mktime(value.timetuple()) def db_value(self, value): if value is None: return if isinstance(value, datetime.datetime): pass elif isinstance(value, datetime.date): value = datetime.datetime(value.year, value.month, value.day) else: return int(round(value * self.resolution)) timestamp = self.get_timestamp(value) if self.resolution > 1: timestamp += (value.microsecond * .000001) timestamp *= self.resolution return int(round(timestamp)) def python_value(self, value): if value is not None and isinstance(value, (int, float, long)): if self.resolution > 1: value, ticks = divmod(value, self.resolution) microseconds = int(ticks * self.ticks_to_microsecond) else: microseconds = 0 if self.utc: value = utcfromtimestamp(value) else: value = datetime.datetime.fromtimestamp(value) if microseconds: value = value.replace(microsecond=microseconds) return value def from_timestamp(self): expr = ((self / Value(self.resolution, converter=False)) if self.resolution > 1 else self) return self.model._meta.database.from_timestamp(expr) year = property(_timestamp_date_part('year')) month = property(_timestamp_date_part('month')) day = property(_timestamp_date_part('day')) hour = property(_timestamp_date_part('hour')) minute = property(_timestamp_date_part('minute')) second = property(_timestamp_date_part('second')) class IPField(BigIntegerField): def db_value(self, val): if val is not None: return struct.unpack('!I', socket.inet_aton(val))[0] def python_value(self, val): if val is not None: return socket.inet_ntoa(struct.pack('!I', val)) class BooleanField(Field): field_type = 'BOOL' adapt = bool class BareField(Field): def __init__(self, adapt=None, *args, **kwargs): super(BareField, self).__init__(*args, **kwargs) if adapt is not None: self.adapt = adapt def ddl_datatype(self, ctx): return class ForeignKeyField(Field): accessor_class = ForeignKeyAccessor backref_accessor_class = BackrefAccessor def __init__(self, model, field=None, backref=None, on_delete=None, on_update=None, deferrable=None, _deferred=None, 
rel_model=None, to_field=None, object_id_name=None, lazy_load=True, constraint_name=None, related_name=None, *args, **kwargs): kwargs.setdefault('index', True) super(ForeignKeyField, self).__init__(*args, **kwargs) if rel_model is not None: __deprecated__('"rel_model" has been deprecated in favor of ' '"model" for ForeignKeyField objects.') model = rel_model if to_field is not None: __deprecated__('"to_field" has been deprecated in favor of ' '"field" for ForeignKeyField objects.') field = to_field if related_name is not None: __deprecated__('"related_name" has been deprecated in favor of ' '"backref" for Field objects.') backref = related_name self._is_self_reference = model == 'self' self.rel_model = model self.rel_field = field self.declared_backref = backref self.backref = None self.on_delete = on_delete self.on_update = on_update self.deferrable = deferrable self.deferred = _deferred self.object_id_name = object_id_name self.lazy_load = lazy_load self.constraint_name = constraint_name @property def field_type(self): if not isinstance(self.rel_field, AutoField): return self.rel_field.field_type elif isinstance(self.rel_field, BigAutoField): return BigIntegerField.field_type return IntegerField.field_type def get_modifiers(self): if not isinstance(self.rel_field, AutoField): return self.rel_field.get_modifiers() return super(ForeignKeyField, self).get_modifiers() def adapt(self, value): return self.rel_field.adapt(value) def db_value(self, value): if isinstance(value, self.rel_model): value = getattr(value, self.rel_field.name) return self.rel_field.db_value(value) def python_value(self, value): if isinstance(value, self.rel_model): return value return self.rel_field.python_value(value) def bind(self, model, name, set_attribute=True): if not self.column_name: self.column_name = name if name.endswith('_id') else name + '_id' if not self.object_id_name: self.object_id_name = self.column_name if self.object_id_name == name: self.object_id_name += '_id' elif self.object_id_name == name: raise ValueError('ForeignKeyField "%s"."%s" specifies an ' 'object_id_name that conflicts with its field ' 'name.' % (model._meta.name, name)) if self._is_self_reference: self.rel_model = model if isinstance(self.rel_field, basestring): self.rel_field = getattr(self.rel_model, self.rel_field) elif self.rel_field is None: self.rel_field = self.rel_model._meta.primary_key # Bind field before assigning backref, so field is bound when # calling declared_backref() (if callable). 
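# Illustrative sketch (not part of this module): a typical ForeignKeyField,
# showing the behaviors implemented above. "User", "Tweet" and "db" are
# hypothetical names.
#
#     class User(Model):
#         username = TextField()
#
#         class Meta:
#             database = db
#
#     class Tweet(Model):
#         user = ForeignKeyField(User, backref='tweets', on_delete='CASCADE')
#         content = TextField()
#
#         class Meta:
#             database = db
#
#     tweet = Tweet.get()
#     tweet.user     # Lazily fetches the related User (lazy_load=True).
#     tweet.user_id  # ObjectIdAccessor: the raw column value, no query.
#     # user.tweets is a BackrefAccessor returning a ModelSelect.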
super(ForeignKeyField, self).bind(model, name, set_attribute) self.safe_name = self.object_id_name if callable_(self.declared_backref): self.backref = self.declared_backref(self) else: self.backref, self.declared_backref = self.declared_backref, None if not self.backref: self.backref = '%s_set' % model._meta.name if set_attribute: setattr(model, self.object_id_name, ObjectIdAccessor(self)) if self.backref not in '!+': setattr(self.rel_model, self.backref, self.backref_accessor_class(self)) def foreign_key_constraint(self): parts = [] if self.constraint_name: parts.extend((SQL('CONSTRAINT'), Entity(self.constraint_name))) parts.extend([ SQL('FOREIGN KEY'), EnclosedNodeList((self,)), SQL('REFERENCES'), self.rel_model, EnclosedNodeList((self.rel_field,))]) if self.on_delete: parts.append(SQL('ON DELETE %s' % self.on_delete)) if self.on_update: parts.append(SQL('ON UPDATE %s' % self.on_update)) if self.deferrable: parts.append(SQL('DEFERRABLE %s' % self.deferrable)) return NodeList(parts) def __getattr__(self, attr): if attr.startswith('__'): # Prevent recursion error when deep-copying. raise AttributeError('Cannot look up non-existent "__" methods.') if attr in self.rel_model._meta.fields: return self.rel_model._meta.fields[attr] raise AttributeError('Foreign-key has no attribute %s, nor is it a ' 'valid field on the related model.' % attr) class DeferredForeignKey(Field): _unresolved = set() def __init__(self, rel_model_name, **kwargs): self.field_kwargs = kwargs self.rel_model_name = rel_model_name.lower() DeferredForeignKey._unresolved.add(self) super(DeferredForeignKey, self).__init__( column_name=kwargs.get('column_name'), null=kwargs.get('null'), primary_key=kwargs.get('primary_key')) __hash__ = object.__hash__ def __deepcopy__(self, memo=None): return DeferredForeignKey(self.rel_model_name, **self.field_kwargs) def set_model(self, rel_model): field = ForeignKeyField(rel_model, _deferred=True, **self.field_kwargs) if field.primary_key: # NOTE: this calls add_field() under-the-hood. self.model._meta.set_primary_key(self.name, field) else: self.model._meta.add_field(self.name, field) @staticmethod def resolve(model_cls): unresolved = sorted(DeferredForeignKey._unresolved, key=operator.attrgetter('_order')) for dr in unresolved: if dr.rel_model_name == model_cls.__name__.lower(): dr.set_model(model_cls) DeferredForeignKey._unresolved.discard(dr) class DeferredThroughModel(object): def __init__(self): self._refs = [] def set_field(self, model, field, name): self._refs.append((model, field, name)) def set_model(self, through_model): for src_model, m2mfield, name in self._refs: m2mfield.through_model = through_model src_model._meta.add_field(name, m2mfield) class MetaField(Field): column_name = default = model = name = None primary_key = False class ManyToManyFieldAccessor(FieldAccessor): def __init__(self, model, field, name): super(ManyToManyFieldAccessor, self).__init__(model, field, name) self.model = field.model self.rel_model = field.rel_model self.through_model = field.through_model src_fks = self.through_model._meta.model_refs[self.model] dest_fks = self.through_model._meta.model_refs[self.rel_model] if not src_fks: raise ValueError('Cannot find foreign-key to "%s" on "%s" model.' % (self.model, self.through_model)) elif not dest_fks: raise ValueError('Cannot find foreign-key to "%s" on "%s" model.'
% (self.rel_model, self.through_model)) self.src_fk = src_fks[0] self.dest_fk = dest_fks[0] def __get__(self, instance, instance_type=None, force_query=False): if instance is not None: if not force_query and self.src_fk.backref != '+': backref = getattr(instance, self.src_fk.backref) if isinstance(backref, list): return [getattr(obj, self.dest_fk.name) for obj in backref] src_id = getattr(instance, self.src_fk.rel_field.name) if src_id is None and self.field._prevent_unsaved: raise ValueError('Cannot get many-to-many "%s" for unsaved ' 'instance "%s".' % (self.field, instance)) return (ManyToManyQuery(instance, self, self.rel_model) .join(self.through_model) .join(self.model) .where(self.src_fk == src_id)) return self.field def __set__(self, instance, value): src_id = getattr(instance, self.src_fk.rel_field.name) if src_id is None and self.field._prevent_unsaved: raise ValueError('Cannot set many-to-many "%s" for unsaved ' 'instance "%s".' % (self.field, instance)) query = self.__get__(instance, force_query=True) query.add(value, clear_existing=True) class ManyToManyField(MetaField): accessor_class = ManyToManyFieldAccessor def __init__(self, model, backref=None, through_model=None, on_delete=None, on_update=None, prevent_unsaved=True, _is_backref=False): if through_model is not None: if not (isinstance(through_model, DeferredThroughModel) or is_model(through_model)): raise TypeError('Unexpected value for through_model. Expected ' 'Model or DeferredThroughModel.') if not _is_backref and (on_delete is not None or on_update is not None): raise ValueError('Cannot specify on_delete or on_update when ' 'through_model is specified.') self.rel_model = model self.backref = backref self._through_model = through_model self._on_delete = on_delete self._on_update = on_update self._prevent_unsaved = prevent_unsaved self._is_backref = _is_backref def _get_descriptor(self): return ManyToManyFieldAccessor(self) def bind(self, model, name, set_attribute=True): if isinstance(self._through_model, DeferredThroughModel): self._through_model.set_field(model, self, name) return super(ManyToManyField, self).bind(model, name, set_attribute) if not self._is_backref: many_to_many_field = ManyToManyField( self.model, backref=name, through_model=self.through_model, on_delete=self._on_delete, on_update=self._on_update, _is_backref=True) self.backref = self.backref or model._meta.name + 's' self.rel_model._meta.add_field(self.backref, many_to_many_field) def get_models(self): return [model for _, model in sorted(( (self._is_backref, self.model), (not self._is_backref, self.rel_model)))] @property def through_model(self): if self._through_model is None: self._through_model = self._create_through_model() return self._through_model @through_model.setter def through_model(self, value): self._through_model = value def _create_through_model(self): lhs, rhs = self.get_models() tables = [model._meta.table_name for model in (lhs, rhs)] class Meta: database = self.model._meta.database schema = self.model._meta.schema table_name = '%s_%s_through' % tuple(tables) indexes = ( ((lhs._meta.name, rhs._meta.name), True),) params = {'on_delete': self._on_delete, 'on_update': self._on_update} attrs = { lhs._meta.name: ForeignKeyField(lhs, **params), rhs._meta.name: ForeignKeyField(rhs, **params), 'Meta': Meta} klass_name = '%s%sThrough' % (lhs.__name__, rhs.__name__) return type(klass_name, (Model,), attrs) def get_through_model(self): # XXX: Deprecated. Just use the "through_model" property. 
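# Illustrative sketch (not part of this module): ManyToManyField with an
# auto-generated through model. "Student", "Course" and "db" are
# hypothetical names.
#
#     class Student(Model):
#         name = TextField()
#
#         class Meta:
#             database = db
#
#     class Course(Model):
#         name = TextField()
#         # _create_through_model() builds a "course_student_through" table
#         # with a ForeignKeyField to each side and a unique index on both.
#         students = ManyToManyField(Student, backref='courses')
#
#         class Meta:
#             database = db
#
#     db.create_tables([Student, Course,
#                       Course.students.get_through_model()])
#     huey, math = Student.create(name='huey'), Course.create(name='math')
#     huey.courses.add(math)     # ManyToManyQuery.add() inserts a row;
#     huey.courses.remove(math)  # remove() deletes it again.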
return self.through_model class VirtualField(MetaField): field_class = None def __init__(self, field_class=None, *args, **kwargs): Field = field_class if field_class is not None else self.field_class self.field_instance = Field() if Field is not None else None super(VirtualField, self).__init__(*args, **kwargs) def db_value(self, value): if self.field_instance is not None: return self.field_instance.db_value(value) return value def python_value(self, value): if self.field_instance is not None: return self.field_instance.python_value(value) return value def bind(self, model, name, set_attribute=True): self.model = model self.column_name = self.name = self.safe_name = name setattr(model, name, self.accessor_class(model, self, name)) class CompositeKey(MetaField): sequence = None def __init__(self, *field_names): self.field_names = field_names self._safe_field_names = None @property def safe_field_names(self): if self._safe_field_names is None: if self.model is None: return self.field_names self._safe_field_names = [self.model._meta.fields[f].safe_name for f in self.field_names] return self._safe_field_names def __get__(self, instance, instance_type=None): if instance is not None: return tuple([getattr(instance, f) for f in self.safe_field_names]) return self def __set__(self, instance, value): if not isinstance(value, (list, tuple)): raise TypeError('A list or tuple must be used to set the value of ' 'a composite primary key.') if len(value) != len(self.field_names): raise ValueError('The length of the value must equal the number ' 'of columns of the composite primary key.') for idx, field_value in enumerate(value): setattr(instance, self.field_names[idx], field_value) def __eq__(self, other): expressions = [(self.model._meta.fields[field] == value) for field, value in zip(self.field_names, other)] return reduce(operator.and_, expressions) def __ne__(self, other): return ~(self == other) def __hash__(self): return hash((self.model.__name__, self.field_names)) def __sql__(self, ctx): # If the composite PK is being selected, do not use parens. Elsewhere, # such as in an expression, we want to use parentheses and treat it as # a row value. 
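# Illustrative sketch (not part of this module): a composite primary key.
# "UserTag", "User" and "db" are hypothetical names.
#
#     class UserTag(Model):
#         user = ForeignKeyField(User)
#         tag = TextField()
#
#         class Meta:
#             database = db
#             primary_key = CompositeKey('user', 'tag')
#
#     ut = UserTag.get()
#     ut._pk   # -> (user, tag) tuple, produced by CompositeKey.__get__.
#
#     # CompositeKey.__eq__ expands to (user == x) AND (tag == y):
#     UserTag.get(UserTag._meta.primary_key == (user_id, 'news'))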
parens = ctx.scope != SCOPE_SOURCE return ctx.sql(NodeList([self.model._meta.fields[field] for field in self.field_names], ', ', parens)) def bind(self, model, name, set_attribute=True): self.model = model self.column_name = self.name = self.safe_name = name setattr(model, self.name, self) class _SortedFieldList(object): __slots__ = ('_keys', '_items') def __init__(self): self._keys = [] self._items = [] def __getitem__(self, i): return self._items[i] def __iter__(self): return iter(self._items) def __contains__(self, item): k = item._sort_key i = bisect_left(self._keys, k) j = bisect_right(self._keys, k) return item in self._items[i:j] def index(self, field): return self._keys.index(field._sort_key) def insert(self, item): k = item._sort_key i = bisect_left(self._keys, k) self._keys.insert(i, k) self._items.insert(i, item) def remove(self, item): idx = self.index(item) del self._items[idx] del self._keys[idx] # MODELS class SchemaManager(object): def __init__(self, model, database=None, **context_options): self.model = model self._database = database context_options.setdefault('scope', SCOPE_VALUES) self.context_options = context_options @property def database(self): db = self._database or self.model._meta.database if db is None: raise ImproperlyConfigured('database attribute does not appear to ' 'be set on the model: %s' % self.model) return db @database.setter def database(self, value): self._database = value def _create_context(self): return self.database.get_sql_context(**self.context_options) def _create_table(self, safe=True, **options): is_temp = options.pop('temporary', False) ctx = self._create_context() ctx.literal('CREATE TEMPORARY TABLE ' if is_temp else 'CREATE TABLE ') if safe: ctx.literal('IF NOT EXISTS ') ctx.sql(self.model).literal(' ') columns = [] constraints = [] meta = self.model._meta if meta.composite_key: pk_columns = [meta.fields[field_name].column for field_name in meta.primary_key.field_names] constraints.append(NodeList((SQL('PRIMARY KEY'), EnclosedNodeList(pk_columns)))) for field in meta.sorted_fields: columns.append(field.ddl(ctx)) if isinstance(field, ForeignKeyField) and not field.deferred: constraints.append(field.foreign_key_constraint()) if meta.constraints: constraints.extend(meta.constraints) constraints.extend(self._create_table_option_sql(options)) ctx.sql(EnclosedNodeList(columns + constraints)) if meta.table_settings is not None: table_settings = ensure_tuple(meta.table_settings) for setting in table_settings: if not isinstance(setting, basestring): raise ValueError('table_settings must be strings') ctx.literal(' ').literal(setting) extra_opts = [] if meta.strict_tables: extra_opts.append('STRICT') if meta.without_rowid: extra_opts.append('WITHOUT ROWID') if extra_opts: ctx.literal(' %s' % ', '.join(extra_opts)) return ctx def _create_table_option_sql(self, options): accum = [] options = merge_dict(self.model._meta.options or {}, options) if not options: return accum for key, value in sorted(options.items()): if not isinstance(value, Node): if is_model(value): value = value._meta.table else: value = SQL(str(value)) accum.append(NodeList((SQL(key), value), glue='=')) return accum def create_table(self, safe=True, **options): self.database.execute(self._create_table(safe=safe, **options)) def _create_table_as(self, table_name, query, safe=True, **meta): ctx = (self._create_context() .literal('CREATE TEMPORARY TABLE ' if meta.get('temporary') else 'CREATE TABLE ')) if safe: ctx.literal('IF NOT EXISTS ') return (ctx 
.sql(Entity(*ensure_tuple(table_name))) .literal(' AS ') .sql(query)) def create_table_as(self, table_name, query, safe=True, **meta): ctx = self._create_table_as(table_name, query, safe=safe, **meta) self.database.execute(ctx) def _drop_table(self, safe=True, **options): ctx = (self._create_context() .literal('DROP TABLE IF EXISTS ' if safe else 'DROP TABLE ') .sql(self.model)) if options.get('cascade'): ctx = ctx.literal(' CASCADE') elif options.get('restrict'): ctx = ctx.literal(' RESTRICT') return ctx def drop_table(self, safe=True, **options): self.database.execute(self._drop_table(safe=safe, **options)) def _truncate_table(self, restart_identity=False, cascade=False): db = self.database if not db.truncate_table: return (self._create_context() .literal('DELETE FROM ').sql(self.model)) ctx = self._create_context().literal('TRUNCATE TABLE ').sql(self.model) if restart_identity: ctx = ctx.literal(' RESTART IDENTITY') if cascade: ctx = ctx.literal(' CASCADE') return ctx def truncate_table(self, restart_identity=False, cascade=False): self.database.execute(self._truncate_table(restart_identity, cascade)) def _create_indexes(self, safe=True): return [self._create_index(index, safe) for index in self.model._meta.fields_to_index()] def _create_index(self, index, safe=True): if isinstance(index, Index): if not self.database.safe_create_index: index = index.safe(False) elif index._safe != safe: index = index.safe(safe) if isinstance(self._database, SqliteDatabase): # Ensure we do not use value placeholders with Sqlite, as they # are not supported. index = ValueLiterals(index) return self._create_context().sql(index) def create_indexes(self, safe=True): for query in self._create_indexes(safe=safe): self.database.execute(query) def _drop_indexes(self, safe=True): return [self._drop_index(index, safe) for index in self.model._meta.fields_to_index() if isinstance(index, Index)] def _drop_index(self, index, safe): statement = 'DROP INDEX ' if safe and self.database.safe_drop_index: statement += 'IF EXISTS ' if isinstance(index._table, Table) and index._table._schema: index_name = Entity(index._table._schema, index._name) else: index_name = Entity(index._name) return (self ._create_context() .literal(statement) .sql(index_name)) def drop_indexes(self, safe=True): for query in self._drop_indexes(safe=safe): self.database.execute(query) def _check_sequences(self, field): if not field.sequence or not self.database.sequences: raise ValueError('Sequences are either not supported, or are not ' 'defined for "%s".' 
% field.name) def _sequence_for_field(self, field): if field.model._meta.schema: return Entity(field.model._meta.schema, field.sequence) else: return Entity(field.sequence) def _create_sequence(self, field): self._check_sequences(field) if not self.database.sequence_exists(field.sequence): return (self ._create_context() .literal('CREATE SEQUENCE ') .sql(self._sequence_for_field(field))) def create_sequence(self, field): seq_ctx = self._create_sequence(field) if seq_ctx is not None: self.database.execute(seq_ctx) def _drop_sequence(self, field): self._check_sequences(field) if self.database.sequence_exists(field.sequence): return (self ._create_context() .literal('DROP SEQUENCE ') .sql(self._sequence_for_field(field))) def drop_sequence(self, field): seq_ctx = self._drop_sequence(field) if seq_ctx is not None: self.database.execute(seq_ctx) def _create_foreign_key(self, field): name = 'fk_%s_%s_refs_%s' % (field.model._meta.table_name, field.column_name, field.rel_model._meta.table_name) return (self ._create_context() .literal('ALTER TABLE ') .sql(field.model) .literal(' ADD CONSTRAINT ') .sql(Entity(_truncate_constraint_name(name))) .literal(' ') .sql(field.foreign_key_constraint())) def create_foreign_key(self, field): self.database.execute(self._create_foreign_key(field)) def create_sequences(self): if self.database.sequences: for field in self.model._meta.sorted_fields: if field.sequence: self.create_sequence(field) def create_all(self, safe=True, **table_options): self.create_sequences() self.create_table(safe, **table_options) self.create_indexes(safe=safe) def drop_sequences(self): if self.database.sequences: for field in self.model._meta.sorted_fields: if field.sequence: self.drop_sequence(field) def drop_all(self, safe=True, drop_sequences=True, **options): self.drop_table(safe, **options) if drop_sequences: self.drop_sequences() class Metadata(object): def __init__(self, model, database=None, table_name=None, indexes=None, primary_key=None, constraints=None, schema=None, only_save_dirty=False, depends_on=None, options=None, db_table=None, table_function=None, table_settings=None, without_rowid=False, temporary=False, strict_tables=None, legacy_table_names=True, **kwargs): if db_table is not None: __deprecated__('"db_table" has been deprecated in favor of ' '"table_name" for Models.') table_name = db_table self.model = model self.database = database self.fields = {} self.columns = {} self.combined = {} self._sorted_field_list = _SortedFieldList() self.sorted_fields = [] self.sorted_field_names = [] self.defaults = {} self._default_by_name = {} self._default_dict = {} self._default_callables = {} self._default_callable_list = [] self.name = model.__name__.lower() self.table_function = table_function self.legacy_table_names = legacy_table_names if not table_name: table_name = (self.table_function(model) if self.table_function else self.make_table_name()) self.table_name = table_name self._table = None self.indexes = list(indexes) if indexes else [] self.constraints = constraints self._schema = schema self.primary_key = primary_key self.composite_key = self.auto_increment = None self.only_save_dirty = only_save_dirty self.depends_on = depends_on self.table_settings = table_settings self.without_rowid = without_rowid self.strict_tables = strict_tables self.temporary = temporary self.refs = {} self.backrefs = {} self.model_refs = collections.defaultdict(list) self.model_backrefs = collections.defaultdict(list) self.manytomany = {} self.options = options or {} for key, value in 
kwargs.items(): setattr(self, key, value) self._additional_keys = set(kwargs.keys()) # Allow objects to register hooks that are called if the model is bound # to a different database. For example, BlobField uses a different # Python data-type depending on the db driver / python version. When # the database changes, we need to update any BlobField so they can use # the appropriate data-type. self._db_hooks = [] def make_table_name(self): if self.legacy_table_names: return re.sub(r'[^\w]+', '_', self.name) return make_snake_case(self.model.__name__) def model_graph(self, refs=True, backrefs=True, depth_first=True): if not refs and not backrefs: raise ValueError('One of `refs` or `backrefs` must be True.') accum = [(None, self.model, None)] seen = set() queue = collections.deque((self,)) method = queue.pop if depth_first else queue.popleft while queue: curr = method() if curr in seen: continue seen.add(curr) if refs: for fk, model in curr.refs.items(): accum.append((fk, model, False)) queue.append(model._meta) if backrefs: for fk, model in curr.backrefs.items(): accum.append((fk, model, True)) queue.append(model._meta) return accum def add_ref(self, field): rel = field.rel_model self.refs[field] = rel self.model_refs[rel].append(field) rel._meta.backrefs[field] = self.model rel._meta.model_backrefs[self.model].append(field) def remove_ref(self, field): rel = field.rel_model del self.refs[field] self.model_refs[rel].remove(field) del rel._meta.backrefs[field] rel._meta.model_backrefs[self.model].remove(field) def add_manytomany(self, field): self.manytomany[field.name] = field def remove_manytomany(self, field): del self.manytomany[field.name] @property def table(self): if self._table is None: self._table = Table( self.table_name, [field.column_name for field in self.sorted_fields], schema=self.schema, _model=self.model, _database=self.database) return self._table @table.setter def table(self, value): raise AttributeError('Cannot set the "table".') @table.deleter def table(self): self._table = None @property def schema(self): return self._schema @schema.setter def schema(self, value): self._schema = value del self.table @property def entity(self): if self._schema: return Entity(self._schema, self.table_name) else: return Entity(self.table_name) def _update_sorted_fields(self): self.sorted_fields = list(self._sorted_field_list) self.sorted_field_names = [f.name for f in self.sorted_fields] def get_rel_for_model(self, model): if isinstance(model, ModelAlias): model = model.model forwardrefs = self.model_refs.get(model, []) backrefs = self.model_backrefs.get(model, []) return (forwardrefs, backrefs) def add_field(self, field_name, field, set_attribute=True): if field_name in self.fields: self.remove_field(field_name) elif field_name in self.manytomany: self.remove_manytomany(self.manytomany[field_name]) if not isinstance(field, MetaField): del self.table field.bind(self.model, field_name, set_attribute) self.fields[field.name] = field self.columns[field.column_name] = field self.combined[field.name] = field self.combined[field.column_name] = field self._sorted_field_list.insert(field) self._update_sorted_fields() if field.default is not None: # This optimization helps speed up model instance construction. 
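# Illustrative sketch (not part of this module): scalar vs. callable field
# defaults, which the bookkeeping below keeps in separate structures so that
# instance construction stays fast. "Note" and "db" are hypothetical names.
#
#     class Note(Model):
#         status = IntegerField(default=0)                        # scalar
#         created = DateTimeField(default=datetime.datetime.now)  # callable
#
#         class Meta:
#             database = db
#
#     # get_default_dict() copies scalar defaults and *invokes* callables,
#     # so every new instance gets a fresh timestamp:
#     n = Note()
#     n.status   # 0
#     n.created  # result of datetime.datetime.now() at construction time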
self.defaults[field] = field.default if callable_(field.default): self._default_callables[field] = field.default self._default_callable_list.append((field.name, field.default)) else: self._default_dict[field] = field.default self._default_by_name[field.name] = field.default else: field.bind(self.model, field_name, set_attribute) if isinstance(field, ForeignKeyField): self.add_ref(field) elif isinstance(field, ManyToManyField) and field.name: self.add_manytomany(field) def remove_field(self, field_name): if field_name not in self.fields: return del self.table original = self.fields.pop(field_name) del self.columns[original.column_name] del self.combined[field_name] try: del self.combined[original.column_name] except KeyError: pass self._sorted_field_list.remove(original) self._update_sorted_fields() if original.default is not None: del self.defaults[original] if self._default_callables.pop(original, None): for i, (name, _) in enumerate(self._default_callable_list): if name == field_name: self._default_callable_list.pop(i) break else: self._default_dict.pop(original, None) self._default_by_name.pop(original.name, None) if isinstance(original, ForeignKeyField): self.remove_ref(original) def set_primary_key(self, name, field): self.composite_key = isinstance(field, CompositeKey) self.add_field(name, field) self.primary_key = field self.auto_increment = ( field.auto_increment or bool(field.sequence)) def get_primary_keys(self): if self.composite_key: return tuple([self.fields[field_name] for field_name in self.primary_key.field_names]) else: return (self.primary_key,) if self.primary_key is not False else () def get_default_dict(self): dd = self._default_by_name.copy() for field_name, default in self._default_callable_list: dd[field_name] = default() return dd def fields_to_index(self): indexes = [] for f in self.sorted_fields: if f.primary_key: continue if f.index or f.unique: indexes.append(ModelIndex(self.model, (f,), unique=f.unique, using=f.index_type)) for index_obj in self.indexes: if isinstance(index_obj, Node): indexes.append(index_obj) elif isinstance(index_obj, (list, tuple)): index_parts, unique = index_obj fields = [] for part in index_parts: if isinstance(part, basestring): fields.append(self.combined[part]) elif isinstance(part, Node): fields.append(part) else: raise ValueError('Expected either a field name or a ' 'subclass of Node. Got: %s' % part) indexes.append(ModelIndex(self.model, fields, unique=unique)) return indexes def set_database(self, database): self.database = database self.model._schema._database = database del self.table # Apply any hooks that have been registered. If we have an # uninitialized proxy object, we will treat that as `None`. 
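# Illustrative sketch (not part of this module): deferring the database
# choice with a Proxy, which is what the hook machinery here supports.
# Names below are hypothetical.
#
#     database_proxy = Proxy()
#
#     class BaseModel(Model):
#         class Meta:
#             database = database_proxy
#
#     # Later, once configuration is known:
#     db = SqliteDatabase('app.db')
#     database_proxy.initialize(db)
#     # Registered callbacks (e.g. BlobField._db_hook) now fire, letting
#     # fields adopt the driver-appropriate data types.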
if isinstance(database, Proxy) and database.obj is None: database = None for hook in self._db_hooks: hook(database) def set_table_name(self, table_name): self.table_name = table_name del self.table class SubclassAwareMetadata(Metadata): models = [] def __init__(self, model, *args, **kwargs): super(SubclassAwareMetadata, self).__init__(model, *args, **kwargs) self.models.append(model) def map_models(self, fn): for model in self.models: fn(model) class DoesNotExist(Exception): pass class ModelBase(type): inheritable = set(['constraints', 'database', 'indexes', 'primary_key', 'options', 'schema', 'table_function', 'temporary', 'only_save_dirty', 'legacy_table_names', 'table_settings', 'strict_tables']) def __new__(cls, name, bases, attrs, **kwargs): if name == MODEL_BASE or bases[0].__name__ == MODEL_BASE: return super(ModelBase, cls).__new__(cls, name, bases, attrs, **kwargs) meta_options = {} meta = attrs.pop('Meta', None) if meta: for k, v in meta.__dict__.items(): if not k.startswith('_'): meta_options[k] = v pk = getattr(meta, 'primary_key', None) pk_name = parent_pk = None # Inherit any field descriptors by deep copying the underlying field # into the attrs of the new model, additionally see if the bases define # inheritable model options and swipe them. for b in bases: if not hasattr(b, '_meta'): continue base_meta = b._meta if parent_pk is None: parent_pk = deepcopy(base_meta.primary_key) all_inheritable = cls.inheritable | base_meta._additional_keys for k in base_meta.__dict__: if k in all_inheritable and k not in meta_options: meta_options[k] = base_meta.__dict__[k] meta_options.setdefault('database', base_meta.database) meta_options.setdefault('schema', base_meta.schema) for (k, v) in b.__dict__.items(): if k in attrs: continue if isinstance(v, FieldAccessor) and not v.field.primary_key: attrs[k] = deepcopy(v.field) sopts = meta_options.pop('schema_options', None) or {} Meta = meta_options.get('model_metadata_class', Metadata) Schema = meta_options.get('schema_manager_class', SchemaManager) # Construct the new class. cls = super(ModelBase, cls).__new__(cls, name, bases, attrs, **kwargs) cls.__data__ = cls.__rel__ = None cls._meta = Meta(cls, **meta_options) cls._schema = Schema(cls, **sopts) fields = [] for key, value in cls.__dict__.items(): if isinstance(value, Field): if value.primary_key and pk: raise ValueError('over-determined primary key %s.' % name) elif value.primary_key: pk, pk_name = value, key else: fields.append((key, value)) if pk is None: if parent_pk is not False: pk, pk_name = ((parent_pk, parent_pk.name) if parent_pk is not None else (AutoField(), 'id')) else: pk = False elif isinstance(pk, CompositeKey): pk_name = '__composite_key__' cls._meta.composite_key = True if pk is not False: cls._meta.set_primary_key(pk_name, pk) for name, field in fields: cls._meta.add_field(name, field) # Create a repr and error class before finalizing. if hasattr(cls, '__str__') and '__repr__' not in attrs: setattr(cls, '__repr__', lambda self: '<%s: %s>' % ( cls.__name__, self.__str__())) exc_name = '%sDoesNotExist' % cls.__name__ exc_attrs = {'__module__': cls.__module__} exception_class = type(exc_name, (DoesNotExist,), exc_attrs) cls.DoesNotExist = exception_class # Call validation hook, allowing additional model validation. 
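# Illustrative sketch (not part of this module): Meta options named in
# ModelBase.inheritable flow from base classes to subclasses, enabling the
# common base-model pattern; validate_model() is the per-class hook invoked
# just below. "BaseModel", "Person" and "db" are hypothetical names.
#
#     class BaseModel(Model):
#         class Meta:
#             database = db              # Inherited by all subclasses.
#             legacy_table_names = False
#
#     class Person(BaseModel):           # Snake-case table name: "person".
#         name = TextField()
#
#         @classmethod
#         def validate_model(cls):
#             # Runs once at class-creation time; raise here to reject
#             # an invalid model definition.
#             assert 'name' in cls._meta.fields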
cls.validate_model() DeferredForeignKey.resolve(cls) return cls def __repr__(self): return '<Model: %s>' % self.__name__ def __iter__(self): return iter(self.select()) def __getitem__(self, key): return self.get_by_id(key) def __setitem__(self, key, value): self.set_by_id(key, value) def __delitem__(self, key): self.delete_by_id(key) def __contains__(self, key): try: self.get_by_id(key) except self.DoesNotExist: return False else: return True def __len__(self): return self.select().count() def __bool__(self): return True __nonzero__ = __bool__ # Python 2. def __sql__(self, ctx): return ctx.sql(self._meta.table) class _BoundModelsContext(object): def __init__(self, models, database, bind_refs, bind_backrefs): self.models = models self.database = database self.bind_refs = bind_refs self.bind_backrefs = bind_backrefs def __enter__(self): self._orig_database = [] for model in self.models: self._orig_database.append(model._meta.database) model.bind(self.database, self.bind_refs, self.bind_backrefs, _exclude=set(self.models)) return self.models def __exit__(self, exc_type, exc_val, exc_tb): for model, db in zip(self.models, self._orig_database): model.bind(db, self.bind_refs, self.bind_backrefs, _exclude=set(self.models)) class Model(with_metaclass(ModelBase, Node)): def __init__(self, *args, **kwargs): if kwargs.pop('__no_default__', None): self.__data__ = {} else: self.__data__ = self._meta.get_default_dict() self._dirty = set(self.__data__) self.__rel__ = {} for k in kwargs: setattr(self, k, kwargs[k]) def __str__(self): return str(self._pk) if self._meta.primary_key is not False else 'n/a' @classmethod def validate_model(cls): pass @classmethod def alias(cls, alias=None): return ModelAlias(cls, alias) @classmethod def select(cls, *fields): is_default = not fields if not fields: fields = cls._meta.sorted_fields return ModelSelect(cls, fields, is_default=is_default) @classmethod def _normalize_data(cls, data, kwargs): normalized = {} if data: if not isinstance(data, dict): if kwargs: raise ValueError('Data cannot be mixed with keyword ' 'arguments: %s' % data) return data for key in data: try: field = (key if isinstance(key, Field) else cls._meta.combined[key]) except KeyError: if not isinstance(key, Node): raise ValueError('Unrecognized field name: "%s" in %s.'
% (key, data)) field = key normalized[field] = data[key] if kwargs: for key in kwargs: try: normalized[cls._meta.combined[key]] = kwargs[key] except KeyError: normalized[getattr(cls, key)] = kwargs[key] return normalized @classmethod def update(cls, __data=None, **update): return ModelUpdate(cls, cls._normalize_data(__data, update)) @classmethod def insert(cls, __data=None, **insert): return ModelInsert(cls, cls._normalize_data(__data, insert)) @classmethod def insert_many(cls, rows, fields=None): return ModelInsert(cls, insert=rows, columns=fields) @classmethod def insert_from(cls, query, fields): columns = [getattr(cls, field) if isinstance(field, basestring) else field for field in fields] return ModelInsert(cls, insert=query, columns=columns) @classmethod def replace(cls, __data=None, **insert): return cls.insert(__data, **insert).on_conflict('REPLACE') @classmethod def replace_many(cls, rows, fields=None): return (cls .insert_many(rows=rows, fields=fields) .on_conflict('REPLACE')) @classmethod def raw(cls, sql, *params): return ModelRaw(cls, sql, params) @classmethod def delete(cls): return ModelDelete(cls) @classmethod def create(cls, **query): inst = cls(**query) inst.save(force_insert=True) return inst @classmethod def bulk_create(cls, model_list, batch_size=None): if batch_size is not None: batches = chunked(model_list, batch_size) else: batches = [model_list] field_names = list(cls._meta.sorted_field_names) if cls._meta.auto_increment: pk_name = cls._meta.primary_key.name field_names.remove(pk_name) if cls._meta.database.returning_clause and \ cls._meta.primary_key is not False: pk_fields = cls._meta.get_primary_keys() else: pk_fields = None fields = [cls._meta.fields[field_name] for field_name in field_names] attrs = [] for field in fields: if isinstance(field, ForeignKeyField): attrs.append(field.object_id_name) else: attrs.append(field.name) for batch in batches: accum = ([getattr(model, f) for f in attrs] for model in batch) res = cls.insert_many(accum, fields=fields).execute() if pk_fields and res is not None: for row, model in zip(res, batch): for (pk_field, obj_id) in zip(pk_fields, row): setattr(model, pk_field.name, obj_id) @classmethod def bulk_update(cls, model_list, fields, batch_size=None): if isinstance(cls._meta.primary_key, CompositeKey): raise ValueError('bulk_update() is not supported for models with ' 'a composite primary key.') # First normalize list of fields so all are field instances. fields = [cls._meta.fields[f] if isinstance(f, basestring) else f for f in fields] # Now collect list of attribute names to use for values. attrs = [field.object_id_name if isinstance(field, ForeignKeyField) else field.name for field in fields] if batch_size is not None: batches = chunked(model_list, batch_size) else: batches = [model_list] n = 0 pk = cls._meta.primary_key for batch in batches: id_list = [model._pk for model in batch] update = {} for field, attr in zip(fields, attrs): accum = [] for model in batch: value = getattr(model, attr) if not isinstance(value, Node): value = field.to_value(value) accum.append((pk.to_value(model._pk), value)) case = Case(pk, accum) update[field] = case n += (cls.update(update) .where(cls._meta.primary_key.in_(id_list)) .execute()) return n @classmethod def noop(cls): return NoopModelSelect(cls, ()) @classmethod def get(cls, *query, **filters): sq = cls.select() if query: # Handle simple lookup using just the primary key. 
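# Illustrative sketch (not part of this module): bulk_create() and
# bulk_update(), defined above. "User" and "db" are hypothetical names.
#
#     users = [User(username='u%d' % i) for i in range(100)]
#
#     # One multi-row INSERT per batch of 50; on databases with a RETURNING
#     # clause the new primary keys are copied back onto the instances.
#     with db.atomic():
#         User.bulk_create(users, batch_size=50)
#
#     for user in users:
#         user.username = user.username.title()
#
#     # Emits UPDATE ... SET username = CASE <pk> WHEN ... END
#     # WHERE id IN (...), one statement per batch.
#     with db.atomic():
#         User.bulk_update(users, fields=[User.username], batch_size=50)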
if len(query) == 1 and isinstance(query[0], int): sq = sq.where(cls._meta.primary_key == query[0]) else: sq = sq.where(*query) if filters: sq = sq.filter(**filters) return sq.get() @classmethod def get_or_none(cls, *query, **filters): try: return cls.get(*query, **filters) except DoesNotExist: pass @classmethod def get_by_id(cls, pk): return cls.get(cls._meta.primary_key == pk) @classmethod def set_by_id(cls, key, value): if key is None: return cls.insert(value).execute() else: return (cls.update(value) .where(cls._meta.primary_key == key).execute()) @classmethod def delete_by_id(cls, pk): return cls.delete().where(cls._meta.primary_key == pk).execute() @classmethod def get_or_create(cls, **kwargs): defaults = kwargs.pop('defaults', {}) query = cls.select() for field, value in kwargs.items(): query = query.where(getattr(cls, field) == value) try: return query.get(), False except cls.DoesNotExist: try: if defaults: kwargs.update(defaults) with cls._meta.database.atomic(): return cls.create(**kwargs), True except IntegrityError as exc: try: return query.get(), False except cls.DoesNotExist: raise exc @classmethod def filter(cls, *dq_nodes, **filters): return cls.select().filter(*dq_nodes, **filters) def get_id(self): # Using getattr(self, pk-name) could accidentally trigger a query if # the primary-key is a foreign-key. So we use the safe_name attribute, # which defaults to the field-name, but will be the object_id_name for # foreign-key fields. if self._meta.primary_key is not False: return getattr(self, self._meta.primary_key.safe_name) _pk = property(get_id) @_pk.setter def _pk(self, value): setattr(self, self._meta.primary_key.name, value) def _pk_expr(self): return self._meta.primary_key == self._pk def _prune_fields(self, field_dict, only): new_data = {} for field in only: if isinstance(field, basestring): field = self._meta.combined[field] if field.name in field_dict: new_data[field.name] = field_dict[field.name] return new_data def _populate_unsaved_relations(self, field_dict): for foreign_key_field in self._meta.refs: foreign_key = foreign_key_field.name conditions = ( foreign_key in field_dict and field_dict[foreign_key] is None and self.__rel__.get(foreign_key) is not None) if conditions: setattr(self, foreign_key, getattr(self, foreign_key)) field_dict[foreign_key] = self.__data__[foreign_key] def save(self, force_insert=False, only=None): field_dict = self.__data__.copy() if self._meta.primary_key is not False: pk_field = self._meta.primary_key pk_value = self._pk else: pk_field = pk_value = None if only is not None: field_dict = self._prune_fields(field_dict, only) elif self._meta.only_save_dirty and not force_insert: field_dict = self._prune_fields(field_dict, self.dirty_fields) if not field_dict: self._dirty.clear() return False self._populate_unsaved_relations(field_dict) rows = 1 if self._meta.auto_increment and pk_value is None: field_dict.pop(pk_field.name, None) if pk_value is not None and not force_insert: if self._meta.composite_key: for pk_part_name in pk_field.field_names: field_dict.pop(pk_part_name, None) else: field_dict.pop(pk_field.name, None) if not field_dict: raise ValueError('no data to save!') rows = self.update(**field_dict).where(self._pk_expr()).execute() elif pk_field is not None: pk = self.insert(**field_dict).execute() if pk is not None and (self._meta.auto_increment or pk_value is None): self._pk = pk # Although we set the primary-key, do not mark it as dirty. 
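# Illustrative sketch (not part of this module): how save() chooses between
# INSERT and UPDATE. "User" is a hypothetical model.
#
#     user = User(username='huey')
#     user.save()                   # No PK yet -> INSERT; the code above
#                                   # copies the new PK onto the instance.
#     user.username = 'mickey'
#     user.save()                   # PK present -> UPDATE ... WHERE id = ?
#     user.save(force_insert=True)  # Forces an INSERT even with a PK set.
#
#     # With Meta.only_save_dirty = True, only the fields in self._dirty are
#     # written, via _prune_fields(field_dict, self.dirty_fields).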
self._dirty.discard(pk_field.name) else: self.insert(**field_dict).execute() self._dirty -= set(field_dict) # Remove any fields we saved. return rows def is_dirty(self): return bool(self._dirty) @property def dirty_fields(self): return [f for f in self._meta.sorted_fields if f.name in self._dirty] def dependencies(self, search_nullable=True): model_class = type(self) stack = [(type(self), None)] queries = {} seen = set() while stack: klass, query = stack.pop() if klass in seen: continue seen.add(klass) for fk, rel_model in klass._meta.backrefs.items(): if rel_model is model_class or query is None: node = (fk == self.__data__[fk.rel_field.name]) else: node = fk << query subquery = (rel_model.select(rel_model._meta.primary_key) .where(node)) if not fk.null or search_nullable: queries.setdefault(rel_model, []).append((node, fk)) stack.append((rel_model, subquery)) for m in reversed(sort_models(seen)): for sq, q in queries.get(m, ()): yield sq, q def delete_instance(self, recursive=False, delete_nullable=False): if recursive: for query, fk in self.dependencies(): model = fk.model if fk.null and not delete_nullable: model.update(**{fk.name: None}).where(query).execute() else: model.delete().where(query).execute() return type(self).delete().where(self._pk_expr()).execute() def __hash__(self): return hash((self.__class__, self._pk)) def __eq__(self, other): return ( other.__class__ == self.__class__ and self._pk is not None and self._pk == other._pk) def __ne__(self, other): return not self == other def __sql__(self, ctx): # NOTE: when comparing a foreign-key field whose related-field is not a # primary-key, then doing an equality test for the foreign-key with a # model instance will return the wrong value; since we would return # the primary key for a given model instance. # # This checks to see if we have a converter in the scope, and that we # are converting a foreign-key expression. If so, we hand the model # instance to the converter rather than blindly grabbing the primary- # key. In the event the provided converter fails to handle the model # instance, then we will return the primary-key. 
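# Illustrative sketch (not part of this module): recursive deletes built on
# dependencies() above. Assume hypothetical models where Tweet.user is a
# ForeignKeyField(User, backref='tweets') and Favorite.tweet references
# Tweet.
#
#     user = User.get(User.username == 'huey')
#
#     # Walks the backref graph, deleting favorites-of-tweets, then tweets,
#     # then finally the user row itself:
#     user.delete_instance(recursive=True)
#
#     # With delete_nullable=False (the default), rows that reference the
#     # user through a *nullable* FK are updated to NULL rather than deleted.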
if ctx.state.converter is not None and ctx.state.is_fk_expr: try: return ctx.sql(Value(self, converter=ctx.state.converter)) except (TypeError, ValueError): pass return ctx.sql(Value(getattr(self, self._meta.primary_key.name), converter=self._meta.primary_key.db_value)) @classmethod def bind(cls, database, bind_refs=True, bind_backrefs=True, _exclude=None): is_different = cls._meta.database is not database cls._meta.set_database(database) if bind_refs or bind_backrefs: if _exclude is None: _exclude = set() G = cls._meta.model_graph(refs=bind_refs, backrefs=bind_backrefs) for _, model, is_backref in G: if model not in _exclude: model._meta.set_database(database) _exclude.add(model) return is_different @classmethod def bind_ctx(cls, database, bind_refs=True, bind_backrefs=True): return _BoundModelsContext((cls,), database, bind_refs, bind_backrefs) @classmethod def table_exists(cls): M = cls._meta return cls._schema.database.table_exists(M.table.__name__, M.schema) @classmethod def create_table(cls, safe=True, **options): if 'fail_silently' in options: __deprecated__('"fail_silently" has been deprecated in favor of ' '"safe" for the create_table() method.') safe = options.pop('fail_silently') if safe and not cls._schema.database.safe_create_index \ and cls.table_exists(): return if cls._meta.temporary: options.setdefault('temporary', cls._meta.temporary) cls._schema.create_all(safe, **options) @classmethod def drop_table(cls, safe=True, drop_sequences=True, **options): if safe and not cls._schema.database.safe_drop_index \ and not cls.table_exists(): return if cls._meta.temporary: options.setdefault('temporary', cls._meta.temporary) cls._schema.drop_all(safe, drop_sequences, **options) @classmethod def truncate_table(cls, **options): cls._schema.truncate_table(**options) @classmethod def index(cls, *fields, **kwargs): return ModelIndex(cls, fields, **kwargs) @classmethod def add_index(cls, *fields, **kwargs): if len(fields) == 1 and isinstance(fields[0], (SQL, Index)): cls._meta.indexes.append(fields[0]) else: cls._meta.indexes.append(ModelIndex(cls, fields, **kwargs)) class ModelAlias(Node): """Provide a separate reference to a model in a query.""" def __init__(self, model, alias=None): self.__dict__['model'] = model self.__dict__['alias'] = alias def __getattr__(self, attr): # Hack to work-around the fact that properties or other objects # implementing the descriptor protocol (on the model being aliased), # will not work correctly when we use getattr(). So we explicitly pass # the model alias to the descriptor's getter. for b in (self.model,) + self.model.__bases__: try: obj = b.__dict__[attr] if isinstance(obj, ModelDescriptor): return obj.__get__(None, self) except KeyError: continue model_attr = getattr(self.model, attr) if isinstance(model_attr, Field): self.__dict__[attr] = FieldAlias.create(self, model_attr) return self.__dict__[attr] return model_attr def __setattr__(self, attr, value): raise AttributeError('Cannot set attributes on model aliases.') def get_field_aliases(self): return [getattr(self, n) for n in self.model._meta.sorted_field_names] def select(self, *selection): if not selection: selection = self.get_field_aliases() return ModelSelect(self, selection) def __call__(self, **kwargs): return self.model(**kwargs) def __sql__(self, ctx): if ctx.scope == SCOPE_VALUES: # Return the quoted table name. return ctx.sql(self.model) if self.alias: ctx.alias_manager[self] = self.alias if ctx.scope == SCOPE_SOURCE: # Define the table and its alias. 
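# Illustrative sketch (not part of this module): ModelAlias enables
# self-joins. "Category" and "db" are hypothetical names.
#
#     class Category(Model):
#         name = TextField()
#         parent = ForeignKeyField('self', null=True, backref='children')
#
#         class Meta:
#             database = db
#
#     Parent = Category.alias()
#     q = (Category
#          .select(Category.name, Parent.name.alias('parent_name'))
#          .join(Parent, on=(Category.parent == Parent.id)))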
return (ctx .sql(self.model._meta.entity) .literal(' AS ') .sql(Entity(ctx.alias_manager[self]))) else: # Refer to the table using the alias. return ctx.sql(Entity(ctx.alias_manager[self])) class FieldAlias(Field): def __init__(self, source, field): self.source = source self.model = source.model self.field = field @classmethod def create(cls, source, field): class _FieldAlias(cls, type(field)): pass return _FieldAlias(source, field) def clone(self): return FieldAlias(self.source, self.field) def adapt(self, value): return self.field.adapt(value) def python_value(self, value): return self.field.python_value(value) def db_value(self, value): return self.field.db_value(value) def __getattr__(self, attr): return self.source if attr == 'model' else getattr(self.field, attr) def __sql__(self, ctx): return ctx.sql(Column(self.source, self.field.column_name)) def sort_models(models): models = set(models) seen = set() ordering = [] def dfs(model): if model in models and model not in seen: seen.add(model) for foreign_key, rel_model in model._meta.refs.items(): # Do not depth-first search deferred foreign-keys as this can # cause tables to be created in the incorrect order. if not foreign_key.deferred: dfs(rel_model) if model._meta.depends_on: for dependency in model._meta.depends_on: dfs(dependency) ordering.append(model) names = lambda m: (m._meta.name, m._meta.table_name) for m in sorted(models, key=names): dfs(m) return ordering class _ModelQueryHelper(object): default_row_type = ROW.MODEL def __init__(self, *args, **kwargs): super(_ModelQueryHelper, self).__init__(*args, **kwargs) if not self._database: self._database = self.model._meta.database @Node.copy def objects(self, constructor=None): self._row_type = ROW.CONSTRUCTOR self._constructor = self.model if constructor is None else constructor def _get_cursor_wrapper(self, cursor): row_type = self._row_type or self.default_row_type if row_type == ROW.MODEL: return self._get_model_cursor_wrapper(cursor) elif row_type == ROW.DICT: return ModelDictCursorWrapper(cursor, self.model, self._returning) elif row_type == ROW.TUPLE: return ModelTupleCursorWrapper(cursor, self.model, self._returning) elif row_type == ROW.NAMED_TUPLE: return ModelNamedTupleCursorWrapper(cursor, self.model, self._returning) elif row_type == ROW.CONSTRUCTOR: return ModelObjectCursorWrapper(cursor, self.model, self._returning, self._constructor) else: raise ValueError('Unrecognized row type: "%s".' 
% row_type) def _get_model_cursor_wrapper(self, cursor): return ModelObjectCursorWrapper(cursor, self.model, [], self.model) class ModelRaw(_ModelQueryHelper, RawQuery): def __init__(self, model, sql, params, **kwargs): self.model = model self._returning = () super(ModelRaw, self).__init__(sql=sql, params=params, **kwargs) def get(self): try: return self.execute()[0] except IndexError: sql, params = self.sql() raise self.model.DoesNotExist('%s instance matching query does ' 'not exist:\nSQL: %s\nParams: %s' % (self.model, sql, params)) class BaseModelSelect(_ModelQueryHelper): def union_all(self, rhs): return ModelCompoundSelectQuery(self.model, self, 'UNION ALL', rhs) __add__ = union_all def union(self, rhs): return ModelCompoundSelectQuery(self.model, self, 'UNION', rhs) __or__ = union def intersect(self, rhs): return ModelCompoundSelectQuery(self.model, self, 'INTERSECT', rhs) __and__ = intersect def except_(self, rhs): return ModelCompoundSelectQuery(self.model, self, 'EXCEPT', rhs) __sub__ = except_ def __iter__(self): if not self._cursor_wrapper: self.execute() return iter(self._cursor_wrapper) def prefetch(self, *subqueries, **kwargs): return prefetch(self, *subqueries, **kwargs) def get(self, database=None): clone = self.paginate(1, 1) clone._cursor_wrapper = None try: return clone.execute(database)[0] except IndexError: sql, params = clone.sql() raise self.model.DoesNotExist('%s instance matching query does ' 'not exist:\nSQL: %s\nParams: %s' % (clone.model, sql, params)) def get_or_none(self, database=None): try: return self.get(database=database) except self.model.DoesNotExist: pass @Node.copy def group_by(self, *columns): grouping = [] for column in columns: if is_model(column): grouping.extend(column._meta.sorted_fields) elif isinstance(column, Table): if not column._columns: raise ValueError('Cannot pass a table to group_by() that ' 'does not have columns explicitly ' 'declared.') grouping.extend([getattr(column, col_name) for col_name in column._columns]) else: grouping.append(column) self._group_by = grouping class ModelCompoundSelectQuery(BaseModelSelect, CompoundSelectQuery): def __init__(self, model, *args, **kwargs): self.model = model super(ModelCompoundSelectQuery, self).__init__(*args, **kwargs) def _get_model_cursor_wrapper(self, cursor): return self.lhs._get_model_cursor_wrapper(cursor) def _normalize_model_select(fields_or_models): fields = [] for fm in fields_or_models: if is_model(fm): fields.extend(fm._meta.sorted_fields) elif isinstance(fm, ModelAlias): fields.extend(fm.get_field_aliases()) elif isinstance(fm, Table) and fm._columns: fields.extend([getattr(fm, col) for col in fm._columns]) else: fields.append(fm) return fields class ModelSelect(BaseModelSelect, Select): def __init__(self, model, fields_or_models, is_default=False): self.model = self._join_ctx = model self._joins = {} self._is_default = is_default fields = _normalize_model_select(fields_or_models) super(ModelSelect, self).__init__([model], fields) def clone(self): clone = super(ModelSelect, self).clone() if clone._joins: clone._joins = dict(clone._joins) return clone def select(self, *fields_or_models): if fields_or_models or not self._is_default: self._is_default = False fields = _normalize_model_select(fields_or_models) return super(ModelSelect, self).select(*fields) return self def select_extend(self, *columns): self._is_default = False fields = _normalize_model_select(columns) return super(ModelSelect, self).select_extend(*fields) def switch(self, ctx=None): self._join_ctx = self.model if 
ctx is None else ctx return self def _get_model(self, src): if is_model(src): return src, True elif isinstance(src, Table) and src._model: return src._model, False elif isinstance(src, ModelAlias): return src.model, False elif isinstance(src, ModelSelect): return src.model, False return None, False def _normalize_join(self, src, dest, on, attr): # Allow "on" expression to have an alias that determines the # destination attribute for the joined data. on_alias = isinstance(on, Alias) if on_alias: attr = attr or on._alias on = on.alias() # Obtain references to the source and destination models being joined. src_model, src_is_model = self._get_model(src) dest_model, dest_is_model = self._get_model(dest) if src_model and dest_model: self._join_ctx = dest constructor = dest_model # In the case where the "on" clause is a Column or Field, we will # convert that field into the appropriate predicate expression. if not (src_is_model and dest_is_model) and isinstance(on, Column): if on.source is src: to_field = src_model._meta.columns[on.name] elif on.source is dest: to_field = dest_model._meta.columns[on.name] else: raise AttributeError('"on" clause Column %s does not ' 'belong to %s or %s.' % (on, src_model, dest_model)) on = None elif isinstance(on, Field): to_field = on on = None else: to_field = None fk_field, is_backref = self._generate_on_clause( src_model, dest_model, to_field, on) if on is None: src_attr = 'name' if src_is_model else 'column_name' dest_attr = 'name' if dest_is_model else 'column_name' if is_backref: lhs = getattr(dest, getattr(fk_field, dest_attr)) rhs = getattr(src, getattr(fk_field.rel_field, src_attr)) else: lhs = getattr(src, getattr(fk_field, src_attr)) rhs = getattr(dest, getattr(fk_field.rel_field, dest_attr)) on = (lhs == rhs) if not attr: if fk_field is not None and not is_backref: attr = fk_field.name else: attr = dest_model._meta.name elif on_alias and fk_field is not None and \ attr == fk_field.object_id_name and not is_backref: raise ValueError('Cannot assign join alias to "%s", as this ' 'attribute is the object_id_name for the ' 'foreign-key field "%s"' % (attr, fk_field)) elif isinstance(dest, Source): constructor = dict attr = attr or dest._alias if not attr and isinstance(dest, Table): attr = attr or dest.__name__ return (on, attr, constructor) def _generate_on_clause(self, src, dest, to_field=None, on=None): meta = src._meta is_backref = fk_fields = False # Get all the foreign keys between source and dest, and determine if # the join is via a back-reference. if dest in meta.model_refs: fk_fields = meta.model_refs[dest] elif dest in meta.model_backrefs: fk_fields = meta.model_backrefs[dest] is_backref = True if not fk_fields: if on is not None: return None, False raise ValueError('Unable to find foreign key between %s and %s. ' 'Please specify an explicit join condition.' % (src, dest)) elif to_field is not None: # If the foreign-key field was specified explicitly, remove all # other foreign-key fields from the list. target = (to_field.field if isinstance(to_field, FieldAlias) else to_field) fk_fields = [f for f in fk_fields if ( (f is target) or (is_backref and f.rel_field is to_field))] if len(fk_fields) == 1: return fk_fields[0], is_backref if on is None: # If multiple foreign-keys exist, try using the FK whose name # matches that of the related model. If not, raise an error as this # is ambiguous. for fk in fk_fields: if fk.name == dest._meta.name: return fk, is_backref raise ValueError('More than one foreign key between %s and %s.' 
' Please specify which you are joining on.' % (src, dest)) # If there are multiple foreign-keys to choose from and the join # predicate is an expression, we'll try to figure out which # foreign-key field we're joining on so that we can assign to the # correct attribute when resolving the model graph. to_field = None if isinstance(on, Expression): lhs, rhs = on.lhs, on.rhs # Coerce to set() so that we force Python to compare using the # object's hash rather than equality test, which returns a # false-positive due to overriding __eq__. fk_set = set(fk_fields) if isinstance(lhs, Field): lhs_f = lhs.field if isinstance(lhs, FieldAlias) else lhs if lhs_f in fk_set: to_field = lhs_f elif isinstance(rhs, Field): rhs_f = rhs.field if isinstance(rhs, FieldAlias) else rhs if rhs_f in fk_set: to_field = rhs_f return to_field, False @Node.copy def join(self, dest, join_type=JOIN.INNER, on=None, src=None, attr=None): src = self._join_ctx if src is None else src if join_type == JOIN.LATERAL or join_type == JOIN.LEFT_LATERAL: on = True elif join_type != JOIN.CROSS: on, attr, constructor = self._normalize_join(src, dest, on, attr) if attr: self._joins.setdefault(src, []) self._joins[src].append((dest, attr, constructor, join_type)) elif on is not None: raise ValueError('Cannot specify on clause with cross join.') if not self._from_list: raise ValueError('No sources to join on.') item = self._from_list.pop() self._from_list.append(Join(item, dest, join_type, on)) def left_outer_join(self, dest, on=None, src=None, attr=None): return self.join(dest, JOIN.LEFT_OUTER, on, src, attr) def join_from(self, src, dest, join_type=JOIN.INNER, on=None, attr=None): return self.join(dest, join_type, on, src, attr) def _get_model_cursor_wrapper(self, cursor): if len(self._from_list) == 1 and not self._joins: return ModelObjectCursorWrapper(cursor, self.model, self._returning, self.model) return ModelCursorWrapper(cursor, self.model, self._returning, self._from_list, self._joins) def ensure_join(self, lm, rm, on=None, **join_kwargs): join_ctx = self._join_ctx for dest, _, constructor, _ in self._joins.get(lm, []): if dest == rm: return self return self.switch(lm).join(rm, on=on, **join_kwargs).switch(join_ctx) def convert_dict_to_node(self, qdict): accum = [] joins = [] fks = (ForeignKeyField, BackrefAccessor) for key, value in sorted(qdict.items()): curr = self.model if '__' in key and key.rsplit('__', 1)[1] in DJANGO_MAP: key, op = key.rsplit('__', 1) op = DJANGO_MAP[op] elif value is None: op = DJANGO_MAP['is'] else: op = DJANGO_MAP['eq'] if '__' not in key: # Handle simplest case. This avoids joining over-eagerly when a # direct FK lookup is all that is required. 
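# Sketch (hypothetical models): Tweet.filter(user=user_obj) takes this
# branch and compares the foreign-key column directly, whereas
# Tweet.filter(user__username='alice') falls through to the loop below,
# resolving one '__'-separated piece at a time against the join graph.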
model_attr = getattr(curr, key) else: for piece in key.split('__'): for dest, attr, _, _ in self._joins.get(curr, ()): try: model_attr = getattr(curr, piece, None) except: pass if attr == piece or (isinstance(dest, ModelAlias) and dest.alias == piece): curr = dest break else: model_attr = getattr(curr, piece) if value is not None and isinstance(model_attr, fks): curr = model_attr.rel_model joins.append(model_attr) accum.append(op(model_attr, value)) return accum, joins def filter(self, *args, **kwargs): # normalize args and kwargs into a new expression if args and kwargs: dq_node = (reduce(operator.and_, [a.clone() for a in args]) & DQ(**kwargs)) elif args: dq_node = (reduce(operator.and_, [a.clone() for a in args]) & ColumnBase()) elif kwargs: dq_node = DQ(**kwargs) & ColumnBase() else: return self.clone() # dq_node should now be an Expression, lhs = Node(), rhs = ... q = collections.deque([dq_node]) dq_joins = [] seen_joins = set() while q: curr = q.popleft() if not isinstance(curr, Expression): continue for side, piece in (('lhs', curr.lhs), ('rhs', curr.rhs)): if isinstance(piece, DQ): query, joins = self.convert_dict_to_node(piece.query) for join in joins: if join not in seen_joins: dq_joins.append(join) seen_joins.add(join) expression = reduce(operator.and_, query) # Apply values from the DQ object. if piece._negated: expression = Negated(expression) #expression._alias = piece._alias setattr(curr, side, expression) else: q.append(piece) if not args or not kwargs: dq_node = dq_node.lhs query = self.clone() for field in dq_joins: if isinstance(field, ForeignKeyField): lm, rm = field.model, field.rel_model field_obj = field elif isinstance(field, BackrefAccessor): lm, rm = field.model, field.rel_model field_obj = field.field query = query.ensure_join(lm, rm, field_obj) return query.where(dq_node) def create_table(self, name, safe=True, **meta): return self.model._schema.create_table_as(name, self, safe, **meta) def __sql_selection__(self, ctx, is_subquery=False): if self._is_default and is_subquery and len(self._returning) > 1 and \ self.model._meta.primary_key is not False: return ctx.sql(self.model._meta.primary_key) return ctx.sql(CommaNodeList(self._returning)) class NoopModelSelect(ModelSelect): def __sql__(self, ctx): return self.model._meta.database.get_noop_select(ctx) def _get_cursor_wrapper(self, cursor): return CursorWrapper(cursor) class _ModelWriteQueryHelper(_ModelQueryHelper): def __init__(self, model, *args, **kwargs): self.model = model super(_ModelWriteQueryHelper, self).__init__(model, *args, **kwargs) def returning(self, *returning): accum = [] for item in returning: if is_model(item): accum.extend(item._meta.sorted_fields) else: accum.append(item) return super(_ModelWriteQueryHelper, self).returning(*accum) def _set_table_alias(self, ctx): table = self.model._meta.table ctx.alias_manager[table] = table.__name__ class ModelUpdate(_ModelWriteQueryHelper, Update): pass class ModelInsert(_ModelWriteQueryHelper, Insert): default_row_type = ROW.TUPLE def __init__(self, *args, **kwargs): super(ModelInsert, self).__init__(*args, **kwargs) if self._returning is None and self.model._meta.database is not None: if self.model._meta.database.returning_clause: self._returning = self.model._meta.get_primary_keys() def returning(self, *returning): # By default ModelInsert will yield a `tuple` containing the # primary-key of the newly inserted row. 
But if we are explicitly # specifying a returning clause and have not set a row type, we will # default to returning model instances instead. if returning and self._row_type is None: self._row_type = ROW.MODEL return super(ModelInsert, self).returning(*returning) def get_default_data(self): return self.model._meta.defaults def get_default_columns(self): fields = self.model._meta.sorted_fields return fields[1:] if self.model._meta.auto_increment else fields class ModelDelete(_ModelWriteQueryHelper, Delete): pass class ManyToManyQuery(ModelSelect): def __init__(self, instance, accessor, rel, *args, **kwargs): self._instance = instance self._accessor = accessor self._src_attr = accessor.src_fk.rel_field.name self._dest_attr = accessor.dest_fk.rel_field.name super(ManyToManyQuery, self).__init__(rel, (rel,), *args, **kwargs) def _id_list(self, model_or_id_list): if isinstance(model_or_id_list[0], Model): return [getattr(obj, self._dest_attr) for obj in model_or_id_list] return model_or_id_list def add(self, value, clear_existing=False): if clear_existing: self.clear() accessor = self._accessor src_id = getattr(self._instance, self._src_attr) if isinstance(value, SelectQuery): query = value.columns( Value(src_id), accessor.dest_fk.rel_field) accessor.through_model.insert_from( fields=[accessor.src_fk, accessor.dest_fk], query=query).execute() else: value = ensure_tuple(value) if not value: return inserts = [{ accessor.src_fk.name: src_id, accessor.dest_fk.name: rel_id} for rel_id in self._id_list(value)] accessor.through_model.insert_many(inserts).execute() def remove(self, value): src_id = getattr(self._instance, self._src_attr) if isinstance(value, SelectQuery): column = getattr(value.model, self._dest_attr) subquery = value.columns(column) return (self._accessor.through_model .delete() .where( (self._accessor.dest_fk << subquery) & (self._accessor.src_fk == src_id)) .execute()) else: value = ensure_tuple(value) if not value: return return (self._accessor.through_model .delete() .where( (self._accessor.dest_fk << self._id_list(value)) & (self._accessor.src_fk == src_id)) .execute()) def clear(self): src_id = getattr(self._instance, self._src_attr) return (self._accessor.through_model .delete() .where(self._accessor.src_fk == src_id) .execute()) def safe_python_value(conv_func): def validate(value): try: return conv_func(value) except (TypeError, ValueError): return value return validate class BaseModelCursorWrapper(DictCursorWrapper): def __init__(self, cursor, model, columns): super(BaseModelCursorWrapper, self).__init__(cursor) self.model = model self.select = columns or [] def _initialize_columns(self): combined = self.model._meta.combined table = self.model._meta.table description = self.cursor.description self.ncols = len(self.cursor.description) self.columns = [] self.converters = converters = [None] * self.ncols self.fields = fields = [None] * self.ncols for idx, description_item in enumerate(description): column = orig_column = description_item[0] # Try to clean-up messy column descriptions when people do not # provide an alias. The idea is that we take something like: # SUM("t1"."price") -> "price") -> price dot_index = column.rfind('.') if dot_index != -1: column = column[dot_index + 1:] column = column.strip('()"`') self.columns.append(column) # Now we'll see what they selected and see if we can improve the # column-name being returned - e.g. by mapping it to the selected # field's name. 
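# e.g. (illustrative, using a hypothetical Tweet.price field): selecting
# fn.SUM(Tweet.price) without an explicit .alias() may be described by the
# cursor as 'SUM("t1"."price")'; the clean-up above reduces that to
# 'price', and the heuristics below can then attach the matching field's
# converter.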
try: raw_node = self.select[idx] except IndexError: if column in combined: raw_node = node = combined[column] else: continue else: node = raw_node.unwrap() # If this column was given an alias, then we will use whatever # alias was returned by the cursor. is_alias = raw_node.is_alias() if is_alias: self.columns[idx] = orig_column # Heuristics used to attempt to get the field associated with a # given SELECT column, so that we can accurately convert the value # returned by the database-cursor into a Python object. if isinstance(node, Field): if raw_node._coerce: converters[idx] = node.python_value fields[idx] = node if not is_alias: self.columns[idx] = node.name elif isinstance(node, ColumnBase) and raw_node._converter: converters[idx] = raw_node._converter elif isinstance(node, Function) and node._coerce: if node._python_value is not None: converters[idx] = node._python_value elif node.arguments and isinstance(node.arguments[0], Node): # If the first argument is a field or references a column # on a Model, try using that field's conversion function. # This usually works, but we use "safe_python_value()" so # that if a TypeError or ValueError occurs during # conversion we can just fall-back to the raw cursor value. first = node.arguments[0].unwrap() if isinstance(first, Entity): path = first._path[-1] # Try to look-up by name. first = combined.get(path) if isinstance(first, Field): converters[idx] = safe_python_value(first.python_value) elif column in combined: if node._coerce: converters[idx] = combined[column].python_value if isinstance(node, Column) and node.source == table: fields[idx] = combined[column] initialize = _initialize_columns def process_row(self, row): raise NotImplementedError class ModelDictCursorWrapper(BaseModelCursorWrapper): def process_row(self, row): result = {} columns, converters = self.columns, self.converters fields = self.fields for i in range(self.ncols): attr = columns[i] if attr in result: continue # Don't overwrite if we have dupes. if converters[i] is not None: result[attr] = converters[i](row[i]) else: result[attr] = row[i] return result class ModelTupleCursorWrapper(ModelDictCursorWrapper): constructor = tuple def process_row(self, row): columns, converters = self.columns, self.converters return self.constructor([ (converters[i](row[i]) if converters[i] is not None else row[i]) for i in range(self.ncols)]) class ModelNamedTupleCursorWrapper(ModelTupleCursorWrapper): def initialize(self): self._initialize_columns() attributes = [] for i in range(self.ncols): attributes.append(self.columns[i]) self.tuple_class = collections.namedtuple('Row', attributes) self.constructor = lambda row: self.tuple_class(*row) class ModelObjectCursorWrapper(ModelDictCursorWrapper): def __init__(self, cursor, model, select, constructor): self.constructor = constructor self.is_model = is_model(constructor) super(ModelObjectCursorWrapper, self).__init__(cursor, model, select) def process_row(self, row): data = super(ModelObjectCursorWrapper, self).process_row(row) if self.is_model: # Clear out any dirty fields before returning to the user. 
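# (Usage sketch, with hypothetical Tweet/User models: this wrapper backs
# query.objects(), so Tweet.select(Tweet, User).join(User).objects()
# yields Tweet instances with the joined User columns flattened onto them
# as plain attributes.)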
obj = self.constructor(__no_default__=1, **data) obj._dirty.clear() return obj else: return self.constructor(**data) class ModelCursorWrapper(BaseModelCursorWrapper): def __init__(self, cursor, model, select, from_list, joins): super(ModelCursorWrapper, self).__init__(cursor, model, select) self.from_list = from_list self.joins = joins def initialize(self): self._initialize_columns() selected_src = set([field.model for field in self.fields if field is not None]) select, columns = self.select, self.columns self.key_to_constructor = {self.model: self.model} self.src_is_dest = {} self.src_to_dest = [] accum = collections.deque(self.from_list) dests = set() while accum: curr = accum.popleft() if isinstance(curr, Join): accum.append(curr.lhs) accum.append(curr.rhs) continue if curr not in self.joins: continue is_dict = isinstance(curr, dict) for key, attr, constructor, join_type in self.joins[curr]: if key not in self.key_to_constructor: self.key_to_constructor[key] = constructor # (src, attr, dest, is_dict, join_type). self.src_to_dest.append((curr, attr, key, is_dict, join_type)) dests.add(key) accum.append(key) # Ensure that we accommodate everything selected. for src in selected_src: if src not in self.key_to_constructor: if is_model(src): self.key_to_constructor[src] = src elif isinstance(src, ModelAlias): self.key_to_constructor[src] = src.model # Indicate which sources are also dests. for src, _, dest, _, _ in self.src_to_dest: self.src_is_dest[src] = src in dests and (dest in selected_src or src in selected_src) self.column_keys = [] for idx, node in enumerate(select): key = self.model field = self.fields[idx] if field is not None: if isinstance(field, FieldAlias): key = field.source else: key = field.model elif isinstance(node, BindTo): if node.dest not in self.key_to_constructor: raise ValueError('%s specifies bind-to %s, but %s is not ' 'among the selected sources.' % (node.unwrap(), node.dest, node.dest)) key = node.dest else: if isinstance(node, Node): node = node.unwrap() if isinstance(node, Column): key = node.source self.column_keys.append(key) def process_row(self, row): objects = {} object_list = [] for key, constructor in self.key_to_constructor.items(): objects[key] = constructor(__no_default__=True) object_list.append(objects[key]) default_instance = objects[self.model] set_keys = set() for idx, key in enumerate(self.column_keys): # Get the instance corresponding to the selected column/value, # falling back to the "root" model instance. instance = objects.get(key, default_instance) column = self.columns[idx] value = row[idx] if value is not None: set_keys.add(key) if self.converters[idx]: value = self.converters[idx](value) if isinstance(instance, dict): instance[column] = value else: setattr(instance, column, value) # Need to do some analysis on the joins before this. for (src, attr, dest, is_dict, join_type) in self.src_to_dest: instance = objects[src] try: joined_instance = objects[dest] except KeyError: continue # If no fields were set on the destination instance then do not # assign an "empty" instance. if instance is None or dest is None or \ (dest not in set_keys and not self.src_is_dest.get(dest)): continue # If no fields were set on either the source or the destination, # then we have nothing to do here. if instance not in set_keys and dest not in set_keys \ and join_type.endswith('OUTER JOIN'): continue if is_dict: instance[attr] = joined_instance else: setattr(instance, attr, joined_instance) # When instantiating models from a cursor, we clear the dirty fields. 
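# (e.g., sketch with hypothetical models: for
# Tweet.select(Tweet, User).join(User), each row processed above yields a
# Tweet whose "user" attribute is a populated User instance.)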
for instance in object_list: if isinstance(instance, Model): instance._dirty.clear() return objects[self.model] class PrefetchQuery(collections.namedtuple('_PrefetchQuery', ( 'query', 'fields', 'is_backref', 'rel_models', 'field_to_name', 'model'))): def __new__(cls, query, fields=None, is_backref=None, rel_models=None, field_to_name=None, model=None): if fields: if is_backref: if rel_models is None: rel_models = [field.model for field in fields] foreign_key_attrs = [field.rel_field.name for field in fields] else: if rel_models is None: rel_models = [field.rel_model for field in fields] foreign_key_attrs = [field.name for field in fields] field_to_name = list(zip(fields, foreign_key_attrs)) model = query.model return super(PrefetchQuery, cls).__new__( cls, query, fields, is_backref, rel_models, field_to_name, model) def populate_instance(self, instance, id_map): if self.is_backref: for field in self.fields: identifier = instance.__data__[field.name] key = (field, identifier) if key in id_map: setattr(instance, field.name, id_map[key]) else: for field, attname in self.field_to_name: identifier = instance.__data__[field.rel_field.name] key = (field, identifier) rel_instances = id_map.get(key, []) for inst in rel_instances: setattr(inst, attname, instance) inst._dirty.clear() setattr(instance, field.backref, rel_instances) def store_instance(self, instance, id_map): for field, attname in self.field_to_name: identity = field.rel_field.python_value(instance.__data__[attname]) key = (field, identity) if self.is_backref: id_map[key] = instance else: id_map.setdefault(key, []) id_map[key].append(instance) def prefetch_add_subquery(sq, subqueries, prefetch_type): fixed_queries = [PrefetchQuery(sq)] for i, subquery in enumerate(subqueries): if isinstance(subquery, tuple): subquery, target_model = subquery else: target_model = None if not isinstance(subquery, Query) and is_model(subquery) or \ isinstance(subquery, ModelAlias): subquery = subquery.select() subquery_model = subquery.model for j in reversed(range(i + 1)): fks = backrefs = None fixed = fixed_queries[j] last_query = fixed.query last_model = last_obj = fixed.model if isinstance(last_model, ModelAlias): last_model = last_model.model rels = subquery_model._meta.model_refs.get(last_model, []) if rels: fks = [getattr(subquery_model, fk.name) for fk in rels] pks = [getattr(last_obj, fk.rel_field.name) for fk in rels] else: backrefs = subquery_model._meta.model_backrefs.get(last_model) if (fks or backrefs) and ((target_model is last_obj) or (target_model is None)): break else: tgt_err = ' using %s' % target_model if target_model else '' raise AttributeError('Error: unable to find foreign key for ' 'query: %s%s' % (subquery, tgt_err)) dest = (target_model,) if target_model else None if fks: if prefetch_type == PREFETCH_TYPE.WHERE: expr = reduce(operator.or_, [ (fk << last_query.select(pk)) for (fk, pk) in zip(fks, pks)]) subquery = subquery.where(expr) elif prefetch_type == PREFETCH_TYPE.JOIN: expr = [] select_pks = set() for fk, pk in zip(fks, pks): expr.append(getattr(last_query.c, pk.column_name) == fk) select_pks.add(pk) subquery = subquery.distinct().join( last_query.select(*select_pks), on=reduce(operator.or_, expr)) fixed_queries.append(PrefetchQuery(subquery, fks, False, dest)) elif backrefs: expr = [] fields = [] for backref in backrefs: rel_field = getattr(subquery_model, backref.rel_field.name) fk_field = getattr(last_obj, backref.name) fields.append((rel_field, fk_field)) if prefetch_type == PREFETCH_TYPE.WHERE: for rel_field, 
fk_field in fields: expr.append(rel_field << last_query.select(fk_field)) subquery = subquery.where(reduce(operator.or_, expr)) elif prefetch_type == PREFETCH_TYPE.JOIN: select_fks = [] for rel_field, fk_field in fields: select_fks.append(fk_field) target = getattr(last_query.c, fk_field.column_name) expr.append(rel_field == target) subquery = subquery.distinct().join( last_query.select(*select_fks), on=reduce(operator.or_, expr)) fixed_queries.append(PrefetchQuery(subquery, backrefs, True, dest)) return fixed_queries def prefetch(sq, *subqueries, **kwargs): if not subqueries: return sq prefetch_type = kwargs.pop('prefetch_type', PREFETCH_TYPE.WHERE) if kwargs: raise ValueError('Unrecognized arguments: %s' % kwargs) fixed_queries = prefetch_add_subquery(sq, subqueries, prefetch_type) deps = {} rel_map = {} for pq in reversed(fixed_queries): query_model = pq.model if pq.fields: for rel_model in pq.rel_models: rel_map.setdefault(rel_model, []) rel_map[rel_model].append(pq) deps.setdefault(query_model, {}) id_map = deps[query_model] has_relations = bool(rel_map.get(query_model)) for instance in pq.query: if pq.fields: pq.store_instance(instance, id_map) if has_relations: for rel in rel_map[query_model]: rel.populate_instance(instance, deps[rel.model]) return list(pq.query) peewee-3.17.7/playhouse/000077500000000000000000000000001470346076600151115ustar00rootroot00000000000000peewee-3.17.7/playhouse/README.md000066400000000000000000000064671470346076600164050ustar00rootroot00000000000000## Playhouse The `playhouse` namespace contains numerous extensions to Peewee. These include vendor-specific database extensions, high-level abstractions to simplify working with databases, and tools for low-level database operations and introspection. ### Vendor extensions * [SQLite extensions](http://docs.peewee-orm.com/en/latest/peewee/sqlite_ext.html) * Full-text search (FTS3/4/5) * BM25 ranking algorithm implemented as SQLite C extension, backported to FTS4 * Virtual tables and C extensions * Closure tables * JSON extension support * LSM1 (key/value database) support * BLOB API * Online backup API * [APSW extensions](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#apsw): use Peewee with the powerful [APSW](https://github.com/rogerbinns/apsw) SQLite driver. * [SQLCipher](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#sqlcipher-ext): encrypted SQLite databases. * [SqliteQ](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#sqliteq): dedicated writer thread for multi-threaded SQLite applications. [More info here](http://charlesleifer.com/blog/multi-threaded-sqlite-without-the-operationalerrors/). * [Postgresql extensions](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#postgres-ext) * JSON and JSONB * HStore * Arrays * Server-side cursors * Full-text search * [MySQL extensions](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#mysql-ext) ### High-level libraries * [Extra fields](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#extra-fields) * Compressed field * PickleField * [Shortcuts / helpers](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#shortcuts) * Model-to-dict serializer * Dict-to-model deserializer * [Hybrid attributes](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#hybrid) * [Signals](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#signals): pre/post-save, pre/post-delete, pre-init. 
* [Dataset](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#dataset): high-level API for working with databases popularized by the [project of the same name](https://dataset.readthedocs.io/). * [Key/Value Store](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#kv): key/value store using SQLite. Supports *smart indexing* for *Pandas*-style queries. ### Database management and framework support * [pwiz](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#pwiz): generate model code from a pre-existing database. * [Schema migrations](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#migrate): modify your schema using high-level APIs. Even supports dropping or renaming columns in SQLite. * [Connection pool](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#pool): simple connection pooling. * [Reflection](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#reflection): low-level, cross-platform database introspection. * [Database URLs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#db-url): use URLs to connect to a database. * [Test utils](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#test-utils): helpers for unit-testing Peewee applications. * [Flask utils](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#flask-utils): paginated object lists, database connection management, and more. peewee-3.17.7/playhouse/__init__.py000066400000000000000000000000001470346076600172100ustar00rootroot00000000000000peewee-3.17.7/playhouse/_pysqlite/000077500000000000000000000000001470346076600171225ustar00rootroot00000000000000peewee-3.17.7/playhouse/_pysqlite/cache.h000066400000000000000000000044331470346076600203420ustar00rootroot00000000000000/* cache.h - definitions for the LRU cache * * Copyright (C) 2004-2015 Gerhard Häring * * This file is part of pysqlite. * * This software is provided 'as-is', without any express or implied * warranty. In no event will the authors be held liable for any damages * arising from the use of this software. * * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute it * freely, subject to the following restrictions: * * 1. The origin of this software must not be misrepresented; you must not * claim that you wrote the original software. If you use this software * in a product, an acknowledgment in the product documentation would be * appreciated but is not required. * 2. Altered source versions must be plainly marked as such, and must not be * misrepresented as being the original software. * 3. This notice may not be removed or altered from any source distribution. */ #ifndef PYSQLITE_CACHE_H #define PYSQLITE_CACHE_H #include "Python.h" /* The LRU cache is implemented as a combination of a doubly-linked list with a * dictionary. The list items are of type 'Node' and the dictionary has the * nodes as values. */ typedef struct _pysqlite_Node { PyObject_HEAD PyObject* key; PyObject* data; long count; struct _pysqlite_Node* prev; struct _pysqlite_Node* next; } pysqlite_Node; typedef struct { PyObject_HEAD int size; /* a dictionary mapping keys to Node entries */ PyObject* mapping; /* the factory callable */ PyObject* factory; pysqlite_Node* first; pysqlite_Node* last; /* if set, decrement the factory function when the Cache is deallocated.
* this is almost always desirable, but not in the pysqlite context */ int decref_factory; } pysqlite_Cache; extern PyTypeObject pysqlite_NodeType; extern PyTypeObject pysqlite_CacheType; int pysqlite_node_init(pysqlite_Node* self, PyObject* args, PyObject* kwargs); void pysqlite_node_dealloc(pysqlite_Node* self); int pysqlite_cache_init(pysqlite_Cache* self, PyObject* args, PyObject* kwargs); void pysqlite_cache_dealloc(pysqlite_Cache* self); PyObject* pysqlite_cache_get(pysqlite_Cache* self, PyObject* args); int pysqlite_cache_setup_types(void); #endif peewee-3.17.7/playhouse/_pysqlite/connection.h000066400000000000000000000113321470346076600214320ustar00rootroot00000000000000/* connection.h - definitions for the connection type * * Copyright (C) 2004-2015 Gerhard Häring * * This file is part of pysqlite. * * This software is provided 'as-is', without any express or implied * warranty. In no event will the authors be held liable for any damages * arising from the use of this software. * * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute it * freely, subject to the following restrictions: * * 1. The origin of this software must not be misrepresented; you must not * claim that you wrote the original software. If you use this software * in a product, an acknowledgment in the product documentation would be * appreciated but is not required. * 2. Altered source versions must be plainly marked as such, and must not be * misrepresented as being the original software. * 3. This notice may not be removed or altered from any source distribution. */ #ifndef PYSQLITE_CONNECTION_H #define PYSQLITE_CONNECTION_H #include "Python.h" #include "pythread.h" #include "structmember.h" #include "cache.h" #include "module.h" #include "sqlite3.h" typedef struct { PyObject_HEAD sqlite3* db; /* the type detection mode. Only 0, PARSE_DECLTYPES, PARSE_COLNAMES or a * bitwise combination thereof makes sense */ int detect_types; /* the timeout value in seconds for database locks */ double timeout; /* for internal use in the timeout handler: when did the timeout handler * first get called with count=0? */ double timeout_started; /* None for autocommit, otherwise a PyString with the isolation level */ PyObject* isolation_level; /* NULL for autocommit, otherwise a string with the BEGIN statement; will be * freed in connection destructor */ char* begin_statement; /* 1 if a check should be performed for each API call if the connection is * used from the same thread it was created in */ int check_same_thread; int initialized; /* thread identification of the thread the connection was created in */ long thread_ident; pysqlite_Cache* statement_cache; /* Lists of weak references to statements and cursors used within this connection */ PyObject* statements; PyObject* cursors; /* Counters for how many statements/cursors were created in the connection. May be * reset to 0 at certain intervals */ int created_statements; int created_cursors; PyObject* row_factory; /* Determines how bytestrings from SQLite are converted to Python objects: * - PyUnicode_Type: Python Unicode objects are constructed from UTF-8 bytestrings * - OptimizedUnicode: Like before, but for ASCII data, only PyStrings are created. * - PyString_Type: PyStrings are created as-is. * - Any custom callable: Any object returned from the callable called with the bytestring * as single parameter. 
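 *
 * e.g. (illustrative): con.text_factory = bytes returns raw bytestrings,
 * while con.text_factory = lambda b: b.decode("utf-8", "ignore")
 * tolerates malformed UTF-8 input.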
*/ PyObject* text_factory; /* remember references to functions/classes used in * create_function/create_aggregate, use these as dictionary keys, so we * can keep the total system refcount constant by clearing that dictionary * in connection_dealloc */ PyObject* function_pinboard; /* a dictionary of registered collation name => collation callable mappings */ PyObject* collations; /* Exception objects */ PyObject* Warning; PyObject* Error; PyObject* InterfaceError; PyObject* DatabaseError; PyObject* DataError; PyObject* OperationalError; PyObject* IntegrityError; PyObject* InternalError; PyObject* ProgrammingError; PyObject* NotSupportedError; } pysqlite_Connection; extern PyTypeObject pysqlite_ConnectionType; PyObject* pysqlite_connection_alloc(PyTypeObject* type, int aware); void pysqlite_connection_dealloc(pysqlite_Connection* self); PyObject* pysqlite_connection_cursor(pysqlite_Connection* self, PyObject* args, PyObject* kwargs); PyObject* pysqlite_connection_close(pysqlite_Connection* self, PyObject* args); PyObject* _pysqlite_connection_begin(pysqlite_Connection* self); PyObject* pysqlite_connection_commit(pysqlite_Connection* self, PyObject* args); PyObject* pysqlite_connection_rollback(pysqlite_Connection* self, PyObject* args); PyObject* pysqlite_connection_new(PyTypeObject* type, PyObject* args, PyObject* kw); int pysqlite_connection_init(pysqlite_Connection* self, PyObject* args, PyObject* kwargs); int pysqlite_connection_register_cursor(pysqlite_Connection* connection, PyObject* cursor); int pysqlite_check_thread(pysqlite_Connection* self); int pysqlite_check_connection(pysqlite_Connection* con); int pysqlite_connection_setup_types(void); #endif peewee-3.17.7/playhouse/_pysqlite/module.h000066400000000000000000000037421470346076600205660ustar00rootroot00000000000000/* module.h - definitions for the module * * Copyright (C) 2004-2015 Gerhard Häring * * This file is part of pysqlite. * * This software is provided 'as-is', without any express or implied * warranty. In no event will the authors be held liable for any damages * arising from the use of this software. * * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute it * freely, subject to the following restrictions: * * 1. The origin of this software must not be misrepresented; you must not * claim that you wrote the original software. If you use this software * in a product, an acknowledgment in the product documentation would be * appreciated but is not required. * 2. Altered source versions must be plainly marked as such, and must not be * misrepresented as being the original software. * 3. This notice may not be removed or altered from any source distribution. */ #ifndef PYSQLITE_MODULE_H #define PYSQLITE_MODULE_H #include "Python.h" #define PYSQLITE_VERSION "2.8.2" extern PyObject* pysqlite_Error; extern PyObject* pysqlite_Warning; extern PyObject* pysqlite_InterfaceError; extern PyObject* pysqlite_DatabaseError; extern PyObject* pysqlite_InternalError; extern PyObject* pysqlite_OperationalError; extern PyObject* pysqlite_ProgrammingError; extern PyObject* pysqlite_IntegrityError; extern PyObject* pysqlite_DataError; extern PyObject* pysqlite_NotSupportedError; extern PyObject* pysqlite_OptimizedUnicode; /* the functions time.time() and time.sleep() */ extern PyObject* time_time; extern PyObject* time_sleep; /* A dictionary, mapping column types (INTEGER, VARCHAR, etc.)
to converter * functions, that convert the SQL value to the appropriate Python value. * The key is uppercase. */ extern PyObject* converters; extern int _enable_callback_tracebacks; extern int pysqlite_BaseTypeAdapted; #define PARSE_DECLTYPES 1 #define PARSE_COLNAMES 2 #endif peewee-3.17.7/playhouse/_sqlite_ext.pyx000066400000000000000000001445631470346076600202100ustar00rootroot00000000000000# cython: language_level=3 import hashlib import zlib cimport cython from cpython cimport datetime from cpython.bytes cimport PyBytes_AsStringAndSize from cpython.bytes cimport PyBytes_Check from cpython.bytes cimport PyBytes_FromStringAndSize from cpython.bytes cimport PyBytes_AS_STRING from cpython.object cimport PyObject from cpython.ref cimport Py_INCREF, Py_DECREF from cpython.unicode cimport PyUnicode_AsUTF8String from cpython.unicode cimport PyUnicode_Check from cpython.unicode cimport PyUnicode_DecodeUTF8 from cpython.version cimport PY_MAJOR_VERSION from libc.float cimport DBL_MAX from libc.math cimport ceil, log, sqrt from libc.math cimport pow as cpow #from libc.stdint cimport ssize_t from libc.stdint cimport uint8_t from libc.stdint cimport uint32_t from libc.stdlib cimport calloc, free, malloc, rand from libc.string cimport memcpy, memset, strlen from peewee import InterfaceError from peewee import Node from peewee import OperationalError from peewee import sqlite3 as pysqlite import traceback cdef struct sqlite3_index_constraint: int iColumn # Column constrained, -1 for rowid. unsigned char op # Constraint operator. unsigned char usable # True if this constraint is usable. int iTermOffset # Used internally - xBestIndex should ignore. cdef struct sqlite3_index_orderby: int iColumn unsigned char desc cdef struct sqlite3_index_constraint_usage: int argvIndex # if > 0, constraint is part of argv to xFilter. unsigned char omit cdef extern from "sqlite3.h" nogil: ctypedef struct sqlite3: int busyTimeout ctypedef struct sqlite3_backup ctypedef struct sqlite3_blob ctypedef struct sqlite3_context ctypedef struct sqlite3_value ctypedef long long sqlite3_int64 ctypedef unsigned long long sqlite_uint64 # Virtual tables. ctypedef struct sqlite3_module # Forward reference. 
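# (sqlite3_module and sqlite3_vtab reference one another, so the module
# struct is forward-declared here and declared in full after the vtab and
# index-info types below.)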
ctypedef struct sqlite3_vtab: const sqlite3_module *pModule int nRef char *zErrMsg ctypedef struct sqlite3_vtab_cursor: sqlite3_vtab *pVtab ctypedef struct sqlite3_index_info: int nConstraint sqlite3_index_constraint *aConstraint int nOrderBy sqlite3_index_orderby *aOrderBy sqlite3_index_constraint_usage *aConstraintUsage int idxNum char *idxStr int needToFreeIdxStr int orderByConsumed double estimatedCost sqlite3_int64 estimatedRows int idxFlags ctypedef struct sqlite3_module: int iVersion int (*xCreate)(sqlite3*, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVTab, char**) int (*xConnect)(sqlite3*, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVTab, char**) int (*xBestIndex)(sqlite3_vtab *pVTab, sqlite3_index_info*) int (*xDisconnect)(sqlite3_vtab *pVTab) int (*xDestroy)(sqlite3_vtab *pVTab) int (*xOpen)(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor) int (*xClose)(sqlite3_vtab_cursor*) int (*xFilter)(sqlite3_vtab_cursor*, int idxNum, const char *idxStr, int argc, sqlite3_value **argv) int (*xNext)(sqlite3_vtab_cursor*) int (*xEof)(sqlite3_vtab_cursor*) int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context *, int) int (*xRowid)(sqlite3_vtab_cursor*, sqlite3_int64 *pRowid) int (*xUpdate)(sqlite3_vtab *pVTab, int, sqlite3_value **, sqlite3_int64 **) int (*xBegin)(sqlite3_vtab *pVTab) int (*xSync)(sqlite3_vtab *pVTab) int (*xCommit)(sqlite3_vtab *pVTab) int (*xRollback)(sqlite3_vtab *pVTab) int (*xFindFunction)(sqlite3_vtab *pVTab, int nArg, const char *zName, void (**pxFunc)(sqlite3_context *, int, sqlite3_value **), void **ppArg) int (*xRename)(sqlite3_vtab *pVTab, const char *zNew) int (*xSavepoint)(sqlite3_vtab *pVTab, int) int (*xRelease)(sqlite3_vtab *pVTab, int) int (*xRollbackTo)(sqlite3_vtab *pVTab, int) cdef int sqlite3_declare_vtab(sqlite3 *db, const char *zSQL) cdef int sqlite3_create_module(sqlite3 *db, const char *zName, const sqlite3_module *p, void *pClientData) cdef const char sqlite3_version[] # Encoding. cdef int SQLITE_UTF8 = 1 # Return values. cdef int SQLITE_OK = 0 cdef int SQLITE_ERROR = 1 cdef int SQLITE_INTERNAL = 2 cdef int SQLITE_PERM = 3 cdef int SQLITE_ABORT = 4 cdef int SQLITE_BUSY = 5 cdef int SQLITE_LOCKED = 6 cdef int SQLITE_NOMEM = 7 cdef int SQLITE_READONLY = 8 cdef int SQLITE_INTERRUPT = 9 cdef int SQLITE_DONE = 101 # Function type. cdef int SQLITE_DETERMINISTIC = 0x800 # Types of filtering operations. cdef int SQLITE_INDEX_CONSTRAINT_EQ = 2 cdef int SQLITE_INDEX_CONSTRAINT_GT = 4 cdef int SQLITE_INDEX_CONSTRAINT_LE = 8 cdef int SQLITE_INDEX_CONSTRAINT_LT = 16 cdef int SQLITE_INDEX_CONSTRAINT_GE = 32 cdef int SQLITE_INDEX_CONSTRAINT_MATCH = 64 # sqlite_value_type. cdef int SQLITE_INTEGER = 1 cdef int SQLITE_FLOAT = 2 cdef int SQLITE3_TEXT = 3 cdef int SQLITE_TEXT = 3 cdef int SQLITE_BLOB = 4 cdef int SQLITE_NULL = 5 ctypedef void (*sqlite3_destructor_type)(void*) # Converting from Sqlite -> Python. cdef const void *sqlite3_value_blob(sqlite3_value*) cdef int sqlite3_value_bytes(sqlite3_value*) cdef double sqlite3_value_double(sqlite3_value*) cdef int sqlite3_value_int(sqlite3_value*) cdef sqlite3_int64 sqlite3_value_int64(sqlite3_value*) cdef const unsigned char *sqlite3_value_text(sqlite3_value*) cdef int sqlite3_value_type(sqlite3_value*) cdef int sqlite3_value_numeric_type(sqlite3_value*) # Converting from Python -> Sqlite. 
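# (These sqlite3_result_* declarations are consumed by python_to_sqlite(),
# defined further below, which dispatches on the Python type: None to
# result_null, int to result_int64, float to result_double, unicode to
# result_text, bytes to result_blob.)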
cdef void sqlite3_result_blob(sqlite3_context*, const void *, int, void(*)(void*)) cdef void sqlite3_result_double(sqlite3_context*, double) cdef void sqlite3_result_error(sqlite3_context*, const char*, int) cdef void sqlite3_result_error_toobig(sqlite3_context*) cdef void sqlite3_result_error_nomem(sqlite3_context*) cdef void sqlite3_result_error_code(sqlite3_context*, int) cdef void sqlite3_result_int(sqlite3_context*, int) cdef void sqlite3_result_int64(sqlite3_context*, sqlite3_int64) cdef void sqlite3_result_null(sqlite3_context*) cdef void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*)) cdef void sqlite3_result_value(sqlite3_context*, sqlite3_value*) # Memory management. cdef void* sqlite3_malloc(int) cdef void sqlite3_free(void *) cdef int sqlite3_changes(sqlite3 *db) cdef int sqlite3_get_autocommit(sqlite3 *db) cdef sqlite3_int64 sqlite3_last_insert_rowid(sqlite3 *db) cdef void *sqlite3_commit_hook(sqlite3 *, int(*)(void *), void *) cdef void *sqlite3_rollback_hook(sqlite3 *, void(*)(void *), void *) cdef void *sqlite3_update_hook( sqlite3 *, void(*)(void *, int, char *, char *, sqlite3_int64), void *) cdef int SQLITE_STATUS_MEMORY_USED = 0 cdef int SQLITE_STATUS_PAGECACHE_USED = 1 cdef int SQLITE_STATUS_PAGECACHE_OVERFLOW = 2 cdef int SQLITE_STATUS_SCRATCH_USED = 3 cdef int SQLITE_STATUS_SCRATCH_OVERFLOW = 4 cdef int SQLITE_STATUS_MALLOC_SIZE = 5 cdef int SQLITE_STATUS_PARSER_STACK = 6 cdef int SQLITE_STATUS_PAGECACHE_SIZE = 7 cdef int SQLITE_STATUS_SCRATCH_SIZE = 8 cdef int SQLITE_STATUS_MALLOC_COUNT = 9 cdef int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag) cdef int SQLITE_DBSTATUS_LOOKASIDE_USED = 0 cdef int SQLITE_DBSTATUS_CACHE_USED = 1 cdef int SQLITE_DBSTATUS_SCHEMA_USED = 2 cdef int SQLITE_DBSTATUS_STMT_USED = 3 cdef int SQLITE_DBSTATUS_LOOKASIDE_HIT = 4 cdef int SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE = 5 cdef int SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL = 6 cdef int SQLITE_DBSTATUS_CACHE_HIT = 7 cdef int SQLITE_DBSTATUS_CACHE_MISS = 8 cdef int SQLITE_DBSTATUS_CACHE_WRITE = 9 cdef int SQLITE_DBSTATUS_DEFERRED_FKS = 10 #cdef int SQLITE_DBSTATUS_CACHE_USED_SHARED = 11 cdef int sqlite3_db_status(sqlite3 *, int op, int *pCur, int *pHigh, int reset) cdef int SQLITE_DELETE = 9 cdef int SQLITE_INSERT = 18 cdef int SQLITE_UPDATE = 23 cdef int SQLITE_CONFIG_SINGLETHREAD = 1 # None cdef int SQLITE_CONFIG_MULTITHREAD = 2 # None cdef int SQLITE_CONFIG_SERIALIZED = 3 # None cdef int SQLITE_CONFIG_SCRATCH = 6 # void *, int sz, int N cdef int SQLITE_CONFIG_PAGECACHE = 7 # void *, int sz, int N cdef int SQLITE_CONFIG_HEAP = 8 # void *, int nByte, int min cdef int SQLITE_CONFIG_MEMSTATUS = 9 # boolean cdef int SQLITE_CONFIG_LOOKASIDE = 13 # int, int cdef int SQLITE_CONFIG_URI = 17 # int cdef int SQLITE_CONFIG_MMAP_SIZE = 22 # sqlite3_int64, sqlite3_int64 cdef int SQLITE_CONFIG_STMTJRNL_SPILL = 26 # int nByte cdef int SQLITE_DBCONFIG_MAINDBNAME = 1000 # const char* cdef int SQLITE_DBCONFIG_LOOKASIDE = 1001 # void* int int cdef int SQLITE_DBCONFIG_ENABLE_FKEY = 1002 # int int* cdef int SQLITE_DBCONFIG_ENABLE_TRIGGER = 1003 # int int* cdef int SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER = 1004 # int int* cdef int SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION = 1005 # int int* cdef int SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE = 1006 # int int* cdef int SQLITE_DBCONFIG_ENABLE_QPSG = 1007 # int int* cdef int sqlite3_config(int, ...) cdef int sqlite3_db_config(sqlite3*, int op, ...) # Misc. 
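# (The busy-handler and online-backup helpers implemented later in this
# module are built on the declarations in this section.)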
cdef int sqlite3_busy_handler(sqlite3 *db, int(*)(void *, int), void *) cdef int sqlite3_sleep(int ms) cdef sqlite3_backup *sqlite3_backup_init( sqlite3 *pDest, const char *zDestName, sqlite3 *pSource, const char *zSourceName) # Backup. cdef int sqlite3_backup_step(sqlite3_backup *p, int nPage) cdef int sqlite3_backup_finish(sqlite3_backup *p) cdef int sqlite3_backup_remaining(sqlite3_backup *p) cdef int sqlite3_backup_pagecount(sqlite3_backup *p) # Error handling. cdef int sqlite3_errcode(sqlite3 *db) cdef int sqlite3_errstr(int) cdef const char *sqlite3_errmsg(sqlite3 *db) cdef char *sqlite3_mprintf(const char *, ...) cdef int sqlite3_blob_open( sqlite3*, const char *zDb, const char *zTable, const char *zColumn, sqlite3_int64 iRow, int flags, sqlite3_blob **ppBlob) cdef int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64) cdef int sqlite3_blob_close(sqlite3_blob *) cdef int sqlite3_blob_bytes(sqlite3_blob *) cdef int sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset) cdef int sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset) cdef extern from "_pysqlite/connection.h": ctypedef struct pysqlite_Connection: sqlite3* db double timeout int initialized cdef sqlite_to_python(int argc, sqlite3_value **params): cdef: int i int vtype list pyargs = [] for i in range(argc): vtype = sqlite3_value_type(params[i]) if vtype == SQLITE_INTEGER: pyval = sqlite3_value_int(params[i]) elif vtype == SQLITE_FLOAT: pyval = sqlite3_value_double(params[i]) elif vtype == SQLITE_TEXT: pyval = PyUnicode_DecodeUTF8( sqlite3_value_text(params[i]), sqlite3_value_bytes(params[i]), NULL) elif vtype == SQLITE_BLOB: pyval = PyBytes_FromStringAndSize( sqlite3_value_blob(params[i]), sqlite3_value_bytes(params[i])) elif vtype == SQLITE_NULL: pyval = None else: pyval = None pyargs.append(pyval) return pyargs cdef python_to_sqlite(sqlite3_context *context, value): if value is None: sqlite3_result_null(context) elif isinstance(value, (int, long)): sqlite3_result_int64(context, value) elif isinstance(value, float): sqlite3_result_double(context, value) elif isinstance(value, unicode): bval = PyUnicode_AsUTF8String(value) sqlite3_result_text( context, bval, len(bval), -1) elif isinstance(value, bytes): if PY_MAJOR_VERSION > 2: sqlite3_result_blob( context, (value), len(value), -1) else: sqlite3_result_text( context, value, len(value), -1) else: sqlite3_result_error( context, encode('Unsupported type %s' % type(value)), -1) return SQLITE_ERROR return SQLITE_OK cdef int SQLITE_CONSTRAINT = 19 # Abort due to constraint violation. USE_SQLITE_CONSTRAINT = sqlite3_version[:4] >= b'3.26' # The peewee_vtab struct embeds the base sqlite3_vtab struct, and adds a field # to store a reference to the Python implementation. ctypedef struct peewee_vtab: sqlite3_vtab base void *table_func_cls # Like peewee_vtab, the peewee_cursor embeds the base sqlite3_vtab_cursor and # adds fields to store references to the current index, the Python # implementation, the current rows' data, and a flag for whether the cursor has # been exhausted. ctypedef struct peewee_cursor: sqlite3_vtab_cursor base long long idx void *table_func void *row_data bint stopped # We define an xConnect function, but leave xCreate NULL so that the # table-function can be called eponymously. 
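# e.g. (sketch): once MyFunc.register(db.connection()) has run, the
# function is queryable with no CREATE VIRTUAL TABLE step:
#
#     SELECT * FROM my_func('some-arg');
#
# (MyFunc/my_func are hypothetical; the table name is taken from the
# TableFunction's "name" attribute.)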
cdef int pwConnect(sqlite3 *db, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVtab, char **pzErr) noexcept with gil: cdef: int rc object table_func_cls = pAux peewee_vtab *pNew = 0 rc = sqlite3_declare_vtab( db, encode('CREATE TABLE x(%s);' % table_func_cls.get_table_columns_declaration())) if rc == SQLITE_OK: pNew = sqlite3_malloc(sizeof(pNew[0])) memset(pNew, 0, sizeof(pNew[0])) ppVtab[0] = &(pNew.base) pNew.table_func_cls = table_func_cls Py_INCREF(table_func_cls) return rc cdef int pwDisconnect(sqlite3_vtab *pBase) noexcept with gil: cdef: peewee_vtab *pVtab = pBase object table_func_cls = (pVtab.table_func_cls) Py_DECREF(table_func_cls) sqlite3_free(pVtab) return SQLITE_OK # The xOpen method is used to initialize a cursor. In this method we # instantiate the TableFunction class and zero out a new cursor for iteration. cdef int pwOpen(sqlite3_vtab *pBase, sqlite3_vtab_cursor **ppCursor) \ noexcept with gil: cdef: peewee_vtab *pVtab = pBase peewee_cursor *pCur = 0 object table_func_cls = pVtab.table_func_cls pCur = sqlite3_malloc(sizeof(pCur[0])) memset(pCur, 0, sizeof(pCur[0])) ppCursor[0] = &(pCur.base) pCur.idx = 0 try: table_func = table_func_cls() except: if table_func_cls.print_tracebacks: traceback.print_exc() sqlite3_free(pCur) return SQLITE_ERROR Py_INCREF(table_func) pCur.table_func = table_func pCur.stopped = False return SQLITE_OK cdef int pwClose(sqlite3_vtab_cursor *pBase) noexcept with gil: cdef: peewee_cursor *pCur = pBase object table_func = pCur.table_func Py_DECREF(table_func) sqlite3_free(pCur) return SQLITE_OK # Iterate once, advancing the cursor's index and assigning the row data to the # `row_data` field on the peewee_cursor struct. cdef int pwNext(sqlite3_vtab_cursor *pBase) noexcept with gil: cdef: peewee_cursor *pCur = pBase object table_func = pCur.table_func tuple result if pCur.row_data: Py_DECREF(pCur.row_data) pCur.row_data = NULL try: result = tuple(table_func.iterate(pCur.idx)) except StopIteration: pCur.stopped = True except: if table_func.print_tracebacks: traceback.print_exc() return SQLITE_ERROR else: Py_INCREF(result) pCur.row_data = result pCur.idx += 1 pCur.stopped = False return SQLITE_OK # Return the requested column from the current row. cdef int pwColumn(sqlite3_vtab_cursor *pBase, sqlite3_context *ctx, int iCol) noexcept with gil: cdef: bytes bval peewee_cursor *pCur = pBase sqlite3_int64 x = 0 tuple row_data if iCol == -1: sqlite3_result_int64(ctx, pCur.idx) return SQLITE_OK if not pCur.row_data: sqlite3_result_error(ctx, encode('no row data'), -1) return SQLITE_ERROR row_data = pCur.row_data return python_to_sqlite(ctx, row_data[iCol]) cdef int pwRowid(sqlite3_vtab_cursor *pBase, sqlite3_int64 *pRowid) noexcept: cdef: peewee_cursor *pCur = pBase pRowid[0] = pCur.idx return SQLITE_OK # Return a boolean indicating whether the cursor has been consumed. cdef int pwEof(sqlite3_vtab_cursor *pBase) noexcept: cdef: peewee_cursor *pCur = pBase return 1 if pCur.stopped else 0 # The filter method is called on the first iteration. This method is where we # get access to the parameters that the function was called with, and call the # TableFunction's `initialize()` function. 
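# e.g. (illustrative): a TableFunction with params = ['a', 'b'] queried as
# SELECT * FROM func('x', 'y') reaches this callback with idxStr "a,b"
# (prepared by pwBestIndex below) and argv ('x', 'y'), producing the call
# initialize(a='x', b='y').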
cdef int pwFilter(sqlite3_vtab_cursor *pBase, int idxNum, const char *idxStr, int argc, sqlite3_value **argv) \ noexcept with gil: cdef: peewee_cursor *pCur = pBase object table_func = pCur.table_func dict query = {} int idx int value_type tuple row_data void *row_data_raw if not idxStr or argc == 0 and len(table_func.params): return SQLITE_ERROR elif len(idxStr): params = decode(idxStr).split(',') else: params = [] py_values = sqlite_to_python(argc, argv) for idx, param in enumerate(params): value = argv[idx] if not value: query[param] = None else: query[param] = py_values[idx] try: table_func.initialize(**query) except: if table_func.print_tracebacks: traceback.print_exc() return SQLITE_ERROR pCur.stopped = False try: row_data = tuple(table_func.iterate(0)) except StopIteration: pCur.stopped = True except: if table_func.print_tracebacks: traceback.print_exc() return SQLITE_ERROR else: Py_INCREF(row_data) pCur.row_data = row_data pCur.idx += 1 return SQLITE_OK # SQLite will (in some cases, repeatedly) call the xBestIndex method to try and # find the best query plan. cdef int pwBestIndex(sqlite3_vtab *pBase, sqlite3_index_info *pIdxInfo) \ noexcept with gil: cdef: int i int col_idx int idxNum = 0, nArg = 0 peewee_vtab *pVtab = pBase object table_func_cls = pVtab.table_func_cls sqlite3_index_constraint *pConstraint = 0 list columns = [] char *idxStr int nParams = len(table_func_cls.params) for i in range(pIdxInfo.nConstraint): pConstraint = pIdxInfo.aConstraint + i if not pConstraint.usable: continue if pConstraint.op != SQLITE_INDEX_CONSTRAINT_EQ: continue col_idx = pConstraint.iColumn - table_func_cls._ncols if col_idx >= 0: columns.append(table_func_cls.params[col_idx]) nArg += 1 pIdxInfo.aConstraintUsage[i].argvIndex = nArg pIdxInfo.aConstraintUsage[i].omit = 1 if nArg > 0 or nParams == 0: if nArg == nParams: # All parameters are present, this is ideal. pIdxInfo.estimatedCost = 1 pIdxInfo.estimatedRows = 10 else: # Penalize score based on number of missing params. pIdxInfo.estimatedCost = 10000000000000 * (nParams - nArg) pIdxInfo.estimatedRows = 10 * (nParams - nArg) # Store a reference to the columns in the index info structure. joinedCols = encode(','.join(columns)) pIdxInfo.idxStr = sqlite3_mprintf("%s", joinedCols) pIdxInfo.needToFreeIdxStr = 1 elif USE_SQLITE_CONSTRAINT: return SQLITE_CONSTRAINT else: pIdxInfo.estimatedCost = DBL_MAX pIdxInfo.estimatedRows = 100000 return SQLITE_OK cdef class _TableFunctionImpl(object): cdef: sqlite3_module module object table_function def __cinit__(self, table_function): self.table_function = table_function cdef create_module(self, pysqlite_Connection* sqlite_conn): cdef: bytes name = encode(self.table_function.name) sqlite3 *db = sqlite_conn.db int rc # Populate the SQLite module struct members. self.module.iVersion = 0 self.module.xCreate = NULL self.module.xConnect = pwConnect self.module.xBestIndex = pwBestIndex self.module.xDisconnect = pwDisconnect self.module.xDestroy = NULL self.module.xOpen = pwOpen self.module.xClose = pwClose self.module.xFilter = pwFilter self.module.xNext = pwNext self.module.xEof = pwEof self.module.xColumn = pwColumn self.module.xRowid = pwRowid self.module.xUpdate = NULL self.module.xBegin = NULL self.module.xSync = NULL self.module.xCommit = NULL self.module.xRollback = NULL self.module.xFindFunction = NULL self.module.xRename = NULL # Create the SQLite virtual table. 
rc = sqlite3_create_module( db, name, &self.module, (self.table_function)) Py_INCREF(self) return rc == SQLITE_OK class TableFunction(object): columns = None params = None name = None print_tracebacks = True _ncols = None @classmethod def register(cls, conn): cdef _TableFunctionImpl impl = _TableFunctionImpl(cls) impl.create_module(conn) cls._ncols = len(cls.columns) def initialize(self, **filters): raise NotImplementedError def iterate(self, idx): raise NotImplementedError @classmethod def get_table_columns_declaration(cls): cdef list accum = [] for column in cls.columns: if isinstance(column, tuple): if len(column) != 2: raise ValueError('Column must be either a string or a ' '2-tuple of name, type') accum.append('%s %s' % column) else: accum.append(column) for param in cls.params: accum.append('%s HIDDEN' % param) return ', '.join(accum) cdef inline bytes encode(key): cdef bytes bkey if PyUnicode_Check(key): bkey = PyUnicode_AsUTF8String(key) elif PyBytes_Check(key): bkey = key elif key is None: return None else: bkey = PyUnicode_AsUTF8String(str(key)) return bkey cdef inline unicode decode(key): cdef unicode ukey if PyBytes_Check(key): ukey = key.decode('utf-8') elif PyUnicode_Check(key): ukey = key elif key is None: return None else: ukey = unicode(key) return ukey cdef double *get_weights(int ncol, tuple raw_weights): cdef: int argc = len(raw_weights) int icol double *weights = malloc(sizeof(double) * ncol) for icol in range(ncol): if argc == 0: weights[icol] = 1.0 elif icol < argc: weights[icol] = raw_weights[icol] else: weights[icol] = 0.0 return weights def peewee_rank(py_match_info, *raw_weights): cdef: unsigned int *match_info unsigned int *phrase_info bytes _match_info_buf = bytes(py_match_info) char *match_info_buf = _match_info_buf int nphrase, ncol, icol, iphrase, hits, global_hits int P_O = 0, C_O = 1, X_O = 2 double score = 0.0, weight double *weights match_info = match_info_buf nphrase = match_info[P_O] ncol = match_info[C_O] weights = get_weights(ncol, raw_weights) # matchinfo X value corresponds to, for each phrase in the search query, a # list of 3 values for each column in the search table. # So if we have a two-phrase search query and three columns of data, the # following would be the layout: # p0 : c0=[0, 1, 2], c1=[3, 4, 5], c2=[6, 7, 8] # p1 : c0=[9, 10, 11], c1=[12, 13, 14], c2=[15, 16, 17] for iphrase in range(nphrase): phrase_info = &match_info[X_O + iphrase * ncol * 3] for icol in range(ncol): weight = weights[icol] if weight == 0: continue # The idea is that we count the number of times the phrase appears # in this column of the current row, compared to how many times it # appears in this column across all rows. The ratio of these values # provides a rough way to score based on "high value" terms. 
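# For example (illustrative numbers): a phrase occurring 2 times in this
# row's column and 10 times in that column across the whole table
# contributes weight * (2 / 10) to the score.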
hits = phrase_info[3 * icol] global_hits = phrase_info[3 * icol + 1] if hits > 0: score += weight * (hits / global_hits) free(weights) return -1 * score def peewee_lucene(py_match_info, *raw_weights): # Usage: peewee_lucene(matchinfo(table, 'pcnalx'), 1) cdef: unsigned int *match_info bytes _match_info_buf = bytes(py_match_info) char *match_info_buf = _match_info_buf int nphrase, ncol double total_docs, term_frequency double doc_length, docs_with_term, avg_length double idf, weight, rhs, denom double *weights int P_O = 0, C_O = 1, N_O = 2, L_O, X_O int iphrase, icol, x double score = 0.0 match_info = match_info_buf nphrase = match_info[P_O] ncol = match_info[C_O] total_docs = match_info[N_O] L_O = 3 + ncol X_O = L_O + ncol weights = get_weights(ncol, raw_weights) for iphrase in range(nphrase): for icol in range(ncol): weight = weights[icol] if weight == 0: continue doc_length = match_info[L_O + icol] x = X_O + (3 * (icol + iphrase * ncol)) term_frequency = match_info[x] # f(qi) docs_with_term = match_info[x + 2] or 1. # n(qi) idf = log(total_docs / (docs_with_term + 1.)) tf = sqrt(term_frequency) fieldNorms = 1.0 / sqrt(doc_length) score += (idf * tf * fieldNorms) free(weights) return -1 * score def peewee_bm25(py_match_info, *raw_weights): # Usage: peewee_bm25(matchinfo(table, 'pcnalx'), 1) # where the second parameter is the index of the column and # the 3rd and 4th specify k and b. cdef: unsigned int *match_info bytes _match_info_buf = bytes(py_match_info) char *match_info_buf = _match_info_buf int nphrase, ncol double B = 0.75, K = 1.2 double total_docs, term_frequency double doc_length, docs_with_term, avg_length double idf, weight, ratio, num, b_part, denom, pc_score double *weights int P_O = 0, C_O = 1, N_O = 2, A_O = 3, L_O, X_O int iphrase, icol, x double score = 0.0 match_info = match_info_buf # PCNALX = matchinfo format. # P = 1 = phrase count within query. # C = 1 = searchable columns in table. # N = 1 = total rows in table. # A = c = for each column, avg number of tokens # L = c = for each column, length of current row (in tokens) # X = 3 * c * p = for each phrase and table column, # * phrase count within column for current row. # * phrase count within column for all rows. # * total rows for which column contains phrase. nphrase = match_info[P_O] # n ncol = match_info[C_O] total_docs = match_info[N_O] # N L_O = A_O + ncol X_O = L_O + ncol weights = get_weights(ncol, raw_weights) for iphrase in range(nphrase): for icol in range(ncol): weight = weights[icol] if weight == 0: continue x = X_O + (3 * (icol + iphrase * ncol)) term_frequency = match_info[x] # f(qi, D) docs_with_term = match_info[x + 2] # n(qi) # log( (N - n(qi) + 0.5) / (n(qi) + 0.5) ) idf = log( (total_docs - docs_with_term + 0.5) / (docs_with_term + 0.5)) if idf <= 0.0: idf = 1e-6 doc_length = match_info[L_O + icol] # |D| avg_length = match_info[A_O + icol] # avgdl if avg_length == 0: avg_length = 1 ratio = doc_length / avg_length num = term_frequency * (K + 1) b_part = 1 - B + (B * ratio) denom = term_frequency + (K * b_part) pc_score = idf * (num / denom) score += (pc_score * weight) free(weights) return -1 * score def peewee_bm25f(py_match_info, *raw_weights): # Usage: peewee_bm25f(matchinfo(table, 'pcnalx'), 1) # where the second parameter is the index of the column and # the 3rd and 4th specify k and b. 
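# Note: as with peewee_bm25() above, the extra positional arguments are in
# fact interpreted as per-column weights (see get_weights()); the constants
# K (1.2) and B (0.75) are fixed in this implementation.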
cdef: unsigned int *match_info bytes _match_info_buf = bytes(py_match_info) char *match_info_buf = _match_info_buf int nphrase, ncol double B = 0.75, K = 1.2, epsilon double total_docs, term_frequency, docs_with_term double doc_length = 0.0, avg_length = 0.0 double idf, weight, ratio, num, b_part, denom, pc_score double *weights int P_O = 0, C_O = 1, N_O = 2, A_O = 3, L_O, X_O int iphrase, icol, x double score = 0.0 match_info = match_info_buf nphrase = match_info[P_O] # n ncol = match_info[C_O] total_docs = match_info[N_O] # N L_O = A_O + ncol X_O = L_O + ncol for icol in range(ncol): avg_length += match_info[A_O + icol] doc_length += match_info[L_O + icol] epsilon = 1.0 / (total_docs * avg_length) if avg_length == 0: avg_length = 1 ratio = doc_length / avg_length weights = get_weights(ncol, raw_weights) for iphrase in range(nphrase): for icol in range(ncol): weight = weights[icol] if weight == 0: continue x = X_O + (3 * (icol + iphrase * ncol)) term_frequency = match_info[x] # f(qi, D) docs_with_term = match_info[x + 2] # n(qi) # log( (N - n(qi) + 0.5) / (n(qi) + 0.5) ) idf = log( (total_docs - docs_with_term + 0.5) / (docs_with_term + 0.5)) idf = epsilon if idf <= 0 else idf num = term_frequency * (K + 1) b_part = 1 - B + (B * ratio) denom = term_frequency + (K * b_part) pc_score = idf * ((num / denom) + 1.) score += (pc_score * weight) free(weights) return -1 * score cdef uint32_t murmurhash2(const unsigned char *key, ssize_t nlen, uint32_t seed): cdef: uint32_t m = 0x5bd1e995 int r = 24 const unsigned char *data = key uint32_t h = seed ^ nlen uint32_t k while nlen >= 4: k = ((data)[0]) k *= m k = k ^ (k >> r) k *= m h *= m h = h ^ k data += 4 nlen -= 4 if nlen == 3: h = h ^ (data[2] << 16) if nlen >= 2: h = h ^ (data[1] << 8) if nlen >= 1: h = h ^ (data[0]) h *= m h = h ^ (h >> 13) h *= m h = h ^ (h >> 15) return h def peewee_murmurhash(key, seed=None): if key is None: return cdef: bytes bkey = encode(key) int nseed = seed or 0 if key: return murmurhash2(bkey, len(bkey), nseed) return 0 def make_hash(hash_impl): def inner(*items): state = hash_impl() for item in items: state.update(encode(item)) return state.hexdigest() return inner peewee_md5 = make_hash(hashlib.md5) peewee_sha1 = make_hash(hashlib.sha1) peewee_sha256 = make_hash(hashlib.sha256) def _register_functions(database, pairs): for func, name in pairs: database.register_function(func, name) def register_hash_functions(database): _register_functions(database, ( (peewee_murmurhash, 'murmurhash'), (peewee_md5, 'md5'), (peewee_sha1, 'sha1'), (peewee_sha256, 'sha256'), (zlib.adler32, 'adler32'), (zlib.crc32, 'crc32'))) def register_rank_functions(database): _register_functions(database, ( (peewee_bm25, 'fts_bm25'), (peewee_bm25f, 'fts_bm25f'), (peewee_lucene, 'fts_lucene'), (peewee_rank, 'fts_rank'))) ctypedef struct bf_t: void *bits size_t size cdef int seeds[10] seeds[:] = [0, 1337, 37, 0xabcd, 0xdead, 0xface, 97, 0xed11, 0xcad9, 0x827b] cdef bf_t *bf_create(size_t size): cdef bf_t *bf = calloc(1, sizeof(bf_t)) bf.size = size bf.bits = calloc(1, size) return bf @cython.cdivision(True) cdef uint32_t bf_bitindex(bf_t *bf, unsigned char *key, size_t klen, int seed): cdef: uint32_t h = murmurhash2(key, klen, seed) return h % (bf.size * 8) @cython.cdivision(True) cdef bf_add(bf_t *bf, unsigned char *key): cdef: uint8_t *bits = (bf.bits) uint32_t h int pos, seed size_t keylen = strlen(key) for seed in seeds: h = bf_bitindex(bf, key, keylen, seed) pos = h / 8 bits[pos] = bits[pos] | (1 << (h % 8)) @cython.cdivision(True) cdef int 
bf_contains(bf_t *bf, unsigned char *key): cdef: uint8_t *bits = (bf.bits) uint32_t h int pos, seed size_t keylen = strlen(key) for seed in seeds: h = bf_bitindex(bf, key, keylen, seed) pos = h / 8 if not (bits[pos] & (1 << (h % 8))): return 0 return 1 cdef bf_free(bf_t *bf): free(bf.bits) free(bf) cdef class BloomFilter(object): cdef: bf_t *bf def __init__(self, size=1024 * 32): self.bf = bf_create(size) def __dealloc__(self): if self.bf: bf_free(self.bf) def __len__(self): return self.bf.size def add(self, *keys): cdef bytes bkey for key in keys: bkey = encode(key) bf_add(self.bf, bkey) def __contains__(self, key): cdef bytes bkey = encode(key) return bf_contains(self.bf, bkey) def to_buffer(self): # We have to do this so that embedded NULL bytes are preserved. cdef bytes buf = PyBytes_FromStringAndSize((self.bf.bits), self.bf.size) # Similarly we wrap in a buffer object so pysqlite preserves the # embedded NULL bytes. return buf @classmethod def from_buffer(cls, data): cdef: char *buf Py_ssize_t buflen BloomFilter bloom PyBytes_AsStringAndSize(data, &buf, &buflen) bloom = BloomFilter(buflen) memcpy(bloom.bf.bits, buf, buflen) return bloom @classmethod def calculate_size(cls, double n, double p): cdef double m = ceil((n * log(p)) / log(1.0 / (pow(2.0, log(2.0))))) return m cdef class BloomFilterAggregate(object): cdef: BloomFilter bf def __init__(self): self.bf = None def step(self, value, size=None): if not self.bf: size = size or 1024 self.bf = BloomFilter(size) self.bf.add(value) def finalize(self): if not self.bf: return None return pysqlite.Binary(self.bf.to_buffer()) def peewee_bloomfilter_contains(key, data): cdef: bf_t bf bytes bkey bytes bdata = bytes(data) unsigned char *cdata = bdata bf.size = len(data) bf.bits = cdata bkey = encode(key) return bf_contains(&bf, bkey) def peewee_bloomfilter_add(key, data): cdef: bf_t bf bytes bkey char *buf Py_ssize_t buflen PyBytes_AsStringAndSize(data, &buf, &buflen) bf.size = buflen bf.bits = buf bkey = encode(key) bf_add(&bf, bkey) return data def peewee_bloomfilter_calculate_size(n_items, error_p): return BloomFilter.calculate_size(n_items, error_p) def register_bloomfilter(database): database.register_aggregate(BloomFilterAggregate, 'bloomfilter') database.register_function(peewee_bloomfilter_add, 'bloomfilter_add') database.register_function(peewee_bloomfilter_contains, 'bloomfilter_contains') database.register_function(peewee_bloomfilter_calculate_size, 'bloomfilter_calculate_size') cdef inline int _check_connection(pysqlite_Connection *conn) except -1: """ Check that the underlying SQLite database connection is usable. Raises an InterfaceError if the connection is either uninitialized or closed. """ if not conn.db: raise InterfaceError('Cannot operate on closed database.') return 1 class ZeroBlob(Node): def __init__(self, length): if not isinstance(length, int) or length < 0: raise ValueError('Length must be a positive integer.') self.length = length def __sql__(self, ctx): return ctx.literal('zeroblob(%s)' % self.length) cdef class Blob(object) # Forward declaration. 
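# Example usage of the incremental blob I/O API below (a sketch; the model
# and table names are illustrative):
#
#   rowid = RawData.insert({RawData.data: ZeroBlob(1024)}).execute()
#   blob = Blob(db, 'raw_data', 'data', rowid)
#   blob.write(b'hello, world')
#   blob.seek(0)
#   blob.read(5)   # -> b'hello'
#   blob.close()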
cdef inline int _check_blob_closed(Blob blob) except -1: _check_connection(blob.conn) if not blob.pBlob: raise InterfaceError('Cannot operate on closed blob.') return 1 cdef class Blob(object): cdef: int offset pysqlite_Connection *conn sqlite3_blob *pBlob def __init__(self, database, table, column, rowid, read_only=False): cdef: bytes btable = encode(table) bytes bcolumn = encode(column) int flags = 0 if read_only else 1 int rc sqlite3_blob *blob self.conn = (database._state.conn) _check_connection(self.conn) rc = sqlite3_blob_open( self.conn.db, 'main', btable, bcolumn, rowid, flags, &blob) if rc != SQLITE_OK: raise OperationalError('Unable to open blob.') if not blob: raise MemoryError('Unable to allocate blob.') self.pBlob = blob self.offset = 0 cdef _close(self): if self.pBlob and self.conn.db: with nogil: sqlite3_blob_close(self.pBlob) self.pBlob = 0 def __dealloc__(self): self._close() def __len__(self): _check_blob_closed(self) return sqlite3_blob_bytes(self.pBlob) def __getitem__(self, idx): cdef: unsigned char c int i = idx int rc int size _check_blob_closed(self) size = sqlite3_blob_bytes(self.pBlob) if i < 0: i += size if i < 0 or i >= size: raise IndexError('Blob index out of range (%s, %s)' % (i, size)) with nogil: rc = sqlite3_blob_read(self.pBlob, &c, 1, i) if rc != SQLITE_OK: self._close() raise OperationalError('Error reading from blob.') return PyBytes_FromStringAndSize(&c, 1) def read(self, n=None): cdef: bytes pybuf int length = -1 int max_length int rc int size char *buf if n is not None: length = n _check_blob_closed(self) size = sqlite3_blob_bytes(self.pBlob) max_length = size - self.offset if length < 0 or length > max_length: length = max_length if length == 0: return b'' pybuf = PyBytes_FromStringAndSize(NULL, length) buf = PyBytes_AS_STRING(pybuf) with nogil: rc = sqlite3_blob_read(self.pBlob, buf, length, self.offset) if rc != SQLITE_OK: self._close() raise OperationalError('Error reading from blob.') self.offset += length return bytes(pybuf) def seek(self, offset, frame_of_reference=0): cdef int size _check_blob_closed(self) size = sqlite3_blob_bytes(self.pBlob) if frame_of_reference == 0: pass elif frame_of_reference == 1: offset += self.offset elif frame_of_reference == 2: offset += size else: raise ValueError('seek() frame of reference must be 0, 1 or 2.') if offset < 0 or offset > size: raise ValueError('seek() offset outside of valid range.') self.offset = offset def tell(self): _check_blob_closed(self) return self.offset def write(self, bytes data): cdef: char *buf int rc int remaining int size Py_ssize_t buflen _check_blob_closed(self) size = sqlite3_blob_bytes(self.pBlob) remaining = size - self.offset PyBytes_AsStringAndSize(data, &buf, &buflen) if buflen > remaining: raise ValueError('Data would go beyond end of blob') with nogil: rc = sqlite3_blob_write(self.pBlob, buf, buflen, self.offset) if rc != SQLITE_OK: raise OperationalError('Error writing to blob.') self.offset += buflen def close(self): _check_connection(self.conn) self._close() def reopen(self, rowid): _check_blob_closed(self) self.offset = 0 if sqlite3_blob_reopen(self.pBlob, rowid): self._close() raise OperationalError('Unable to re-open blob.') def sqlite_get_status(flag): cdef: int current, highwater, rc rc = sqlite3_status(flag, &current, &highwater, 0) if rc == SQLITE_OK: return (current, highwater) raise Exception('Error requesting status: %s' % rc) def sqlite_get_db_status(conn, flag): cdef: int current, highwater, rc pysqlite_Connection *c_conn = conn if not c_conn.db: return (None,
None) rc = sqlite3_db_status(c_conn.db, flag, &current, &highwater, 0) if rc == SQLITE_OK: return (current, highwater) raise Exception('Error requesting db status: %s' % rc) cdef class ConnectionHelper(object): cdef: object _commit_hook, _rollback_hook, _update_hook pysqlite_Connection *conn def __init__(self, connection): self.conn = connection self._commit_hook = self._rollback_hook = self._update_hook = None def __dealloc__(self): # When deallocating a Database object, we need to ensure that we clear # any commit, rollback or update hooks that may have been applied. if not self.conn.initialized or not self.conn.db: return if self._commit_hook is not None: sqlite3_commit_hook(self.conn.db, NULL, NULL) if self._rollback_hook is not None: sqlite3_rollback_hook(self.conn.db, NULL, NULL) if self._update_hook is not None: sqlite3_update_hook(self.conn.db, NULL, NULL) def set_commit_hook(self, fn): if not self.conn.initialized or not self.conn.db: return self._commit_hook = fn if fn is None: sqlite3_commit_hook(self.conn.db, NULL, NULL) else: sqlite3_commit_hook(self.conn.db, _commit_callback, fn) def set_rollback_hook(self, fn): if not self.conn.initialized or not self.conn.db: return self._rollback_hook = fn if fn is None: sqlite3_rollback_hook(self.conn.db, NULL, NULL) else: sqlite3_rollback_hook(self.conn.db, _rollback_callback, fn) def set_update_hook(self, fn): if not self.conn.initialized or not self.conn.db: return self._update_hook = fn if fn is None: sqlite3_update_hook(self.conn.db, NULL, NULL) else: sqlite3_update_hook(self.conn.db, _update_callback, fn) def set_busy_handler(self, timeout=5): """ Replace the default busy handler with one that introduces some "jitter" into the amount of time delayed between checks. """ if not self.conn.initialized or not self.conn.db: return False cdef sqlite3_int64 n = timeout * 1000 sqlite3_busy_handler(self.conn.db, _aggressive_busy_handler, n) return True def changes(self): if self.conn.initialized and self.conn.db: return sqlite3_changes(self.conn.db) def last_insert_rowid(self): if self.conn.initialized and self.conn.db: return sqlite3_last_insert_rowid(self.conn.db) def autocommit(self): if self.conn.initialized and self.conn.db: return sqlite3_get_autocommit(self.conn.db) != 0 cdef int _commit_callback(void *userData) noexcept with gil: # C-callback that delegates to the Python commit handler. If the Python # function raises a ValueError, then the commit is aborted and the # transaction rolled back. Otherwise, regardless of the function return # value, the transaction will commit. cdef object fn = userData try: fn() except ValueError: return 1 else: return SQLITE_OK cdef void _rollback_callback(void *userData) noexcept with gil: # C-callback that delegates to the Python rollback handler. cdef object fn = userData fn() cdef void _update_callback(void *userData, int queryType, const char *database, const char *table, sqlite3_int64 rowid) noexcept with gil: # C-callback that delegates to a Python function that is executed whenever # the database is updated (insert/update/delete queries). The Python # callback receives a string indicating the query type, the name of the # database, the name of the table being updated, and the rowid of the row # being updated.
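# Example hook registration (a sketch, assuming the CSqliteExtDatabase
# wrapper that exposes these callbacks):
#
#   @db.on_update
#   def log_change(query, db_name, table, rowid):
#       print('%s on %s.%s (rowid=%s)' % (query, db_name, table, rowid))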
cdef object fn = userData if queryType == SQLITE_INSERT: query = 'INSERT' elif queryType == SQLITE_UPDATE: query = 'UPDATE' elif queryType == SQLITE_DELETE: query = 'DELETE' else: query = '' fn(query, decode(database), decode(table), rowid) def backup(src_conn, dest_conn, pages=None, name=None, progress=None): cdef: bytes bname = encode(name or 'main') int page_step = pages or -1 int rc pysqlite_Connection *src = src_conn pysqlite_Connection *dest = dest_conn sqlite3 *src_db = src.db sqlite3 *dest_db = dest.db sqlite3_backup *backup if not src_db or not dest_db: raise OperationalError('cannot back up to or from a closed database') # We always backup to the "main" database in the dest db. backup = sqlite3_backup_init(dest_db, b'main', src_db, bname) if backup == NULL: raise OperationalError('Unable to initialize backup.') while True: with nogil: rc = sqlite3_backup_step(backup, page_step) if progress is not None: # Progress-handler is called with (remaining, page count, is done?) remaining = sqlite3_backup_remaining(backup) page_count = sqlite3_backup_pagecount(backup) try: progress(remaining, page_count, rc == SQLITE_DONE) except: sqlite3_backup_finish(backup) raise if rc == SQLITE_BUSY or rc == SQLITE_LOCKED: with nogil: sqlite3_sleep(250) elif rc == SQLITE_DONE: break with nogil: sqlite3_backup_finish(backup) if sqlite3_errcode(dest_db): raise OperationalError('Error backing up database: %s' % sqlite3_errmsg(dest_db)) return True def backup_to_file(src_conn, filename, pages=None, name=None, progress=None): dest_conn = pysqlite.connect(filename) backup(src_conn, dest_conn, pages=pages, name=name, progress=progress) dest_conn.close() return True cdef int _aggressive_busy_handler(void *ptr, int n) noexcept nogil: # In concurrent environments, it often seems that if multiple queries are # kicked off at around the same time, they proceed in lock-step to check # for the availability of the lock. By introducing some "jitter" we can # ensure that this doesn't happen. Furthermore, this function makes more # attempts in the same time period than the default handler. cdef: sqlite3_int64 busyTimeout = ptr int current, total if n < 20: current = 25 - (rand() % 10) # ~20ms total = n * 20 elif n < 40: current = 50 - (rand() % 20) # ~40ms total = 400 + ((n - 20) * 40) else: current = 120 - (rand() % 40) # ~100ms total = 1200 + ((n - 40) * 100) # Estimate the amount of time slept. if total + current > busyTimeout: current = busyTimeout - total if current > 0: sqlite3_sleep(current) return 1 return 0 peewee-3.17.7/playhouse/_sqlite_udf.pyx000066400000000000000000000066631470346076600201640ustar00rootroot00000000000000# cython: language_level=3 import sys from difflib import SequenceMatcher from random import randint IS_PY3K = sys.version_info[0] == 3 # String UDF. def damerau_levenshtein_dist(s1, s2): cdef: int i, j, del_cost, add_cost, sub_cost int s1_len = len(s1), s2_len = len(s2) list one_ago, two_ago, current_row list zeroes = [0] * (s2_len + 1) if IS_PY3K: current_row = list(range(1, s2_len + 2)) else: current_row = range(1, s2_len + 2) current_row[-1] = 0 one_ago = None for i in range(s1_len): two_ago = one_ago one_ago = current_row current_row = list(zeroes) current_row[-1] = i + 1 for j in range(s2_len): del_cost = one_ago[j] + 1 add_cost = current_row[j - 1] + 1 sub_cost = one_ago[j - 1] + (s1[i] != s2[j]) current_row[j] = min(del_cost, add_cost, sub_cost) # Handle transpositions.
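# (e.g. 'ab' -> 'ba' is counted as a single edit rather than two.)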
if (i > 0 and j > 0 and s1[i] == s2[j - 1] and s1[i-1] == s2[j] and s1[i] != s2[j]): current_row[j] = min(current_row[j], two_ago[j - 2] + 1) return current_row[s2_len - 1] # String UDF. def levenshtein_dist(a, b): cdef: int add, delete, change int i, j int n = len(a), m = len(b) list current, previous list zeroes if n > m: a, b = b, a n, m = m, n zeroes = [0] * (m + 1) if IS_PY3K: current = list(range(n + 1)) else: current = range(n + 1) for i in range(1, m + 1): previous = current current = list(zeroes) current[0] = i for j in range(1, n + 1): add = previous[j] + 1 delete = current[j - 1] + 1 change = previous[j - 1] if a[j - 1] != b[i - 1]: change +=1 current[j] = min(add, delete, change) return current[n] # String UDF. def str_dist(a, b): cdef: int t = 0 for i in SequenceMatcher(None, a, b).get_opcodes(): if i[0] == 'equal': continue t = t + max(i[4] - i[3], i[2] - i[1]) return t # Math Aggregate. cdef class median(object): cdef: int ct list items def __init__(self): self.ct = 0 self.items = [] cdef selectKth(self, int k, int s=0, int e=-1): cdef: int idx if e < 0: e = len(self.items) idx = randint(s, e-1) idx = self.partition_k(idx, s, e) if idx > k: return self.selectKth(k, s, idx) elif idx < k: return self.selectKth(k, idx + 1, e) else: return self.items[idx] cdef int partition_k(self, int pi, int s, int e): cdef: int i, x val = self.items[pi] # Swap pivot w/last item. self.items[e - 1], self.items[pi] = self.items[pi], self.items[e - 1] x = s for i in range(s, e): if self.items[i] < val: self.items[i], self.items[x] = self.items[x], self.items[i] x += 1 self.items[x], self.items[e-1] = self.items[e-1], self.items[x] return x def step(self, item): self.items.append(item) self.ct += 1 def finalize(self): if self.ct == 0: return None elif self.ct < 3: return self.items[0] else: return self.selectKth(self.ct // 2) peewee-3.17.7/playhouse/apsw_ext.py000066400000000000000000000116321470346076600173200ustar00rootroot00000000000000""" Peewee integration with APSW, "another python sqlite wrapper". Project page: https://rogerbinns.github.io/apsw/ APSW is a really neat library that provides a thin wrapper on top of SQLite's C interface. Here are just a few reasons to use APSW, taken from the documentation: * APSW gives all functionality of SQLite, including virtual tables, virtual file system, blob i/o, backups and file control. * Connections can be shared across threads without any additional locking. * Transactions are managed explicitly by your code. * APSW can handle nested transactions. * Unicode is handled correctly. * APSW is faster. 
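Example usage (a minimal sketch; the model definition is illustrative):

    from peewee import Model, TextField
    from playhouse.apsw_ext import APSWDatabase

    db = APSWDatabase('my_app.db')

    class Note(Model):
        content = TextField()

        class Meta:
            database = db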
""" import apsw from peewee import * from peewee import __exception_wrapper__ from peewee import BooleanField as _BooleanField from peewee import DateField as _DateField from peewee import DateTimeField as _DateTimeField from peewee import DecimalField as _DecimalField from peewee import Insert from peewee import TimeField as _TimeField from peewee import logger from playhouse.sqlite_ext import SqliteExtDatabase class APSWDatabase(SqliteExtDatabase): server_version = tuple(int(i) for i in apsw.sqlitelibversion().split('.')) def __init__(self, database, **kwargs): self._modules = {} super(APSWDatabase, self).__init__(database, **kwargs) def register_module(self, mod_name, mod_inst): self._modules[mod_name] = mod_inst if not self.is_closed(): self.connection().createmodule(mod_name, mod_inst) def unregister_module(self, mod_name): del(self._modules[mod_name]) def _connect(self): conn = apsw.Connection(self.database, **self.connect_params) if self._timeout is not None: conn.setbusytimeout(self._timeout * 1000) try: self._add_conn_hooks(conn) except: conn.close() raise return conn def _add_conn_hooks(self, conn): super(APSWDatabase, self)._add_conn_hooks(conn) self._load_modules(conn) # APSW-only. def _load_modules(self, conn): for mod_name, mod_inst in self._modules.items(): conn.createmodule(mod_name, mod_inst) return conn def _load_aggregates(self, conn): for name, (klass, num_params) in self._aggregates.items(): def make_aggregate(): return (klass(), klass.step, klass.finalize) conn.createaggregatefunction(name, make_aggregate) def _load_collations(self, conn): for name, fn in self._collations.items(): conn.createcollation(name, fn) def _load_functions(self, conn): for name, (fn, num_params, deterministic) in self._functions.items(): args = (deterministic,) if deterministic else () conn.createscalarfunction(name, fn, num_params, *args) def _load_extensions(self, conn): conn.enableloadextension(True) for extension in self._extensions: conn.loadextension(extension) def load_extension(self, extension): self._extensions.add(extension) if not self.is_closed(): conn = self.connection() conn.enableloadextension(True) conn.loadextension(extension) def last_insert_id(self, cursor, query_type=None): if not self.returning_clause: return cursor.getconnection().last_insert_rowid() elif query_type == Insert.SIMPLE: try: return cursor[0][0] except (AttributeError, IndexError, TypeError): pass return cursor def rows_affected(self, cursor): try: return cursor.getconnection().changes() except AttributeError: return cursor.cursor.getconnection().changes() # RETURNING query. 
def begin(self, lock_type='deferred'): self.cursor().execute('begin %s;' % lock_type) def commit(self): with __exception_wrapper__: curs = self.cursor() if curs.getconnection().getautocommit(): return False curs.execute('commit;') return True def rollback(self): with __exception_wrapper__: curs = self.cursor() if curs.getconnection().getautocommit(): return False curs.execute('rollback;') return True def execute_sql(self, sql, params=None): logger.debug((sql, params)) with __exception_wrapper__: cursor = self.cursor() cursor.execute(sql, params or ()) return cursor def nh(s, v): if v is not None: return str(v) class BooleanField(_BooleanField): def db_value(self, v): v = super(BooleanField, self).db_value(v) if v is not None: return v and 1 or 0 class DateField(_DateField): db_value = nh class TimeField(_TimeField): db_value = nh class DateTimeField(_DateTimeField): db_value = nh class DecimalField(_DecimalField): db_value = nh peewee-3.17.7/playhouse/cockroachdb.py000066400000000000000000000217211470346076600177300ustar00rootroot00000000000000import functools import re import sys from peewee import * from peewee import _atomic from peewee import _manual from peewee import ColumnMetadata # (name, data_type, null, primary_key, table, default) from peewee import EnclosedNodeList from peewee import Entity from peewee import ForeignKeyMetadata # (column, dest_table, dest_column, table). from peewee import IndexMetadata from peewee import NodeList from playhouse.pool import _PooledPostgresqlDatabase try: from playhouse.postgres_ext import ArrayField from playhouse.postgres_ext import BinaryJSONField from playhouse.postgres_ext import IntervalField JSONField = BinaryJSONField except ImportError: # psycopg2 not installed, ignore. ArrayField = BinaryJSONField = IntervalField = JSONField = None if sys.version_info[0] > 2: basestring = str NESTED_TX_MIN_VERSION = 200100 TXN_ERR_MSG = ('CockroachDB does not support nested transactions. You may ' 'alternatively use the @transaction context-manager/decorator, ' 'which only wraps the outer-most block in transactional logic. ' 'To run a transaction with automatic retries, use the ' 'run_transaction() helper.') class ExceededMaxAttempts(OperationalError): pass class UUIDKeyField(UUIDField): auto_increment = True def __init__(self, *args, **kwargs): if kwargs.get('constraints'): raise ValueError('%s cannot specify constraints.' % type(self)) kwargs['constraints'] = [SQL('DEFAULT gen_random_uuid()')] kwargs.setdefault('primary_key', True) super(UUIDKeyField, self).__init__(*args, **kwargs) class RowIDField(AutoField): field_type = 'INT' def __init__(self, *args, **kwargs): if kwargs.get('constraints'): raise ValueError('%s cannot specify constraints.' % type(self)) kwargs['constraints'] = [SQL('DEFAULT unique_rowid()')] super(RowIDField, self).__init__(*args, **kwargs) class CockroachDatabase(PostgresqlDatabase): field_types = PostgresqlDatabase.field_types.copy() field_types.update({ 'BLOB': 'BYTES', }) release_after_rollback = True def __init__(self, database, *args, **kwargs): # Unless a DSN or database connection-url were specified, provide # convenient defaults for the user and port. 
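# For example, CockroachDatabase('my_db') will connect as user 'root' on
# port 26257 unless these are given explicitly.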
if 'dsn' not in kwargs and (database and not database.startswith('postgresql://')): kwargs.setdefault('user', 'root') kwargs.setdefault('port', 26257) super(CockroachDatabase, self).__init__(database, *args, **kwargs) def _set_server_version(self, conn): curs = conn.cursor() curs.execute('select version()') raw, = curs.fetchone() match_obj = re.match(r'^CockroachDB.+?v(\d+)\.(\d+)\.(\d+)', raw) if match_obj is not None: clean = '%d%02d%02d' % tuple(int(i) for i in match_obj.groups()) self.server_version = int(clean) # 19.1.5 -> 190105. else: # Fallback to use whatever cockroachdb tells us via protocol. super(CockroachDatabase, self)._set_server_version(conn) def _get_pk_constraint(self, table, schema=None): query = ('SELECT constraint_name ' 'FROM information_schema.table_constraints ' 'WHERE table_name = %s AND table_schema = %s ' 'AND constraint_type = %s') cursor = self.execute_sql(query, (table, schema or 'public', 'PRIMARY KEY')) row = cursor.fetchone() return row and row[0] or None def get_indexes(self, table, schema=None): # The primary-key index is returned by default, so we will just strip # it out here. indexes = super(CockroachDatabase, self).get_indexes(table, schema) pkc = self._get_pk_constraint(table, schema) return [idx for idx in indexes if (not pkc) or (idx.name != pkc)] def conflict_statement(self, on_conflict, query): if not on_conflict._action: return action = on_conflict._action.lower() if action in ('replace', 'upsert'): return SQL('UPSERT') elif action not in ('ignore', 'nothing', 'update'): raise ValueError('Un-supported action for conflict resolution. ' 'CockroachDB supports REPLACE (UPSERT), IGNORE ' 'and UPDATE.') def conflict_update(self, oc, query): action = oc._action.lower() if oc._action else '' if action in ('ignore', 'nothing'): parts = [SQL('ON CONFLICT')] if oc._conflict_target: parts.append(EnclosedNodeList([ Entity(col) if isinstance(col, basestring) else col for col in oc._conflict_target])) parts.append(SQL('DO NOTHING')) return NodeList(parts) elif action in ('replace', 'upsert'): # No special stuff is necessary, this is just indicated by starting # the statement with UPSERT instead of INSERT. return elif oc._conflict_constraint: raise ValueError('CockroachDB does not support the usage of a ' 'constraint name. Use the column(s) instead.') return super(CockroachDatabase, self).conflict_update(oc, query) def extract_date(self, date_part, date_field): return fn.extract(date_part, date_field) def from_timestamp(self, date_field): # CRDB does not allow casting a decimal/float to timestamp, so we first # cast to int, then to timestamptz. return date_field.cast('int').cast('timestamptz') def begin(self, system_time=None, priority=None): super(CockroachDatabase, self).begin() if system_time is not None: self.cursor().execute('SET TRANSACTION AS OF SYSTEM TIME %s', (system_time,)) if priority is not None: priority = priority.lower() if priority not in ('low', 'normal', 'high'): raise ValueError('priority must be low, normal or high') self.cursor().execute('SET TRANSACTION PRIORITY %s' % priority) def atomic(self, system_time=None, priority=None): if self.is_closed(): self.connect() # Side-effect, set server version. if self.server_version < NESTED_TX_MIN_VERSION: return _crdb_atomic(self, system_time, priority) return super(CockroachDatabase, self).atomic(system_time, priority) def savepoint(self): if self.is_closed(): self.connect() # Side-effect, set server version. 
if self.server_version < NESTED_TX_MIN_VERSION: raise NotImplementedError(TXN_ERR_MSG) return super(CockroachDatabase, self).savepoint() def retry_transaction(self, max_attempts=None, system_time=None, priority=None): def deco(cb): @functools.wraps(cb) def new_fn(): return run_transaction(self, cb, max_attempts, system_time, priority) return new_fn return deco def run_transaction(self, cb, max_attempts=None, system_time=None, priority=None): return run_transaction(self, cb, max_attempts, system_time, priority) class _crdb_atomic(_atomic): def __enter__(self): if self.db.transaction_depth() > 0: if not isinstance(self.db.top_transaction(), _manual): raise NotImplementedError(TXN_ERR_MSG) return super(_crdb_atomic, self).__enter__() def run_transaction(db, callback, max_attempts=None, system_time=None, priority=None): """ Run transactional SQL in a transaction with automatic retries. User-provided `callback`: * Must accept one parameter, the `db` instance representing the connection the transaction is running under. * Must not attempt to commit, rollback or otherwise manage transactions. * May be called more than once. * Should ideally only contain SQL operations. Additionally, the database must not have any open transaction at the time this function is called, as CRDB does not support nested transactions. """ max_attempts = max_attempts or -1 with db.atomic(system_time=system_time, priority=priority) as txn: db.execute_sql('SAVEPOINT cockroach_restart') while max_attempts != 0: try: result = callback(db) db.execute_sql('RELEASE SAVEPOINT cockroach_restart') return result except OperationalError as exc: if exc.orig.pgcode == '40001': max_attempts -= 1 db.execute_sql('ROLLBACK TO SAVEPOINT cockroach_restart') continue raise raise ExceededMaxAttempts(None, 'unable to commit transaction') class PooledCockroachDatabase(_PooledPostgresqlDatabase, CockroachDatabase): pass peewee-3.17.7/playhouse/dataset.py000066400000000000000000000342301470346076600171120ustar00rootroot00000000000000import csv import datetime from decimal import Decimal import json import operator try: from urlparse import urlparse except ImportError: from urllib.parse import urlparse import sys import uuid from peewee import * from playhouse.db_url import connect from playhouse.migrate import migrate from playhouse.migrate import SchemaMigrator from playhouse.reflection import Introspector if sys.version_info[0] == 3: basestring = str from functools import reduce def open_file(f, mode, encoding='utf8'): return open(f, mode, encoding=encoding) else: def open_file(f, mode, encoding='utf8'): return open(f, mode) class DataSet(object): def __init__(self, url, include_views=False, **kwargs): if isinstance(url, Database): self._url = None self._database = url self._database_path = self._database.database else: self._url = url parse_result = urlparse(url) self._database_path = parse_result.path[1:] # Connect to the database. self._database = connect(url) # Open a connection if one does not already exist. self._database.connect(reuse_if_open=True) # Introspect the database and generate models. 
self._introspector = Introspector.from_database(self._database) self._include_views = include_views self._models = self._introspector.generate_models( skip_invalid=True, literal_column_names=True, include_views=self._include_views, **kwargs) self._migrator = SchemaMigrator.from_database(self._database) class BaseModel(Model): class Meta: database = self._database self._base_model = BaseModel self._export_formats = self.get_export_formats() self._import_formats = self.get_import_formats() def __repr__(self): return '' % self._database_path def get_export_formats(self): return { 'csv': CSVExporter, 'json': JSONExporter, 'tsv': TSVExporter} def get_import_formats(self): return { 'csv': CSVImporter, 'json': JSONImporter, 'tsv': TSVImporter} def __getitem__(self, table): if table not in self._models and table in self.tables: self.update_cache(table) return Table(self, table, self._models.get(table)) @property def tables(self): tables = self._database.get_tables() if self._include_views: tables += self.views return tables @property def views(self): return [v.name for v in self._database.get_views()] def __contains__(self, table): return table in self.tables def connect(self, reuse_if_open=False): self._database.connect(reuse_if_open=reuse_if_open) def close(self): self._database.close() def update_cache(self, table=None): if table: dependencies = [table] if table in self._models: model_class = self._models[table] dependencies.extend([ related._meta.table_name for _, related, _ in model_class._meta.model_graph()]) else: dependencies.extend(self.get_table_dependencies(table)) else: dependencies = None # Update all tables. self._models = {} updated = self._introspector.generate_models( skip_invalid=True, table_names=dependencies, literal_column_names=True, include_views=self._include_views) self._models.update(updated) def get_table_dependencies(self, table): stack = [table] accum = [] seen = set() while stack: table = stack.pop() for fk_meta in self._database.get_foreign_keys(table): dest = fk_meta.dest_table if dest not in seen: stack.append(dest) accum.append(dest) return accum def __enter__(self): self.connect() return self def __exit__(self, exc_type, exc_val, exc_tb): if not self._database.is_closed(): self.close() def query(self, sql, params=None): return self._database.execute_sql(sql, params) def transaction(self): return self._database.atomic() def _check_arguments(self, filename, file_obj, format, format_dict): if filename and file_obj: raise ValueError('file is over-specified. Please use either ' 'filename or file_obj, but not both.') if not filename and not file_obj: raise ValueError('A filename or file-like object must be ' 'specified.') if format not in format_dict: valid_formats = ', '.join(sorted(format_dict.keys())) raise ValueError('Unsupported format "%s". Use one of %s.' 
% ( format, valid_formats)) def freeze(self, query, format='csv', filename=None, file_obj=None, encoding='utf8', **kwargs): self._check_arguments(filename, file_obj, format, self._export_formats) if filename: file_obj = open_file(filename, 'w', encoding) exporter = self._export_formats[format](query) exporter.export(file_obj, **kwargs) if filename: file_obj.close() def thaw(self, table, format='csv', filename=None, file_obj=None, strict=False, encoding='utf8', **kwargs): self._check_arguments(filename, file_obj, format, self._export_formats) if filename: file_obj = open_file(filename, 'r', encoding) importer = self._import_formats[format](self[table], strict) count = importer.load(file_obj, **kwargs) if filename: file_obj.close() return count class Table(object): def __init__(self, dataset, name, model_class): self.dataset = dataset self.name = name if model_class is None: model_class = self._create_model() model_class.create_table() self.dataset._models[name] = model_class @property def model_class(self): return self.dataset._models[self.name] def __repr__(self): return '' % self.name def __len__(self): return self.find().count() def __iter__(self): return iter(self.find().iterator()) def _create_model(self): class Meta: table_name = self.name return type( str(self.name), (self.dataset._base_model,), {'Meta': Meta}) def create_index(self, columns, unique=False): index = ModelIndex(self.model_class, columns, unique=unique) self.model_class.add_index(index) self.dataset._database.execute(index) def _guess_field_type(self, value): if isinstance(value, basestring): return TextField if isinstance(value, (datetime.date, datetime.datetime)): return DateTimeField elif value is True or value is False: return BooleanField elif isinstance(value, int): return IntegerField elif isinstance(value, float): return FloatField elif isinstance(value, Decimal): return DecimalField return TextField @property def columns(self): return [f.name for f in self.model_class._meta.sorted_fields] def _migrate_new_columns(self, data): new_keys = set(data) - set(self.model_class._meta.fields) new_keys -= set(self.model_class._meta.columns) if new_keys: operations = [] for key in new_keys: field_class = self._guess_field_type(data[key]) field = field_class(null=True) operations.append( self.dataset._migrator.add_column(self.name, key, field)) field.bind(self.model_class, key) migrate(*operations) self.dataset.update_cache(self.name) def __getitem__(self, item): try: return self.model_class[item] except self.model_class.DoesNotExist: pass def __setitem__(self, item, value): if not isinstance(value, dict): raise ValueError('Table.__setitem__() value must be a dict') pk = self.model_class._meta.primary_key value[pk.name] = item try: with self.dataset.transaction() as txn: self.insert(**value) except IntegrityError: self.dataset.update_cache(self.name) self.update(columns=[pk.name], **value) def __delitem__(self, item): del self.model_class[item] def insert(self, **data): self._migrate_new_columns(data) return self.model_class.insert(**data).execute() def _apply_where(self, query, filters, conjunction=None): conjunction = conjunction or operator.and_ if filters: expressions = [ (self.model_class._meta.fields[column] == value) for column, value in filters.items()] query = query.where(reduce(conjunction, expressions)) return query def update(self, columns=None, conjunction=None, **data): self._migrate_new_columns(data) filters = {} if columns: for column in columns: filters[column] = data.pop(column) return self._apply_where( 
self.model_class.update(**data), filters, conjunction).execute() def _query(self, **query): return self._apply_where(self.model_class.select(), query) def find(self, **query): return self._query(**query).dicts() def find_one(self, **query): try: return self.find(**query).get() except self.model_class.DoesNotExist: return None def all(self): return self.find() def delete(self, **query): return self._apply_where(self.model_class.delete(), query).execute() def freeze(self, *args, **kwargs): return self.dataset.freeze(self.all(), *args, **kwargs) def thaw(self, *args, **kwargs): return self.dataset.thaw(self.name, *args, **kwargs) class Exporter(object): def __init__(self, query): self.query = query def export(self, file_obj): raise NotImplementedError class JSONExporter(Exporter): def __init__(self, query, iso8601_datetimes=False): super(JSONExporter, self).__init__(query) self.iso8601_datetimes = iso8601_datetimes def _make_default(self): datetime_types = (datetime.datetime, datetime.date, datetime.time) if self.iso8601_datetimes: def default(o): if isinstance(o, datetime_types): return o.isoformat() elif isinstance(o, (Decimal, uuid.UUID)): return str(o) raise TypeError('Unable to serialize %r as JSON' % o) else: def default(o): if isinstance(o, datetime_types + (Decimal, uuid.UUID)): return str(o) raise TypeError('Unable to serialize %r as JSON' % o) return default def export(self, file_obj, **kwargs): json.dump( list(self.query), file_obj, default=self._make_default(), **kwargs) class CSVExporter(Exporter): def export(self, file_obj, header=True, **kwargs): writer = csv.writer(file_obj, **kwargs) tuples = self.query.tuples().execute() tuples.initialize() if header and getattr(tuples, 'columns', None): writer.writerow([column for column in tuples.columns]) for row in tuples: writer.writerow(row) class TSVExporter(CSVExporter): def export(self, file_obj, header=True, **kwargs): kwargs.setdefault('delimiter', '\t') return super(TSVExporter, self).export(file_obj, header, **kwargs) class Importer(object): def __init__(self, table, strict=False): self.table = table self.strict = strict model = self.table.model_class self.columns = model._meta.columns self.columns.update(model._meta.fields) def load(self, file_obj): raise NotImplementedError class JSONImporter(Importer): def load(self, file_obj, **kwargs): data = json.load(file_obj, **kwargs) count = 0 for row in data: if self.strict: obj = {} for key in row: field = self.columns.get(key) if field is not None: obj[field.name] = field.python_value(row[key]) else: obj = row if obj: self.table.insert(**obj) count += 1 return count class CSVImporter(Importer): def load(self, file_obj, header=True, **kwargs): count = 0 reader = csv.reader(file_obj, **kwargs) if header: try: header_keys = next(reader) except StopIteration: return count if self.strict: header_fields = [] for idx, key in enumerate(header_keys): if key in self.columns: header_fields.append((idx, self.columns[key])) else: header_fields = list(enumerate(header_keys)) else: header_fields = list(enumerate(self.model._meta.sorted_fields)) if not header_fields: return count for row in reader: obj = {} for idx, field in header_fields: if self.strict: obj[field.name] = field.python_value(row[idx]) else: obj[field] = row[idx] self.table.insert(**obj) count += 1 return count class TSVImporter(CSVImporter): def load(self, file_obj, header=True, **kwargs): kwargs.setdefault('delimiter', '\t') return super(TSVImporter, self).load(file_obj, header, **kwargs) 
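# Example usage of the DataSet/Table APIs above (a sketch; the table and
# column names are illustrative):
#
#   from playhouse.dataset import DataSet
#
#   db = DataSet('sqlite:///:memory:')
#   users = db['users']                       # Table is created on demand.
#   users.insert(name='Huey', species='cat')  # New columns are auto-migrated.
#   huey = users.find_one(name='Huey')
#   db.freeze(users.all(), format='json', filename='users.json')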
peewee-3.17.7/playhouse/db_url.py000066400000000000000000000104061470346076600167330ustar00rootroot00000000000000try: from urlparse import parse_qsl, unquote, urlparse except ImportError: from urllib.parse import parse_qsl, unquote, urlparse from peewee import * from playhouse.cockroachdb import CockroachDatabase from playhouse.cockroachdb import PooledCockroachDatabase from playhouse.pool import PooledMySQLDatabase from playhouse.pool import PooledPostgresqlDatabase from playhouse.pool import PooledPsycopg3Database from playhouse.pool import PooledSqliteDatabase from playhouse.pool import PooledSqliteExtDatabase from playhouse.psycopg3_ext import Psycopg3Database from playhouse.sqlite_ext import SqliteExtDatabase schemes = { 'cockroachdb': CockroachDatabase, 'cockroachdb+pool': PooledCockroachDatabase, 'crdb': CockroachDatabase, 'crdb+pool': PooledCockroachDatabase, 'mysql': MySQLDatabase, 'mysql+pool': PooledMySQLDatabase, 'postgres': PostgresqlDatabase, 'postgresql': PostgresqlDatabase, 'postgres+pool': PooledPostgresqlDatabase, 'postgresql+pool': PooledPostgresqlDatabase, 'psycopg3': Psycopg3Database, 'psycopg3+pool': PooledPsycopg3Database, 'sqlite': SqliteDatabase, 'sqliteext': SqliteExtDatabase, 'sqlite+pool': PooledSqliteDatabase, 'sqliteext+pool': PooledSqliteExtDatabase, } def register_database(db_class, *names): global schemes for name in names: schemes[name] = db_class def parseresult_to_dict(parsed, unquote_password=False): # urlparse in python 2.6 is broken so query will be empty and instead # appended to path complete with '?' path = parsed.path[1:] # Ignore leading '/'. query = parsed.query connect_kwargs = {'database': path} if parsed.username: connect_kwargs['user'] = parsed.username if parsed.password: connect_kwargs['password'] = parsed.password if unquote_password: connect_kwargs['password'] = unquote(connect_kwargs['password']) if parsed.hostname: connect_kwargs['host'] = parsed.hostname if parsed.port: connect_kwargs['port'] = parsed.port # Adjust parameters for MySQL. if parsed.scheme == 'mysql' and 'password' in connect_kwargs: connect_kwargs['passwd'] = connect_kwargs.pop('password') elif 'sqlite' in parsed.scheme and not connect_kwargs['database']: connect_kwargs['database'] = ':memory:' # Get additional connection args from the query string qs_args = parse_qsl(query, keep_blank_values=True) for key, value in qs_args: if value.lower() == 'false': value = False elif value.lower() == 'true': value = True elif value.isdigit(): value = int(value) elif '.' in value and all(p.isdigit() for p in value.split('.', 1)): try: value = float(value) except ValueError: pass elif value.lower() in ('null', 'none'): value = None connect_kwargs[key] = value return connect_kwargs def parse(url, unquote_password=False): parsed = urlparse(url) return parseresult_to_dict(parsed, unquote_password) def connect(url, unquote_password=False, **connect_params): parsed = urlparse(url) connect_kwargs = parseresult_to_dict(parsed, unquote_password) connect_kwargs.update(connect_params) database_class = schemes.get(parsed.scheme) if database_class is None: if database_class in schemes: raise RuntimeError('Attempted to use "%s" but a required library ' 'could not be imported.' % parsed.scheme) else: raise RuntimeError('Unrecognized or unsupported scheme: "%s".' % parsed.scheme) return database_class(**connect_kwargs) # Conditionally register additional databases. 
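# Third-party or optional Database classes can be exposed to connect() the
# same way (a sketch; the 'mydb' scheme and MyDatabase class are
# illustrative):
#
#   from playhouse.db_url import connect, register_database
#
#   register_database(MyDatabase, 'mydb')
#   db = connect('mydb://user@localhost:1234/app_db')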
try: from playhouse.pool import PooledPostgresqlExtDatabase except ImportError: pass else: register_database( PooledPostgresqlExtDatabase, 'postgresext+pool', 'postgresqlext+pool') try: from playhouse.apsw_ext import APSWDatabase except ImportError: pass else: register_database(APSWDatabase, 'apsw') try: from playhouse.postgres_ext import PostgresqlExtDatabase except ImportError: pass else: register_database(PostgresqlExtDatabase, 'postgresext', 'postgresqlext') peewee-3.17.7/playhouse/fields.py000066400000000000000000000032431470346076600167330ustar00rootroot00000000000000try: import bz2 except ImportError: bz2 = None try: import zlib except ImportError: zlib = None try: import cPickle as pickle except ImportError: import pickle from peewee import BlobField from peewee import buffer_type class CompressedField(BlobField): ZLIB = 'zlib' BZ2 = 'bz2' algorithm_to_import = { ZLIB: zlib, BZ2: bz2, } def __init__(self, compression_level=6, algorithm=ZLIB, *args, **kwargs): self.compression_level = compression_level if algorithm not in self.algorithm_to_import: raise ValueError('Unrecognized algorithm %s' % algorithm) compress_module = self.algorithm_to_import[algorithm] if compress_module is None: raise ValueError('Missing library required for %s.' % algorithm) self.algorithm = algorithm self.compress = compress_module.compress self.decompress = compress_module.decompress super(CompressedField, self).__init__(*args, **kwargs) def python_value(self, value): if value is not None: return self.decompress(value) def db_value(self, value): if value is not None: return self._constructor( self.compress(value, self.compression_level)) class PickleField(BlobField): def python_value(self, value): if value is not None: if isinstance(value, buffer_type): value = bytes(value) return pickle.loads(value) def db_value(self, value): if value is not None: pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL) return self._constructor(pickled) peewee-3.17.7/playhouse/flask_utils.py000066400000000000000000000200051470346076600200000ustar00rootroot00000000000000import math import sys from flask import abort from flask import render_template from flask import request from peewee import Database from peewee import DoesNotExist from peewee import Model from peewee import Proxy from peewee import SelectQuery from playhouse.db_url import connect as db_url_connect class PaginatedQuery(object): def __init__(self, query_or_model, paginate_by, page_var='page', page=None, check_bounds=False): self.paginate_by = paginate_by self.page_var = page_var self.page = page or None self.check_bounds = check_bounds if isinstance(query_or_model, SelectQuery): self.query = query_or_model self.model = self.query.model else: self.model = query_or_model self.query = self.model.select() def get_page(self): if self.page is not None: return self.page curr_page = request.args.get(self.page_var) if curr_page and curr_page.isdigit(): return max(1, int(curr_page)) return 1 def get_page_count(self): if not hasattr(self, '_page_count'): self._page_count = int(math.ceil( float(self.query.count()) / self.paginate_by)) return self._page_count def get_object_list(self): if self.check_bounds and self.get_page() > self.get_page_count(): abort(404) return self.query.paginate(self.get_page(), self.paginate_by) def get_page_range(self, page, total, show=5): # Generate page buttons for a subset of pages, e.g. 
if the current page # is 4, we have 10 pages, and want to show 5 buttons, this function # returns us: [2, 3, 4, 5, 6] start = max((page - (show // 2)), 1) stop = min(start + show, total) + 1 start = max(min(start, stop - show), 1) return list(range(start, stop)[:show]) def get_object_or_404(query_or_model, *query): if not isinstance(query_or_model, SelectQuery): query_or_model = query_or_model.select() try: return query_or_model.where(*query).get() except DoesNotExist: abort(404) def object_list(template_name, query, context_variable='object_list', paginate_by=20, page_var='page', page=None, check_bounds=True, **kwargs): paginated_query = PaginatedQuery( query, paginate_by=paginate_by, page_var=page_var, page=page, check_bounds=check_bounds) kwargs[context_variable] = paginated_query.get_object_list() return render_template( template_name, pagination=paginated_query, page=paginated_query.get_page(), **kwargs) def get_current_url(): if not request.query_string: return request.path return '%s?%s' % (request.path, request.query_string) def get_next_url(default='/'): if request.args.get('next'): return request.args['next'] elif request.form.get('next'): return request.form['next'] return default class FlaskDB(object): """ Convenience wrapper for configuring a Peewee database for use with a Flask application. Provides a base `Model` class and registers handlers to manage the database connection during the request/response cycle. Usage:: from flask import Flask from peewee import * from playhouse.flask_utils import FlaskDB # The database can be specified using a database URL, or you can pass a # Peewee database instance directly: DATABASE = 'postgresql:///my_app' DATABASE = PostgresqlDatabase('my_app') # If we do not want connection-management on any views, we can specify # the view names using FLASKDB_EXCLUDED_ROUTES. The db connection will # not be opened/closed automatically when these views are requested: FLASKDB_EXCLUDED_ROUTES = ('logout',) app = Flask(__name__) app.config.from_object(__name__) # Now we can configure our FlaskDB: flask_db = FlaskDB(app) # Or use the "deferred initialization" pattern: flask_db = FlaskDB() flask_db.init_app(app) # The `flask_db` provides a base Model-class for easily binding models # to the configured database: class User(flask_db.Model): email = CharField() """ def __init__(self, app=None, database=None, model_class=Model, excluded_routes=None): self.database = None # Reference to actual Peewee database instance. self.base_model_class = model_class self._app = app self._db = database # dict, url, Database, or None (default). self._excluded_routes = excluded_routes or () if app is not None: self.init_app(app) def init_app(self, app): self._app = app if self._db is None: if 'DATABASE' in app.config: initial_db = app.config['DATABASE'] elif 'DATABASE_URL' in app.config: initial_db = app.config['DATABASE_URL'] else: raise ValueError('Missing required configuration data for ' 'database: DATABASE or DATABASE_URL.') else: initial_db = self._db if 'FLASKDB_EXCLUDED_ROUTES' in app.config: self._excluded_routes = app.config['FLASKDB_EXCLUDED_ROUTES'] self._load_database(app, initial_db) self._register_handlers(app) def _load_database(self, app, config_value): if isinstance(config_value, Database): database = config_value elif isinstance(config_value, dict): database = self._load_from_config_dict(dict(config_value)) else: # Assume a database connection URL. 
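            # For reference, the three DATABASE shapes accepted by this
            # method (values illustrative):
            #   DATABASE = PostgresqlDatabase('my_app')            # instance
            #   DATABASE = {'name': 'my_app',
            #               'engine': 'peewee.PostgresqlDatabase'} # dict
            #   DATABASE = 'postgresql:///my_app'                  # URL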
database = db_url_connect(config_value) if isinstance(self.database, Proxy): self.database.initialize(database) else: self.database = database def _load_from_config_dict(self, config_dict): try: name = config_dict.pop('name') engine = config_dict.pop('engine') except KeyError: raise RuntimeError('DATABASE configuration must specify a ' '`name` and `engine`.') if '.' in engine: path, class_name = engine.rsplit('.', 1) else: path, class_name = 'peewee', engine try: __import__(path) module = sys.modules[path] database_class = getattr(module, class_name) assert issubclass(database_class, Database) except ImportError: raise RuntimeError('Unable to import %s' % engine) except AttributeError: raise RuntimeError('Database engine not found %s' % engine) except AssertionError: raise RuntimeError('Database engine not a subclass of ' 'peewee.Database: %s' % engine) return database_class(name, **config_dict) def _register_handlers(self, app): app.before_request(self.connect_db) app.teardown_request(self.close_db) def get_model_class(self): if self.database is None: raise RuntimeError('Database must be initialized.') class BaseModel(self.base_model_class): class Meta: database = self.database return BaseModel @property def Model(self): if self._app is None: database = getattr(self, 'database', None) if database is None: self.database = Proxy() if not hasattr(self, '_model_class'): self._model_class = self.get_model_class() return self._model_class def connect_db(self): if self._excluded_routes and request.endpoint in self._excluded_routes: return self.database.connect() def close_db(self, exc): if self._excluded_routes and request.endpoint in self._excluded_routes: return if not self.database.is_closed(): self.database.close() peewee-3.17.7/playhouse/hybrid.py000066400000000000000000000027701470346076600167520ustar00rootroot00000000000000from peewee import ModelDescriptor # Hybrid methods/attributes, based on similar functionality in SQLAlchemy: # http://docs.sqlalchemy.org/en/improve_toc/orm/extensions/hybrid.html class hybrid_method(ModelDescriptor): def __init__(self, func, expr=None): self.func = func self.expr = expr or func def __get__(self, instance, instance_type): if instance is None: return self.expr.__get__(instance_type, instance_type.__class__) return self.func.__get__(instance, instance_type) def expression(self, expr): self.expr = expr return self class hybrid_property(ModelDescriptor): def __init__(self, fget, fset=None, fdel=None, expr=None): self.fget = fget self.fset = fset self.fdel = fdel self.expr = expr or fget def __get__(self, instance, instance_type): if instance is None: return self.expr(instance_type) return self.fget(instance) def __set__(self, instance, value): if self.fset is None: raise AttributeError('Cannot set attribute.') self.fset(instance, value) def __delete__(self, instance): if self.fdel is None: raise AttributeError('Cannot delete attribute.') self.fdel(instance) def setter(self, fset): self.fset = fset return self def deleter(self, fdel): self.fdel = fdel return self def expression(self, expr): self.expr = expr return self peewee-3.17.7/playhouse/kv.py000066400000000000000000000127501470346076600161100ustar00rootroot00000000000000import operator from peewee import * from peewee import sqlite3 from peewee import Expression from playhouse.fields import PickleField try: from playhouse.sqlite_ext import CSqliteExtDatabase as SqliteExtDatabase except ImportError: from playhouse.sqlite_ext import SqliteExtDatabase Sentinel = type('Sentinel', (object,), {}) class 
KeyValue(object): """ Persistent dictionary. :param Field key_field: field to use for key. Defaults to CharField. :param Field value_field: field to use for value. Defaults to PickleField. :param bool ordered: data should be returned in key-sorted order. :param Database database: database where key/value data is stored. :param str table_name: table name for data. """ def __init__(self, key_field=None, value_field=None, ordered=False, database=None, table_name='keyvalue'): if key_field is None: key_field = CharField(max_length=255, primary_key=True) if not key_field.primary_key: raise ValueError('key_field must have primary_key=True.') if value_field is None: value_field = PickleField() self._key_field = key_field self._value_field = value_field self._ordered = ordered self._database = database or SqliteExtDatabase(':memory:') self._table_name = table_name support_on_conflict = (isinstance(self._database, PostgresqlDatabase) or (isinstance(self._database, SqliteDatabase) and self._database.server_version >= (3, 24))) if support_on_conflict: self.upsert = self._postgres_upsert self.update = self._postgres_update else: self.upsert = self._upsert self.update = self._update self.model = self.create_model() self.key = self.model.key self.value = self.model.value # Ensure table exists. self.model.create_table() def create_model(self): class KeyValue(Model): key = self._key_field value = self._value_field class Meta: database = self._database table_name = self._table_name return KeyValue def query(self, *select): query = self.model.select(*select).tuples() if self._ordered: query = query.order_by(self.key) return query def convert_expression(self, expr): if not isinstance(expr, Expression): return (self.key == expr), True return expr, False def __contains__(self, key): expr, _ = self.convert_expression(key) return self.model.select().where(expr).exists() def __len__(self): return len(self.model) def __getitem__(self, expr): converted, is_single = self.convert_expression(expr) query = self.query(self.value).where(converted) item_getter = operator.itemgetter(0) result = [item_getter(row) for row in query] if len(result) == 0 and is_single: raise KeyError(expr) elif is_single: return result[0] return result def _upsert(self, key, value): (self.model .insert(key=key, value=value) .on_conflict('replace') .execute()) def _postgres_upsert(self, key, value): (self.model .insert(key=key, value=value) .on_conflict(conflict_target=[self.key], preserve=[self.value]) .execute()) def __setitem__(self, expr, value): if isinstance(expr, Expression): self.model.update(value=value).where(expr).execute() else: self.upsert(expr, value) def __delitem__(self, expr): converted, _ = self.convert_expression(expr) self.model.delete().where(converted).execute() def __iter__(self): return iter(self.query().execute()) def keys(self): return map(operator.itemgetter(0), self.query(self.key)) def values(self): return map(operator.itemgetter(0), self.query(self.value)) def items(self): return iter(self.query().execute()) def _update(self, __data=None, **mapping): if __data is not None: mapping.update(__data) return (self.model .insert_many(list(mapping.items()), fields=[self.key, self.value]) .on_conflict('replace') .execute()) def _postgres_update(self, __data=None, **mapping): if __data is not None: mapping.update(__data) return (self.model .insert_many(list(mapping.items()), fields=[self.key, self.value]) .on_conflict(conflict_target=[self.key], preserve=[self.value]) .execute()) def get(self, key, default=None): try: return 
self[key] except KeyError: return default def setdefault(self, key, default=None): try: return self[key] except KeyError: self[key] = default return default def pop(self, key, default=Sentinel): with self._database.atomic(): try: result = self[key] except KeyError: if default is Sentinel: raise return default del self[key] return result def clear(self): self.model.delete().execute() peewee-3.17.7/playhouse/migrate.py000066400000000000000000001007371470346076600171230ustar00rootroot00000000000000""" Lightweight schema migrations. Example Usage ------------- Instantiate a migrator: # Postgres example: my_db = PostgresqlDatabase(...) migrator = PostgresqlMigrator(my_db) # SQLite example: my_db = SqliteDatabase('my_database.db') migrator = SqliteMigrator(my_db) Then you will use the `migrate` function to run various `Operation`s which are generated by the migrator: migrate( migrator.add_column('some_table', 'column_name', CharField(default='')) ) Migrations are not run inside a transaction, so if you wish the migration to run in a transaction you will need to wrap the call to `migrate` in a transaction block, e.g.: with my_db.transaction(): migrate(...) Supported Operations -------------------- Add new field(s) to an existing model: # Create your field instances. For non-null fields you must specify a # default value. pubdate_field = DateTimeField(null=True) comment_field = TextField(default='') # Run the migration, specifying the database table, field name and field. migrate( migrator.add_column('comment_tbl', 'pub_date', pubdate_field), migrator.add_column('comment_tbl', 'comment', comment_field), ) Renaming a field: # Specify the table, original name of the column, and its new name. migrate( migrator.rename_column('story', 'pub_date', 'publish_date'), migrator.rename_column('story', 'mod_date', 'modified_date'), ) Dropping a field: migrate( migrator.drop_column('story', 'some_old_field'), ) Making a field nullable or not nullable: # Note that when making a field not null that field must not have any # NULL values present. migrate( # Make `pub_date` allow NULL values. migrator.drop_not_null('story', 'pub_date'), # Prevent `modified_date` from containing NULL values. migrator.add_not_null('story', 'modified_date'), ) Renaming a table: migrate( migrator.rename_table('story', 'stories_tbl'), ) Adding an index: # Specify the table, column names, and whether the index should be # UNIQUE or not. migrate( # Create an index on the `pub_date` column. migrator.add_index('story', ('pub_date',), False), # Create a multi-column index on the `pub_date` and `status` fields. migrator.add_index('story', ('pub_date', 'status'), False), # Create a unique index on the category and title fields. migrator.add_index('story', ('category_id', 'title'), True), ) Dropping an index: # Specify the index name. migrate(migrator.drop_index('story', 'story_pub_date_status')) Adding or dropping table constraints: .. code-block:: python # Add a CHECK() constraint to enforce the price cannot be negative. migrate(migrator.add_constraint( 'products', 'price_check', Check('price >= 0'))) # Remove the price check constraint. migrate(migrator.drop_constraint('products', 'price_check')) # Add a UNIQUE constraint on the first and last names. 
migrate(migrator.add_unique('person', 'first_name', 'last_name')) """ from collections import namedtuple import functools import hashlib import re from peewee import * from peewee import CommaNodeList from peewee import EnclosedNodeList from peewee import Entity from peewee import Expression from peewee import Node from peewee import NodeList from peewee import OP from peewee import callable_ from peewee import sort_models from peewee import sqlite3 from peewee import _truncate_constraint_name try: from playhouse.cockroachdb import CockroachDatabase except ImportError: CockroachDatabase = None class Operation(object): """Encapsulate a single schema altering operation.""" def __init__(self, migrator, method, *args, **kwargs): self.migrator = migrator self.method = method self.args = args self.kwargs = kwargs def execute(self, node): self.migrator.database.execute(node) def _handle_result(self, result): if isinstance(result, (Node, Context)): self.execute(result) elif isinstance(result, Operation): result.run() elif isinstance(result, (list, tuple)): for item in result: self._handle_result(item) def run(self): kwargs = self.kwargs.copy() kwargs['with_context'] = True method = getattr(self.migrator, self.method) self._handle_result(method(*self.args, **kwargs)) def operation(fn): @functools.wraps(fn) def inner(self, *args, **kwargs): with_context = kwargs.pop('with_context', False) if with_context: return fn(self, *args, **kwargs) return Operation(self, fn.__name__, *args, **kwargs) return inner def make_index_name(table_name, columns): index_name = '_'.join((table_name,) + tuple(columns)) if len(index_name) > 64: index_hash = hashlib.md5(index_name.encode('utf-8')).hexdigest() index_name = '%s_%s' % (index_name[:56], index_hash[:7]) return index_name class SchemaMigrator(object): explicit_create_foreign_key = False explicit_delete_foreign_key = False def __init__(self, database): self.database = database def make_context(self): return self.database.get_sql_context() @classmethod def from_database(cls, database): if CockroachDatabase and isinstance(database, CockroachDatabase): return CockroachDBMigrator(database) elif isinstance(database, PostgresqlDatabase): return PostgresqlMigrator(database) elif isinstance(database, MySQLDatabase): return MySQLMigrator(database) elif isinstance(database, SqliteDatabase): return SqliteMigrator(database) raise ValueError('Unsupported database: %s' % database) @operation def apply_default(self, table, column_name, field): default = field.default if callable_(default): default = default() return (self.make_context() .literal('UPDATE ') .sql(Entity(table)) .literal(' SET ') .sql(Expression( Entity(column_name), OP.EQ, field.db_value(default), flat=True))) def _alter_table(self, ctx, table): return ctx.literal('ALTER TABLE ').sql(Entity(table)) def _alter_column(self, ctx, table, column): return (self ._alter_table(ctx, table) .literal(' ALTER COLUMN ') .sql(Entity(column))) @operation def alter_add_column(self, table, column_name, field): # Make field null at first. ctx = self.make_context() field_null, field.null = field.null, True # Set the field's column-name and name, if it is not set or doesn't # match the new value. 
if field.column_name != column_name: field.name = field.column_name = column_name (self ._alter_table(ctx, table) .literal(' ADD COLUMN ') .sql(field.ddl(ctx))) field.null = field_null if isinstance(field, ForeignKeyField): self.add_inline_fk_sql(ctx, field) return ctx @operation def add_constraint(self, table, name, constraint): return (self ._alter_table(self.make_context(), table) .literal(' ADD CONSTRAINT ') .sql(Entity(name)) .literal(' ') .sql(constraint)) @operation def add_unique(self, table, *column_names): constraint_name = 'uniq_%s' % '_'.join(column_names) constraint = NodeList(( SQL('UNIQUE'), EnclosedNodeList([Entity(column) for column in column_names]))) return self.add_constraint(table, constraint_name, constraint) @operation def drop_constraint(self, table, name): return (self ._alter_table(self.make_context(), table) .literal(' DROP CONSTRAINT ') .sql(Entity(name))) def add_inline_fk_sql(self, ctx, field): ctx = (ctx .literal(' REFERENCES ') .sql(Entity(field.rel_model._meta.table_name)) .literal(' ') .sql(EnclosedNodeList((Entity(field.rel_field.column_name),)))) if field.on_delete is not None: ctx = ctx.literal(' ON DELETE %s' % field.on_delete) if field.on_update is not None: ctx = ctx.literal(' ON UPDATE %s' % field.on_update) return ctx @operation def add_foreign_key_constraint(self, table, column_name, rel, rel_column, on_delete=None, on_update=None): constraint = 'fk_%s_%s_refs_%s' % (table, column_name, rel) ctx = (self .make_context() .literal('ALTER TABLE ') .sql(Entity(table)) .literal(' ADD CONSTRAINT ') .sql(Entity(_truncate_constraint_name(constraint))) .literal(' FOREIGN KEY ') .sql(EnclosedNodeList((Entity(column_name),))) .literal(' REFERENCES ') .sql(Entity(rel)) .literal(' (') .sql(Entity(rel_column)) .literal(')')) if on_delete is not None: ctx = ctx.literal(' ON DELETE %s' % on_delete) if on_update is not None: ctx = ctx.literal(' ON UPDATE %s' % on_update) return ctx @operation def add_column(self, table, column_name, field): # Adding a column is complicated by the fact that if there are rows # present and the field is non-null, then we need to first add the # column as a nullable field, then set the value, then add a not null # constraint. if not field.null and field.default is None: raise ValueError('%s is not null but has no default' % column_name) is_foreign_key = isinstance(field, ForeignKeyField) if is_foreign_key and not field.rel_field: raise ValueError('Foreign keys must specify a `field`.') operations = [self.alter_add_column(table, column_name, field)] # In the event the field is *not* nullable, update with the default # value and set not null. 
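        # For example, adding an IntegerField(default=0) to a populated table
        # runs three operations: ADD COLUMN (as nullable), then
        # UPDATE ... SET <column> = 0, then ALTER COLUMN ... SET NOT NULL.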
if not field.null: operations.extend([ self.apply_default(table, column_name, field), self.add_not_null(table, column_name)]) if is_foreign_key and self.explicit_create_foreign_key: operations.append( self.add_foreign_key_constraint( table, column_name, field.rel_model._meta.table_name, field.rel_field.column_name, field.on_delete, field.on_update)) if field.index or field.unique: using = getattr(field, 'index_type', None) operations.append(self.add_index(table, (column_name,), field.unique, using)) return operations @operation def drop_foreign_key_constraint(self, table, column_name): raise NotImplementedError @operation def drop_column(self, table, column_name, cascade=True): ctx = self.make_context() (self._alter_table(ctx, table) .literal(' DROP COLUMN ') .sql(Entity(column_name))) if cascade: ctx.literal(' CASCADE') fk_columns = [ foreign_key.column for foreign_key in self.database.get_foreign_keys(table)] if column_name in fk_columns and self.explicit_delete_foreign_key: return [self.drop_foreign_key_constraint(table, column_name), ctx] return ctx @operation def rename_column(self, table, old_name, new_name): return (self ._alter_table(self.make_context(), table) .literal(' RENAME COLUMN ') .sql(Entity(old_name)) .literal(' TO ') .sql(Entity(new_name))) @operation def add_not_null(self, table, column): return (self ._alter_column(self.make_context(), table, column) .literal(' SET NOT NULL')) @operation def drop_not_null(self, table, column): return (self ._alter_column(self.make_context(), table, column) .literal(' DROP NOT NULL')) @operation def add_column_default(self, table, column, default): if default is None: raise ValueError('`default` must be not None/NULL.') if callable_(default): default = default() # Try to handle SQL functions and string literals, otherwise pass as a # bound value. if isinstance(default, str) and default.endswith((')', "'")): default = SQL(default) return (self ._alter_table(self.make_context(), table) .literal(' ALTER COLUMN ') .sql(Entity(column)) .literal(' SET DEFAULT ') .sql(default)) @operation def drop_column_default(self, table, column): return (self ._alter_table(self.make_context(), table) .literal(' ALTER COLUMN ') .sql(Entity(column)) .literal(' DROP DEFAULT')) @operation def alter_column_type(self, table, column, field, cast=None): # ALTER TABLE
ALTER COLUMN ctx = self.make_context() ctx = (self ._alter_column(ctx, table, column) .literal(' TYPE ') .sql(field.ddl_datatype(ctx))) if cast is not None: if not isinstance(cast, Node): cast = SQL(cast) ctx = ctx.literal(' USING ').sql(cast) return ctx @operation def rename_table(self, old_name, new_name): return (self ._alter_table(self.make_context(), old_name) .literal(' RENAME TO ') .sql(Entity(new_name))) @operation def add_index(self, table, columns, unique=False, using=None): ctx = self.make_context() index_name = make_index_name(table, columns) table_obj = Table(table) cols = [getattr(table_obj.c, column) for column in columns] index = Index(index_name, table_obj, cols, unique=unique, using=using) return ctx.sql(index) @operation def drop_index(self, table, index_name): return (self .make_context() .literal('DROP INDEX ') .sql(Entity(index_name))) class PostgresqlMigrator(SchemaMigrator): def _primary_key_columns(self, tbl): query = """ SELECT pg_attribute.attname FROM pg_index, pg_class, pg_attribute WHERE pg_class.oid = '%s'::regclass AND indrelid = pg_class.oid AND pg_attribute.attrelid = pg_class.oid AND pg_attribute.attnum = any(pg_index.indkey) AND indisprimary; """ cursor = self.database.execute_sql(query % tbl) return [row[0] for row in cursor.fetchall()] @operation def set_search_path(self, schema_name): return (self .make_context() .literal('SET search_path TO %s' % schema_name)) @operation def rename_table(self, old_name, new_name): pk_names = self._primary_key_columns(old_name) ParentClass = super(PostgresqlMigrator, self) operations = [ ParentClass.rename_table(old_name, new_name, with_context=True)] if len(pk_names) == 1: # Check for existence of primary key sequence. seq_name = '%s_%s_seq' % (old_name, pk_names[0]) query = """ SELECT 1 FROM information_schema.sequences WHERE LOWER(sequence_name) = LOWER(%s) """ cursor = self.database.execute_sql(query, (seq_name,)) if bool(cursor.fetchone()): new_seq_name = '%s_%s_seq' % (new_name, pk_names[0]) operations.append(ParentClass.rename_table( seq_name, new_seq_name)) return operations class CockroachDBMigrator(PostgresqlMigrator): explicit_create_foreign_key = True def add_inline_fk_sql(self, ctx, field): pass @operation def drop_index(self, table, index_name): return (self .make_context() .literal('DROP INDEX ') .sql(Entity(index_name)) .literal(' CASCADE')) class MySQLColumn(namedtuple('_Column', ('name', 'definition', 'null', 'pk', 'default', 'extra'))): @property def is_pk(self): return self.pk == 'PRI' @property def is_unique(self): return self.pk == 'UNI' @property def is_null(self): return self.null == 'YES' def sql(self, column_name=None, is_null=None): if is_null is None: is_null = self.is_null if column_name is None: column_name = self.name parts = [ Entity(column_name), SQL(self.definition)] if self.is_unique: parts.append(SQL('UNIQUE')) if is_null: parts.append(SQL('NULL')) else: parts.append(SQL('NOT NULL')) if self.is_pk: parts.append(SQL('PRIMARY KEY')) if self.extra: parts.append(SQL(self.extra)) return NodeList(parts) class MySQLMigrator(SchemaMigrator): explicit_create_foreign_key = True explicit_delete_foreign_key = True def _alter_column(self, ctx, table, column): return (self ._alter_table(ctx, table) .literal(' MODIFY ') .sql(Entity(column))) @operation def rename_table(self, old_name, new_name): return (self .make_context() .literal('RENAME TABLE ') .sql(Entity(old_name)) .literal(' TO ') .sql(Entity(new_name))) def _get_column_definition(self, table, column_name): cursor = 
self.database.execute_sql('DESCRIBE `%s`;' % table) rows = cursor.fetchall() for row in rows: column = MySQLColumn(*row) if column.name == column_name: return column return False def get_foreign_key_constraint(self, table, column_name): cursor = self.database.execute_sql( ('SELECT constraint_name ' 'FROM information_schema.key_column_usage WHERE ' 'table_schema = DATABASE() AND ' 'table_name = %s AND ' 'column_name = %s AND ' 'referenced_table_name IS NOT NULL AND ' 'referenced_column_name IS NOT NULL;'), (table, column_name)) result = cursor.fetchone() if not result: raise AttributeError( 'Unable to find foreign key constraint for ' '"%s" on table "%s".' % (table, column_name)) return result[0] @operation def drop_foreign_key_constraint(self, table, column_name): fk_constraint = self.get_foreign_key_constraint(table, column_name) return (self ._alter_table(self.make_context(), table) .literal(' DROP FOREIGN KEY ') .sql(Entity(fk_constraint))) def add_inline_fk_sql(self, ctx, field): pass @operation def add_not_null(self, table, column): column_def = self._get_column_definition(table, column) add_not_null = (self ._alter_table(self.make_context(), table) .literal(' MODIFY ') .sql(column_def.sql(is_null=False))) fk_objects = dict( (fk.column, fk) for fk in self.database.get_foreign_keys(table)) if column not in fk_objects: return add_not_null fk_metadata = fk_objects[column] return (self.drop_foreign_key_constraint(table, column), add_not_null, self.add_foreign_key_constraint( table, column, fk_metadata.dest_table, fk_metadata.dest_column)) @operation def drop_not_null(self, table, column): column = self._get_column_definition(table, column) if column.is_pk: raise ValueError('Primary keys can not be null') return (self ._alter_table(self.make_context(), table) .literal(' MODIFY ') .sql(column.sql(is_null=True))) @operation def rename_column(self, table, old_name, new_name): fk_objects = dict( (fk.column, fk) for fk in self.database.get_foreign_keys(table)) is_foreign_key = old_name in fk_objects column = self._get_column_definition(table, old_name) rename_ctx = (self ._alter_table(self.make_context(), table) .literal(' CHANGE ') .sql(Entity(old_name)) .literal(' ') .sql(column.sql(column_name=new_name))) if is_foreign_key: fk_metadata = fk_objects[old_name] return [ self.drop_foreign_key_constraint(table, old_name), rename_ctx, self.add_foreign_key_constraint( table, new_name, fk_metadata.dest_table, fk_metadata.dest_column), ] else: return rename_ctx @operation def alter_column_type(self, table, column, field, cast=None): if cast is not None: raise ValueError('alter_column_type() does not support cast with ' 'MySQL.') ctx = self.make_context() return (self ._alter_table(ctx, table) .literal(' MODIFY ') .sql(Entity(column)) .literal(' ') .sql(field.ddl(ctx))) @operation def drop_index(self, table, index_name): return (self .make_context() .literal('DROP INDEX ') .sql(Entity(index_name)) .literal(' ON ') .sql(Entity(table))) class SqliteMigrator(SchemaMigrator): """ SQLite supports a subset of ALTER TABLE queries, view the docs for the full details http://sqlite.org/lang_altertable.html """ column_re = re.compile(r'(.+?)\((.+)\)') column_split_re = re.compile(r'(?:[^,(]|\([^)]*\))+') column_name_re = re.compile(r'''["`']?([\w]+)''') fk_re = re.compile(r'FOREIGN KEY\s+\("?([\w]+)"?\)\s+', re.I) def _get_column_names(self, table): res = self.database.execute_sql('select * from "%s" limit 1' % table) return [item[0] for item in res.description] def _get_create_table(self, table): res = 
self.database.execute_sql( ('select name, sql from sqlite_master ' 'where type=? and LOWER(name)=?'), ['table', table.lower()]) return res.fetchone() @operation def _update_column(self, table, column_to_update, fn): columns = set(column.name.lower() for column in self.database.get_columns(table)) if column_to_update.lower() not in columns: raise ValueError('Column "%s" does not exist on "%s"' % (column_to_update, table)) # Get the SQL used to create the given table. table, create_table = self._get_create_table(table) # Get the indexes and SQL to re-create indexes. indexes = self.database.get_indexes(table) # Find any foreign keys we may need to remove. self.database.get_foreign_keys(table) # Make sure the create_table does not contain any newlines or tabs, # allowing the regex to work correctly. create_table = re.sub(r'\s+', ' ', create_table) # Parse out the `CREATE TABLE` and column list portions of the query. raw_create, raw_columns = self.column_re.search(create_table).groups() # Clean up the individual column definitions. split_columns = self.column_split_re.findall(raw_columns) column_defs = [col.strip() for col in split_columns] new_column_defs = [] new_column_names = [] original_column_names = [] constraint_terms = ('foreign ', 'primary ', 'constraint ', 'check ') for column_def in column_defs: column_name, = self.column_name_re.match(column_def).groups() if column_name == column_to_update: new_column_def = fn(column_name, column_def) if new_column_def: new_column_defs.append(new_column_def) original_column_names.append(column_name) column_name, = self.column_name_re.match( new_column_def).groups() new_column_names.append(column_name) else: new_column_defs.append(column_def) # Avoid treating constraints as columns. if not column_def.lower().startswith(constraint_terms): new_column_names.append(column_name) original_column_names.append(column_name) # Create a mapping of original columns to new columns. original_to_new = dict(zip(original_column_names, new_column_names)) new_column = original_to_new.get(column_to_update) fk_filter_fn = lambda column_def: column_def if not new_column: # Remove any foreign keys associated with this column. fk_filter_fn = lambda column_def: None elif new_column != column_to_update: # Update any foreign keys for this column. fk_filter_fn = lambda column_def: self.fk_re.sub( 'FOREIGN KEY ("%s") ' % new_column, column_def) cleaned_columns = [] for column_def in new_column_defs: match = self.fk_re.match(column_def) if match is not None and match.groups()[0] == column_to_update: column_def = fk_filter_fn(column_def) if column_def: cleaned_columns.append(column_def) # Update the name of the new CREATE TABLE query. temp_table = table + '__tmp__' rgx = re.compile('("?)%s("?)' % table, re.I) create = rgx.sub( '\\1%s\\2' % temp_table, raw_create) # Create the new table. columns = ', '.join(cleaned_columns) queries = [ NodeList([SQL('DROP TABLE IF EXISTS'), Entity(temp_table)]), SQL('%s (%s)' % (create.strip(), columns))] # Populate new table. populate_table = NodeList(( SQL('INSERT INTO'), Entity(temp_table), EnclosedNodeList([Entity(col) for col in new_column_names]), SQL('SELECT'), CommaNodeList([Entity(col) for col in original_column_names]), SQL('FROM'), Entity(table))) drop_original = NodeList([SQL('DROP TABLE'), Entity(table)]) # Drop existing table and rename temp table. queries += [ populate_table, drop_original, self.rename_table(temp_table, table)] # Re-create user-defined indexes. User-defined indexes will have a # non-empty SQL attribute. 
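        # (Implicit indexes, e.g. those SQLite creates internally for UNIQUE
        # constraints, have no SQL source in sqlite_master; they are skipped
        # here and come back automatically with the new table definition.)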
for index in filter(lambda idx: idx.sql, indexes): if column_to_update not in index.columns: queries.append(SQL(index.sql)) elif new_column: sql = self._fix_index(index.sql, column_to_update, new_column) if sql is not None: queries.append(SQL(sql)) return queries def _fix_index(self, sql, column_to_update, new_column): # Split on the name of the column to update. If it splits into two # pieces, then there's no ambiguity and we can simply replace the # old with the new. parts = sql.split(column_to_update) if len(parts) == 2: return sql.replace(column_to_update, new_column) # Find the list of columns in the index expression. lhs, rhs = sql.rsplit('(', 1) # Apply the same "split in two" logic to the column list portion of # the query. if len(rhs.split(column_to_update)) == 2: return '%s(%s' % (lhs, rhs.replace(column_to_update, new_column)) # Strip off the trailing parentheses and go through each column. parts = rhs.rsplit(')', 1)[0].split(',') columns = [part.strip('"`[]\' ') for part in parts] # `columns` looks something like: ['status', 'timestamp" DESC'] # https://www.sqlite.org/lang_keywords.html # Strip out any junk after the column name. clean = [] for column in columns: if re.match(r'%s(?:[\'"`\]]?\s|$)' % column_to_update, column): column = new_column + column[len(column_to_update):] clean.append(column) return '%s(%s)' % (lhs, ', '.join('"%s"' % c for c in clean)) @operation def drop_column(self, table, column_name, cascade=True, legacy=False): if sqlite3.sqlite_version_info >= (3, 35, 0) and not legacy: ctx = self.make_context() (self._alter_table(ctx, table) .literal(' DROP COLUMN ') .sql(Entity(column_name))) return ctx return self._update_column(table, column_name, lambda a, b: None) @operation def rename_column(self, table, old_name, new_name, legacy=False): if sqlite3.sqlite_version_info >= (3, 25, 0) and not legacy: return (self ._alter_table(self.make_context(), table) .literal(' RENAME COLUMN ') .sql(Entity(old_name)) .literal(' TO ') .sql(Entity(new_name))) def _rename(column_name, column_def): return column_def.replace(column_name, new_name) return self._update_column(table, old_name, _rename) @operation def add_not_null(self, table, column): def _add_not_null(column_name, column_def): return column_def + ' NOT NULL' return self._update_column(table, column, _add_not_null) @operation def drop_not_null(self, table, column): def _drop_not_null(column_name, column_def): return column_def.replace('NOT NULL', '') return self._update_column(table, column, _drop_not_null) @operation def add_column_default(self, table, column, default): if default is None: raise ValueError('`default` must be not None/NULL.') if callable_(default): default = default() if (isinstance(default, str) and not default.endswith((')', "'")) and not default.isdigit()): default = "'%s'" % default def _add_default(column_name, column_def): # Try to handle SQL functions and string literals, otherwise quote. 
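            # Illustrative behavior: a plain string default such as 'active'
            # is quoted above, yielding DEFAULT 'active'; values that end in
            # ')' or "'" (SQL function calls, pre-quoted literals) and
            # digit-only strings pass through unchanged.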
            return column_def + ' DEFAULT %s' % default
        return self._update_column(table, column, _add_default)

    @operation
    def drop_column_default(self, table, column):
        def _drop_default(column_name, column_def):
            col = re.sub(r'DEFAULT\s+[\w"\'\(\)]+(\s|$)', '', column_def,
                         flags=re.I)
            return col.strip()
        return self._update_column(table, column, _drop_default)

    @operation
    def alter_column_type(self, table, column, field, cast=None):
        if cast is not None:
            raise ValueError('alter_column_type() does not support cast with '
                             'Sqlite.')
        ctx = self.make_context()
        def _alter_column_type(column_name, column_def):
            node_list = field.ddl(ctx)
            sql, _ = ctx.sql(Entity(column)).sql(node_list).query()
            return sql
        return self._update_column(table, column, _alter_column_type)

    @operation
    def add_constraint(self, table, name, constraint):
        raise NotImplementedError

    @operation
    def drop_constraint(self, table, name):
        raise NotImplementedError

    @operation
    def add_foreign_key_constraint(self, table, column_name, field,
                                   on_delete=None, on_update=None):
        raise NotImplementedError


def migrate(*operations, **kwargs):
    for operation in operations:
        operation.run()
peewee-3.17.7/playhouse/mysql_ext.py000066400000000000000000000074421470346076600175160ustar00rootroot00000000000000
import json

try:
    import mysql.connector as mysql_connector
except ImportError:
    mysql_connector = None
try:
    import mariadb
except ImportError:
    mariadb = None

from peewee import ImproperlyConfigured
from peewee import Insert
from peewee import InterfaceError
from peewee import MySQLDatabase
from peewee import Node
from peewee import NodeList
from peewee import SQL
from peewee import TextField
from peewee import fn
from peewee import __deprecated__


class MySQLConnectorDatabase(MySQLDatabase):
    def _connect(self):
        if mysql_connector is None:
            raise ImproperlyConfigured('MySQL connector not installed!')
        return mysql_connector.connect(db=self.database, autocommit=True,
                                       **self.connect_params)

    def cursor(self, commit=None, named_cursor=None):
        if commit is not None:
            __deprecated__('"commit" has been deprecated and is a no-op.')
        if self.is_closed():
            if self.autoconnect:
                self.connect()
            else:
                raise InterfaceError('Error, database connection not opened.')
        return self._state.conn.cursor(buffered=True)

    def get_binary_type(self):
        return mysql_connector.Binary


class MariaDBConnectorDatabase(MySQLDatabase):
    def _connect(self):
        if mariadb is None:
            raise ImproperlyConfigured('mariadb connector not installed!')
        self.connect_params.pop('charset', None)
        self.connect_params.pop('sql_mode', None)
        self.connect_params.pop('use_unicode', None)
        return mariadb.connect(db=self.database, autocommit=True,
                               **self.connect_params)

    def cursor(self, commit=None, named_cursor=None):
        if commit is not None:
            __deprecated__('"commit" has been deprecated and is a no-op.')
        if self.is_closed():
            if self.autoconnect:
                self.connect()
            else:
                raise InterfaceError('Error, database connection not opened.')
        return self._state.conn.cursor(buffered=True)

    def _set_server_version(self, conn):
        version = conn.server_version
        version, point = divmod(version, 100)
        version, minor = divmod(version, 100)
        self.server_version = (version, minor, point)
        if self.server_version >= (10, 5, 0):
            self.returning_clause = True

    def last_insert_id(self, cursor, query_type=None):
        if not self.returning_clause:
            return cursor.lastrowid
        elif query_type == Insert.SIMPLE:
            try:
                return cursor[0][0]
            except (AttributeError, IndexError):
                return cursor.lastrowid
        return cursor

    def get_binary_type(self):
        return mariadb.Binary


class JSONField(TextField):
    field_type = 'JSON'

    def __init__(self,
json_dumps=None, json_loads=None, **kwargs): self._json_dumps = json_dumps or json.dumps self._json_loads = json_loads or json.loads super(JSONField, self).__init__(**kwargs) def python_value(self, value): if value is not None: try: return self._json_loads(value) except (TypeError, ValueError): return value def db_value(self, value): if value is not None: if not isinstance(value, Node): value = self._json_dumps(value) return value def extract(self, path): return fn.json_extract(self, path) def Match(columns, expr, modifier=None): if isinstance(columns, (list, tuple)): match = fn.MATCH(*columns) # Tuple of one or more columns / fields. else: match = fn.MATCH(columns) # Single column / field. args = expr if modifier is None else NodeList((expr, SQL(modifier))) return NodeList((match, fn.AGAINST(args))) peewee-3.17.7/playhouse/pool.py000066400000000000000000000314441470346076600164420ustar00rootroot00000000000000""" Lightweight connection pooling for peewee. In a multi-threaded application, up to `max_connections` will be opened. Each thread (or, if using gevent, greenlet) will have it's own connection. In a single-threaded application, only one connection will be created. It will be continually recycled until either it exceeds the stale timeout or is closed explicitly (using `.manual_close()`). By default, all your application needs to do is ensure that connections are closed when you are finished with them, and they will be returned to the pool. For web applications, this typically means that at the beginning of a request, you will open a connection, and when you return a response, you will close the connection. Simple Postgres pool example code: # Use the special postgresql extensions. from playhouse.pool import PooledPostgresqlExtDatabase db = PooledPostgresqlExtDatabase( 'my_app', max_connections=32, stale_timeout=300, # 5 minutes. user='postgres') class BaseModel(Model): class Meta: database = db That's it! """ import functools import heapq import logging import threading import time from collections import namedtuple from itertools import chain try: from psycopg2.extensions import TRANSACTION_STATUS_IDLE from psycopg2.extensions import TRANSACTION_STATUS_INERROR from psycopg2.extensions import TRANSACTION_STATUS_UNKNOWN except ImportError: TRANSACTION_STATUS_IDLE = \ TRANSACTION_STATUS_INERROR = \ TRANSACTION_STATUS_UNKNOWN = None try: from psycopg.pq import TransactionStatus except ImportError: pass from peewee import MySQLDatabase from peewee import PostgresqlDatabase from peewee import SqliteDatabase logger = logging.getLogger('peewee.pool') def make_int(val): if val is not None and not isinstance(val, (int, float)): return int(val) return val class MaxConnectionsExceeded(ValueError): pass PoolConnection = namedtuple('PoolConnection', ('timestamp', 'connection', 'checked_out')) class _sentinel(object): def __lt__(self, other): return True def locked(fn): @functools.wraps(fn) def inner(self, *args, **kwargs): with self._pool_lock: return fn(self, *args, **kwargs) return inner class PooledDatabase(object): def __init__(self, database, max_connections=20, stale_timeout=None, timeout=None, **kwargs): self._max_connections = make_int(max_connections) self._stale_timeout = make_int(stale_timeout) self._wait_timeout = make_int(timeout) if self._wait_timeout == 0: self._wait_timeout = float('inf') self._pool_lock = threading.RLock() # Available / idle connections stored in a heap, sorted oldest first. self._connections = [] # Mapping of connection id to PoolConnection. 
Ordinarily we would want # to use something like a WeakKeyDictionary, but Python typically won't # allow us to create weak references to connection objects. self._in_use = {} # Use the memory address of the connection as the key in the event the # connection object is not hashable. Connections will not get # garbage-collected, however, because a reference to them will persist # in "_in_use" as long as the conn has not been closed. self.conn_key = id super(PooledDatabase, self).__init__(database, **kwargs) def init(self, database, max_connections=None, stale_timeout=None, timeout=None, **connect_kwargs): super(PooledDatabase, self).init(database, **connect_kwargs) if max_connections is not None: self._max_connections = make_int(max_connections) if stale_timeout is not None: self._stale_timeout = make_int(stale_timeout) if timeout is not None: self._wait_timeout = make_int(timeout) if self._wait_timeout == 0: self._wait_timeout = float('inf') def connect(self, reuse_if_open=False): if not self._wait_timeout: return super(PooledDatabase, self).connect(reuse_if_open) expires = time.time() + self._wait_timeout while expires > time.time(): try: ret = super(PooledDatabase, self).connect(reuse_if_open) except MaxConnectionsExceeded: time.sleep(0.1) else: return ret raise MaxConnectionsExceeded('Max connections exceeded, timed out ' 'attempting to connect.') @locked def _connect(self): while True: try: # Remove the oldest connection from the heap. ts, _, c_conn = heapq.heappop(self._connections) conn = c_conn key = self.conn_key(conn) except IndexError: ts = conn = None logger.debug('No connection available in pool.') break else: if self._is_closed(conn): # This connecton was closed, but since it was not stale # it got added back to the queue of available conns. We # then closed it and marked it as explicitly closed, so # it's safe to throw it away now. # (Because Database.close() calls Database._close()). logger.debug('Connection %s was closed.', key) ts = conn = None elif self._stale_timeout and self._is_stale(ts): # If we are attempting to check out a stale connection, # then close it. We don't need to mark it in the "closed" # set, because it is not in the list of available conns # anymore. logger.debug('Connection %s was stale, closing.', key) self._close(conn, True) ts = conn = None else: break if conn is None: if self._max_connections and ( len(self._in_use) >= self._max_connections): raise MaxConnectionsExceeded('Exceeded maximum connections.') conn = super(PooledDatabase, self)._connect() ts = time.time() key = self.conn_key(conn) logger.debug('Created new connection %s.', key) self._in_use[key] = PoolConnection(ts, conn, time.time()) return conn def _is_stale(self, timestamp): # Called on check-out and check-in to ensure the connection has # not outlived the stale timeout. return (time.time() - timestamp) > self._stale_timeout def _is_closed(self, conn): return False def _can_reuse(self, conn): # Called on check-in to make sure the connection can be re-used. 
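        # The base pool accepts every connection for reuse; backend-specific
        # pools below (postgres, psycopg3) override this to roll back or
        # reject connections left in a transaction or error state.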
return True @locked def _close(self, conn, close_conn=False): key = self.conn_key(conn) if close_conn: super(PooledDatabase, self)._close(conn) elif key in self._in_use: pool_conn = self._in_use.pop(key) if self._stale_timeout and self._is_stale(pool_conn.timestamp): logger.debug('Closing stale connection %s.', key) super(PooledDatabase, self)._close(conn) elif self._can_reuse(conn): logger.debug('Returning %s to pool.', key) heapq.heappush(self._connections, (pool_conn.timestamp, _sentinel(), conn)) else: logger.debug('Closed %s.', key) @locked def manual_close(self): """ Close the underlying connection without returning it to the pool. """ if self.is_closed(): return False # Obtain reference to the connection in-use by the calling thread. conn = self.connection() # A connection will only be re-added to the available list if it is # marked as "in use" at the time it is closed. We will explicitly # remove it from the "in use" list, call "close()" for the # side-effects, and then explicitly close the connection. self._in_use.pop(self.conn_key(conn), None) self.close() self._close(conn, close_conn=True) @locked def close_idle(self): # Close any open connections that are not currently in-use. for _, _, conn in self._connections: self._close(conn, close_conn=True) self._connections = [] @locked def close_stale(self, age=600): # Close any connections that are in-use but were checked out quite some # time ago and can be considered stale. in_use = {} cutoff = time.time() - age n = 0 for key, pool_conn in self._in_use.items(): if pool_conn.checked_out < cutoff: self._close(pool_conn.connection, close_conn=True) n += 1 else: in_use[key] = pool_conn self._in_use = in_use return n @locked def close_all(self): # Close all connections -- available and in-use. Warning: may break any # active connections used by other threads. self.close() for _, _, conn in self._connections: self._close(conn, close_conn=True) for pool_conn in self._in_use.values(): self._close(pool_conn.connection, close_conn=True) self._connections = [] self._in_use = {} class PooledMySQLDatabase(PooledDatabase, MySQLDatabase): def _is_closed(self, conn): if self.server_version[0] == 8: args = () else: args = (False,) try: conn.ping(*args) except: return True else: return False class _PooledPostgresqlDatabase(PooledDatabase): def _is_closed(self, conn): if conn.closed: return True txn_status = conn.get_transaction_status() if txn_status == TRANSACTION_STATUS_UNKNOWN: return True elif txn_status != TRANSACTION_STATUS_IDLE: conn.rollback() return False def _can_reuse(self, conn): txn_status = conn.get_transaction_status() # Do not return connection in an error state, as subsequent queries # will all fail. If the status is unknown then we lost the connection # to the server and the connection should not be re-used. 
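        # Summary of the handling below: UNKNOWN -> discard the connection;
        # INERROR -> reset() it, then reuse; any other non-IDLE status (e.g.
        # an open transaction) -> rollback(), then reuse.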
if txn_status == TRANSACTION_STATUS_UNKNOWN: return False elif txn_status == TRANSACTION_STATUS_INERROR: conn.reset() elif txn_status != TRANSACTION_STATUS_IDLE: conn.rollback() return True class PooledPostgresqlDatabase(_PooledPostgresqlDatabase, PostgresqlDatabase): pass try: from playhouse.postgres_ext import PostgresqlExtDatabase class PooledPostgresqlExtDatabase(_PooledPostgresqlDatabase, PostgresqlExtDatabase): pass except ImportError: PooledPostgresqlExtDatabase = None try: from playhouse.psycopg3_ext import Psycopg3Database class PooledPsycopg3Database(PooledDatabase, Psycopg3Database): def _is_closed(self, conn): if conn.closed: return True txn_status = conn.pgconn.transaction_status if txn_status == TransactionStatus.UNKNOWN: return True elif txn_status != TransactionStatus.IDLE: conn.rollback() return False def _can_reuse(self, conn): txn_status = conn.pgconn.transaction_status # Do not return connection in an error state, as subsequent queries # will all fail. If the status is unknown then we lost the connection # to the server and the connection should not be re-used. if txn_status == TransactionStatus.UNKNOWN: return False elif txn_status == TransactionStatus.INERROR: conn.reset() elif txn_status != TransactionStatus.IDLE: conn.rollback() return True except ImportError: PooledPsycopg3Database = None class _PooledSqliteDatabase(PooledDatabase): def _is_closed(self, conn): try: conn.total_changes except: return True else: return False class PooledSqliteDatabase(_PooledSqliteDatabase, SqliteDatabase): pass try: from playhouse.sqlite_ext import SqliteExtDatabase class PooledSqliteExtDatabase(_PooledSqliteDatabase, SqliteExtDatabase): pass except ImportError: PooledSqliteExtDatabase = None try: from playhouse.sqlite_ext import CSqliteExtDatabase class PooledCSqliteExtDatabase(_PooledSqliteDatabase, CSqliteExtDatabase): pass except ImportError: PooledCSqliteExtDatabase = None peewee-3.17.7/playhouse/postgres_ext.py000066400000000000000000000357501470346076600202230ustar00rootroot00000000000000""" Collection of postgres-specific extensions, currently including: * Support for hstore, a key/value type storage """ import json import logging import uuid from peewee import * from peewee import ColumnBase from peewee import Expression from peewee import Node from peewee import NodeList from peewee import __deprecated__ from peewee import __exception_wrapper__ try: from psycopg2cffi import compat compat.register() except ImportError: pass try: from psycopg2.extras import register_hstore except ImportError: def register_hstore(c, globally): pass try: from psycopg2.extras import Json except: Json = None logger = logging.getLogger('peewee') HCONTAINS_DICT = '@>' HCONTAINS_KEYS = '?&' HCONTAINS_KEY = '?' HCONTAINS_ANY_KEY = '?|' HKEY = '->' HUPDATE = '||' ACONTAINS = '@>' ACONTAINED_BY = '<@' ACONTAINS_ANY = '&&' TS_MATCH = '@@' JSONB_CONTAINS = '@>' JSONB_CONTAINED_BY = '<@' JSONB_CONTAINS_KEY = '?' JSONB_CONTAINS_ANY_KEY = '?|' JSONB_CONTAINS_ALL_KEYS = '?&' JSONB_EXISTS = '?' 
JSONB_REMOVE = '-' class _LookupNode(ColumnBase): def __init__(self, node, parts): self.node = node self.parts = parts super(_LookupNode, self).__init__() def clone(self): return type(self)(self.node, list(self.parts)) def __hash__(self): return hash((self.__class__.__name__, id(self))) class _JsonLookupBase(_LookupNode): def __init__(self, node, parts, as_json=False): super(_JsonLookupBase, self).__init__(node, parts) self._as_json = as_json def clone(self): return type(self)(self.node, list(self.parts), self._as_json) @Node.copy def as_json(self, as_json=True): self._as_json = as_json def concat(self, rhs): if not isinstance(rhs, Node): rhs = Json(rhs) return Expression(self.as_json(True), OP.CONCAT, rhs) def contains(self, other): clone = self.as_json(True) if isinstance(other, (list, dict)): return Expression(clone, JSONB_CONTAINS, Json(other)) return Expression(clone, JSONB_EXISTS, other) def contains_any(self, *keys): return Expression( self.as_json(True), JSONB_CONTAINS_ANY_KEY, Value(list(keys), unpack=False)) def contains_all(self, *keys): return Expression( self.as_json(True), JSONB_CONTAINS_ALL_KEYS, Value(list(keys), unpack=False)) def has_key(self, key): return Expression(self.as_json(True), JSONB_CONTAINS_KEY, key) class JsonLookup(_JsonLookupBase): def __getitem__(self, value): return JsonLookup(self.node, self.parts + [value], self._as_json) def __sql__(self, ctx): ctx.sql(self.node) for part in self.parts[:-1]: ctx.literal('->').sql(part) if self.parts: (ctx .literal('->' if self._as_json else '->>') .sql(self.parts[-1])) return ctx class JsonPath(_JsonLookupBase): def __sql__(self, ctx): return (ctx .sql(self.node) .literal('#>' if self._as_json else '#>>') .sql(Value('{%s}' % ','.join(map(str, self.parts))))) class ObjectSlice(_LookupNode): @classmethod def create(cls, node, value): if isinstance(value, slice): parts = [value.start or 0, value.stop or 0] elif isinstance(value, int): parts = [value] elif isinstance(value, Node): parts = value else: # Assumes colon-separated integer indexes. parts = [int(i) for i in value.split(':')] return cls(node, parts) def __sql__(self, ctx): ctx.sql(self.node) if isinstance(self.parts, Node): ctx.literal('[').sql(self.parts).literal(']') else: ctx.literal('[%s]' % ':'.join(str(p + 1) for p in self.parts)) return ctx def __getitem__(self, value): return ObjectSlice.create(self, value) class IndexedFieldMixin(object): default_index_type = 'GIN' def __init__(self, *args, **kwargs): kwargs.setdefault('index', True) # By default, use an index. 
super(IndexedFieldMixin, self).__init__(*args, **kwargs) class ArrayField(IndexedFieldMixin, Field): passthrough = True def __init__(self, field_class=IntegerField, field_kwargs=None, dimensions=1, convert_values=False, *args, **kwargs): self.__field = field_class(**(field_kwargs or {})) self.dimensions = dimensions self.convert_values = convert_values self.field_type = self.__field.field_type super(ArrayField, self).__init__(*args, **kwargs) def bind(self, model, name, set_attribute=True): ret = super(ArrayField, self).bind(model, name, set_attribute) self.__field.bind(model, '__array_%s' % name, False) return ret def ddl_datatype(self, ctx): data_type = self.__field.ddl_datatype(ctx) return NodeList((data_type, SQL('[]' * self.dimensions)), glue='') def db_value(self, value): if value is None or isinstance(value, Node): return value elif self.convert_values: return self._process(self.__field.db_value, value, self.dimensions) else: return value if isinstance(value, list) else list(value) def python_value(self, value): if self.convert_values and value is not None: conv = self.__field.python_value if isinstance(value, list): return self._process(conv, value, self.dimensions) else: return conv(value) else: return value def _process(self, conv, value, dimensions): dimensions -= 1 if dimensions == 0: return [conv(v) for v in value] else: return [self._process(conv, v, dimensions) for v in value] def __getitem__(self, value): return ObjectSlice.create(self, value) def _e(op): def inner(self, rhs): return Expression(self, op, ArrayValue(self, rhs)) return inner __eq__ = _e(OP.EQ) __ne__ = _e(OP.NE) __gt__ = _e(OP.GT) __ge__ = _e(OP.GTE) __lt__ = _e(OP.LT) __le__ = _e(OP.LTE) __hash__ = Field.__hash__ def contains(self, *items): return Expression(self, ACONTAINS, ArrayValue(self, items)) def contains_any(self, *items): return Expression(self, ACONTAINS_ANY, ArrayValue(self, items)) def contained_by(self, *items): return Expression(self, ACONTAINED_BY, ArrayValue(self, items)) class ArrayValue(Node): def __init__(self, field, value): self.field = field self.value = value def __sql__(self, ctx): return (ctx .sql(Value(self.value, unpack=False)) .literal('::') .sql(self.field.ddl_datatype(ctx))) class DateTimeTZField(DateTimeField): field_type = 'TIMESTAMPTZ' class HStoreField(IndexedFieldMixin, Field): field_type = 'HSTORE' __hash__ = Field.__hash__ def __getitem__(self, key): return Expression(self, HKEY, Value(key)) def keys(self): return fn.akeys(self) def values(self): return fn.avals(self) def items(self): return fn.hstore_to_matrix(self) def slice(self, *args): return fn.slice(self, Value(list(args), unpack=False)) def exists(self, key): return fn.exist(self, key) def defined(self, key): return fn.defined(self, key) def update(self, **data): return Expression(self, HUPDATE, data) def delete(self, *keys): return fn.delete(self, Value(list(keys), unpack=False)) def contains(self, value): if isinstance(value, dict): rhs = Value(value, unpack=False) return Expression(self, HCONTAINS_DICT, rhs) elif isinstance(value, (list, tuple)): rhs = Value(value, unpack=False) return Expression(self, HCONTAINS_KEYS, rhs) return Expression(self, HCONTAINS_KEY, value) def contains_any(self, *keys): return Expression(self, HCONTAINS_ANY_KEY, Value(list(keys), unpack=False)) class JSONField(Field): field_type = 'JSON' _json_datatype = 'json' def __init__(self, dumps=None, *args, **kwargs): self.dumps = dumps or json.dumps super(JSONField, self).__init__(*args, **kwargs) def db_value(self, value): if value is 
None: return value if not isinstance(value, Json): return Cast(self.dumps(value), self._json_datatype) return value def __getitem__(self, value): return JsonLookup(self, [value]) def path(self, *keys): return JsonPath(self, keys) def concat(self, value): if not isinstance(value, Node): value = Json(value) return super(JSONField, self).concat(value) def cast_jsonb(node): return NodeList((node, SQL('::jsonb')), glue='') class BinaryJSONField(IndexedFieldMixin, JSONField): field_type = 'JSONB' _json_datatype = 'jsonb' __hash__ = Field.__hash__ def contains(self, other): if isinstance(other, (list, dict)): return Expression(self, JSONB_CONTAINS, Json(other)) elif isinstance(other, JSONField): return Expression(self, JSONB_CONTAINS, other) return Expression(cast_jsonb(self), JSONB_EXISTS, other) def contained_by(self, other): return Expression(cast_jsonb(self), JSONB_CONTAINED_BY, Json(other)) def contains_any(self, *items): return Expression( cast_jsonb(self), JSONB_CONTAINS_ANY_KEY, Value(list(items), unpack=False)) def contains_all(self, *items): return Expression( cast_jsonb(self), JSONB_CONTAINS_ALL_KEYS, Value(list(items), unpack=False)) def has_key(self, key): return Expression(cast_jsonb(self), JSONB_CONTAINS_KEY, key) def remove(self, *items): return Expression( cast_jsonb(self), JSONB_REMOVE, Value(list(items), unpack=False)) class TSVectorField(IndexedFieldMixin, TextField): field_type = 'TSVECTOR' __hash__ = Field.__hash__ def match(self, query, language=None, plain=False): params = (language, query) if language is not None else (query,) func = fn.plainto_tsquery if plain else fn.to_tsquery return Expression(self, TS_MATCH, func(*params)) def Match(field, query, language=None): params = (language, query) if language is not None else (query,) field_params = (language, field) if language is not None else (field,) return Expression( fn.to_tsvector(*field_params), TS_MATCH, fn.to_tsquery(*params)) class IntervalField(Field): field_type = 'INTERVAL' class FetchManyCursor(object): __slots__ = ('cursor', 'array_size', 'exhausted', 'iterable') def __init__(self, cursor, array_size=None): self.cursor = cursor self.array_size = array_size or cursor.itersize self.exhausted = False self.iterable = self.row_gen() def __del__(self): if self.cursor and not self.cursor.closed: try: self.cursor.close() except Exception: pass @property def description(self): return self.cursor.description def close(self): self.cursor.close() def row_gen(self): try: while True: rows = self.cursor.fetchmany(self.array_size) if not rows: return for row in rows: yield row finally: self.close() def fetchone(self): if self.exhausted: return try: return next(self.iterable) except StopIteration: self.exhausted = True class ServerSideQuery(Node): def __init__(self, query, array_size=None): self.query = query self.array_size = array_size self._cursor_wrapper = None def __sql__(self, ctx): return self.query.__sql__(ctx) def __iter__(self): if self._cursor_wrapper is None: self._execute(self.query._database) return iter(self._cursor_wrapper.iterator()) def _execute(self, database): if self._cursor_wrapper is None: cursor = database.execute(self.query, named_cursor=True, array_size=self.array_size) self._cursor_wrapper = self.query._get_cursor_wrapper(cursor) return self._cursor_wrapper def ServerSide(query, database=None, array_size=None): if database is None: database = query._database server_side_query = ServerSideQuery(query, array_size=array_size) for row in server_side_query: yield row class _empty_object(object): __slots__ 
= () def __nonzero__(self): return False __bool__ = __nonzero__ class PostgresqlExtDatabase(PostgresqlDatabase): def __init__(self, *args, **kwargs): self._register_hstore = kwargs.pop('register_hstore', False) self._server_side_cursors = kwargs.pop('server_side_cursors', False) super(PostgresqlExtDatabase, self).__init__(*args, **kwargs) def _connect(self): conn = super(PostgresqlExtDatabase, self)._connect() if self._register_hstore: register_hstore(conn, globally=True) return conn def cursor(self, commit=None, named_cursor=None): if commit is not None: __deprecated__('"commit" has been deprecated and is a no-op.') if self.is_closed(): if self.autoconnect: self.connect() else: raise InterfaceError('Error, database connection not opened.') if named_cursor: curs = self._state.conn.cursor(name=str(uuid.uuid1()), withhold=True) return curs return self._state.conn.cursor() def execute(self, query, commit=None, named_cursor=False, array_size=None, **context_options): if commit is not None: __deprecated__('"commit" has been deprecated and is a no-op.') ctx = self.get_sql_context(**context_options) sql, params = ctx.sql(query).query() named_cursor = named_cursor or (self._server_side_cursors and sql[:6].lower() == 'select') cursor = self.execute_sql(sql, params, named_cursor=named_cursor) if named_cursor: cursor = FetchManyCursor(cursor, array_size) return cursor def execute_sql(self, sql, params=None, commit=None, named_cursor=None): if commit is not None: __deprecated__('"commit" has been deprecated and is a no-op.') logger.debug((sql, params)) with __exception_wrapper__: cursor = self.cursor(named_cursor=named_cursor) cursor.execute(sql, params or ()) return cursor peewee-3.17.7/playhouse/psycopg3_ext.py000066400000000000000000000125411470346076600201150ustar00rootroot00000000000000import json from peewee import * from peewee import Expression from peewee import Node from peewee import NodeList from playhouse.postgres_ext import ArrayField from playhouse.postgres_ext import DateTimeTZField from playhouse.postgres_ext import IndexedFieldMixin from playhouse.postgres_ext import IntervalField from playhouse.postgres_ext import Match from playhouse.postgres_ext import TSVectorField # Helpers needed for psycopg3-specific overrides. from playhouse.postgres_ext import _JsonLookupBase try: import psycopg from psycopg.types.json import Jsonb from psycopg.pq import TransactionStatus except ImportError: psycopg = Jsonb = None JSONB_CONTAINS = '@>' JSONB_CONTAINED_BY = '<@' JSONB_CONTAINS_KEY = '?' JSONB_CONTAINS_ANY_KEY = '?|' JSONB_CONTAINS_ALL_KEYS = '?&' JSONB_EXISTS = '?' JSONB_REMOVE = '-' class _Psycopg3JsonLookupBase(_JsonLookupBase): def concat(self, rhs): if not isinstance(rhs, Node): rhs = Jsonb(rhs) # Note: uses psycopg3's Jsonb. return Expression(self.as_json(True), OP.CONCAT, rhs) def contains(self, other): clone = self.as_json(True) if isinstance(other, (list, dict)): return Expression(clone, JSONB_CONTAINS, Jsonb(other)) # Same. 
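        # Scalar operands fall through to the jsonb "?" (key-exists) operator
        # below, so e.g. Model.data['k1'].contains('k2') -- an illustrative
        # model, not part of this module -- tests for the key 'k2' within the
        # object found at 'k1'.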
return Expression(clone, JSONB_EXISTS, other) class JsonLookup(_Psycopg3JsonLookupBase): def __getitem__(self, value): return JsonLookup(self.node, self.parts + [value], self._as_json) def __sql__(self, ctx): ctx.sql(self.node) for part in self.parts[:-1]: ctx.literal('->').sql(part) if self.parts: (ctx .literal('->' if self._as_json else '->>') .sql(self.parts[-1])) return ctx class JsonPath(_Psycopg3JsonLookupBase): def __sql__(self, ctx): return (ctx .sql(self.node) .literal('#>' if self._as_json else '#>>') .sql(Value('{%s}' % ','.join(map(str, self.parts))))) def cast_jsonb(node): return NodeList((node, SQL('::jsonb')), glue='') class BinaryJSONField(IndexedFieldMixin, Field): field_type = 'JSONB' _json_datatype = 'jsonb' __hash__ = Field.__hash__ def __init__(self, dumps=None, *args, **kwargs): self.dumps = dumps or json.dumps super(BinaryJSONField, self).__init__(*args, **kwargs) def db_value(self, value): if value is None: return value if not isinstance(value, Jsonb): return Cast(self.dumps(value), self._json_datatype) return value def __getitem__(self, value): return JsonLookup(self, [value]) def path(self, *keys): return JsonPath(self, keys) def concat(self, value): if not isinstance(value, Node): value = Jsonb(value) return super(BinaryJSONField, self).concat(value) def contains(self, other): if isinstance(other, (list, dict)): return Expression(self, JSONB_CONTAINS, Jsonb(other)) elif isinstance(other, BinaryJSONField): return Expression(self, JSONB_CONTAINS, other) return Expression(cast_jsonb(self), JSONB_EXISTS, other) def contained_by(self, other): return Expression(cast_jsonb(self), JSONB_CONTAINED_BY, Jsonb(other)) def contains_any(self, *items): return Expression( cast_jsonb(self), JSONB_CONTAINS_ANY_KEY, Value(list(items), unpack=False)) def contains_all(self, *items): return Expression( cast_jsonb(self), JSONB_CONTAINS_ALL_KEYS, Value(list(items), unpack=False)) def has_key(self, key): return Expression(cast_jsonb(self), JSONB_CONTAINS_KEY, key) def remove(self, *items): return Expression( cast_jsonb(self), JSONB_REMOVE, # Hack: psycopg3 parameterizes this as an array, e.g. '{k1,k2}', # but that doesn't seem to be working, so we explicitly cast. # Perhaps postgres is interpreting it as a string. Using the more # explicit ARRAY['k1','k2'] also works just fine -- but we'll make # the cast explicit to get it working. Cast(Value(list(items), unpack=False), 'text[]')) class Psycopg3Database(PostgresqlDatabase): def _connect(self): if psycopg is None: raise ImproperlyConfigured('psycopg3 is not installed!') conn = psycopg.connect(dbname=self.database, **self.connect_params) if self._isolation_level is not None: conn.isolation_level = self._isolation_level conn.autocommit = True return conn def get_binary_type(self): return psycopg.Binary def _set_server_version(self, conn): self.server_version = conn.pgconn.server_version if self.server_version >= 90600: self.safe_create_index = True def is_connection_usable(self): if self._state.closed: return False # Returns True if we are idle, running a command, or in an active # connection. If the connection is in an error state or the connection # is otherwise unusable, return False. 
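        # libpq reports PQTRANS_IDLE (0), PQTRANS_ACTIVE (1), PQTRANS_INTRANS
        # (2), PQTRANS_INERROR (3) or PQTRANS_UNKNOWN (4), so the comparison
        # below accepts the first three states and rejects a connection that
        # is inside an aborted transaction or whose status cannot be
        # determined (e.g. the socket has gone away).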
conn = self._state.conn return conn.pgconn.transaction_status < TransactionStatus.INERROR def extract_date(self, date_part, date_field): return fn.EXTRACT(NodeList((SQL(date_part), SQL('FROM'), date_field))) peewee-3.17.7/playhouse/reflection.py000066400000000000000000000746541470346076600176350ustar00rootroot00000000000000try: from collections import OrderedDict except ImportError: OrderedDict = dict from collections import namedtuple from inspect import isclass import re import warnings from peewee import * from peewee import _StringField from peewee import _query_val_transform from peewee import CommaNodeList from peewee import SCOPE_VALUES from peewee import make_snake_case from peewee import text_type try: from pymysql.constants import FIELD_TYPE except ImportError: try: from MySQLdb.constants import FIELD_TYPE except ImportError: FIELD_TYPE = None try: from playhouse import postgres_ext except ImportError: postgres_ext = None try: from playhouse.cockroachdb import CockroachDatabase except ImportError: CockroachDatabase = None RESERVED_WORDS = set([ 'and', 'as', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif', 'else', 'except', 'exec', 'finally', 'for', 'from', 'global', 'if', 'import', 'in', 'is', 'lambda', 'not', 'or', 'pass', 'print', 'raise', 'return', 'try', 'while', 'with', 'yield', ]) class UnknownField(object): pass class Column(object): """ Store metadata about a database column. """ primary_key_types = (IntegerField, AutoField) def __init__(self, name, field_class, raw_column_type, nullable, primary_key=False, column_name=None, index=False, unique=False, default=None, extra_parameters=None): self.name = name self.field_class = field_class self.raw_column_type = raw_column_type self.nullable = nullable self.primary_key = primary_key self.column_name = column_name self.index = index self.unique = unique self.default = default self.extra_parameters = extra_parameters # Foreign key metadata. self.rel_model = None self.related_name = None self.to_field = None def __repr__(self): attrs = [ 'field_class', 'raw_column_type', 'nullable', 'primary_key', 'column_name'] keyword_args = ', '.join( '%s=%s' % (attr, getattr(self, attr)) for attr in attrs) return 'Column(%s, %s)' % (self.name, keyword_args) def get_field_parameters(self): params = {} if self.extra_parameters is not None: params.update(self.extra_parameters) # Set up default attributes. if self.nullable: params['null'] = True if self.field_class is ForeignKeyField or self.name != self.column_name: params['column_name'] = "'%s'" % self.column_name if self.primary_key and not issubclass(self.field_class, AutoField): params['primary_key'] = True if self.default is not None: params['constraints'] = '[SQL("DEFAULT %s")]' % \ self.default.replace('"', '\\"') # Handle ForeignKeyField-specific attributes. if self.is_foreign_key(): params['model'] = self.rel_model if self.to_field: params['field'] = "'%s'" % self.to_field if self.related_name: params['backref'] = "'%s'" % self.related_name # Handle indexes on column. 
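        # A UNIQUE constraint is always emitted, but a plain index on a
        # foreign-key column is omitted, since peewee's ForeignKeyField
        # indexes its column by default; primary keys are skipped entirely.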
if not self.is_primary_key(): if self.unique: params['unique'] = 'True' elif self.index and not self.is_foreign_key(): params['index'] = 'True' return params def is_primary_key(self): return self.field_class is AutoField or self.primary_key def is_foreign_key(self): return self.field_class is ForeignKeyField def is_self_referential_fk(self): return (self.field_class is ForeignKeyField and self.rel_model == "'self'") def set_foreign_key(self, foreign_key, model_names, dest=None, related_name=None): self.foreign_key = foreign_key self.field_class = ForeignKeyField if foreign_key.dest_table == foreign_key.table: self.rel_model = "'self'" else: self.rel_model = model_names[foreign_key.dest_table] self.to_field = dest and dest.name or None self.related_name = related_name or None def get_field(self): # Generate the field definition for this column. field_params = {} for key, value in self.get_field_parameters().items(): if isclass(value) and issubclass(value, Field): value = value.__name__ field_params[key] = value param_str = ', '.join('%s=%s' % (k, v) for k, v in sorted(field_params.items())) field = '%s = %s(%s)' % ( self.name, self.field_class.__name__, param_str) if self.field_class is UnknownField: field = '%s # %s' % (field, self.raw_column_type) return field class Metadata(object): column_map = {} extension_import = '' def __init__(self, database): self.database = database self.requires_extension = False def execute(self, sql, *params): return self.database.execute_sql(sql, params) def get_columns(self, table, schema=None): metadata = OrderedDict( (metadata.name, metadata) for metadata in self.database.get_columns(table, schema)) # Look up the actual column type for each column. column_types, extra_params = self.get_column_types(table, schema) # Look up the primary keys. 
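        # A lone integer primary key is promoted to AutoField/BigAutoField on
        # the assumption that it is an auto-incrementing surrogate key;
        # multi-column keys are handled later by the model generator.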
pk_names = self.get_primary_keys(table, schema) if len(pk_names) == 1: pk = pk_names[0] if column_types[pk] is IntegerField: column_types[pk] = AutoField elif column_types[pk] is BigIntegerField: column_types[pk] = BigAutoField columns = OrderedDict() for name, column_data in metadata.items(): field_class = column_types[name] default = self._clean_default(field_class, column_data.default) columns[name] = Column( name, field_class=field_class, raw_column_type=column_data.data_type, nullable=column_data.null, primary_key=column_data.primary_key, column_name=name, default=default, extra_parameters=extra_params.get(name)) return columns def get_column_types(self, table, schema=None): raise NotImplementedError def _clean_default(self, field_class, default): if default is None or field_class in (AutoField, BigAutoField) or \ default.lower() == 'null': return if issubclass(field_class, _StringField) and \ isinstance(default, text_type) and not default.startswith("'"): default = "'%s'" % default return default or "''" def get_foreign_keys(self, table, schema=None): return self.database.get_foreign_keys(table, schema) def get_primary_keys(self, table, schema=None): return self.database.get_primary_keys(table, schema) def get_indexes(self, table, schema=None): return self.database.get_indexes(table, schema) class PostgresqlMetadata(Metadata): column_map = { 16: BooleanField, 17: BlobField, 20: BigIntegerField, 21: SmallIntegerField, 23: IntegerField, 25: TextField, 700: FloatField, 701: DoubleField, 1042: CharField, # blank-padded CHAR 1043: CharField, 1082: DateField, 1114: DateTimeField, 1184: DateTimeField, 1083: TimeField, 1266: TimeField, 1700: DecimalField, 2950: UUIDField, # UUID } array_types = { 1000: BooleanField, 1001: BlobField, 1005: SmallIntegerField, 1007: IntegerField, 1009: TextField, 1014: CharField, 1015: CharField, 1016: BigIntegerField, 1115: DateTimeField, 1182: DateField, 1183: TimeField, 2951: UUIDField, } extension_import = 'from playhouse.postgres_ext import *' def __init__(self, database): super(PostgresqlMetadata, self).__init__(database) if postgres_ext is not None: # Attempt to add types like HStore and JSON. cursor = self.execute('select oid, typname, format_type(oid, NULL)' ' from pg_type;') results = cursor.fetchall() for oid, typname, formatted_type in results: if typname == 'json': self.column_map[oid] = postgres_ext.JSONField elif typname == 'jsonb': self.column_map[oid] = postgres_ext.BinaryJSONField elif typname == 'hstore': self.column_map[oid] = postgres_ext.HStoreField elif typname == 'tsvector': self.column_map[oid] = postgres_ext.TSVectorField for oid in self.array_types: self.column_map[oid] = postgres_ext.ArrayField def get_column_types(self, table, schema): column_types = {} extra_params = {} extension_types = set(( postgres_ext.ArrayField, postgres_ext.BinaryJSONField, postgres_ext.JSONField, postgres_ext.TSVectorField, postgres_ext.HStoreField)) if postgres_ext is not None else set() # Look up the actual column type for each column. identifier = '%s."%s"' % (schema, table) cursor = self.execute( 'SELECT attname, atttypid FROM pg_catalog.pg_attribute ' 'WHERE attrelid = %s::regclass AND attnum > %s', identifier, 0) # Store column metadata in dictionary keyed by column name. 
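        # For example, an "integer[]" column reports the array OID 1007,
        # which maps to ArrayField, and extra_params records
        # field_class=IntegerField so the generated field preserves the
        # element type.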
for name, oid in cursor.fetchall(): column_types[name] = self.column_map.get(oid, UnknownField) if column_types[name] in extension_types: self.requires_extension = True if oid in self.array_types: extra_params[name] = {'field_class': self.array_types[oid]} return column_types, extra_params def get_columns(self, table, schema=None): schema = schema or 'public' return super(PostgresqlMetadata, self).get_columns(table, schema) def get_foreign_keys(self, table, schema=None): schema = schema or 'public' return super(PostgresqlMetadata, self).get_foreign_keys(table, schema) def get_primary_keys(self, table, schema=None): schema = schema or 'public' return super(PostgresqlMetadata, self).get_primary_keys(table, schema) def get_indexes(self, table, schema=None): schema = schema or 'public' return super(PostgresqlMetadata, self).get_indexes(table, schema) class CockroachDBMetadata(PostgresqlMetadata): # CRDB treats INT the same as BIGINT, so we just map bigint type OIDs to # regular IntegerField. column_map = PostgresqlMetadata.column_map.copy() column_map[20] = IntegerField array_types = PostgresqlMetadata.array_types.copy() array_types[1016] = IntegerField extension_import = 'from playhouse.cockroachdb import *' def __init__(self, database): Metadata.__init__(self, database) self.requires_extension = True if postgres_ext is not None: # Attempt to add JSON types. cursor = self.execute('select oid, typname, format_type(oid, NULL)' ' from pg_type;') results = cursor.fetchall() for oid, typname, formatted_type in results: if typname == 'jsonb': self.column_map[oid] = postgres_ext.BinaryJSONField for oid in self.array_types: self.column_map[oid] = postgres_ext.ArrayField class MySQLMetadata(Metadata): if FIELD_TYPE is None: column_map = {} else: column_map = { FIELD_TYPE.BLOB: TextField, FIELD_TYPE.CHAR: CharField, FIELD_TYPE.DATE: DateField, FIELD_TYPE.DATETIME: DateTimeField, FIELD_TYPE.DECIMAL: DecimalField, FIELD_TYPE.DOUBLE: FloatField, FIELD_TYPE.FLOAT: FloatField, FIELD_TYPE.INT24: IntegerField, FIELD_TYPE.LONG_BLOB: TextField, FIELD_TYPE.LONG: IntegerField, FIELD_TYPE.LONGLONG: BigIntegerField, FIELD_TYPE.MEDIUM_BLOB: TextField, FIELD_TYPE.NEWDECIMAL: DecimalField, FIELD_TYPE.SHORT: IntegerField, FIELD_TYPE.STRING: CharField, FIELD_TYPE.TIMESTAMP: DateTimeField, FIELD_TYPE.TIME: TimeField, FIELD_TYPE.TINY_BLOB: TextField, FIELD_TYPE.TINY: IntegerField, FIELD_TYPE.VAR_STRING: CharField, } def __init__(self, database, **kwargs): if 'password' in kwargs: kwargs['passwd'] = kwargs.pop('password') super(MySQLMetadata, self).__init__(database, **kwargs) def get_column_types(self, table, schema=None): column_types = {} # Look up the actual column type for each column. cursor = self.execute('SELECT * FROM `%s` LIMIT 1' % table) # Store column metadata in dictionary keyed by column name. 
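        # The DB-API cursor.description tuples carry MySQL's type codes, so a
        # LIMIT-1 select is enough to learn every column's type without
        # transferring any meaningful amount of data.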
        for column_description in cursor.description:
            name, type_code = column_description[:2]
            column_types[name] = self.column_map.get(type_code, UnknownField)
        return column_types, {}


class SqliteMetadata(Metadata):
    column_map = {
        'bigint': BigIntegerField,
        'blob': BlobField,
        'bool': BooleanField,
        'boolean': BooleanField,
        'char': CharField,
        'date': DateField,
        'datetime': DateTimeField,
        'decimal': DecimalField,
        'float': FloatField,
        'integer': IntegerField,
        'integer unsigned': IntegerField,
        'int': IntegerField,
        'long': BigIntegerField,
        'numeric': DecimalField,
        'real': FloatField,
        'smallinteger': IntegerField,
        'smallint': IntegerField,
        'smallint unsigned': IntegerField,
        'text': TextField,
        'time': TimeField,
        'varchar': CharField,
    }

    begin = r'(?:["\[\(]+)?'
    end = r'(?:["\]\)]+)?'
    re_foreign_key = (
        r'(?:FOREIGN KEY\s*)?'
        r'{begin}(.+?){end}\s+(?:.+\s+)?'
        r'references\s+{begin}(.+?){end}'
        r'\s*\(["|\[]?(.+?)["|\]]?\)').format(begin=begin, end=end)
    re_varchar = r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$'

    def _map_col(self, column_type):
        raw_column_type = column_type.lower()
        if raw_column_type in self.column_map:
            field_class = self.column_map[raw_column_type]
        elif re.search(self.re_varchar, raw_column_type):
            field_class = CharField
        else:
            column_type = re.sub(r'\(.+\)', '', raw_column_type)
            if column_type == '':
                field_class = BareField
            else:
                field_class = self.column_map.get(column_type, UnknownField)
        return field_class

    def get_column_types(self, table, schema=None):
        column_types = {}
        columns = self.database.get_columns(table)
        for column in columns:
            column_types[column.name] = self._map_col(column.data_type)
        return column_types, {}


_DatabaseMetadata = namedtuple('_DatabaseMetadata', (
    'columns',
    'primary_keys',
    'foreign_keys',
    'model_names',
    'indexes'))


class DatabaseMetadata(_DatabaseMetadata):
    def multi_column_indexes(self, table):
        accum = []
        for index in self.indexes[table]:
            if len(index.columns) > 1:
                field_names = [self.columns[table][column].name
                               for column in index.columns
                               if column in self.columns[table]]
                accum.append((field_names, index.unique))
        return accum

    def column_indexes(self, table):
        accum = {}
        for index in self.indexes[table]:
            if len(index.columns) == 1:
                accum[index.columns[0]] = index.unique
        return accum


class Introspector(object):
    pk_classes = [AutoField, IntegerField]

    def __init__(self, metadata, schema=None):
        self.metadata = metadata
        self.schema = schema

    def __repr__(self):
        return '<Introspector: %s>' % self.metadata.database

    @classmethod
    def from_database(cls, database, schema=None):
        if isinstance(database, Proxy):
            if database.obj is None:
                raise ValueError('Cannot introspect an uninitialized Proxy.')
            database = database.obj  # Reference the proxied db obj.
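        # Typical usage is a sketch like the following (SqliteDatabase and
        # the file name app.db are just examples):
        #
        #   db = SqliteDatabase('app.db')
        #   introspector = Introspector.from_database(db)
        #   models = introspector.generate_models()
        #
        # The appropriate Metadata implementation is chosen below based on
        # the database class.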
if CockroachDatabase and isinstance(database, CockroachDatabase): metadata = CockroachDBMetadata(database) elif isinstance(database, PostgresqlDatabase): metadata = PostgresqlMetadata(database) elif isinstance(database, MySQLDatabase): metadata = MySQLMetadata(database) elif isinstance(database, SqliteDatabase): metadata = SqliteMetadata(database) else: raise ValueError('Introspection not supported for %r' % database) return cls(metadata, schema=schema) def get_database_class(self): return type(self.metadata.database) def get_database_name(self): return self.metadata.database.database def get_database_kwargs(self): return self.metadata.database.connect_params def get_additional_imports(self): if self.metadata.requires_extension: return '\n' + self.metadata.extension_import return '' def make_model_name(self, table, snake_case=True): if snake_case: table = make_snake_case(table) model = re.sub(r'[^\w]+', '', table) model_name = ''.join(sub.title() for sub in model.split('_')) if not model_name[0].isalpha(): model_name = 'T' + model_name return model_name def make_column_name(self, column, is_foreign_key=False, snake_case=True): column = column.strip() if snake_case: column = make_snake_case(column) column = column.lower() if is_foreign_key: # Strip "_id" from foreign keys, unless the foreign-key happens to # be named "_id", in which case the name is retained. column = re.sub('_id$', '', column) or column # Remove characters that are invalid for Python identifiers. column = re.sub(r'[^\w]+', '_', column) if column in RESERVED_WORDS: column += '_' if len(column) and column[0].isdigit(): column = '_' + column return column def introspect(self, table_names=None, literal_column_names=False, include_views=False, snake_case=True): # Retrieve all the tables in the database. tables = self.metadata.database.get_tables(schema=self.schema) if include_views: views = self.metadata.database.get_views(schema=self.schema) tables.extend([view.name for view in views]) if table_names is not None: tables = [table for table in tables if table in table_names] table_set = set(tables) # Store a mapping of table name -> dictionary of columns. columns = {} # Store a mapping of table name -> set of primary key columns. primary_keys = {} # Store a mapping of table -> foreign keys. foreign_keys = {} # Store a mapping of table name -> model name. model_names = {} # Store a mapping of table name -> indexes. indexes = {} # Gather the columns for each table. for table in tables: table_indexes = self.metadata.get_indexes(table, self.schema) table_columns = self.metadata.get_columns(table, self.schema) try: foreign_keys[table] = self.metadata.get_foreign_keys( table, self.schema) except ValueError as exc: foreign_keys[table] = [] else: # If there is a possibility we could exclude a dependent table, # ensure that we introspect it so FKs will work. if table_names is not None: for foreign_key in foreign_keys[table]: if foreign_key.dest_table not in table_set: tables.append(foreign_key.dest_table) table_set.add(foreign_key.dest_table) model_names[table] = self.make_model_name(table, snake_case) # Collect sets of all the column names as well as all the # foreign-key column names. 
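            # These sets drive the renaming pass below: a foreign-key column
            # "user_id" is normally shortened to "user", but if the table
            # also contains a "user" column, the original "user_id" name is
            # kept to avoid a collision.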
lower_col_names = set(column_name.lower() for column_name in table_columns) fks = set(fk_col.column for fk_col in foreign_keys[table]) for col_name, column in table_columns.items(): if literal_column_names: new_name = re.sub(r'[^\w]+', '_', col_name) else: new_name = self.make_column_name(col_name, col_name in fks, snake_case) # If we have two columns, "parent" and "parent_id", ensure # that when we don't introduce naming conflicts. lower_name = col_name.lower() if lower_name.endswith('_id') and new_name in lower_col_names: new_name = col_name.lower() column.name = new_name for index in table_indexes: if len(index.columns) == 1: column = index.columns[0] if column in table_columns: table_columns[column].unique = index.unique table_columns[column].index = True primary_keys[table] = self.metadata.get_primary_keys( table, self.schema) columns[table] = table_columns indexes[table] = table_indexes # Gather all instances where we might have a `related_name` conflict, # either due to multiple FKs on a table pointing to the same table, # or a related_name that would conflict with an existing field. related_names = {} sort_fn = lambda foreign_key: foreign_key.column for table in tables: models_referenced = set() for foreign_key in sorted(foreign_keys[table], key=sort_fn): try: column = columns[table][foreign_key.column] except KeyError: continue dest_table = foreign_key.dest_table if dest_table in models_referenced: related_names[column] = '%s_%s_set' % ( dest_table, column.name) else: models_referenced.add(dest_table) # On the second pass convert all foreign keys. for table in tables: for foreign_key in foreign_keys[table]: src = columns[foreign_key.table][foreign_key.column] try: dest = columns[foreign_key.dest_table][ foreign_key.dest_column] except KeyError: dest = None src.set_foreign_key( foreign_key=foreign_key, model_names=model_names, dest=dest, related_name=related_names.get(src)) return DatabaseMetadata( columns, primary_keys, foreign_keys, model_names, indexes) def generate_models(self, skip_invalid=False, table_names=None, literal_column_names=False, bare_fields=False, include_views=False): database = self.introspect(table_names, literal_column_names, include_views) models = {} class BaseModel(Model): class Meta: database = self.metadata.database schema = self.schema pending = set() def _create_model(table, models): pending.add(table) for foreign_key in database.foreign_keys[table]: dest = foreign_key.dest_table if dest not in models and dest != table: if dest in pending: warnings.warn('Possible reference cycle found between ' '%s and %s' % (table, dest)) else: _create_model(dest, models) primary_keys = [] columns = database.columns[table] for column_name, column in columns.items(): if column.primary_key: primary_keys.append(column.name) multi_column_indexes = database.multi_column_indexes(table) column_indexes = database.column_indexes(table) class Meta: indexes = multi_column_indexes table_name = table # Fix models with multi-column primary keys. 
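            # Tables with no declared primary key get Meta.primary_key =
            # False unless an "id" column exists; declared multi-column keys
            # are expressed as a CompositeKey below.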
composite_key = False if len(primary_keys) == 0: if 'id' not in columns: Meta.primary_key = False else: primary_keys = columns.keys() if len(primary_keys) > 1: Meta.primary_key = CompositeKey(*[ field.name for col, field in columns.items() if col in primary_keys]) composite_key = True attrs = {'Meta': Meta} for column_name, column in columns.items(): FieldClass = column.field_class if FieldClass is not ForeignKeyField and bare_fields: FieldClass = BareField elif FieldClass is UnknownField: FieldClass = BareField params = { 'column_name': column_name, 'null': column.nullable} if column.primary_key and composite_key: if FieldClass is AutoField: FieldClass = IntegerField params['primary_key'] = False elif column.primary_key and FieldClass is not AutoField: params['primary_key'] = True if column.is_foreign_key(): if column.is_self_referential_fk(): params['model'] = 'self' else: dest_table = column.foreign_key.dest_table if dest_table in models: params['model'] = models[dest_table] else: FieldClass = DeferredForeignKey params['rel_model_name'] = dest_table if column.to_field: params['field'] = column.to_field # Generate a unique related name. params['backref'] = '%s_%s_rel' % (table, column_name) if column.default is not None: constraint = SQL('DEFAULT %s' % column.default) params['constraints'] = [constraint] if not column.is_primary_key(): if column_name in column_indexes: if column_indexes[column_name]: params['unique'] = True elif not column.is_foreign_key(): params['index'] = True else: params['index'] = False attrs[column.name] = FieldClass(**params) try: models[table] = type(str(table), (BaseModel,), attrs) except ValueError: if not skip_invalid: raise finally: if table in pending: pending.remove(table) # Actually generate Model classes. for table, model in sorted(database.model_names.items()): if table not in models: _create_model(table, models) return models def introspect(database, schema=None): introspector = Introspector.from_database(database, schema=schema) return introspector.introspect() def generate_models(database, schema=None, **options): introspector = Introspector.from_database(database, schema=schema) return introspector.generate_models(**options) def print_model(model, indexes=True, inline_indexes=False): print(model._meta.name) for field in model._meta.sorted_fields: parts = [' %s %s' % (field.name, field.field_type)] if field.primary_key: parts.append(' PK') elif inline_indexes: if field.unique: parts.append(' UNIQUE') elif field.index: parts.append(' INDEX') if isinstance(field, ForeignKeyField): parts.append(' FK: %s.%s' % (field.rel_model.__name__, field.rel_field.name)) print(''.join(parts)) if indexes: index_list = model._meta.fields_to_index() if not index_list: return print('\nindex(es)') for index in index_list: parts = [' '] ctx = model._meta.database.get_sql_context() with ctx.scope_values(param='%s', quote='""'): ctx.sql(CommaNodeList(index._expressions)) if index._where: ctx.literal(' WHERE ') ctx.sql(index._where) sql, params = ctx.query() clean = sql % tuple(map(_query_val_transform, params)) parts.append(clean.replace('"', '')) if index._unique: parts.append(' UNIQUE') print(''.join(parts)) def get_table_sql(model): sql, params = model._schema._create_table().query() if model._meta.database.param != '%s': sql = sql.replace(model._meta.database.param, '%s') # Format and indent the table declaration, simplest possible approach. 
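    # For example, a single-line declaration such as:
    #
    #   CREATE TABLE "tweet" ("id" INTEGER NOT NULL PRIMARY KEY, "content" TEXT NOT NULL)
    #
    # is reflowed by the regex below into:
    #
    #   CREATE TABLE "tweet" (
    #     "id" INTEGER NOT NULL PRIMARY KEY,
    #     "content" TEXT NOT NULL
    #   )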
match_obj = re.match(r'^(.+?\()(.+)(\).*)', sql) create, columns, extra = match_obj.groups() indented = ',\n'.join(' %s' % column for column in columns.split(', ')) clean = '\n'.join((create, indented, extra)).strip() return clean % tuple(map(_query_val_transform, params)) def print_table_sql(model): print(get_table_sql(model)) peewee-3.17.7/playhouse/shortcuts.py000066400000000000000000000267511470346076600175340ustar00rootroot00000000000000import threading from peewee import * from peewee import Alias from peewee import CompoundSelectQuery from peewee import Metadata from peewee import callable_ from peewee import __deprecated__ _clone_set = lambda s: set(s) if s else set() def model_to_dict(model, recurse=True, backrefs=False, only=None, exclude=None, seen=None, extra_attrs=None, fields_from_query=None, max_depth=None, manytomany=False): """ Convert a model instance (and any related objects) to a dictionary. :param bool recurse: Whether foreign-keys should be recursed. :param bool backrefs: Whether lists of related objects should be recursed. :param only: A list (or set) of field instances indicating which fields should be included. :param exclude: A list (or set) of field instances that should be excluded from the dictionary. :param list extra_attrs: Names of model instance attributes or methods that should be included. :param SelectQuery fields_from_query: Query that was source of model. Take fields explicitly selected by the query and serialize them. :param int max_depth: Maximum depth to recurse, value <= 0 means no max. :param bool manytomany: Process many-to-many fields. """ max_depth = -1 if max_depth is None else max_depth if max_depth == 0: recurse = False only = _clone_set(only) extra_attrs = _clone_set(extra_attrs) should_skip = lambda n: (n in exclude) or (only and (n not in only)) if fields_from_query is not None: only.add('__sentinel__') # Add a placeholder to make non-empty. 
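        # should_skip() treats a non-empty "only" set as an allow-list, so
        # the sentinel ensures fields that were not selected by the query are
        # skipped, instead of an empty set disabling the filter entirely.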
for item in fields_from_query._returning: if isinstance(item, Field): only.add(item) elif isinstance(item, Alias): extra_attrs.add(item._alias) data = {} exclude = _clone_set(exclude) seen = _clone_set(seen) exclude |= seen model_class = type(model) if manytomany: for name, m2m in model._meta.manytomany.items(): if should_skip(name): continue exclude.update((m2m, m2m.rel_model._meta.manytomany[m2m.backref])) for fkf in m2m.through_model._meta.refs: exclude.add(fkf) accum = [] for rel_obj in getattr(model, name): accum.append(model_to_dict( rel_obj, recurse=recurse, backrefs=backrefs, only=only, exclude=exclude, max_depth=max_depth - 1)) data[name] = accum for field in model._meta.sorted_fields: if should_skip(field): continue field_data = model.__data__.get(field.name) if isinstance(field, ForeignKeyField) and recurse: if field_data is not None: seen.add(field) rel_obj = getattr(model, field.name) field_data = model_to_dict( rel_obj, recurse=recurse, backrefs=backrefs, only=only, exclude=exclude, seen=seen, max_depth=max_depth - 1) else: field_data = None data[field.name] = field_data if extra_attrs: for attr_name in extra_attrs: attr = getattr(model, attr_name) if callable_(attr): data[attr_name] = attr() else: data[attr_name] = attr if backrefs and recurse: for foreign_key, rel_model in model._meta.backrefs.items(): if foreign_key.backref == '+': continue descriptor = getattr(model_class, foreign_key.backref) if descriptor in exclude or foreign_key in exclude: continue if only and (descriptor not in only) and (foreign_key not in only): continue accum = [] exclude.add(foreign_key) related_query = getattr(model, foreign_key.backref) for rel_obj in related_query: accum.append(model_to_dict( rel_obj, recurse=recurse, backrefs=backrefs, only=only, exclude=exclude, max_depth=max_depth - 1)) data[foreign_key.backref] = accum return data def update_model_from_dict(instance, data, ignore_unknown=False): meta = instance._meta backrefs = dict([(fk.backref, fk) for fk in meta.backrefs]) for key, value in data.items(): if key in meta.combined: field = meta.combined[key] is_backref = False elif key in backrefs: field = backrefs[key] is_backref = True elif ignore_unknown: setattr(instance, key, value) continue else: raise AttributeError('Unrecognized attribute "%s" for model ' 'class %s.' % (key, type(instance))) is_foreign_key = isinstance(field, ForeignKeyField) if not is_backref and is_foreign_key and isinstance(value, dict): try: rel_instance = instance.__rel__[field.name] except KeyError: rel_instance = field.rel_model() setattr( instance, field.name, update_model_from_dict(rel_instance, value, ignore_unknown)) elif is_backref and isinstance(value, (list, tuple)): instances = [ dict_to_model(field.model, row_data, ignore_unknown) for row_data in value] for rel_instance in instances: setattr(rel_instance, field.name, instance) setattr(instance, field.backref, instances) else: setattr(instance, field.name, value) return instance def dict_to_model(model_class, data, ignore_unknown=False): return update_model_from_dict(model_class(), data, ignore_unknown) def insert_where(cls, data, where=None): """ Helper for generating conditional INSERT queries. 
For example, prevent INSERTing a new tweet if the user has tweeted within the last hour:: INSERT INTO "tweet" ("user_id", "content", "timestamp") SELECT 234, 'some content', now() WHERE NOT EXISTS ( SELECT 1 FROM "tweet" WHERE user_id = 234 AND timestamp > now() - interval '1 hour') Using this helper: cond = ~fn.EXISTS(Tweet.select().where( Tweet.user == user_obj, Tweet.timestamp > one_hour_ago)) iq = insert_where(Tweet, { Tweet.user: user_obj, Tweet.content: 'some content'}, where=cond) res = iq.execute() """ for field, default in cls._meta.defaults.items(): if field.name in data or field in data: continue value = default() if callable_(default) else default data[field] = value fields, values = zip(*data.items()) sq = Select(columns=values).where(where) return cls.insert_from(sq, fields).as_rowcount() class ReconnectMixin(object): """ Mixin class that attempts to automatically reconnect to the database under certain error conditions. For example, MySQL servers will typically close connections that are idle for 28800 seconds ("wait_timeout" setting). If your application makes use of long-lived connections, you may find your connections are closed after a period of no activity. This mixin will attempt to reconnect automatically when these errors occur. This mixin class probably should not be used with Postgres (unless you REALLY know what you are doing) and definitely has no business being used with Sqlite. If you wish to use with Postgres, you will need to adapt the `reconnect_errors` attribute to something appropriate for Postgres. """ reconnect_errors = ( # Error class, error message fragment (or empty string for all). (OperationalError, '2006'), # MySQL server has gone away. (OperationalError, '2013'), # Lost connection to MySQL server. (OperationalError, '2014'), # Commands out of sync. (OperationalError, '4031'), # Client interaction timeout. # mysql-connector raises a slightly different error when an idle # connection is terminated by the server. This is equivalent to 2013. (OperationalError, 'MySQL Connection not available.'), # Postgres error examples: #(OperationalError, 'terminat'), #(InterfaceError, 'connection already closed'), ) def __init__(self, *args, **kwargs): super(ReconnectMixin, self).__init__(*args, **kwargs) # Normalize the reconnect errors to a more efficient data-structure. self._reconnect_errors = {} for exc_class, err_fragment in self.reconnect_errors: self._reconnect_errors.setdefault(exc_class, []) self._reconnect_errors[exc_class].append(err_fragment.lower()) def execute_sql(self, sql, params=None, commit=None): if commit is not None: __deprecated__('"commit" has been deprecated and is a no-op.') return self._reconnect(super(ReconnectMixin, self).execute_sql, sql, params) def begin(self): return self._reconnect(super(ReconnectMixin, self).begin) def _reconnect(self, func, *args, **kwargs): try: return func(*args, **kwargs) except Exception as exc: # If we are in a transaction, do not reconnect silently as # any changes could be lost. 
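            # The retry below is attempted exactly once: if the call fails
            # again after reconnecting, the new exception propagates to the
            # caller unchanged.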
if self.in_transaction(): raise exc exc_class = type(exc) if exc_class not in self._reconnect_errors: raise exc exc_repr = str(exc).lower() for err_fragment in self._reconnect_errors[exc_class]: if err_fragment in exc_repr: break else: raise exc if not self.is_closed(): self.close() self.connect() return func(*args, **kwargs) def resolve_multimodel_query(query, key='_model_identifier'): mapping = {} accum = [query] while accum: curr = accum.pop() if isinstance(curr, CompoundSelectQuery): accum.extend((curr.lhs, curr.rhs)) continue model_class = curr.model name = model_class._meta.table_name mapping[name] = model_class curr._returning.append(Value(name).alias(key)) def wrapped_iterator(): for row in query.dicts().iterator(): identifier = row.pop(key) model = mapping[identifier] yield model(**row) return wrapped_iterator() class ThreadSafeDatabaseMetadata(Metadata): """ Metadata class to allow swapping database at run-time in a multi-threaded application. To use: class Base(Model): class Meta: model_metadata_class = ThreadSafeDatabaseMetadata """ def __init__(self, *args, **kwargs): # The database attribute is stored in a thread-local. self._database = None self._local = threading.local() super(ThreadSafeDatabaseMetadata, self).__init__(*args, **kwargs) def _get_db(self): return getattr(self._local, 'database', self._database) def _set_db(self, db): if self._database is None: self._database = db self._local.database = db database = property(_get_db, _set_db) peewee-3.17.7/playhouse/signals.py000066400000000000000000000047171470346076600171340ustar00rootroot00000000000000""" Provide django-style hooks for model events. """ from peewee import Model as _Model class Signal(object): def __init__(self): self._flush() def _flush(self): self._receivers = set() self._receiver_list = [] def connect(self, receiver, name=None, sender=None): name = name or receiver.__name__ key = (name, sender) if key not in self._receivers: self._receivers.add(key) self._receiver_list.append((name, receiver, sender)) else: raise ValueError('receiver named %s (for sender=%s) already ' 'connected' % (name, sender or 'any')) def disconnect(self, receiver=None, name=None, sender=None): if receiver: name = name or receiver.__name__ if not name: raise ValueError('a receiver or a name must be provided') key = (name, sender) if key not in self._receivers: raise ValueError('receiver named %s for sender=%s not found.' 
                             % (name, sender or 'any'))
        self._receivers.remove(key)
        self._receiver_list = [(n, r, s)
                               for n, r, s in self._receiver_list
                               if (n, s) != key]

    def __call__(self, name=None, sender=None):
        def decorator(fn):
            self.connect(fn, name, sender)
            return fn
        return decorator

    def send(self, instance, *args, **kwargs):
        sender = type(instance)
        responses = []
        for n, r, s in self._receiver_list:
            if s is None or isinstance(instance, s):
                responses.append((r, r(sender, instance, *args, **kwargs)))
        return responses


pre_save = Signal()
post_save = Signal()
pre_delete = Signal()
post_delete = Signal()
pre_init = Signal()


class Model(_Model):
    def __init__(self, *args, **kwargs):
        super(Model, self).__init__(*args, **kwargs)
        pre_init.send(self)

    def save(self, *args, **kwargs):
        pk_value = self._pk if self._meta.primary_key else True
        created = kwargs.get('force_insert', False) or not bool(pk_value)
        pre_save.send(self, created=created)
        ret = super(Model, self).save(*args, **kwargs)
        post_save.send(self, created=created)
        return ret

    def delete_instance(self, *args, **kwargs):
        pre_delete.send(self)
        ret = super(Model, self).delete_instance(*args, **kwargs)
        post_delete.send(self)
        return ret
peewee-3.17.7/playhouse/sqlcipher_ext.py000066400000000000000000000070601470346076600203400ustar00rootroot00000000000000"""
Peewee integration with pysqlcipher.

Project page: https://github.com/leapcode/pysqlcipher/

**WARNING!!! EXPERIMENTAL!!!**

* Although this extension's code is short, it has not been properly
  peer-reviewed yet and may have introduced vulnerabilities.

Also note that this code relies on pysqlcipher and sqlcipher, and the code
there might have vulnerabilities as well, but since these are widely used
crypto modules, we can expect "short zero days" there.

Example usage:

     from playhouse.sqlcipher_ext import SqlCipherDatabase
     db = SqlCipherDatabase('/path/to/my.db',
                            passphrase="don'tuseme4real")

* `passphrase`: should be "long enough".
  Note that *length beats vocabulary* (the security gain is exponential in
  length), and even a lowercase-only passphrase like
  easytorememberyethardforotherstoguess packs more noise than 8 random
  printable characters and *can* be memorized.

When opening an existing database, the passphrase should be the one used
when the database was created. If the passphrase is incorrect, an exception
will only be raised **when you access the database**.

If you need to ask for an interactive passphrase, here's example code you
can put after the `db = ...` line:

    try:
        # Just access the database so that it checks the encryption.
        db.get_tables()
    except peewee.DatabaseError as e:
        # We're looking for a DatabaseError with a specific error message,
        # one whose message *means* "the passphrase is wrong".
        if e.args[0] == 'file is encrypted or is not a database':
            raise Exception('Prompt the user for the passphrase again.')
        else:
            # A different DatabaseError. Raise it.
raise e See a more elaborate example with this code at https://gist.github.com/thedod/11048875 """ import datetime import decimal import sys from peewee import * from playhouse.sqlite_ext import SqliteExtDatabase if sys.version_info[0] != 3: from pysqlcipher import dbapi2 as sqlcipher else: try: from sqlcipher3 import dbapi2 as sqlcipher except ImportError: from pysqlcipher3 import dbapi2 as sqlcipher sqlcipher.register_adapter(decimal.Decimal, str) sqlcipher.register_adapter(datetime.date, str) sqlcipher.register_adapter(datetime.time, str) __sqlcipher_version__ = sqlcipher.sqlite_version_info class _SqlCipherDatabase(object): server_version = __sqlcipher_version__ def _connect(self): params = dict(self.connect_params) passphrase = params.pop('passphrase', '').replace("'", "''") conn = sqlcipher.connect(self.database, isolation_level=None, **params) try: if passphrase: conn.execute("PRAGMA key='%s'" % passphrase) self._add_conn_hooks(conn) except: conn.close() raise return conn def set_passphrase(self, passphrase): if not self.is_closed(): raise ImproperlyConfigured('Cannot set passphrase when database ' 'is open. To change passphrase of an ' 'open database use the rekey() method.') self.connect_params['passphrase'] = passphrase def rekey(self, passphrase): if self.is_closed(): self.connect() self.execute_sql("PRAGMA rekey='%s'" % passphrase.replace("'", "''")) self.connect_params['passphrase'] = passphrase return True class SqlCipherDatabase(_SqlCipherDatabase, SqliteDatabase): pass class SqlCipherExtDatabase(_SqlCipherDatabase, SqliteExtDatabase): pass peewee-3.17.7/playhouse/sqlite_changelog.py000066400000000000000000000112711470346076600207750ustar00rootroot00000000000000from peewee import * from playhouse.sqlite_ext import JSONField class BaseChangeLog(Model): timestamp = DateTimeField(constraints=[SQL('DEFAULT CURRENT_TIMESTAMP')]) action = TextField() table = TextField() primary_key = IntegerField() changes = JSONField() class ChangeLog(object): # Model class that will serve as the base for the changelog. This model # will be subclassed and mapped to your application database. base_model = BaseChangeLog # Template for the triggers that handle updating the changelog table. # table: table name # action: insert / update / delete # new_old: NEW or OLD (OLD is for DELETE) # primary_key: table primary key column name # column_array: output of build_column_array() # change_table: changelog table name template = """CREATE TRIGGER IF NOT EXISTS %(table)s_changes_%(action)s AFTER %(action)s ON %(table)s BEGIN INSERT INTO %(change_table)s ("action", "table", "primary_key", "changes") SELECT '%(action)s', '%(table)s', %(new_old)s."%(primary_key)s", "changes" FROM ( SELECT json_group_object( col, json_array( case when json_valid("oldval") then json("oldval") else "oldval" end, case when json_valid("newval") then json("newval") else "newval" end) ) AS "changes" FROM ( SELECT json_extract(value, '$[0]') as "col", json_extract(value, '$[1]') as "oldval", json_extract(value, '$[2]') as "newval" FROM json_each(json_array(%(column_array)s)) WHERE "oldval" IS NOT "newval" ) ); END;""" drop_template = 'DROP TRIGGER IF EXISTS %(table)s_changes_%(action)s' _actions = ('INSERT', 'UPDATE', 'DELETE') def __init__(self, db, table_name='changelog'): self.db = db self.table_name = table_name def _build_column_array(self, model, use_old, use_new, skip_fields=None): # Builds a list of SQL expressions for each field we are tracking. This # is used as the data source for change tracking in our trigger. 
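        # For an UPDATE trigger on a "content" column, for example, this
        # yields the fragment:
        #
        #   json_array('content', OLD."content", NEW."content")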
col_array = [] for field in model._meta.sorted_fields: if field.primary_key: continue if skip_fields is not None and field.name in skip_fields: continue column = field.column_name new = 'NULL' if not use_new else 'NEW."%s"' % column old = 'NULL' if not use_old else 'OLD."%s"' % column if isinstance(field, JSONField): # Ensure that values are cast to JSON so that the serialization # is preserved when calculating the old / new. if use_old: old = 'json(%s)' % old if use_new: new = 'json(%s)' % new col_array.append("json_array('%s', %s, %s)" % (column, old, new)) return ', '.join(col_array) def trigger_sql(self, model, action, skip_fields=None): assert action in self._actions use_old = action != 'INSERT' use_new = action != 'DELETE' cols = self._build_column_array(model, use_old, use_new, skip_fields) return self.template % { 'table': model._meta.table_name, 'action': action, 'new_old': 'NEW' if action != 'DELETE' else 'OLD', 'primary_key': model._meta.primary_key.column_name, 'column_array': cols, 'change_table': self.table_name} def drop_trigger_sql(self, model, action): assert action in self._actions return self.drop_template % { 'table': model._meta.table_name, 'action': action} @property def model(self): if not hasattr(self, '_changelog_model'): class ChangeLog(self.base_model): class Meta: database = self.db table_name = self.table_name self._changelog_model = ChangeLog return self._changelog_model def install(self, model, skip_fields=None, drop=True, insert=True, update=True, delete=True, create_table=True): ChangeLog = self.model if create_table: ChangeLog.create_table() actions = list(zip((insert, update, delete), self._actions)) if drop: for _, action in actions: self.db.execute_sql(self.drop_trigger_sql(model, action)) for enabled, action in actions: if enabled: sql = self.trigger_sql(model, action, skip_fields) self.db.execute_sql(sql) peewee-3.17.7/playhouse/sqlite_ext.py000066400000000000000000001366101470346076600176530ustar00rootroot00000000000000import json import math import re import struct import sys from peewee import * from peewee import ColumnBase from peewee import EnclosedNodeList from peewee import Entity from peewee import Expression from peewee import Insert from peewee import Node from peewee import NodeList from peewee import OP from peewee import VirtualField from peewee import merge_dict from peewee import sqlite3 try: from playhouse._sqlite_ext import ( backup, backup_to_file, Blob, ConnectionHelper, register_bloomfilter, register_hash_functions, register_rank_functions, sqlite_get_db_status, sqlite_get_status, TableFunction, ZeroBlob, ) CYTHON_SQLITE_EXTENSIONS = True except ImportError: CYTHON_SQLITE_EXTENSIONS = False if sys.version_info[0] == 3: basestring = str FTS3_MATCHINFO = 'pcx' FTS4_MATCHINFO = 'pcnalx' if sqlite3 is not None: FTS_VERSION = 4 if sqlite3.sqlite_version_info[:3] >= (3, 7, 4) else 3 else: FTS_VERSION = 3 FTS5_MIN_SQLITE_VERSION = (3, 9, 0) class RowIDField(AutoField): auto_increment = True column_name = name = required_name = 'rowid' def bind(self, model, name, *args): if name != self.required_name: raise ValueError('%s must be named "%s".' 
% (type(self), self.required_name)) super(RowIDField, self).bind(model, name, *args) class DocIDField(RowIDField): column_name = name = required_name = 'docid' class AutoIncrementField(AutoField): def ddl(self, ctx): node_list = super(AutoIncrementField, self).ddl(ctx) return NodeList((node_list, SQL('AUTOINCREMENT'))) class TDecimalField(DecimalField): field_type = 'TEXT' def get_modifiers(self): pass class JSONPath(ColumnBase): def __init__(self, field, path=None): super(JSONPath, self).__init__() self._field = field self._path = path or () @property def path(self): return Value('$%s' % ''.join(self._path)) def __getitem__(self, idx): if isinstance(idx, int) or idx == '#': item = '[%s]' % idx else: item = '.%s' % idx return type(self)(self._field, self._path + (item,)) def append(self, value, as_json=None): if as_json or isinstance(value, (list, dict)): value = fn.json(self._field._json_dumps(value)) return fn.json_set(self._field, self['#'].path, value) def _json_operation(self, func, value, as_json=None): if as_json or isinstance(value, (list, dict)): value = fn.json(self._field._json_dumps(value)) return func(self._field, self.path, value) def insert(self, value, as_json=None): return self._json_operation(fn.json_insert, value, as_json) def set(self, value, as_json=None): return self._json_operation(fn.json_set, value, as_json) def replace(self, value, as_json=None): return self._json_operation(fn.json_replace, value, as_json) def update(self, value): return self.set(fn.json_patch(self, self._field._json_dumps(value))) def remove(self): return fn.json_remove(self._field, self.path) def json_type(self): return fn.json_type(self._field, self.path) def length(self): return fn.json_array_length(self._field, self.path) def children(self): return fn.json_each(self._field, self.path) def tree(self): return fn.json_tree(self._field, self.path) def __sql__(self, ctx): return ctx.sql(fn.json_extract(self._field, self.path) if self._path else self._field) class JSONBPath(JSONPath): def append(self, value, as_json=None): if as_json or isinstance(value, (list, dict)): value = fn.jsonb(self._field._json_dumps(value)) return fn.jsonb_set(self._field, self['#'].path, value) def _json_operation(self, func, value, as_json=None): if as_json or isinstance(value, (list, dict)): value = fn.jsonb(self._field._json_dumps(value)) return func(self._field, self.path, value) def insert(self, value, as_json=None): return self._json_operation(fn.jsonb_insert, value, as_json) def set(self, value, as_json=None): return self._json_operation(fn.jsonb_set, value, as_json) def replace(self, value, as_json=None): return self._json_operation(fn.jsonb_replace, value, as_json) def update(self, value): return self.set(fn.jsonb_patch(self, self._field._json_dumps(value))) def remove(self): return fn.jsonb_remove(self._field, self.path) def __sql__(self, ctx): return ctx.sql(fn.jsonb_extract(self._field, self.path) if self._path else self._field) class JSONField(TextField): field_type = 'JSON' unpack = False Path = JSONPath def __init__(self, json_dumps=None, json_loads=None, **kwargs): self._json_dumps = json_dumps or json.dumps self._json_loads = json_loads or json.loads super(JSONField, self).__init__(**kwargs) def python_value(self, value): if value is not None: try: return self._json_loads(value) except (TypeError, ValueError): return value def db_value(self, value): if value is not None: if not isinstance(value, Node): value = fn.json(self._json_dumps(value)) return value def _e(op): def inner(self, rhs): if 
isinstance(rhs, (list, dict)): rhs = Value(rhs, converter=self.db_value, unpack=False) return Expression(self, op, rhs) return inner __eq__ = _e(OP.EQ) __ne__ = _e(OP.NE) __gt__ = _e(OP.GT) __ge__ = _e(OP.GTE) __lt__ = _e(OP.LT) __le__ = _e(OP.LTE) __hash__ = Field.__hash__ def __getitem__(self, item): return self.Path(self)[item] def extract(self, *paths): paths = [Value(p, converter=False) for p in paths] return fn.json_extract(self, *paths) def extract_json(self, path): return Expression(self, '->', Value(path, converter=False)) def extract_text(self, path): return Expression(self, '->>', Value(path, converter=False)) def append(self, value, as_json=None): return self.Path(self).append(value, as_json) def insert(self, value, as_json=None): return self.Path(self).insert(value, as_json) def set(self, value, as_json=None): return self.Path(self).set(value, as_json) def replace(self, value, as_json=None): return self.Path(self).replace(value, as_json) def update(self, data): return self.Path(self).update(data) def remove(self, *paths): if not paths: return self.Path(self).remove() return fn.json_remove(self, *paths) def json_type(self): return fn.json_type(self) def length(self, path=None): args = (self, path) if path else (self,) return fn.json_array_length(*args) def children(self): """ Schema of `json_each` and `json_tree`: key, value, type TEXT (object, array, string, etc), atom (value for primitive/scalar types, NULL for array and object) id INTEGER (unique identifier for element) parent INTEGER (unique identifier of parent element or NULL) fullkey TEXT (full path describing element) path TEXT (path to the container of the current element) json JSON hidden (1st input parameter to function) root TEXT hidden (2nd input parameter, path at which to start) """ return fn.json_each(self) def tree(self): return fn.json_tree(self) class JSONBField(JSONField): field_type = 'JSONB' Path = JSONBPath def db_value(self, value): if value is not None: if not isinstance(value, Node): value = fn.jsonb(self._json_dumps(value)) return value def json(self): return fn.json(self) def extract(self, *paths): paths = [Value(p, converter=False) for p in paths] return fn.jsonb_extract(self, *paths) def remove(self, *paths): if not paths: return self.Path(self).remove() return fn.jsonb_remove(self, *paths) class SearchField(Field): def __init__(self, unindexed=False, column_name=None, **k): if k: raise ValueError('SearchField does not accept these keyword ' 'arguments: %s.' 
% sorted(k)) super(SearchField, self).__init__(unindexed=unindexed, column_name=column_name, null=True) def match(self, term): return match(self, term) @property def fts_column_index(self): if not hasattr(self, '_fts_column_index'): search_fields = [f.name for f in self.model._meta.sorted_fields if isinstance(f, SearchField)] self._fts_column_index = search_fields.index(self.name) return self._fts_column_index def highlight(self, left, right): column_idx = self.fts_column_index return fn.highlight(self.model._meta.entity, column_idx, left, right) def snippet(self, left, right, over_length='...', max_tokens=16): if not (0 < max_tokens < 65): raise ValueError('max_tokens must be between 1 and 64 (inclusive)') column_idx = self.fts_column_index return fn.snippet(self.model._meta.entity, column_idx, left, right, over_length, max_tokens) class VirtualTableSchemaManager(SchemaManager): def _create_virtual_table(self, safe=True, **options): options = self.model.clean_options( merge_dict(self.model._meta.options, options)) # Structure: # CREATE VIRTUAL TABLE # USING # ([prefix_arguments, ...] fields, ... [arguments, ...], [options...]) ctx = self._create_context() ctx.literal('CREATE VIRTUAL TABLE ') if safe: ctx.literal('IF NOT EXISTS ') (ctx .sql(self.model) .literal(' USING ')) ext_module = self.model._meta.extension_module if isinstance(ext_module, Node): return ctx.sql(ext_module) ctx.sql(SQL(ext_module)).literal(' ') arguments = [] meta = self.model._meta if meta.prefix_arguments: arguments.extend([SQL(a) for a in meta.prefix_arguments]) # Constraints, data-types, foreign and primary keys are all omitted. for field in meta.sorted_fields: if isinstance(field, (RowIDField)) or field._hidden: continue field_def = [Entity(field.column_name)] if field.unindexed: field_def.append(SQL('UNINDEXED')) arguments.append(NodeList(field_def)) if meta.arguments: arguments.extend([SQL(a) for a in meta.arguments]) if options: arguments.extend(self._create_table_option_sql(options)) return ctx.sql(EnclosedNodeList(arguments)) def _create_table(self, safe=True, **options): if issubclass(self.model, VirtualModel): return self._create_virtual_table(safe, **options) return super(VirtualTableSchemaManager, self)._create_table( safe, **options) class VirtualModel(Model): class Meta: arguments = None extension_module = None prefix_arguments = None primary_key = False schema_manager_class = VirtualTableSchemaManager @classmethod def clean_options(cls, options): return options class BaseFTSModel(VirtualModel): @classmethod def clean_options(cls, options): content = options.get('content') prefix = options.get('prefix') tokenize = options.get('tokenize') if isinstance(content, basestring) and content == '': # Special-case content-less full-text search tables. options['content'] = "''" elif isinstance(content, Field): # Special-case to ensure fields are fully-qualified. options['content'] = Entity(content.model._meta.table_name, content.column_name) if prefix: if isinstance(prefix, (list, tuple)): prefix = ','.join([str(i) for i in prefix]) options['prefix'] = "'%s'" % prefix.strip("' ") if tokenize and cls._meta.extension_module.lower() == 'fts5': # Tokenizers need to be in quoted string for FTS5, but not for FTS3 # or FTS4. options['tokenize'] = '"%s"' % tokenize return options class FTSModel(BaseFTSModel): """ VirtualModel class for creating tables that use either the FTS3 or FTS4 search extensions. Peewee automatically determines which version of the FTS extension is supported and will use FTS4 if possible. 
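    A minimal usage sketch (hypothetical model; assumes `db` is an existing
    SqliteExtDatabase):

        class DocumentIndex(FTSModel):
            title = SearchField()
            content = SearchField()

            class Meta:
                database = db

        DocumentIndex.create(title='peewee', content='a small orm')
        query = DocumentIndex.search('orm', with_score=True)  # Best-first.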
""" # FTS3/4 uses "docid" in the same way a normal table uses "rowid". docid = DocIDField() class Meta: extension_module = 'FTS%s' % FTS_VERSION @classmethod def _fts_cmd(cls, cmd): tbl = cls._meta.table_name res = cls._meta.database.execute_sql( "INSERT INTO %s(%s) VALUES('%s');" % (tbl, tbl, cmd)) return res.fetchone() @classmethod def optimize(cls): return cls._fts_cmd('optimize') @classmethod def rebuild(cls): return cls._fts_cmd('rebuild') @classmethod def integrity_check(cls): return cls._fts_cmd('integrity-check') @classmethod def merge(cls, blocks=200, segments=8): return cls._fts_cmd('merge=%s,%s' % (blocks, segments)) @classmethod def automerge(cls, state=True): return cls._fts_cmd('automerge=%s' % (state and '1' or '0')) @classmethod def match(cls, term): """ Generate a `MATCH` expression appropriate for searching this table. """ return match(cls._meta.entity, term) @classmethod def rank(cls, *weights): matchinfo = fn.matchinfo(cls._meta.entity, FTS3_MATCHINFO) return fn.fts_rank(matchinfo, *weights) @classmethod def bm25(cls, *weights): match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO) return fn.fts_bm25(match_info, *weights) @classmethod def bm25f(cls, *weights): match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO) return fn.fts_bm25f(match_info, *weights) @classmethod def lucene(cls, *weights): match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO) return fn.fts_lucene(match_info, *weights) @classmethod def _search(cls, term, weights, with_score, score_alias, score_fn, explicit_ordering): if not weights: rank = score_fn() elif isinstance(weights, dict): weight_args = [] for field in cls._meta.sorted_fields: # Attempt to get the specified weight of the field by looking # it up using it's field instance followed by name. 
field_weight = weights.get(field, weights.get(field.name, 1.0)) weight_args.append(field_weight) rank = score_fn(*weight_args) else: rank = score_fn(*weights) selection = () order_by = rank if with_score: selection = (cls, rank.alias(score_alias)) if with_score and not explicit_ordering: order_by = SQL(score_alias) return (cls .select(*selection) .where(cls.match(term)) .order_by(order_by)) @classmethod def search(cls, term, weights=None, with_score=False, score_alias='score', explicit_ordering=False): """Full-text search using selected `term`.""" return cls._search( term, weights, with_score, score_alias, cls.rank, explicit_ordering) @classmethod def search_bm25(cls, term, weights=None, with_score=False, score_alias='score', explicit_ordering=False): """Full-text search for selected `term` using the BM25 algorithm.""" return cls._search( term, weights, with_score, score_alias, cls.bm25, explicit_ordering) @classmethod def search_bm25f(cls, term, weights=None, with_score=False, score_alias='score', explicit_ordering=False): """Full-text search for selected `term` using the BM25f algorithm.""" return cls._search( term, weights, with_score, score_alias, cls.bm25f, explicit_ordering) @classmethod def search_lucene(cls, term, weights=None, with_score=False, score_alias='score', explicit_ordering=False): """Full-text search for selected `term` using the Lucene scoring algorithm.""" return cls._search( term, weights, with_score, score_alias, cls.lucene, explicit_ordering) _alphabet = 'abcdefghijklmnopqrstuvwxyz' _alphanum = (set('\t ,"(){}*:_+0123456789') | set(_alphabet) | set(_alphabet.upper()) | set((chr(26),))) _invalid_ascii = set(chr(p) for p in range(128) if chr(p) not in _alphanum) del _alphabet del _alphanum _quote_re = re.compile(r'(?:[^\s"]|"(?:\\.|[^"])*")+') class FTS5Model(BaseFTSModel): """ Requires SQLite >= 3.9.0. Table options: content: table name of external content, or empty string for "contentless" content_rowid: column name of external content primary key prefix: integer(s). Ex: '2' or '2 3 4' tokenize: porter, unicode61, ascii. Ex: 'porter unicode61' The unicode tokenizer supports the following parameters: * remove_diacritics (1 or 0, default is 1) * tokenchars (string of characters, e.g. '-_') * separators (string of characters) Parameters are passed as alternating parameter name and value, so: {'tokenize': "unicode61 remove_diacritics 0 tokenchars '-_'"} Content-less tables: If you don't need the full-text content in its original form, you can specify a content-less table. Searches and auxiliary functions will work as usual, but the only value that can be returned when SELECT-ing is the rowid. Content-less tables also do not support UPDATE or DELETE. External content tables: You can set up triggers to sync these, e.g. -- Create a table. And an external content fts5 table to index it. CREATE TABLE tbl(a INTEGER PRIMARY KEY, b); CREATE VIRTUAL TABLE ft USING fts5(b, content='tbl', content_rowid='a'); -- Triggers to keep the FTS index up to date. CREATE TRIGGER tbl_ai AFTER INSERT ON tbl BEGIN INSERT INTO ft(rowid, b) VALUES (new.a, new.b); END; CREATE TRIGGER tbl_ad AFTER DELETE ON tbl BEGIN INSERT INTO ft(ft, rowid, b) VALUES('delete', old.a, old.b); END; CREATE TRIGGER tbl_au AFTER UPDATE ON tbl BEGIN INSERT INTO ft(ft, rowid, b) VALUES('delete', old.a, old.b); INSERT INTO ft(rowid, b) VALUES (new.a, new.b); END; Built-in auxiliary functions: * bm25(tbl[, weight_0, ...
weight_n]) * highlight(tbl, col_idx, prefix, suffix) * snippet(tbl, col_idx, prefix, suffix, ?, max_tokens) """ # FTS5 does not support declared primary keys, but we can use the # implicit rowid. rowid = RowIDField() class Meta: extension_module = 'fts5' _error_messages = { 'field_type': ('Besides the implicit `rowid` column, all columns must ' 'be instances of SearchField'), 'index': 'Secondary indexes are not supported for FTS5 models', 'pk': 'FTS5 models must use the default `rowid` primary key', } @classmethod def validate_model(cls): # Perform FTS5-specific validation and options post-processing. if cls._meta.primary_key.name != 'rowid': raise ImproperlyConfigured(cls._error_messages['pk']) for field in cls._meta.fields.values(): if not isinstance(field, (SearchField, RowIDField)): raise ImproperlyConfigured(cls._error_messages['field_type']) if cls._meta.indexes: raise ImproperlyConfigured(cls._error_messages['index']) @classmethod def fts5_installed(cls): if sqlite3.sqlite_version_info[:3] < FTS5_MIN_SQLITE_VERSION: return False # Test in-memory DB to determine if the FTS5 extension is installed. tmp_db = sqlite3.connect(':memory:') try: tmp_db.execute('CREATE VIRTUAL TABLE fts5test USING fts5 (data);') except: try: tmp_db.enable_load_extension(True) tmp_db.load_extension('fts5') except: return False else: cls._meta.database.load_extension('fts5') finally: tmp_db.close() return True @staticmethod def validate_query(query): """ Simple helper function to indicate whether a search query is a valid FTS5 query. Note: this simply looks at the characters being used, and is not guaranteed to catch all problematic queries. """ tokens = _quote_re.findall(query) for token in tokens: if token.startswith('"') and token.endswith('"'): continue if set(token) & _invalid_ascii: return False return True @staticmethod def clean_query(query, replace=chr(26)): """ Clean a query of invalid tokens. """ accum = [] any_invalid = False tokens = _quote_re.findall(query) for token in tokens: if token.startswith('"') and token.endswith('"'): accum.append(token) continue token_set = set(token) invalid_for_token = token_set & _invalid_ascii if invalid_for_token: any_invalid = True for c in invalid_for_token: token = token.replace(c, replace) accum.append(token) if any_invalid: return ' '.join(accum) return query @classmethod def match(cls, term): """ Generate a `MATCH` expression appropriate for searching this table. 
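        For example, with a hypothetical model `Doc`:

            Doc.select().where(Doc.match('alpha OR beta'))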
""" return match(cls._meta.entity, term) @classmethod def rank(cls, *args): return cls.bm25(*args) if args else SQL('rank') @classmethod def bm25(cls, *weights): return fn.bm25(cls._meta.entity, *weights) @classmethod def search(cls, term, weights=None, with_score=False, score_alias='score', explicit_ordering=False): """Full-text search using selected `term`.""" return cls.search_bm25( FTS5Model.clean_query(term), weights, with_score, score_alias, explicit_ordering) @classmethod def search_bm25(cls, term, weights=None, with_score=False, score_alias='score', explicit_ordering=False): """Full-text search using selected `term`.""" if not weights: rank = SQL('rank') elif isinstance(weights, dict): weight_args = [] for field in cls._meta.sorted_fields: if isinstance(field, SearchField) and not field.unindexed: weight_args.append( weights.get(field, weights.get(field.name, 1.0))) rank = fn.bm25(cls._meta.entity, *weight_args) else: rank = fn.bm25(cls._meta.entity, *weights) selection = () order_by = rank if with_score: selection = (cls, rank.alias(score_alias)) if with_score and not explicit_ordering: order_by = SQL(score_alias) return (cls .select(*selection) .where(cls.match(FTS5Model.clean_query(term))) .order_by(order_by)) @classmethod def _fts_cmd_sql(cls, cmd, **extra_params): tbl = cls._meta.entity columns = [tbl] values = [cmd] for key, value in extra_params.items(): columns.append(Entity(key)) values.append(value) return NodeList(( SQL('INSERT INTO'), cls._meta.entity, EnclosedNodeList(columns), SQL('VALUES'), EnclosedNodeList(values))) @classmethod def _fts_cmd(cls, cmd, **extra_params): query = cls._fts_cmd_sql(cmd, **extra_params) return cls._meta.database.execute(query) @classmethod def automerge(cls, level): if not (0 <= level <= 16): raise ValueError('level must be between 0 and 16') return cls._fts_cmd('automerge', rank=level) @classmethod def merge(cls, npages): return cls._fts_cmd('merge', rank=npages) @classmethod def optimize(cls): return cls._fts_cmd('optimize') @classmethod def rebuild(cls): return cls._fts_cmd('rebuild') @classmethod def set_pgsz(cls, pgsz): return cls._fts_cmd('pgsz', rank=pgsz) @classmethod def set_rank(cls, rank_expression): return cls._fts_cmd('rank', rank=rank_expression) @classmethod def delete_all(cls): return cls._fts_cmd('delete-all') @classmethod def integrity_check(cls, rank=0): return cls._fts_cmd('integrity-check', rank=rank) @classmethod def VocabModel(cls, table_type='row', table=None): if table_type not in ('row', 'col', 'instance'): raise ValueError('table_type must be either "row", "col" or ' '"instance".') attr = '_vocab_model_%s' % table_type if not hasattr(cls, attr): class Meta: database = cls._meta.database table_name = table or cls._meta.table_name + '_v' extension_module = fn.fts5vocab( cls._meta.entity, SQL(table_type)) attrs = { 'term': VirtualField(TextField), 'doc': IntegerField(), 'cnt': IntegerField(), 'rowid': RowIDField(), 'Meta': Meta, } if table_type == 'col': attrs['col'] = VirtualField(TextField) elif table_type == 'instance': attrs['offset'] = VirtualField(IntegerField) class_name = '%sVocab' % cls.__name__ setattr(cls, attr, type(class_name, (VirtualModel,), attrs)) return getattr(cls, attr) def ClosureTable(model_class, foreign_key=None, referencing_class=None, referencing_key=None): """Model factory for the transitive closure extension.""" if referencing_class is None: referencing_class = model_class if foreign_key is None: for field_obj in model_class._meta.refs: if field_obj.rel_model is model_class: foreign_key = 
field_obj break else: raise ValueError('Unable to find self-referential foreign key.') source_key = model_class._meta.primary_key if referencing_key is None: referencing_key = source_key class BaseClosureTable(VirtualModel): depth = VirtualField(IntegerField) id = VirtualField(IntegerField) idcolumn = VirtualField(TextField) parentcolumn = VirtualField(TextField) root = VirtualField(IntegerField) tablename = VirtualField(TextField) class Meta: extension_module = 'transitive_closure' @classmethod def descendants(cls, node, depth=None, include_node=False): query = (model_class .select(model_class, cls.depth.alias('depth')) .join(cls, on=(source_key == cls.id)) .where(cls.root == node) .objects()) if depth is not None: query = query.where(cls.depth == depth) elif not include_node: query = query.where(cls.depth > 0) return query @classmethod def ancestors(cls, node, depth=None, include_node=False): query = (model_class .select(model_class, cls.depth.alias('depth')) .join(cls, on=(source_key == cls.root)) .where(cls.id == node) .objects()) if depth is not None: query = query.where(cls.depth == depth) elif not include_node: query = query.where(cls.depth > 0) return query @classmethod def siblings(cls, node, include_node=False): if referencing_class is model_class: # Self-join: siblings share the node's foreign-key value. fk_value = node.__data__.get(foreign_key.name) query = model_class.select().where(foreign_key == fk_value) else: # Siblings, as given in referencing_class. siblings = (referencing_class .select(referencing_key) .join(cls, on=(foreign_key == cls.root)) .where((cls.id == node) & (cls.depth == 1))) # Fetch the corresponding models. query = (model_class .select() .where(source_key << siblings) .objects()) if not include_node: query = query.where(source_key != node) return query class Meta: database = referencing_class._meta.database options = { 'tablename': referencing_class._meta.table_name, 'idcolumn': referencing_key.column_name, 'parentcolumn': foreign_key.column_name} primary_key = False name = '%sClosure' % model_class.__name__ return type(name, (BaseClosureTable,), {'Meta': Meta}) class LSMTable(VirtualModel): class Meta: extension_module = 'lsm1' filename = None @classmethod def clean_options(cls, options): filename = cls._meta.filename if not filename: raise ValueError('LSM1 extension requires that you specify a ' 'filename for the LSM database.') else: if len(filename) >= 2 and filename[0] != '"': filename = '"%s"' % filename if not cls._meta.primary_key: raise ValueError('LSM1 models must specify a primary-key field.') key = cls._meta.primary_key if isinstance(key, AutoField): raise ValueError('LSM1 models must explicitly declare a primary ' 'key field.') if not isinstance(key, (TextField, BlobField, IntegerField)): raise ValueError('LSM1 key must be a TextField, BlobField, or ' 'IntegerField.') key._hidden = True if isinstance(key, IntegerField): data_type = 'UINT' elif isinstance(key, BlobField): data_type = 'BLOB' else: data_type = 'TEXT' cls._meta.prefix_arguments = [filename, '"%s"' % key.name, data_type] # Does the key map to a scalar value, or a tuple of values?
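        # A two-field model (key, value) maps each key to a scalar, so
        # Tbl[key] returns just the value; with more fields the whole row
        # (a named tuple) is returned. See get_by_id() below.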
if len(cls._meta.sorted_fields) == 2: cls._meta._value_field = cls._meta.sorted_fields[1] else: cls._meta._value_field = None return options @classmethod def load_extension(cls, path='lsm.so'): cls._meta.database.load_extension(path) @staticmethod def slice_to_expr(key, idx): if idx.start is not None and idx.stop is not None: return key.between(idx.start, idx.stop) elif idx.start is not None: return key >= idx.start elif idx.stop is not None: return key <= idx.stop @staticmethod def _apply_lookup_to_query(query, key, lookup): if isinstance(lookup, slice): expr = LSMTable.slice_to_expr(key, lookup) if expr is not None: query = query.where(expr) return query, False elif isinstance(lookup, Expression): return query.where(lookup), False else: return query.where(key == lookup), True @classmethod def get_by_id(cls, pk): query, is_single = cls._apply_lookup_to_query( cls.select().namedtuples(), cls._meta.primary_key, pk) if is_single: row = query.get() return row[1] if cls._meta._value_field is not None else row else: return query @classmethod def set_by_id(cls, key, value): if cls._meta._value_field is not None: data = {cls._meta._value_field: value} elif isinstance(value, tuple): data = {} for field, fval in zip(cls._meta.sorted_fields[1:], value): data[field] = fval elif isinstance(value, dict): data = value elif isinstance(value, cls): data = value.__data__ data[cls._meta.primary_key] = key cls.replace(data).execute() @classmethod def delete_by_id(cls, pk): query, is_single = cls._apply_lookup_to_query( cls.delete(), cls._meta.primary_key, pk) return query.execute() OP.MATCH = 'MATCH' def _sqlite_regexp(regex, value): return re.search(regex, value) is not None class SqliteExtDatabase(SqliteDatabase): def __init__(self, database, c_extensions=None, rank_functions=True, hash_functions=False, regexp_function=False, bloomfilter=False, json_contains=False, *args, **kwargs): super(SqliteExtDatabase, self).__init__(database, *args, **kwargs) self._row_factory = None if c_extensions and not CYTHON_SQLITE_EXTENSIONS: raise ImproperlyConfigured('SqliteExtDatabase initialized with ' 'C extensions, but shared library was ' 'not found!') prefer_c = CYTHON_SQLITE_EXTENSIONS and (c_extensions is not False) if rank_functions: if prefer_c: register_rank_functions(self) else: self.register_function(bm25, 'fts_bm25') self.register_function(rank, 'fts_rank') self.register_function(bm25, 'fts_bm25f') # Fall back to bm25.
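                # As above, the pure-Python build has no real Lucene-style
                # scorer, so bm25 is registered for fts_lucene as well.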
self.register_function(bm25, 'fts_lucene') if hash_functions: if not prefer_c: raise ValueError('C extension required to register hash ' 'functions.') register_hash_functions(self) if regexp_function: self.register_function(_sqlite_regexp, 'regexp', 2) if bloomfilter: if not prefer_c: raise ValueError('C extension required to use bloomfilter.') register_bloomfilter(self) if json_contains: self.register_function(_json_contains, 'json_contains') self._c_extensions = prefer_c def _add_conn_hooks(self, conn): super(SqliteExtDatabase, self)._add_conn_hooks(conn) if self._row_factory: conn.row_factory = self._row_factory def row_factory(self, fn): self._row_factory = fn if CYTHON_SQLITE_EXTENSIONS: SQLITE_STATUS_MEMORY_USED = 0 SQLITE_STATUS_PAGECACHE_USED = 1 SQLITE_STATUS_PAGECACHE_OVERFLOW = 2 SQLITE_STATUS_SCRATCH_USED = 3 SQLITE_STATUS_SCRATCH_OVERFLOW = 4 SQLITE_STATUS_MALLOC_SIZE = 5 SQLITE_STATUS_PARSER_STACK = 6 SQLITE_STATUS_PAGECACHE_SIZE = 7 SQLITE_STATUS_SCRATCH_SIZE = 8 SQLITE_STATUS_MALLOC_COUNT = 9 SQLITE_DBSTATUS_LOOKASIDE_USED = 0 SQLITE_DBSTATUS_CACHE_USED = 1 SQLITE_DBSTATUS_SCHEMA_USED = 2 SQLITE_DBSTATUS_STMT_USED = 3 SQLITE_DBSTATUS_LOOKASIDE_HIT = 4 SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE = 5 SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL = 6 SQLITE_DBSTATUS_CACHE_HIT = 7 SQLITE_DBSTATUS_CACHE_MISS = 8 SQLITE_DBSTATUS_CACHE_WRITE = 9 SQLITE_DBSTATUS_DEFERRED_FKS = 10 #SQLITE_DBSTATUS_CACHE_USED_SHARED = 11 def __status__(flag, return_highwater=False): """ Expose a sqlite3_status() call for a particular flag as a property of the Database object. """ def getter(self): result = sqlite_get_status(flag) return result[1] if return_highwater else result return property(getter) def __dbstatus__(flag, return_highwater=False, return_current=False): """ Expose a sqlite3_dbstatus() call for a particular flag as a property of the Database instance. Unlike sqlite3_status(), the dbstatus properties pertain to the current connection. 
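        By default the value is a (current, highwater) 2-tuple;
        return_highwater=True yields only the highwater mark, and
        return_current=True yields only the current value.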
""" def getter(self): if self._state.conn is None: raise ImproperlyConfigured('database connection not opened.') result = sqlite_get_db_status(self._state.conn, flag) if return_current: return result[0] return result[1] if return_highwater else result return property(getter) class CSqliteExtDatabase(SqliteExtDatabase): def __init__(self, *args, **kwargs): self._conn_helper = None self._commit_hook = self._rollback_hook = self._update_hook = None self._replace_busy_handler = False super(CSqliteExtDatabase, self).__init__(*args, **kwargs) def init(self, database, replace_busy_handler=False, **kwargs): super(CSqliteExtDatabase, self).init(database, **kwargs) self._replace_busy_handler = replace_busy_handler def _close(self, conn): if self._commit_hook: self._conn_helper.set_commit_hook(None) if self._rollback_hook: self._conn_helper.set_rollback_hook(None) if self._update_hook: self._conn_helper.set_update_hook(None) return super(CSqliteExtDatabase, self)._close(conn) def _add_conn_hooks(self, conn): super(CSqliteExtDatabase, self)._add_conn_hooks(conn) self._conn_helper = ConnectionHelper(conn) if self._commit_hook is not None: self._conn_helper.set_commit_hook(self._commit_hook) if self._rollback_hook is not None: self._conn_helper.set_rollback_hook(self._rollback_hook) if self._update_hook is not None: self._conn_helper.set_update_hook(self._update_hook) if self._replace_busy_handler: timeout = self._timeout or 5 self._conn_helper.set_busy_handler(timeout * 1000) def on_commit(self, fn): self._commit_hook = fn if not self.is_closed(): self._conn_helper.set_commit_hook(fn) return fn def on_rollback(self, fn): self._rollback_hook = fn if not self.is_closed(): self._conn_helper.set_rollback_hook(fn) return fn def on_update(self, fn): self._update_hook = fn if not self.is_closed(): self._conn_helper.set_update_hook(fn) return fn def changes(self): return self._conn_helper.changes() @property def last_insert_rowid(self): return self._conn_helper.last_insert_rowid() @property def autocommit(self): return self._conn_helper.autocommit() def backup(self, destination, pages=None, name=None, progress=None): return backup(self.connection(), destination.connection(), pages=pages, name=name, progress=progress) def backup_to_file(self, filename, pages=None, name=None, progress=None): return backup_to_file(self.connection(), filename, pages=pages, name=name, progress=progress) def blob_open(self, table, column, rowid, read_only=False): return Blob(self, table, column, rowid, read_only) # Status properties. memory_used = __status__(SQLITE_STATUS_MEMORY_USED) malloc_size = __status__(SQLITE_STATUS_MALLOC_SIZE, True) malloc_count = __status__(SQLITE_STATUS_MALLOC_COUNT) pagecache_used = __status__(SQLITE_STATUS_PAGECACHE_USED) pagecache_overflow = __status__(SQLITE_STATUS_PAGECACHE_OVERFLOW) pagecache_size = __status__(SQLITE_STATUS_PAGECACHE_SIZE, True) scratch_used = __status__(SQLITE_STATUS_SCRATCH_USED) scratch_overflow = __status__(SQLITE_STATUS_SCRATCH_OVERFLOW) scratch_size = __status__(SQLITE_STATUS_SCRATCH_SIZE, True) # Connection status properties. 
lookaside_used = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_USED) lookaside_hit = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_HIT, True) lookaside_miss = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE, True) lookaside_miss_full = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL, True) cache_used = __dbstatus__(SQLITE_DBSTATUS_CACHE_USED, False, True) #cache_used_shared = __dbstatus__(SQLITE_DBSTATUS_CACHE_USED_SHARED, # False, True) schema_used = __dbstatus__(SQLITE_DBSTATUS_SCHEMA_USED, False, True) statement_used = __dbstatus__(SQLITE_DBSTATUS_STMT_USED, False, True) cache_hit = __dbstatus__(SQLITE_DBSTATUS_CACHE_HIT, False, True) cache_miss = __dbstatus__(SQLITE_DBSTATUS_CACHE_MISS, False, True) cache_write = __dbstatus__(SQLITE_DBSTATUS_CACHE_WRITE, False, True) def match(lhs, rhs): return Expression(lhs, OP.MATCH, rhs) def _parse_match_info(buf): # See http://sqlite.org/fts3.html#matchinfo bufsize = len(buf) # Length in bytes. return [struct.unpack('@I', buf[i:i+4])[0] for i in range(0, bufsize, 4)] def get_weights(ncol, raw_weights): if not raw_weights: return [1] * ncol else: weights = [0] * ncol for i, weight in enumerate(raw_weights): weights[i] = weight return weights # Ranking implementation, which parses matchinfo. def rank(raw_match_info, *raw_weights): # Handle match_info called w/default args 'pcx' - based on the example rank # function http://sqlite.org/fts3.html#appendix_a match_info = _parse_match_info(raw_match_info) score = 0.0 p, c = match_info[:2] weights = get_weights(c, raw_weights) # matchinfo X value corresponds to, for each phrase in the search query, a # list of 3 values for each column in the search table. # So if we have a two-phrase search query and three columns of data, the # following would be the layout: # p0 : c0=[0, 1, 2], c1=[3, 4, 5], c2=[6, 7, 8] # p1 : c0=[9, 10, 11], c1=[12, 13, 14], c2=[15, 16, 17] for phrase_num in range(p): phrase_info_idx = 2 + (phrase_num * c * 3) for col_num in range(c): weight = weights[col_num] if not weight: continue col_idx = phrase_info_idx + (col_num * 3) # The idea is that we count the number of times the phrase appears # in this column of the current row, compared to how many times it # appears in this column across all rows. The ratio of these values # provides a rough way to score based on "high value" terms. row_hits = match_info[col_idx] all_rows_hits = match_info[col_idx + 1] if row_hits > 0: score += weight * (float(row_hits) / all_rows_hits) return -score # Okapi BM25 ranking implementation (FTS4 only). def bm25(raw_match_info, *args): """ Usage: # Format string *must* be pcnalx # Second parameter to bm25 specifies the index of the column, on # the table being queried. bm25(matchinfo(document_tbl, 'pcnalx'), 1) AS rank """ match_info = _parse_match_info(raw_match_info) K = 1.2 B = 0.75 score = 0.0 P_O, C_O, N_O, A_O = range(4) # Offsets into the matchinfo buffer. term_count = match_info[P_O] # n col_count = match_info[C_O] total_docs = match_info[N_O] # N L_O = A_O + col_count X_O = L_O + col_count # Worked example of pcnalx for two columns and two phrases, 100 docs total. # { # p = 2 # c = 2 # n = 100 # a0 = 4 -- avg number of tokens for col0, e.g. title # a1 = 40 -- avg number of tokens for col1, e.g.
body # l0 = 5 -- curr doc has 5 tokens in col0 # l1 = 30 -- curr doc has 30 tokens in col1 # # x000 -- hits this row for phrase0, col0 # x001 -- hits all rows for phrase0, col0 # x002 -- rows with phrase0 in col0 at least once # # x010 -- hits this row for phrase0, col1 # x011 -- hits all rows for phrase0, col1 # x012 -- rows with phrase0 in col1 at least once # # x100 -- hits this row for phrase1, col0 # x101 -- hits all rows for phrase1, col0 # x102 -- rows with phrase1 in col0 at least once # # x110 -- hits this row for phrase1, col1 # x111 -- hits all rows for phrase1, col1 # x112 -- rows with phrase1 in col1 at least once # } weights = get_weights(col_count, args) for i in range(term_count): for j in range(col_count): weight = weights[j] if weight == 0: continue x = X_O + (3 * (j + i * col_count)) term_frequency = float(match_info[x]) # f(qi, D) docs_with_term = float(match_info[x + 2]) # n(qi) # log( (N - n(qi) + 0.5) / (n(qi) + 0.5) ) idf = math.log( (total_docs - docs_with_term + 0.5) / (docs_with_term + 0.5)) if idf <= 0.0: idf = 1e-6 doc_length = float(match_info[L_O + j]) # |D| avg_length = float(match_info[A_O + j]) or 1. # avgdl ratio = doc_length / avg_length num = term_frequency * (K + 1.0) b_part = 1.0 - B + (B * ratio) denom = term_frequency + (K * b_part) pc_score = idf * (num / denom) score += (pc_score * weight) return -score def _json_contains(src_json, obj_json): stack = [] try: stack.append((json.loads(obj_json), json.loads(src_json))) except: # Invalid JSON! return False while stack: obj, src = stack.pop() if isinstance(src, dict): if isinstance(obj, dict): for key in obj: if key not in src: return False stack.append((obj[key], src[key])) elif isinstance(obj, list): for item in obj: if item not in src: return False elif obj not in src: return False elif isinstance(src, list): if isinstance(obj, dict): return False elif isinstance(obj, list): try: for i in range(len(obj)): stack.append((obj[i], src[i])) except IndexError: return False elif obj not in src: return False elif obj != src: return False return True peewee-3.17.7/playhouse/sqlite_udf.py000066400000000000000000000325411470346076600176270ustar00rootroot00000000000000import datetime import hashlib import heapq import math import os import random import re import sys import threading import zlib try: from collections import Counter except ImportError: Counter = None try: from urlparse import urlparse except ImportError: from urllib.parse import urlparse try: from playhouse._sqlite_ext import TableFunction except ImportError: TableFunction = None SQLITE_DATETIME_FORMATS = ( '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d', '%H:%M:%S', '%H:%M:%S.%f', '%H:%M') from peewee import format_date_time def format_date_time_sqlite(date_value): return format_date_time(date_value, SQLITE_DATETIME_FORMATS) try: from playhouse import _sqlite_udf as cython_udf except ImportError: cython_udf = None # Group udf by function. 
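# Each group name below is a key into the *_COLLECTION registries that the
# @udf, @aggregate and @table_function decorators populate; for example,
# register_groups(db, DATE, MATH) attaches only the date- and math-related
# callables to the given database.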
CONTROL_FLOW = 'control_flow' DATE = 'date' FILE = 'file' HELPER = 'helpers' MATH = 'math' STRING = 'string' AGGREGATE_COLLECTION = {} TABLE_FUNCTION_COLLECTION = {} UDF_COLLECTION = {} class synchronized_dict(dict): def __init__(self, *args, **kwargs): super(synchronized_dict, self).__init__(*args, **kwargs) self._lock = threading.Lock() def __getitem__(self, key): with self._lock: return super(synchronized_dict, self).__getitem__(key) def __setitem__(self, key, value): with self._lock: return super(synchronized_dict, self).__setitem__(key, value) def __delitem__(self, key): with self._lock: return super(synchronized_dict, self).__delitem__(key) STATE = synchronized_dict() SETTINGS = synchronized_dict() # Class and function decorators. def aggregate(*groups): def decorator(klass): for group in groups: AGGREGATE_COLLECTION.setdefault(group, []) AGGREGATE_COLLECTION[group].append(klass) return klass return decorator def table_function(*groups): def decorator(klass): for group in groups: TABLE_FUNCTION_COLLECTION.setdefault(group, []) TABLE_FUNCTION_COLLECTION[group].append(klass) return klass return decorator def udf(*groups): def decorator(fn): for group in groups: UDF_COLLECTION.setdefault(group, []) UDF_COLLECTION[group].append(fn) return fn return decorator # Register aggregates / functions with connection. def register_aggregate_groups(db, *groups): seen = set() for group in groups: klasses = AGGREGATE_COLLECTION.get(group, ()) for klass in klasses: name = getattr(klass, 'name', klass.__name__) if name not in seen: seen.add(name) db.register_aggregate(klass, name) def register_table_function_groups(db, *groups): seen = set() for group in groups: klasses = TABLE_FUNCTION_COLLECTION.get(group, ()) for klass in klasses: if klass.name not in seen: seen.add(klass.name) db.register_table_function(klass) def register_udf_groups(db, *groups): seen = set() for group in groups: functions = UDF_COLLECTION.get(group, ()) for function in functions: name = function.__name__ if name not in seen: seen.add(name) db.register_function(function, name) def register_groups(db, *groups): register_aggregate_groups(db, *groups) register_table_function_groups(db, *groups) register_udf_groups(db, *groups) def register_all(db): register_aggregate_groups(db, *AGGREGATE_COLLECTION) register_table_function_groups(db, *TABLE_FUNCTION_COLLECTION) register_udf_groups(db, *UDF_COLLECTION) # Begin actual user-defined functions and aggregates. # Scalar functions. 
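# A minimal registration sketch (assumes `db` is an existing SqliteDatabase):
#
#     register_udf_groups(db, CONTROL_FLOW, DATE)
#     db.execute_sql("SELECT if_then_else(1, 'yes', 'no')").fetchone()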
@udf(CONTROL_FLOW) def if_then_else(cond, truthy, falsey=None): if cond: return truthy return falsey @udf(DATE) def strip_tz(date_str): date_str = date_str.replace('T', ' ') tz_idx1 = date_str.find('+') if tz_idx1 != -1: return date_str[:tz_idx1] tz_idx2 = date_str.find('-') if tz_idx2 > 13: return date_str[:tz_idx2] return date_str @udf(DATE) def human_delta(nseconds, glue=', '): parts = ( (86400 * 365, 'year'), (86400 * 30, 'month'), (86400 * 7, 'week'), (86400, 'day'), (3600, 'hour'), (60, 'minute'), (1, 'second'), ) accum = [] for offset, name in parts: val, nseconds = divmod(nseconds, offset) if val: suffix = val != 1 and 's' or '' accum.append('%s %s%s' % (val, name, suffix)) if not accum: return '0 seconds' return glue.join(accum) @udf(FILE) def file_ext(filename): try: res = os.path.splitext(filename) except ValueError: return None return res[1] @udf(FILE) def file_read(filename): try: with open(filename) as fh: return fh.read() except: pass if sys.version_info[0] == 2: @udf(HELPER) def gzip(data, compression=9): return buffer(zlib.compress(data, compression)) @udf(HELPER) def gunzip(data): return zlib.decompress(data) else: @udf(HELPER) def gzip(data, compression=9): if isinstance(data, str): data = bytes(data.encode('raw_unicode_escape')) return zlib.compress(data, compression) @udf(HELPER) def gunzip(data): return zlib.decompress(data) @udf(HELPER) def hostname(url): parse_result = urlparse(url) if parse_result: return parse_result.netloc @udf(HELPER) def toggle(key): key = key.lower() STATE[key] = ret = not STATE.get(key) return ret @udf(HELPER) def setting(key, value=None): if value is None: return SETTINGS.get(key) else: SETTINGS[key] = value return value @udf(HELPER) def clear_settings(): SETTINGS.clear() @udf(HELPER) def clear_toggles(): STATE.clear() @udf(MATH) def randomrange(start, end=None, step=None): if end is None: start, end = 0, start elif step is None: step = 1 return random.randrange(start, end, step) @udf(MATH) def gauss_distribution(mean, sigma): try: return random.gauss(mean, sigma) except ValueError: return None @udf(MATH) def sqrt(n): try: return math.sqrt(n) except ValueError: return None @udf(MATH) def tonumber(s): try: return int(s) except ValueError: try: return float(s) except: return None @udf(STRING) def substr_count(haystack, needle): if not haystack or not needle: return 0 return haystack.count(needle) @udf(STRING) def strip_chars(haystack, chars): return haystack.strip(chars) def _hash(constructor, *args): hash_obj = constructor() for arg in args: hash_obj.update(arg) return hash_obj.hexdigest() # Aggregates. 
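# Aggregates are registered as classes implementing step()/finalize(). A rough
# usage sketch, where the `item` table and `db` are hypothetical:
#
#     register_aggregate_groups(db, MATH)
#     db.execute_sql('SELECT mode(price), stddev(price) FROM item')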
class _heap_agg(object): def __init__(self): self.heap = [] self.ct = 0 def process(self, value): return value def step(self, value): self.ct += 1 heapq.heappush(self.heap, self.process(value)) class _datetime_heap_agg(_heap_agg): def process(self, value): return format_date_time_sqlite(value) if sys.version_info[:2] == (2, 6): def total_seconds(td): return (td.seconds + (td.days * 86400) + (td.microseconds / (10.**6))) else: total_seconds = lambda td: td.total_seconds() @aggregate(DATE) class mintdiff(_datetime_heap_agg): def finalize(self): dtp = min_diff = None while self.heap: if min_diff is None: if dtp is None: dtp = heapq.heappop(self.heap) continue dt = heapq.heappop(self.heap) diff = dt - dtp if min_diff is None or min_diff > diff: min_diff = diff dtp = dt if min_diff is not None: return total_seconds(min_diff) @aggregate(DATE) class avgtdiff(_datetime_heap_agg): def finalize(self): if self.ct < 1: return elif self.ct == 1: return 0 total = ct = 0 dtp = None while self.heap: if total == 0: if dtp is None: dtp = heapq.heappop(self.heap) continue dt = heapq.heappop(self.heap) diff = dt - dtp ct += 1 total += total_seconds(diff) dtp = dt return float(total) / ct @aggregate(DATE) class duration(object): def __init__(self): self._min = self._max = None def step(self, value): dt = format_date_time_sqlite(value) if self._min is None or dt < self._min: self._min = dt if self._max is None or dt > self._max: self._max = dt def finalize(self): if self._min and self._max: td = (self._max - self._min) return total_seconds(td) return None @aggregate(MATH) class mode(object): if Counter: def __init__(self): self.items = Counter() def step(self, *args): self.items.update(args) def finalize(self): if self.items: return self.items.most_common(1)[0][0] else: def __init__(self): self.items = [] def step(self, item): self.items.append(item) def finalize(self): if self.items: return max(set(self.items), key=self.items.count) @aggregate(MATH) class minrange(_heap_agg): def finalize(self): if self.ct == 0: return elif self.ct == 1: return 0 prev = min_diff = None while self.heap: if min_diff is None: if prev is None: prev = heapq.heappop(self.heap) continue curr = heapq.heappop(self.heap) diff = curr - prev if min_diff is None or min_diff > diff: min_diff = diff prev = curr return min_diff @aggregate(MATH) class avgrange(_heap_agg): def finalize(self): if self.ct == 0: return elif self.ct == 1: return 0 total = ct = 0 prev = None while self.heap: if total == 0: if prev is None: prev = heapq.heappop(self.heap) continue curr = heapq.heappop(self.heap) diff = curr - prev ct += 1 total += diff prev = curr return float(total) / ct @aggregate(MATH) class _range(object): name = 'range' def __init__(self): self._min = self._max = None def step(self, value): if self._min is None or value < self._min: self._min = value if self._max is None or value > self._max: self._max = value def finalize(self): if self._min is not None and self._max is not None: return self._max - self._min return None @aggregate(MATH) class stddev(object): def __init__(self): self.n = 0 self.values = [] def step(self, v): self.n += 1 self.values.append(v) def finalize(self): if self.n <= 1: return 0 mean = sum(self.values) / self.n return math.sqrt(sum((i - mean) ** 2 for i in self.values) / (self.n - 1)) if cython_udf is not None: damerau_levenshtein_dist = udf(STRING)(cython_udf.damerau_levenshtein_dist) levenshtein_dist = udf(STRING)(cython_udf.levenshtein_dist) str_dist = udf(STRING)(cython_udf.str_dist) median = 
aggregate(MATH)(cython_udf.median) if TableFunction is not None: @table_function(STRING) class RegexSearch(TableFunction): params = ['regex', 'search_string'] columns = ['match'] name = 'regex_search' def initialize(self, regex=None, search_string=None): self._iter = re.finditer(regex, search_string) def iterate(self, idx): return (next(self._iter).group(0),) @table_function(DATE) class DateSeries(TableFunction): params = ['start', 'stop', 'step_seconds'] columns = ['date'] name = 'date_series' def initialize(self, start, stop, step_seconds=86400): self.start = format_date_time_sqlite(start) self.stop = format_date_time_sqlite(stop) step_seconds = int(step_seconds) self.step_seconds = datetime.timedelta(seconds=step_seconds) if (self.start.hour == 0 and self.start.minute == 0 and self.start.second == 0 and step_seconds >= 86400): self.format = '%Y-%m-%d' elif (self.start.year == 1900 and self.start.month == 1 and self.start.day == 1 and self.stop.year == 1900 and self.stop.month == 1 and self.stop.day == 1 and step_seconds < 86400): self.format = '%H:%M:%S' else: self.format = '%Y-%m-%d %H:%M:%S' def iterate(self, idx): if self.start > self.stop: raise StopIteration current = self.start self.start += self.step_seconds return (current.strftime(self.format),) peewee-3.17.7/playhouse/sqliteq.py000066400000000000000000000251031470346076600171460ustar00rootroot00000000000000import logging import weakref from threading import local as thread_local from threading import Event from threading import Lock from threading import Thread try: from Queue import Queue except ImportError: from queue import Queue try: import gevent from gevent import Greenlet as GThread from gevent.event import Event as GEvent from gevent.local import local as greenlet_local from gevent.queue import Queue as GQueue except ImportError: GThread = GQueue = GEvent = None from peewee import __deprecated__ from playhouse.sqlite_ext import SqliteExtDatabase logger = logging.getLogger('peewee.sqliteq') class ResultTimeout(Exception): pass class WriterPaused(Exception): pass class ShutdownException(Exception): pass class AsyncCursor(object): __slots__ = ('sql', 'params', 'timeout', '_event', '_cursor', '_exc', '_idx', '_rows', '_ready') def __init__(self, event, sql, params, timeout): self._event = event self.sql = sql self.params = params self.timeout = timeout self._cursor = self._exc = self._idx = self._rows = None self._ready = False def set_result(self, cursor, exc=None): self._cursor = cursor self._exc = exc self._idx = 0 self._rows = cursor.fetchall() if exc is None else [] self._event.set() return self def _wait(self, timeout=None): timeout = timeout if timeout is not None else self.timeout if not self._event.wait(timeout=timeout) and timeout: raise ResultTimeout('results not ready, timed out.') if self._exc is not None: raise self._exc self._ready = True def __iter__(self): if not self._ready: self._wait() if self._exc is not None: raise self._exc return self def next(self): if not self._ready: self._wait() try: obj = self._rows[self._idx] except IndexError: raise StopIteration else: self._idx += 1 return obj __next__ = next @property def lastrowid(self): if not self._ready: self._wait() return self._cursor.lastrowid @property def rowcount(self): if not self._ready: self._wait() return self._cursor.rowcount @property def description(self): return self._cursor.description def close(self): self._cursor.close() def fetchall(self): return list(self) # Iterating implies waiting until populated. 
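    # Like fetchall() above, fetchone() blocks (via _wait) until the writer
    # thread has called set_result() on this cursor.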
def fetchone(self): if not self._ready: self._wait() try: return next(self) except StopIteration: return None SHUTDOWN = StopIteration QUERY = object() PAUSE = object() UNPAUSE = object() class Writer(object): __slots__ = ('database', 'queue') def __init__(self, database, queue): self.database = database self.queue = queue def run(self): conn = self.database.connection() try: while True: try: if conn is None: # Paused. if self.wait_unpause(): conn = self.database.connection() else: conn = self.loop(conn) except ShutdownException: logger.info('writer received shutdown request, exiting.') return finally: if conn is not None: self.database._close(conn) self.database._state.reset() def wait_unpause(self): op, obj = self.queue.get() if op is UNPAUSE: logger.info('writer unpaused - reconnecting to database.') obj.set() return True elif op is SHUTDOWN: raise ShutdownException() elif op is PAUSE: logger.error('writer received pause, but is already paused.') obj.set() else: obj.set_result(None, WriterPaused()) logger.warning('writer paused, not handling %s', obj) def loop(self, conn): op, obj = self.queue.get() if op is QUERY: self.execute(obj) elif op is PAUSE: logger.info('writer paused - closing database connection.') self.database._close(conn) self.database._state.reset() obj.set() return elif op is UNPAUSE: logger.error('writer received unpause, but is already running.') obj.set() elif op is SHUTDOWN: raise ShutdownException() else: logger.error('writer received unsupported object: %s', obj) return conn def execute(self, obj): logger.debug('received query %s', obj.sql) try: cursor = self.database._execute(obj.sql, obj.params) except Exception as execute_err: cursor = None exc = execute_err # Python 3 clears the 'as' variable on block exit. else: exc = None return obj.set_result(cursor, exc) class SqliteQueueDatabase(SqliteExtDatabase): WAL_MODE_ERROR_MESSAGE = ('SQLite must be configured to use the WAL ' 'journal mode when using this feature. WAL mode ' 'allows one or more readers to continue reading ' 'while another connection writes to the ' 'database.') def __init__(self, database, use_gevent=False, autostart=True, queue_max_size=None, results_timeout=None, *args, **kwargs): kwargs['check_same_thread'] = False # Lock around starting and stopping write thread operations. self._qlock = Lock() # Ensure that journal_mode is WAL. This value is passed to the parent # class constructor below. pragmas = self._validate_journal_mode(kwargs.pop('pragmas', None)) # Reference to execute_sql on the parent class. Since we've overridden # execute_sql(), this is just a handy way to reference the real # implementation. Parent = super(SqliteQueueDatabase, self) self._execute = Parent.execute_sql # Call the parent class constructor with our modified pragmas. Parent.__init__(database, pragmas=pragmas, *args, **kwargs) self._autostart = autostart self._results_timeout = results_timeout self._is_stopped = True # Get different objects depending on the threading implementation. self._thread_helper = self.get_thread_impl(use_gevent)(queue_max_size) # Create the writer thread, optionally starting it.
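        # The writer consumes the queue created below; all non-SELECT
        # statements are serialized through it. Rough usage sketch
        # (path and table names hypothetical):
        #
        #     db = SqliteQueueDatabase('app.db', autostart=False)
        #     db.start()
        #     curs = db.execute_sql('INSERT INTO kv (k) VALUES (?)', ('a',))
        #     curs.lastrowid  # Blocks until the writer executes the query.
        #     db.stop()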
self._create_write_queue() if self._autostart: self.start() def get_thread_impl(self, use_gevent): return GreenletHelper if use_gevent else ThreadHelper def _validate_journal_mode(self, pragmas=None): if not pragmas: return {'journal_mode': 'wal'} if not isinstance(pragmas, dict): pragmas = dict((k.lower(), v) for (k, v) in pragmas) if pragmas.get('journal_mode', 'wal').lower() != 'wal': raise ValueError(self.WAL_MODE_ERROR_MESSAGE) pragmas['journal_mode'] = 'wal' return pragmas def _create_write_queue(self): self._write_queue = self._thread_helper.queue() def queue_size(self): return self._write_queue.qsize() def execute_sql(self, sql, params=None, commit=None, timeout=None): if commit is not None: __deprecated__('"commit" has been deprecated and is a no-op.') if sql.lower().startswith('select'): return self._execute(sql, params) cursor = AsyncCursor( event=self._thread_helper.event(), sql=sql, params=params, timeout=self._results_timeout if timeout is None else timeout) self._write_queue.put((QUERY, cursor)) return cursor def start(self): with self._qlock: if not self._is_stopped: return False def run(): writer = Writer(self, self._write_queue) writer.run() self._writer = self._thread_helper.thread(run) self._writer.start() self._is_stopped = False return True def stop(self): logger.debug('environment stop requested.') with self._qlock: if self._is_stopped: return False self._write_queue.put((SHUTDOWN, None)) self._writer.join() # Empty queue of any remaining tasks. while not self._write_queue.empty(): op, obj = self._write_queue.get() if op == PAUSE or op == UNPAUSE: obj.set() elif op == QUERY: obj.set_result(None, ShutdownException()) self._is_stopped = True return True def is_stopped(self): with self._qlock: return self._is_stopped def pause(self): with self._qlock: if self._is_stopped: return False evt = self._thread_helper.event() self._write_queue.put((PAUSE, evt)) evt.wait() def unpause(self): with self._qlock: if self._is_stopped: return False evt = self._thread_helper.event() self._write_queue.put((UNPAUSE, evt)) evt.wait() def __unsupported__(self, *args, **kwargs): raise ValueError('This method is not supported by %r.' % type(self)) atomic = transaction = savepoint = __unsupported__ class ThreadHelper(object): __slots__ = ('queue_max_size',) def __init__(self, queue_max_size=None): self.queue_max_size = queue_max_size def event(self): return Event() def queue(self, max_size=None): max_size = max_size if max_size is not None else self.queue_max_size return Queue(maxsize=max_size or 0) def thread(self, fn, *args, **kwargs): thread = Thread(target=fn, args=args, kwargs=kwargs) thread.daemon = True return thread class GreenletHelper(ThreadHelper): __slots__ = () def event(self): return GEvent() def queue(self, max_size=None): max_size = max_size if max_size is not None else self.queue_max_size return GQueue(maxsize=max_size or 0) def thread(self, fn, *args, **kwargs): def wrap(*a, **k): gevent.sleep() return fn(*a, **k) return GThread(wrap, *args, **kwargs) peewee-3.17.7/playhouse/test_utils.py000066400000000000000000000034761470346076600176740ustar00rootroot00000000000000from functools import wraps import logging logger = logging.getLogger('peewee') class _QueryLogHandler(logging.Handler): def __init__(self, *args, **kwargs): self.queries = [] logging.Handler.__init__(self, *args, **kwargs) def emit(self, record): # Counts all entries logged to the "peewee" logger by execute_sql(). 
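        # peewee's execute_sql() logs a (sql, params) 2-tuple as record.msg,
        # which is what lets count_queries.only_select below test
        # q.msg[0].startswith('SELECT ').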
if record.name == 'peewee': self.queries.append(record) class count_queries(object): def __init__(self, only_select=False): self.only_select = only_select self.count = 0 def get_queries(self): return self._handler.queries def __enter__(self): self._handler = _QueryLogHandler() logger.setLevel(logging.DEBUG) logger.addHandler(self._handler) return self def __exit__(self, exc_type, exc_val, exc_tb): logger.removeHandler(self._handler) if self.only_select: self.count = len([q for q in self._handler.queries if q.msg[0].startswith('SELECT ')]) else: self.count = len(self._handler.queries) class assert_query_count(count_queries): def __init__(self, expected, only_select=False): super(assert_query_count, self).__init__(only_select=only_select) self.expected = expected def __call__(self, f): @wraps(f) def decorated(*args, **kwds): with self: ret = f(*args, **kwds) self._assert_count() return ret return decorated def _assert_count(self): error_msg = '%s != %s' % (self.count, self.expected) assert self.count == self.expected, error_msg def __exit__(self, exc_type, exc_val, exc_tb): super(assert_query_count, self).__exit__(exc_type, exc_val, exc_tb) self._assert_count() peewee-3.17.7/pwiz.py000077500000000000000000000200011470346076600144370ustar00rootroot00000000000000#!/usr/bin/env python import datetime import os import sys from getpass import getpass from optparse import OptionParser from peewee import * from peewee import print_ from peewee import __version__ as peewee_version from playhouse.cockroachdb import CockroachDatabase from playhouse.reflection import * HEADER = """from peewee import *%s database = %s('%s'%s) """ BASE_MODEL = """\ class BaseModel(Model): class Meta: database = database """ UNKNOWN_FIELD = """\ class UnknownField(object): def __init__(self, *_, **__): pass """ DATABASE_ALIASES = { CockroachDatabase: ['cockroach', 'cockroachdb', 'crdb'], MySQLDatabase: ['mysql', 'mysqldb'], PostgresqlDatabase: ['postgres', 'postgresql'], SqliteDatabase: ['sqlite', 'sqlite3'], } DATABASE_MAP = dict((value, key) for key in DATABASE_ALIASES for value in DATABASE_ALIASES[key]) def make_introspector(database_type, database_name, **kwargs): if database_type not in DATABASE_MAP: err('Unrecognized database, must be one of: %s' % ', '.join(DATABASE_MAP.keys())) sys.exit(1) schema = kwargs.pop('schema', None) DatabaseClass = DATABASE_MAP[database_type] db = DatabaseClass(database_name, **kwargs) return Introspector.from_database(db, schema=schema) def print_models(introspector, tables=None, preserve_order=False, include_views=False, ignore_unknown=False, snake_case=True): database = introspector.introspect(table_names=tables, include_views=include_views, snake_case=snake_case) db_kwargs = introspector.get_database_kwargs() header = HEADER % ( introspector.get_additional_imports(), introspector.get_database_class().__name__, introspector.get_database_name(), ', **%s' % repr(db_kwargs) if db_kwargs else '') print_(header) if not ignore_unknown: print_(UNKNOWN_FIELD) print_(BASE_MODEL) def _print_table(table, seen, accum=None): accum = accum or [] foreign_keys = database.foreign_keys[table] for foreign_key in foreign_keys: dest = foreign_key.dest_table # In the event the destination table has already been pushed # for printing, then we have a reference cycle. if dest in accum and table not in accum: print_('# Possible reference cycle: %s' % dest) # If this is not a self-referential foreign key, and we have # not already processed the destination table, do so now. 
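            # e.g. with tweet -> user, 'user' is printed before 'tweet' so the
            # generated ForeignKeyField can reference an already-defined model
            # (the table names here are hypothetical).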
if dest not in seen and dest not in accum: seen.add(dest) if dest != table: _print_table(dest, seen, accum + [table]) print_('class %s(BaseModel):' % database.model_names[table]) columns = database.columns[table].items() if not preserve_order: columns = sorted(columns) primary_keys = database.primary_keys[table] for name, column in columns: skip = all([ name in primary_keys, name == 'id', len(primary_keys) == 1, column.field_class in introspector.pk_classes]) if skip: continue if column.primary_key and len(primary_keys) > 1: # If we have a CompositeKey, then we do not want to explicitly # mark the columns as being primary keys. column.primary_key = False is_unknown = column.field_class is UnknownField if is_unknown and ignore_unknown: disp = '%s - %s' % (column.name, column.raw_column_type or '?') print_(' # %s' % disp) else: print_(' %s' % column.get_field()) print_('') print_(' class Meta:') print_(' table_name = \'%s\'' % table) multi_column_indexes = database.multi_column_indexes(table) if multi_column_indexes: print_(' indexes = (') for fields, unique in sorted(multi_column_indexes): print_(' ((%s), %s),' % ( ', '.join("'%s'" % field for field in fields), unique, )) print_(' )') if introspector.schema: print_(' schema = \'%s\'' % introspector.schema) if len(primary_keys) > 1: pk_field_names = sorted([ field.name for col, field in columns if col in primary_keys]) pk_list = ', '.join("'%s'" % pk for pk in pk_field_names) print_(' primary_key = CompositeKey(%s)' % pk_list) elif not primary_keys: print_(' primary_key = False') print_('') seen.add(table) seen = set() for table in sorted(database.model_names.keys()): if table not in seen: if not tables or table in tables: _print_table(table, seen) def print_header(cmd_line, introspector): timestamp = datetime.datetime.now() print_('# Code generated by:') print_('# python -m pwiz %s' % cmd_line) print_('# Date: %s' % timestamp.strftime('%B %d, %Y %I:%M%p')) print_('# Database: %s' % introspector.get_database_name()) print_('# Peewee version: %s' % peewee_version) print_('') def err(msg): sys.stderr.write('\033[91m%s\033[0m\n' % msg) sys.stderr.flush() def get_option_parser(): parser = OptionParser(usage='usage: %prog [options] database_name') ao = parser.add_option ao('-H', '--host', dest='host') ao('-p', '--port', dest='port', type='int') ao('-u', '--user', dest='user') ao('-P', '--password', dest='password', action='store_true') engines = sorted(DATABASE_MAP) ao('-e', '--engine', dest='engine', choices=engines, help=('Database type, e.g. sqlite, mysql, postgresql or cockroachdb. ' 'Defaults to "sqlite" when the database path exists, otherwise ' '"postgresql".')) ao('-s', '--schema', dest='schema') ao('-t', '--tables', dest='tables', help=('Only generate the specified tables.
Multiple table names should ' 'be separated by commas.')) ao('-v', '--views', dest='views', action='store_true', help='Generate model classes for VIEWs in addition to tables.') ao('-i', '--info', dest='info', action='store_true', help=('Add database information and other metadata to top of the ' 'generated file.')) ao('-o', '--preserve-order', action='store_true', dest='preserve_order', help='Model definition column ordering matches source table.') ao('-I', '--ignore-unknown', action='store_true', dest='ignore_unknown', help='Ignore fields whose type cannot be determined.') ao('-L', '--legacy-naming', action='store_true', dest='legacy_naming', help='Use legacy table- and column-name generation.') return parser def get_connect_kwargs(options): ops = ('host', 'port', 'user', 'schema') kwargs = dict((o, getattr(options, o)) for o in ops if getattr(options, o)) if options.password: kwargs['password'] = getpass() return kwargs if __name__ == '__main__': raw_argv = sys.argv parser = get_option_parser() options, args = parser.parse_args() if len(args) < 1: err('Missing required parameter "database"') parser.print_help() sys.exit(1) connect = get_connect_kwargs(options) database = args[-1] tables = None if options.tables: tables = [table.strip() for table in options.tables.split(',') if table.strip()] engine = options.engine if engine is None: engine = 'sqlite' if os.path.exists(database) else 'postgresql' introspector = make_introspector(engine, database, **connect) if options.info: cmd_line = ' '.join(raw_argv[1:]) print_header(cmd_line, introspector) print_models(introspector, tables, options.preserve_order, options.views, options.ignore_unknown, not options.legacy_naming) peewee-3.17.7/pyproject.toml000066400000000000000000000001301470346076600160060ustar00rootroot00000000000000[build-system] requires = ["setuptools", "wheel"] build-backend="setuptools.build_meta" peewee-3.17.7/runtests.py000077500000000000000000000101621470346076600153440ustar00rootroot00000000000000#!/usr/bin/env python import optparse import os import shutil import sys import unittest USER = os.environ.get('USER') or 'root' def runtests(suite, verbosity=1, failfast=False): runner = unittest.TextTestRunner(verbosity=verbosity, failfast=failfast) results = runner.run(suite) return results.failures, results.errors def get_option_parser(): usage = 'usage: %prog [-e engine_name, other options] module1, module2 ...' 
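    # e.g. `python runtests.py -e postgres models results` would run the
    # hypothetical tests.models and tests.results modules against Postgres;
    # bare names are prefixed with 'tests.' in collect_tests() below.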
parser = optparse.OptionParser(usage=usage) basic = optparse.OptionGroup(parser, 'Basic test options') basic.add_option( '-e', '--engine', dest='engine', help=('Database engine to test, one of ' '[sqlite, postgres, mysql, mysqlconnector, apsw, sqlcipher,' ' cockroachdb, psycopg3]')) basic.add_option('-v', '--verbosity', dest='verbosity', default=1, type='int', help='Verbosity of output') basic.add_option('-f', '--failfast', action='store_true', default=False, dest='failfast', help='Exit on first failure/error.') basic.add_option('-s', '--slow-tests', action='store_true', default=False, dest='slow_tests', help='Run tests that may be slow.') parser.add_option_group(basic) db_param_map = ( ('MySQL', 'MYSQL', ( # param default disp default val ('host', 'localhost', 'localhost'), ('port', '3306', ''), ('user', USER, USER), ('password', 'blank', ''))), ('Postgresql', 'PSQL', ( ('host', 'localhost', os.environ.get('PGHOST', '')), ('port', '5432', ''), ('user', 'postgres', os.environ.get('PGUSER', '')), ('password', 'blank', os.environ.get('PGPASSWORD', '')))), ('CockroachDB', 'CRDB', ( # param default disp default val ('host', 'localhost', 'localhost'), ('port', '26257', ''), ('user', 'root', 'root'), ('password', 'blank', '')))) for name, prefix, param_list in db_param_map: group = optparse.OptionGroup(parser, '%s connection options' % name) for param, default_disp, default_val in param_list: dest = '%s_%s' % (prefix.lower(), param) opt = '--%s-%s' % (prefix.lower(), param) group.add_option(opt, default=default_val, dest=dest, help=( '%s database %s. Default %s.' % (name, param, default_disp))) parser.add_option_group(group) return parser def collect_tests(args): suite = unittest.TestSuite() if not args: import tests module_suite = unittest.TestLoader().loadTestsFromModule(tests) suite.addTest(module_suite) else: cleaned = ['tests.%s' % arg if not arg.startswith('tests.') else arg for arg in args] user_suite = unittest.TestLoader().loadTestsFromNames(cleaned) suite.addTest(user_suite) return suite if __name__ == '__main__': parser = get_option_parser() options, args = parser.parse_args() if options.engine: os.environ['PEEWEE_TEST_BACKEND'] = options.engine for db in ('mysql', 'psql', 'crdb'): for key in ('host', 'port', 'user', 'password'): att_name = '_'.join((db, key)) value = getattr(options, att_name, None) if value: os.environ['PEEWEE_%s' % att_name.upper()] = value os.environ['PEEWEE_TEST_VERBOSITY'] = str(options.verbosity) if options.slow_tests: os.environ['PEEWEE_SLOW_TESTS'] = '1' suite = collect_tests(args) failures, errors = runtests(suite, options.verbosity, options.failfast) files_to_delete = [ 'peewee_test.db', 'peewee_test', 'tmp.db', 'peewee_test.bdb.db', 'peewee_test.cipher.db'] paths_to_delete = ['peewee_test.bdb.db-journal'] for filename in files_to_delete: if os.path.exists(filename): os.unlink(filename) for path in paths_to_delete: if os.path.exists(path): shutil.rmtree(path) if errors: sys.exit(2) elif failures: sys.exit(1) sys.exit(0) peewee-3.17.7/setup.py000066400000000000000000000165171470346076600146240ustar00rootroot00000000000000import os import platform import re import sys import warnings try: from distutils.command.build_ext import build_ext from distutils.errors import CCompilerError from distutils.errors import DistutilsExecError from distutils.errors import DistutilsPlatformError except ImportError: from setuptools._distutils.command.build_ext import build_ext from setuptools._distutils.errors import CCompilerError from setuptools._distutils.errors import 
DistutilsExecError from setuptools._distutils.errors import DistutilsPlatformError from setuptools import setup from setuptools.extension import Extension f = open(os.path.join(os.path.dirname(__file__), 'README.rst')) readme = f.read() f.close() extension_support = True # Assume we are building C extensions. # Check if Cython is available and use it to generate extension modules. If # Cython is not installed, we will fall back to using the pre-generated C files # (so long as we're running on CPython). try: from Cython.Build import cythonize from Cython.Distutils import build_ext from Cython.Distutils.extension import Extension except ImportError: cython_installed = False else: if platform.python_implementation() != 'CPython': cython_installed = extension_support = False warnings.warn('C extensions disabled as you are not using CPython.') else: cython_installed = True if 'sdist' in sys.argv and not cython_installed: raise Exception('Building sdist requires that Cython be installed.') if sys.version_info[0] < 3: FileNotFoundError = EnvironmentError if cython_installed: src_ext = '.pyx' else: src_ext = '.c' cythonize = lambda obj: obj sqlite_udf_module = Extension( 'playhouse._sqlite_udf', ['playhouse/_sqlite_udf' + src_ext]) sqlite_ext_module = Extension( 'playhouse._sqlite_ext', ['playhouse/_sqlite_ext' + src_ext], libraries=['sqlite3']) def _have_sqlite_extension_support(): import shutil import tempfile try: from distutils.ccompiler import new_compiler from distutils.sysconfig import customize_compiler except ImportError: from setuptools.command.build_ext import customize_compiler from setuptools.command.build_ext import new_compiler libraries = ['sqlite3'] c_code = ('#include <sqlite3.h>\n\n' 'int main(int argc, char **argv) { return 0; }') tmp_dir = tempfile.mkdtemp(prefix='tmp_pw_sqlite3_') bin_file = os.path.join(tmp_dir, 'test_pw_sqlite3') src_file = bin_file + '.c' with open(src_file, 'w') as fh: fh.write(c_code) compiler = new_compiler() customize_compiler(compiler) success = False try: compiler.link_shared_object( compiler.compile([src_file], output_dir=tmp_dir), bin_file, libraries=['sqlite3']) except CCompilerError: print('unable to compile sqlite3 C extensions - missing headers?') except DistutilsExecError: print('unable to compile sqlite3 C extensions - no c compiler?') except DistutilsPlatformError: print('unable to compile sqlite3 C extensions - platform error') except FileNotFoundError: print('unable to compile sqlite3 C extensions - no compiler!') else: success = True shutil.rmtree(tmp_dir) return success # This is set to True if there is extension support and libsqlite3 is found. sqlite_extension_support = False if extension_support: if os.environ.get('NO_SQLITE'): warnings.warn('SQLite extensions will not be built at user request.') elif not _have_sqlite_extension_support(): warnings.warn('Could not find libsqlite3, SQLite extensions will not ' 'be built.') else: sqlite_extension_support = True # Exception we will raise to indicate a failure to build C extensions. class BuildFailure(Exception): pass class _PeeweeBuildExt(build_ext): def run(self): try: build_ext.run(self) except DistutilsPlatformError: raise BuildFailure() def build_extension(self, ext): try: build_ext.build_extension(self, ext) except (CCompilerError, DistutilsExecError, DistutilsPlatformError): raise BuildFailure() def _do_setup(c_extensions, sqlite_extensions): if c_extensions and sqlite_extensions: # Only add modules if the required source files are present.
This is to # work-around python 3.11 and pip being janky. if sys.version_info < (3, 11, 0): ext_modules = [sqlite_ext_module, sqlite_udf_module] else: ext_modules = [] for m in (sqlite_ext_module, sqlite_udf_module): if all(os.path.exists(src) for src in m.sources): ext_modules.append(m) else: print('could not find sources for module: %s!' % m.sources) print('try adding "cython" to your local pyproject.toml') else: ext_modules = None with open('peewee.py', 'rt') as fh: version, = [l for l in fh.readlines() if l.startswith('__version__')] version, = re.search(r'\'([\d\.]+)\'', version).groups() setup( name='peewee', version=version, description='a little orm', long_description=readme, author='Charles Leifer', author_email='coleifer@gmail.com', url='https://github.com/coleifer/peewee/', packages=['playhouse'], py_modules=['peewee', 'pwiz'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', #'Programming Language :: Python :: 3.13', 'Topic :: Database', 'Topic :: Software Development :: Libraries :: Python Modules', ], license='MIT License', platforms=['any'], project_urls={ 'Documentation': 'http://docs.peewee-orm.com', 'Source': 'https://github.com/coleifer/peewee'}, scripts=['pwiz.py'], zip_safe=False, cmdclass={'build_ext': _PeeweeBuildExt}, ext_modules=cythonize(ext_modules)) if extension_support: try: _do_setup(extension_support, sqlite_extension_support) except BuildFailure: print('#' * 75) print('Error compiling C extensions, C extensions will not be built.') print('#' * 75) _do_setup(False, False) else: _do_setup(False, False) peewee-3.17.7/tests/000077500000000000000000000000001470346076600142425ustar00rootroot00000000000000peewee-3.17.7/tests/__init__.py000066400000000000000000000050361470346076600163570ustar00rootroot00000000000000import sys import unittest from peewee import OperationalError # Core modules. from .db_tests import * from .expressions import * from .fields import * from .keys import * from .manytomany import * from .models import * from .model_save import * from .model_sql import * from .prefetch_tests import * from .queries import * from .regressions import * from .results import * from .schema import * from .sql import * from .transactions import * # Extensions.
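# The extension test modules below are imported defensively: each playhouse
# extension pulls in an optional third-party driver, so a missing dependency
# should skip that module's tests rather than abort the whole suite. The
# recurring pattern looks like this (module name is illustrative):
#
#   try:
#       from .some_ext import *
#   except ImportError:
#       print('Unable to import some_ext tests, skipping.')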
try: from .apsw_ext import * except ImportError: print('Unable to import APSW extension tests, skipping.') try: from .cockroachdb import * except: print('Unable to import CockroachDB tests, skipping.') try: from .cysqlite import * except ImportError: print('Unable to import sqlite C extension tests, skipping.') from .dataset import * from .db_url import * from .extra_fields import * from .hybrid import * from .kv import * from .migrations import * try: import mysql.connector from .mysql_ext import * except ImportError: print('Unable to import mysql-connector, skipping mysql_ext tests.') from .pool import * try: from .postgres import * except (ImportError, ImproperlyConfigured): print('Unable to import postgres extension tests, skipping.') except OperationalError: print('Postgresql test database "peewee_test" not found, skipping ' 'the postgres_ext tests.') try: from .psycopg3_ext import * except (ImportError, ImproperlyConfigured): print('Unable to import psycopg3 extension tests, skipping.') from .pwiz_integration import * from .reflection import * from .returning import * from .shortcuts import * from .signals import * try: from .sqlcipher_ext import * except ImportError: print('Unable to import SQLCipher extension tests, skipping.') try: from .sqlite import * except ImportError: print('Unable to import sqlite extension tests, skipping.') try: from .sqlite_changelog import * except ImportError: print('Unable to import sqlite changelog tests, skipping.') from .sqliteq import * from .sqlite_udf import * from .test_utils import * if __name__ == '__main__': from peewee import print_ print_(r"""\x1b[1;31m ______ ______ ______ __ __ ______ ______ /\ == \ /\ ___\ /\ ___\ /\ \ _ \ \ /\ ___\ /\ ___\\ \ \ _-/ \ \ __\ \ \ __\ \ \ \/ ".\ \ \ \ __\ \ \ __\\ \ \_\ \ \_____\ \ \_____\ \ \__/".~\_\ \ \_____\ \ \_____\\ \/_/ \/_____/ \/_____/ \/_/ \/_/ \/_____/ \/_____/ \x1b[0m""") unittest.main(argv=sys.argv) peewee-3.17.7/tests/__main__.py000066400000000000000000000001561470346076600163360ustar00rootroot00000000000000import sys import unittest from tests import * if __name__ == '__main__': unittest.main(argv=sys.argv) peewee-3.17.7/tests/apsw_ext.py000066400000000000000000000135341470346076600164540ustar00rootroot00000000000000import apsw import datetime from playhouse.apsw_ext import * from .base import ModelTestCase from .base import TestModel database = APSWDatabase(':memory:') class User(TestModel): username = TextField() class Message(TestModel): user = ForeignKeyField(User) message = TextField() pub_date = DateTimeField() published = BooleanField() class VTSource(object): def Create(self, db, modulename, dbname, tablename, *args): schema = 'CREATE TABLE x(value)' return schema, VTable() Connect = Create class VTable(object): def BestIndex(self, *args): return def Open(self): return VTCursor() def Disconnect(self): pass Destroy = Disconnect class VTCursor(object): def Filter(self, *a): self.val = 0 def Eof(self): return False def Rowid(self): return self.val def Column(self, col): return self.val def Next(self): self.val += 1 def Close(self): pass class TestAPSWExtension(ModelTestCase): database = database requires = [User, Message] def test_db_register_module(self): database.register_module('series', VTSource()) database.execute_sql('create virtual table foo using series()') curs = database.execute_sql('select * from foo limit 5;') self.assertEqual([v for v, in curs], [0, 1, 2, 3, 4]) database.unregister_module('series') def test_db_register_function(self): @database.func() def title(s): return 
s.title() curs = self.database.execute_sql('SELECT title(?)', ('heLLo',)) self.assertEqual(curs.fetchone()[0], 'Hello') def test_db_register_aggregate(self): @database.aggregate() class First(object): def __init__(self): self._value = None def step(self, value): if self._value is None: self._value = value def finalize(self): return self._value with database.atomic(): for i in range(10): User.create(username='u%s' % i) query = User.select(fn.First(User.username)).order_by(User.username) self.assertEqual(query.scalar(), 'u0') def test_db_register_collation(self): @database.collation() def reverse(lhs, rhs): lhs, rhs = lhs.lower(), rhs.lower() if lhs < rhs: return 1 return -1 if rhs < lhs else 0 with database.atomic(): for i in range(3): User.create(username='u%s' % i) query = (User .select(User.username) .order_by(User.username.collate('reverse'))) self.assertEqual([u.username for u in query], ['u2', 'u1', 'u0']) def test_db_pragmas(self): test_db = APSWDatabase(':memory:', pragmas=( ('cache_size', '1337'), )) test_db.connect() cs = test_db.execute_sql('PRAGMA cache_size;').fetchone()[0] self.assertEqual(cs, 1337) def test_select_insert(self): for user in ('u1', 'u2', 'u3'): User.create(username=user) self.assertEqual([x.username for x in User.select()], ['u1', 'u2', 'u3']) dt = datetime.datetime(2012, 1, 1, 11, 11, 11) Message.create(user=User.get(User.username == 'u1'), message='herps', pub_date=dt, published=True) Message.create(user=User.get(User.username == 'u2'), message='derps', pub_date=dt, published=False) m1 = Message.get(Message.message == 'herps') self.assertEqual(m1.user.username, 'u1') self.assertEqual(m1.pub_date, dt) self.assertEqual(m1.published, True) m2 = Message.get(Message.message == 'derps') self.assertEqual(m2.user.username, 'u2') self.assertEqual(m2.pub_date, dt) self.assertEqual(m2.published, False) def test_update_delete(self): u1 = User.create(username='u1') u2 = User.create(username='u2') u1.username = 'u1-modified' u1.save() self.assertEqual(User.select().count(), 2) self.assertEqual(User.get(User.username == 'u1-modified').id, u1.id) u1.delete_instance() self.assertEqual(User.select().count(), 1) def test_transaction_handling(self): dt = datetime.datetime(2012, 1, 1, 11, 11, 11) def do_ctx_mgr_error(): with self.database.transaction(): User.create(username='u1') raise ValueError self.assertRaises(ValueError, do_ctx_mgr_error) self.assertEqual(User.select().count(), 0) def do_ctx_mgr_success(): with self.database.transaction(): u = User.create(username='test') Message.create(message='testing', user=u, pub_date=dt, published=1) do_ctx_mgr_success() self.assertEqual(User.select().count(), 1) self.assertEqual(Message.select().count(), 1) def create_error(): with self.database.atomic(): u = User.create(username='test') Message.create(message='testing', user=u, pub_date=dt, published=1) raise ValueError self.assertRaises(ValueError, create_error) self.assertEqual(User.select().count(), 1) def create_success(): with self.database.atomic(): u = User.create(username='test') Message.create(message='testing', user=u, pub_date=dt, published=1) create_success() self.assertEqual(User.select().count(), 2) self.assertEqual(Message.select().count(), 2) def test_exists_regression(self): User.create(username='u1') self.assertTrue(User.select().where(User.username == 'u1').exists()) self.assertFalse(User.select().where(User.username == 'ux').exists()) peewee-3.17.7/tests/base.py000066400000000000000000000226231470346076600155330ustar00rootroot00000000000000from contextlib import
contextmanager from functools import wraps import datetime import logging import os import re import unittest try: from unittest import mock except ImportError: from .libs import mock from peewee import * from peewee import sqlite3 from playhouse.cockroachdb import CockroachDatabase from playhouse.cockroachdb import NESTED_TX_MIN_VERSION from playhouse.mysql_ext import MariaDBConnectorDatabase from playhouse.mysql_ext import MySQLConnectorDatabase from playhouse.psycopg3_ext import Psycopg3Database logger = logging.getLogger('peewee') def db_loader(engine, name='peewee_test', db_class=None, **params): if db_class is None: engine_aliases = { SqliteDatabase: ['sqlite', 'sqlite3'], MySQLDatabase: ['mysql'], PostgresqlDatabase: ['postgres', 'postgresql'], Psycopg3Database: ['psycopg3'], MySQLConnectorDatabase: ['mysqlconnector'], MariaDBConnectorDatabase: ['mariadb', 'maridbconnector'], CockroachDatabase: ['cockroach', 'cockroachdb', 'crdb'], } engine_map = dict((alias, db) for db, aliases in engine_aliases.items() for alias in aliases) if engine.lower() not in engine_map: raise Exception('Unsupported engine: %s.' % engine) db_class = engine_map[engine.lower()] if issubclass(db_class, SqliteDatabase) and not name.endswith('.db'): name = '%s.db' % name if name != ':memory:' else name elif issubclass(db_class, MySQLDatabase): params.update(MYSQL_PARAMS) elif issubclass(db_class, CockroachDatabase): params.update(CRDB_PARAMS) elif issubclass(db_class, PostgresqlDatabase): params.update(PSQL_PARAMS) return db_class(name, **params) def get_in_memory_db(**params): return db_loader('sqlite3', ':memory:', thread_safe=False, **params) BACKEND = os.environ.get('PEEWEE_TEST_BACKEND') or 'sqlite' VERBOSITY = int(os.environ.get('PEEWEE_TEST_VERBOSITY') or 1) SLOW_TESTS = bool(os.environ.get('PEEWEE_SLOW_TESTS')) IS_SQLITE = BACKEND.startswith('sqlite') IS_MYSQL = BACKEND.startswith(('mysql', 'maria')) IS_POSTGRESQL = BACKEND.startswith(('postgres', 'psycopg')) IS_CRDB = BACKEND in ('cockroach', 'cockroachdb', 'crdb') IS_PSYCOPG3 = BACKEND == 'psycopg3' def make_db_params(key): params = {} env_vars = [(part, 'PEEWEE_%s_%s' % (key, part.upper())) for part in ('host', 'port', 'user', 'password')] for param, env_var in env_vars: value = os.environ.get(env_var) if value: params[param] = int(value) if param == 'port' else value return params CRDB_PARAMS = make_db_params('CRDB') MYSQL_PARAMS = make_db_params('MYSQL') PSQL_PARAMS = make_db_params('PSQL') if VERBOSITY > 1: handler = logging.StreamHandler() handler.setLevel(logging.INFO) logger.addHandler(handler) if VERBOSITY > 2: handler.setLevel(logging.DEBUG) def new_connection(**kwargs): return db_loader(BACKEND, 'peewee_test', **kwargs) db = new_connection() # Database-specific feature flags. 
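# These flags are computed once, at import time, from the runtime library and
# server versions; individual tests then gate themselves with the
# skip_if/skip_unless helpers defined further down, e.g. (illustrative):
#
#   @skip_unless(IS_SQLITE_24, 'requires sqlite >= 3.24')
#   def test_upsert(self): ...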
IS_SQLITE_OLD = IS_SQLITE and sqlite3.sqlite_version_info < (3, 18) IS_SQLITE_15 = IS_SQLITE and sqlite3.sqlite_version_info >= (3, 15) IS_SQLITE_24 = IS_SQLITE and sqlite3.sqlite_version_info >= (3, 24) IS_SQLITE_25 = IS_SQLITE and sqlite3.sqlite_version_info >= (3, 25) IS_SQLITE_30 = IS_SQLITE and sqlite3.sqlite_version_info >= (3, 30) IS_SQLITE_35 = IS_SQLITE and sqlite3.sqlite_version_info >= (3, 35) IS_SQLITE_37 = IS_SQLITE and sqlite3.sqlite_version_info >= (3, 37) IS_SQLITE_9 = IS_SQLITE and sqlite3.sqlite_version_info >= (3, 9) IS_MYSQL_ADVANCED_FEATURES = False IS_MYSQL_JSON = False if IS_MYSQL: db.connect() server_info = db.server_version if server_info[0] == 8 or server_info[:2] >= (10, 2): IS_MYSQL_ADVANCED_FEATURES = True elif server_info[0] == 0: logger.warning('Could not determine mysql server version.') if server_info[0] >= 8 or ((5, 7) <= server_info[:2] <= (6, 0)): # Needs actual MySQL - not MariaDB. IS_MYSQL_JSON = True db.close() if not IS_MYSQL_ADVANCED_FEATURES: logger.warning('MySQL too old to test certain advanced features.') if IS_CRDB: db.connect() IS_CRDB_NESTED_TX = db.server_version >= NESTED_TX_MIN_VERSION db.close() else: IS_CRDB_NESTED_TX = False class TestModel(Model): class Meta: database = db legacy_table_names = False def __sql__(q, **state): return Context(**state).sql(q).query() class QueryLogHandler(logging.Handler): def __init__(self, *args, **kwargs): self.queries = [] logging.Handler.__init__(self, *args, **kwargs) def emit(self, record): self.queries.append(record) class BaseTestCase(unittest.TestCase): def setUp(self): self._qh = QueryLogHandler() logger.setLevel(logging.DEBUG) logger.addHandler(self._qh) def tearDown(self): logger.removeHandler(self._qh) def assertIsNone(self, value): self.assertTrue(value is None, '%r is not None' % value) def assertIsNotNone(self, value): self.assertTrue(value is not None, '%r is None' % value) @contextmanager def assertRaisesCtx(self, exceptions): try: yield except Exception as exc: if not isinstance(exc, exceptions): raise AssertionError('Got %s, expected %s' % (exc, exceptions)) else: raise AssertionError('No exception was raised.') def assertSQL(self, query, sql, params=None, **state): database = getattr(self, 'database', None) or db state.setdefault('conflict_statement', database.conflict_statement) state.setdefault('conflict_update', database.conflict_update) qsql, qparams = __sql__(query, **state) self.assertEqual(qsql, sql) if params is not None: self.assertEqual(qparams, params) def assertHistory(self, n, expected): queries = [logrecord.msg for logrecord in self._qh.queries[-n:]] queries = [(sql.replace('%s', '?').replace('`', '"'), params) for sql, params in queries] self.assertEqual(queries, expected) @property def history(self): return self._qh.queries def reset_sql_history(self): self._qh.queries = [] @contextmanager def assertQueryCount(self, num): qc = len(self.history) yield self.assertEqual(len(self.history) - qc, num) class DatabaseTestCase(BaseTestCase): database = db def setUp(self): if not self.database.is_closed(): self.database.close() self.database.connect() super(DatabaseTestCase, self).setUp() def tearDown(self): super(DatabaseTestCase, self).tearDown() self.database.close() def execute(self, sql, params=None): return self.database.execute_sql(sql, params) class ModelDatabaseTestCase(DatabaseTestCase): database = db requires = None def setUp(self): super(ModelDatabaseTestCase, self).setUp() self._db_mapping = {} # Override the model's database object with test db. 
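# The original database for each model is stashed in _db_mapping so that
# tearDown() can restore it; this is what lets a single set of model classes
# run against sqlite, postgres, mysql and crdb in turn.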
if self.requires: for model in self.requires: self._db_mapping[model] = model._meta.database model._meta.set_database(self.database) def tearDown(self): # Restore the model's previous database object. if self.requires: for model in self.requires: model._meta.set_database(self._db_mapping[model]) super(ModelDatabaseTestCase, self).tearDown() class ModelTestCase(ModelDatabaseTestCase): database = db requires = None def setUp(self): super(ModelTestCase, self).setUp() if self.requires: self.database.drop_tables(self.requires, safe=True) self.database.create_tables(self.requires) def tearDown(self): # Restore the model's previous database object. try: if self.requires: self.database.drop_tables(self.requires, safe=True) finally: super(ModelTestCase, self).tearDown() def requires_models(*models): def decorator(method): @wraps(method) def inner(self): with self.database.bind_ctx(models, False, False): self.database.drop_tables(models, safe=True) self.database.create_tables(models) try: method(self) finally: try: self.database.drop_tables(models) except: pass return inner return decorator def skip_if(expr, reason='n/a'): def decorator(method): return unittest.skipIf(expr, reason)(method) return decorator def skip_unless(expr, reason='n/a'): def decorator(method): return unittest.skipUnless(expr, reason)(method) return decorator def slow_test(): def decorator(method): return unittest.skipUnless(SLOW_TESTS, 'skipping slow test')(method) return decorator def requires_sqlite(method): return skip_unless(IS_SQLITE, 'requires sqlite')(method) def requires_mysql(method): return skip_unless(IS_MYSQL, 'requires mysql')(method) def requires_postgresql(method): return skip_unless(IS_POSTGRESQL, 'requires postgresql')(method) def requires_pglike(method): return skip_unless(IS_POSTGRESQL or IS_CRDB, 'requires pg-like')(method) peewee-3.17.7/tests/base_models.py000066400000000000000000000045761470346076600171050ustar00rootroot00000000000000from peewee import * from .base import TestModel class Person(TestModel): first = CharField() last = CharField() dob = DateField(index=True) class Meta: indexes = ( (('first', 'last'), True), ) class Note(TestModel): author = ForeignKeyField(Person) content = TextField() class Category(TestModel): parent = ForeignKeyField('self', backref='children', null=True) name = CharField(max_length=20, primary_key=True) class Relationship(TestModel): from_person = ForeignKeyField(Person, backref='relations') to_person = ForeignKeyField(Person, backref='related_to') class Register(TestModel): value = IntegerField() class User(TestModel): username = CharField() class Meta: table_name = 'users' class Account(TestModel): email = CharField() user = ForeignKeyField(User, backref='accounts', null=True) class Tweet(TestModel): user = ForeignKeyField(User, backref='tweets') content = TextField() timestamp = TimestampField() class Favorite(TestModel): user = ForeignKeyField(User, backref='favorites') tweet = ForeignKeyField(Tweet, backref='favorites') class Sample(TestModel): counter = IntegerField() value = FloatField(default=1.0) class SampleMeta(TestModel): sample = ForeignKeyField(Sample, backref='metadata') value = FloatField(default=0.0) class A(TestModel): a = TextField() class B(TestModel): a = ForeignKeyField(A, backref='bs') b = TextField() class C(TestModel): b = ForeignKeyField(B, backref='cs') c = TextField() class Emp(TestModel): first = CharField() last = CharField() empno = CharField(unique=True) class Meta: indexes = ( (('first', 'last'), True), ) class OCTest(TestModel): a = 
CharField(unique=True) b = IntegerField(default=0) c = IntegerField(default=0) class UKVP(TestModel): key = TextField() value = IntegerField() extra = IntegerField() class Meta: # Partial index, the WHERE clause must be reflected in the conflict # target. indexes = [ SQL('CREATE UNIQUE INDEX "ukvp_kve" ON "ukvp" ("key", "value") ' 'WHERE "extra" > 1')] class DfltM(TestModel): name = CharField() dflt1 = IntegerField(default=1) dflt2 = IntegerField(default=lambda: 2) dfltn = IntegerField(null=True) peewee-3.17.7/tests/cockroachdb.py000066400000000000000000000327311470346076600170640ustar00rootroot00000000000000import datetime import uuid from peewee import * from playhouse.cockroachdb import * from .base import IS_CRDB from .base import ModelTestCase from .base import TestModel from .base import db from .base import requires_models from .base import skip_unless from .base_models import User from .postgres_helpers import BaseBinaryJsonFieldTestCase class KV(TestModel): k = TextField(unique=True) v = IntegerField() class Arr(TestModel): title = TextField() tags = ArrayField(TextField, index=False) class JsonModel(TestModel): data = JSONField() class Normal(TestModel): data = TextField() class UID(TestModel): id = UUIDKeyField() title = TextField() class RID(TestModel): id = RowIDField() title = TextField() class UIDNote(TestModel): uid = ForeignKeyField(UID, backref='notes') note = TextField() @skip_unless(IS_CRDB) class TestCockroachDatabase(ModelTestCase): @requires_models(KV) def test_retry_transaction_ok(self): @self.database.retry_transaction() def succeeds(db): k1 = KV.create(k='k1', v=1) k2 = KV.create(k='k2', v=2) return [k1.id, k2.id] id_list = succeeds() self.assertEqual(KV.select().count(), 2) kv_list = [kv.id for kv in KV.select().order_by(KV.k)] self.assertEqual(kv_list, id_list) @requires_models(KV) def test_retry_transfer_example(self): k1 = KV.create(k='k1', v=100) k2 = KV.create(k='k2', v=1) def transfer_funds(from_k, to_k, amt): query = KV.select().where(KV.k.in_((from_k, to_k))) ka, kb = list(query) if from_k != ka.k: ka, kb = kb, ka # Swap order. 
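# (The IN-clause select above returns the two rows in no guaranteed order,
# so ka may not correspond to from_k until after the swap.)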
if ka.v < amt: return False, ka.v, kb.v from_v, = (KV .update(v=KV.v - amt) .where(KV.k == from_k) .returning(KV.v) .execute()) to_v, = (KV .update(v=KV.v + amt) .where(KV.k == to_k) .returning(KV.v) .execute()) return True, from_v.v, to_v.v def thunk(db_ref): return transfer_funds('k1', 'k2', 90) self.assertEqual(run_transaction(self.database, thunk), (True, 10, 91)) def thunk(db_ref): return transfer_funds('k1', 'k2', 5) self.assertEqual(run_transaction(self.database, thunk), (True, 5, 96)) def thunk(db_ref): return transfer_funds('k1', 'k2', 6) self.assertEqual(run_transaction(self.database, thunk), (False, 5, 96)) @requires_models(KV) def test_retry_transfer_example2(self): k1 = KV.create(k='k1', v=100) k2 = KV.create(k='k2', v=1) def transfer_funds(from_k, to_k, amount): def thunk(db_ref): src, dest = KV.select().where(KV.k.in_([from_k, to_k])) if src.k != from_k: src, dest = dest, src if src.v < amount: return False, src.v, dest.v src, = (KV .update(v=KV.v - amount) .where(KV.k == from_k) .returning(KV.v) .execute()) dest, = (KV .update(v=KV.v + amount) .where(KV.k == to_k) .returning(KV.v) .execute()) return True, src.v, dest.v return run_transaction(self.database, thunk, max_attempts=10) self.assertEqual(transfer_funds('k1', 'k2', 90), (True, 10, 91)) self.assertEqual(transfer_funds('k1', 'k2', 11), (False, 10, 91)) self.assertEqual(transfer_funds('k1', 'k2', 10), (True, 0, 101)) @requires_models(KV) def test_retry_transaction_integrityerror(self): KV.create(k='kx', v=0) @self.database.retry_transaction() def fails(db): KV.create(k='k1', v=1) KV.create(k='kx', v=1) with self.assertRaises(IntegrityError): fails() self.assertEqual(KV.select().count(), 1) kv = KV.get(KV.k == 'kx') self.assertEqual(kv.v, 0) @requires_models(KV) def test_run_transaction_helper(self): def succeeds(db): KV.insert_many([('k%s' % i, i) for i in range(10)]).execute() run_transaction(self.database, succeeds) self.assertEqual([(kv.k, kv.v) for kv in KV.select().order_by(KV.k)], [('k%s' % i, i) for i in range(10)]) @requires_models(KV) def test_cannot_nest_run_transaction(self): def insert_row(db): KV.create(k='k1', v=1) with self.database.atomic(): self.assertRaises(Exception, run_transaction, self.database, insert_row) self.assertEqual(KV.select().count(), 0) @requires_models(User) def test_retry_transaction_docs_example(self): def create_user(username): def thunk(db_ref): return User.create(username=username) return self.database.run_transaction(thunk, max_attempts=5) users = [create_user(u) for u in 'abc'] self.assertEqual([u.username for u in users], ['a', 'b', 'c']) query = User.select().order_by(User.username) self.assertEqual([u.username for u in query], ['a', 'b', 'c']) @requires_models(KV) def test_retry_transaction_decorator(self): @self.database.retry_transaction() def retry_decorator(db): content = [] for i in range(5): kv = KV.create(k='k%s' % i, v=i) content.append(kv.k) return content self.assertEqual(retry_decorator(), ['k0', 'k1', 'k2', 'k3', 'k4']) @requires_models(Arr) def test_array_field(self): a1 = Arr.create(title='a1', tags=['t1', 't2']) a2 = Arr.create(title='a2', tags=['t2', 't3']) # Ensure we can read an array back. a1_db = Arr.get(Arr.title == 'a1') self.assertEqual(a1_db.tags, ['t1', 't2']) # Ensure we can filter on arrays. a2_db = Arr.get(Arr.tags == ['t2', 't3']) self.assertEqual(a2_db.id, a2.id) # Item lookups. 
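# Note the subscripts used below are zero-based on the Python side, so
# tags[1] matches 't2', the second element of a1's array, and tags[2] is
# out of range for both rows.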
a1_db = Arr.get(Arr.tags[1] == 't2') self.assertEqual(a1_db.id, a1.id) self.assertRaises(Arr.DoesNotExist, Arr.get, Arr.tags[2] == 'x') @requires_models(Arr) def test_array_field_search(self): def assertAM(where, id_list): query = Arr.select().where(where).order_by(Arr.title) self.assertEqual([a.id for a in query], id_list) data = ( ('a1', ['t1', 't2']), ('a2', ['t2', 't3']), ('a3', ['t3', 't4'])) id_list = Arr.insert_many(data).execute() a1, a2, a3 = [pk for pk, in id_list] assertAM(Value('t2') == fn.ANY(Arr.tags), [a1, a2]) assertAM(Value('t1') == fn.Any(Arr.tags), [a1]) assertAM(Value('tx') == fn.Any(Arr.tags), []) # Use the contains operator explicitly. assertAM(SQL("tags::text[] @> ARRAY['t2']"), [a1, a2]) # Use the porcelain. assertAM(Arr.tags.contains('t2'), [a1, a2]) assertAM(Arr.tags.contains('t3'), [a2, a3]) assertAM(Arr.tags.contains('t1', 't2'), [a1]) assertAM(Arr.tags.contains('t3', 't4'), [a3]) assertAM(Arr.tags.contains('t2', 't3', 't4'), []) assertAM(Arr.tags.contains_any('t2'), [a1, a2]) assertAM(Arr.tags.contains_any('t3'), [a2, a3]) assertAM(Arr.tags.contains_any('t1', 't2'), [a1, a2]) assertAM(Arr.tags.contains_any('t3', 't4'), [a2, a3]) assertAM(Arr.tags.contains_any('t2', 't3', 't4'), [a1, a2, a3]) @requires_models(Arr) def test_array_field_index(self): a1 = Arr.create(title='a1', tags=['a1', 'a2']) a2 = Arr.create(title='a2', tags=['a2', 'a3', 'a4', 'a5']) # NOTE: CRDB does not support array slicing. query = (Arr .select(Arr.tags[1].alias('st')) .order_by(Arr.title)) self.assertEqual([a.st for a in query], ['a2', 'a3']) @requires_models(UID) def test_uuid_key_field(self): # UUID primary-key is automatically populated and returned, and is of # the correct type. u1 = UID.create(title='u1') self.assertTrue(u1.id is not None) self.assertTrue(isinstance(u1.id, uuid.UUID)) # Bulk-insert works as expected. id_list = UID.insert_many([('u2',), ('u3',)]).execute() u2_id, u3_id = [pk for pk, in id_list] self.assertTrue(isinstance(u2_id, uuid.UUID)) # We can perform lookups using UUID() type. u2 = UID.get(UID.id == u2_id) self.assertEqual(u2.title, 'u2') # Get the UUID hex and query using that. u3 = UID.get(UID.id == u3_id.hex) self.assertEqual(u3.title, 'u3') @requires_models(RID) def test_rowid_field(self): r1 = RID.create(title='r1') self.assertTrue(r1.id is not None) # Bulk-insert works as expected. id_list = RID.insert_many([('r2',), ('r3',)]).execute() r2_id, r3_id = [pk for pk, in id_list] r2 = RID.get(RID.id == r2_id) self.assertEqual(r2.title, 'r2') @requires_models(KV) def test_readonly_transaction(self): kv = KV.create(k='k1', v=1) # Table doesn't exist yet. with self.assertRaises((ProgrammingError, InternalError)): with self.database.atomic('-10s'): kv_db = KV.get(KV.k == 'k1') # Cannot write in a read-only transaction with self.assertRaises((ProgrammingError, InternalError)): with self.database.atomic(datetime.datetime.now()): KV.create(k='k2', v=2) # Without system time there are no issues. 
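# (The positional argument passed to atomic() in the two blocks above is
# CRDB's AS OF SYSTEM TIME clause, which begins a read-only historical
# transaction: reads from before the table existed fail, as do writes.)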
with self.database.atomic(): kv_db = KV.get(KV.k == 'k1') self.assertEqual(kv.id, kv_db.id) @requires_models(KV) def test_transaction_priority(self): with self.database.atomic(priority='HIGH'): KV.create(k='k1', v=1) with self.assertRaises(IntegrityError): with self.database.atomic(priority='LOW'): KV.create(k='k1', v=2) with self.assertRaises(ValueError): with self.database.atomic(priority='HUH'): KV.create(k='k2', v=2) self.assertEqual(KV.select().count(), 1) kv = KV.get() self.assertEqual((kv.k, kv.v), ('k1', 1)) @requires_models(UID, UIDNote) def test_uuid_key_as_fk(self): # This is covered thoroughly elsewhere, but added here just for fun. u1, u2, u3 = [UID.create(title='u%s' % i) for i in (1, 2, 3)] UIDNote.create(uid=u1, note='u1-1') UIDNote.create(uid=u2, note='u2-1') UIDNote.create(uid=u2, note='u2-2') with self.assertQueryCount(1): query = (UIDNote .select(UIDNote, UID) .join(UID) .where(UID.title == 'u2') .order_by(UIDNote.note)) self.assertEqual([(un.note, un.uid.title) for un in query], [('u2-1', 'u2'), ('u2-2', 'u2')]) query = (UID .select(UID, fn.COUNT(UIDNote.id).alias('note_count')) .join(UIDNote, JOIN.LEFT_OUTER) .group_by(UID) .order_by(fn.COUNT(UIDNote.id).desc())) self.assertEqual([(u.title, u.note_count) for u in query], [('u2', 2), ('u1', 1), ('u3', 0)]) @skip_unless(IS_CRDB) class TestCockroachDatabaseJson(BaseBinaryJsonFieldTestCase, ModelTestCase): database = db M = JsonModel N = Normal requires = [JsonModel, Normal] # General integration tests. class KV2(TestModel): k2 = CharField() v2 = IntegerField() class Post(TestModel): content = TextField() timestamp = DateTimeField(default=datetime.datetime.now) class PostNote(TestModel): post = ForeignKeyField(Post, backref='notes', primary_key=True) note = TextField() @skip_unless(IS_CRDB) class TestCockroachIntegration(ModelTestCase): @requires_models(KV, KV2) def test_compound_select(self): KV.insert_many([('10', 1), ('40', 4)]).execute() KV2.insert_many([('20', 2), ('30', 3)]).execute() lhs = KV.select(KV.k.cast('INT'), KV.v) rhs = KV2.select(KV2.k2.cast('INT'), KV2.v2) query = (lhs | rhs).order_by(SQL('1')) self.assertEqual([(obj.k, obj.v) for obj in query], [(10, 1), (20, 2), (30, 3), (40, 4)]) @requires_models(Post, PostNote) def test_primary_key_as_foreign_key(self): p = Post.create(content='p') n = PostNote.create(post=p, note='n') p_db = Post.select().get() self.assertEqual([n.note for n in p_db.notes], ['n']) with self.assertQueryCount(1): query = (PostNote .select(PostNote, Post) .join(Post)) self.assertEqual([(n.post.content, n.note) for n in query], [('p', 'n')]) @skip_unless(IS_CRDB) class TestEnsureServerVersionSet(ModelTestCase): # References GH issue #2584. requires = [KV] def test_server_version_set(self): # Mimic state of newly-initialized database.
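# server_version is normally populated lazily on first connect; clearing it
# below reproduces the state where atomic() runs before any query has been
# issued, which is the regression covered by the issue referenced above.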
self.database.close() self.database.server_version = None with self.database.atomic() as txn: KV.create(k='k1', v=1) self.assertTrue(self.database.server_version is not None) peewee-3.17.7/tests/cysqlite.py000066400000000000000000000431631470346076600164600ustar00rootroot00000000000000import os import sys from peewee import * from peewee import sqlite3 from playhouse.sqlite_ext import CYTHON_SQLITE_EXTENSIONS from playhouse.sqlite_ext import * from playhouse._sqlite_ext import BloomFilter from playhouse._sqlite_ext import peewee_bloomfilter_add from playhouse._sqlite_ext import peewee_bloomfilter_contains from .base import BaseTestCase from .base import DatabaseTestCase from .base import TestModel from .base import db_loader from .base import skip_unless database = CSqliteExtDatabase('peewee_test.db', timeout=100, hash_functions=1) class CyDatabaseTestCase(DatabaseTestCase): database = database def tearDown(self): super(CyDatabaseTestCase, self).tearDown() if os.path.exists(self.database.database): os.unlink(self.database.database) def execute(self, sql, *params): return self.database.execute_sql(sql, params) class TestCySqliteHelpers(CyDatabaseTestCase): def test_autocommit(self): self.assertTrue(self.database.autocommit) self.database.begin() self.assertFalse(self.database.autocommit) self.database.rollback() self.assertTrue(self.database.autocommit) def test_commit_hook(self): state = {} @self.database.on_commit def on_commit(): state.setdefault('commits', 0) state['commits'] += 1 self.execute('create table register (value text)') self.assertEqual(state['commits'], 1) # Check hook is preserved. self.database.close() self.database.connect() self.execute('insert into register (value) values (?), (?)', 'foo', 'bar') self.assertEqual(state['commits'], 2) curs = self.execute('select * from register order by value;') results = curs.fetchall() self.assertEqual([tuple(r) for r in results], [('bar',), ('foo',)]) self.assertEqual(state['commits'], 2) def test_rollback_hook(self): state = {} @self.database.on_rollback def on_rollback(): state.setdefault('rollbacks', 0) state['rollbacks'] += 1 self.execute('create table register (value text);') self.assertEqual(state, {}) # Check hook is preserved. self.database.close() self.database.connect() self.database.begin() self.execute('insert into register (value) values (?)', 'test') self.database.rollback() self.assertEqual(state, {'rollbacks': 1}) curs = self.execute('select * from register;') self.assertEqual(curs.fetchall(), []) def test_update_hook(self): state = [] @self.database.on_update def on_update(query, db, table, rowid): state.append((query, db, table, rowid)) self.execute('create table register (value text)') self.execute('insert into register (value) values (?), (?)', 'foo', 'bar') self.assertEqual(state, [ ('INSERT', 'main', 'register', 1), ('INSERT', 'main', 'register', 2)]) # Check hook is preserved. self.database.close() self.database.connect() self.execute('update register set value = ? 
where rowid = ?', 'baz', 1) self.assertEqual(state, [ ('INSERT', 'main', 'register', 1), ('INSERT', 'main', 'register', 2), ('UPDATE', 'main', 'register', 1)]) self.execute('delete from register where rowid=?;', 2) self.assertEqual(state, [ ('INSERT', 'main', 'register', 1), ('INSERT', 'main', 'register', 2), ('UPDATE', 'main', 'register', 1), ('DELETE', 'main', 'register', 2)]) def test_properties(self): self.assertTrue(self.database.cache_used is not None) HUser = Table('users', ('id', 'username')) class TestHashFunctions(CyDatabaseTestCase): database = database def setUp(self): super(TestHashFunctions, self).setUp() self.database.execute_sql( 'create table users (id integer not null primary key, ' 'username text not null)') def test_md5(self): for username in ('charlie', 'huey', 'zaizee'): HUser.insert({HUser.username: username}).execute(self.database) query = (HUser .select(HUser.username, fn.SUBSTR(fn.SHA1(HUser.username), 1, 6).alias('sha')) .order_by(HUser.username) .tuples() .execute(self.database)) self.assertEqual(query[:], [ ('charlie', 'd8cd10'), ('huey', '89b31a'), ('zaizee', 'b4dcf9')]) class TestBackup(CyDatabaseTestCase): backup_filenames = set(('test_backup.db', 'test_backup1.db', 'test_backup2.db')) def tearDown(self): super(TestBackup, self).tearDown() for backup_filename in self.backup_filenames: if os.path.exists(backup_filename): os.unlink(backup_filename) def _populate_test_data(self, nrows=100, db=None): db = self.database if db is None else db db.execute_sql('CREATE TABLE register (id INTEGER NOT NULL PRIMARY KEY' ', value INTEGER NOT NULL)') with db.atomic(): for i in range(nrows): db.execute_sql('INSERT INTO register (value) VALUES (?)', (i,)) def test_backup(self): self._populate_test_data() # Back-up to an in-memory database and verify contents. 
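# backup() wraps SQLite's online-backup API, so the source connection can
# remain open and in use while pages are copied; test_backup_progress below
# drives the same machinery page-by-page with a progress callback.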
other_db = CSqliteExtDatabase(':memory:') self.database.backup(other_db) cursor = other_db.execute_sql('SELECT value FROM register ORDER BY ' 'value;') self.assertEqual([val for val, in cursor.fetchall()], list(range(100))) other_db.close() def test_backup_preserve_pagesize(self): db1 = CSqliteExtDatabase('test_backup1.db') with db1.connection_context(): db1.page_size = 8192 self._populate_test_data(db=db1) db1.connect() self.assertEqual(db1.page_size, 8192) db2 = CSqliteExtDatabase('test_backup2.db') db1.backup(db2) self.assertEqual(db2.page_size, 8192) nrows, = db2.execute_sql('select count(*) from register;').fetchone() self.assertEqual(nrows, 100) def test_backup_to_file(self): self._populate_test_data() self.database.backup_to_file('test_backup.db') backup_db = CSqliteExtDatabase('test_backup.db') cursor = backup_db.execute_sql('SELECT value FROM register ORDER BY ' 'value;') self.assertEqual([val for val, in cursor.fetchall()], list(range(100))) backup_db.close() def test_backup_progress(self): self._populate_test_data() accum = [] def progress(remaining, total, is_done): accum.append((remaining, total, is_done)) other_db = CSqliteExtDatabase(':memory:') self.database.backup(other_db, pages=1, progress=progress) self.assertTrue(len(accum) > 0) sql = 'select value from register order by value;' self.assertEqual([r for r, in other_db.execute_sql(sql)], list(range(100))) other_db.close() def test_backup_progress_error(self): self._populate_test_data() def broken_progress(remaining, total, is_done): raise ValueError('broken') other_db = CSqliteExtDatabase(':memory:') self.assertRaises(ValueError, self.database.backup, other_db, progress=broken_progress) other_db.close() class TestBlob(CyDatabaseTestCase): def setUp(self): super(TestBlob, self).setUp() self.Register = Table('register', ('id', 'data')) self.execute('CREATE TABLE register (id INTEGER NOT NULL PRIMARY KEY, ' 'data BLOB NOT NULL)') def create_blob_row(self, nbytes): Register = self.Register.bind(self.database) Register.insert({Register.data: ZeroBlob(nbytes)}).execute() return self.database.last_insert_rowid def test_blob(self): rowid1024 = self.create_blob_row(1024) rowid16 = self.create_blob_row(16) blob = Blob(self.database, 'register', 'data', rowid1024) self.assertEqual(len(blob), 1024) blob.write(b'x' * 1022) blob.write(b'zz') blob.seek(1020) self.assertEqual(blob.tell(), 1020) data = blob.read(3) self.assertEqual(data, b'xxz') self.assertEqual(blob.read(), b'z') self.assertEqual(blob.read(), b'') blob.seek(-10, 2) self.assertEqual(blob.tell(), 1014) self.assertEqual(blob.read(), b'xxxxxxxxzz') blob.reopen(rowid16) self.assertEqual(blob.tell(), 0) self.assertEqual(len(blob), 16) blob.write(b'x' * 15) self.assertEqual(blob.tell(), 15) def test_blob_exceed_size(self): rowid = self.create_blob_row(16) blob = self.database.blob_open('register', 'data', rowid) with self.assertRaisesCtx(ValueError): blob.seek(17, 0) with self.assertRaisesCtx(ValueError): blob.write(b'x' * 17) blob.write(b'x' * 16) self.assertEqual(blob.tell(), 16) blob.seek(0) data = blob.read(17) # Attempting to read more data is OK. 
self.assertEqual(data, b'x' * 16) data = blob.read(1) self.assertEqual(data, b'') blob.seek(0) blob.write(b'0123456789abcdef') self.assertEqual(blob[0], b'0') self.assertEqual(blob[-1], b'f') self.assertRaises(IndexError, lambda: data[17]) blob.close() def test_blob_errors_opening(self): rowid = self.create_blob_row(4) with self.assertRaisesCtx(OperationalError): blob = self.database.blob_open('register', 'data', rowid + 1) with self.assertRaisesCtx(OperationalError): blob = self.database.blob_open('register', 'missing', rowid) with self.assertRaisesCtx(OperationalError): blob = self.database.blob_open('missing', 'data', rowid) def test_blob_operating_on_closed(self): rowid = self.create_blob_row(4) blob = self.database.blob_open('register', 'data', rowid) self.assertEqual(len(blob), 4) blob.close() with self.assertRaisesCtx(InterfaceError): len(blob) self.assertRaises(InterfaceError, blob.read) self.assertRaises(InterfaceError, blob.write, b'foo') self.assertRaises(InterfaceError, blob.seek, 0, 0) self.assertRaises(InterfaceError, blob.tell) self.assertRaises(InterfaceError, blob.reopen, rowid) blob.close() # Safe to call again. def test_blob_readonly(self): rowid = self.create_blob_row(4) blob = self.database.blob_open('register', 'data', rowid) blob.write(b'huey') blob.seek(0) self.assertEqual(blob.read(), b'huey') blob.close() blob = self.database.blob_open('register', 'data', rowid, True) self.assertEqual(blob.read(), b'huey') blob.seek(0) with self.assertRaisesCtx(OperationalError): blob.write(b'meow') # BLOB is read-only. self.assertEqual(blob.read(), b'huey') class TestBloomFilterIntegration(CyDatabaseTestCase): database = CSqliteExtDatabase(':memory:', bloomfilter=True) def setUp(self): super(TestBloomFilterIntegration, self).setUp() self.execute('create table register (data TEXT);') def populate(self): accum = [] with self.database.atomic(): for i in 'abcdefghijklmnopqrstuvwxyz': keys = [i * j for j in range(1, 10)] accum.extend(keys) self.execute('insert into register (data) values %s' % ', '.join(['(?)'] * len(keys)), *keys) curs = self.execute('select * from register ' 'order by data limit 5 offset 6') self.assertEqual([key for key, in curs.fetchall()], ['aaaaaaa', 'aaaaaaaa', 'aaaaaaaaa', 'b', 'bb']) return accum def test_bloomfilter(self): all_keys = self.populate() curs = self.execute('select bloomfilter(data, ?) from register', 1024 * 128) buf, = curs.fetchone() self.assertEqual(len(buf), 1024 * 128) for key in all_keys: curs = self.execute('select bloomfilter_contains(?, ?)', key, buf) self.assertEqual(curs.fetchone()[0], 1) for key in all_keys: key += '-test' curs = self.execute('select bloomfilter_contains(?, ?)', key, buf) self.assertEqual(curs.fetchone()[0], 0) @skip_unless(sys.version_info[0] >= 3, 'requires python 3') def test_bf_stored(self): class Base(TestModel): class Meta: database = self.database class BF(Base): data = BlobField() class Reg(Base): key = TextField() value = TextField() self.database.create_tables([Reg, BF]) with self.database.atomic(): for i in range(100): Reg.insert(key='k%03d' % i, value='v%064d' % i).execute() agg = (Reg .select(fn.bloomfilter(Reg.value)) .where(Reg.key.endswith('0'))) n = BF.insert(data=agg).execute() query = (Reg .select() .join(BF, on=(fn.bloomfilter_contains(Reg.value, BF.data))) .order_by(Reg.key)) self.assertTrue(all(r.value.endswith('0') for r in query)) self.assertEqual(len(query), 10) # Perform update, adding the values that end with "1" now. 
for i in range(1, 100, 10): BF.update(data=fn.bloomfilter_add('v%064d' % i, BF.data)).execute() query = (Reg .select() .join(BF, on=(fn.bloomfilter_contains(Reg.value, BF.data))) .order_by(Reg.key)) self.assertTrue(all(r.value.endswith(('0', '1')) for r in query)) self.assertEqual(len(query), 20) class TestBloomFilter(BaseTestCase): n = 1024 def setUp(self): super(TestBloomFilter, self).setUp() self.bf = BloomFilter(self.n) def test_bloomfilter(self): keys = ('charlie', 'huey', 'mickey', 'zaizee', 'nuggie', 'foo', 'bar', 'baz') self.bf.add(*keys) for key in keys: self.assertTrue(key in self.bf) for key in keys: self.assertFalse(key + '-x' in self.bf) self.assertFalse(key + '-y' in self.bf) self.assertFalse(key + ' ' in self.bf) def test_bloomfilter_buffer(self): self.assertEqual(len(self.bf), self.n) # Buffer is all zeroes when uninitialized. buf = self.bf.to_buffer() self.assertEqual(len(buf), self.n) self.assertEqual(buf, b'\x00' * self.n) keys = ('alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta') self.bf.add(*keys) for key in keys: self.assertTrue(key in self.bf) self.assertFalse(key + '-x' in self.bf) # Convert to buffer and then populate a 2nd bloom-filter. buf = self.bf.to_buffer() new_bf = BloomFilter.from_buffer(buf) for key in keys: self.assertTrue(key in new_bf) self.assertFalse(key + '-x' in new_bf) # Ensure that the two underlying bloom-filter buffers are equal. self.assertEqual(len(new_bf), self.n) new_buf = new_bf.to_buffer() self.assertEqual(buf, new_buf) def test_bloomfilter_functions(self): bf = BloomFilter() for i in range(1000): bf.add('k%04d' % i) buf = bf.to_buffer() for i in range(1000): self.assertTrue(peewee_bloomfilter_contains('k%04d' % i, buf)) for i in range(1000, 3000): self.assertFalse(peewee_bloomfilter_contains('k%04d' % i, buf)) # Add 1000-2000 now and verify the bloom filter is updated. for i in range(1000, 2000): buf = peewee_bloomfilter_add('k%04d' % i, buf) for i in range(2000): self.assertTrue(peewee_bloomfilter_contains('k%04d' % i, buf)) # These still are not present. 
for i in range(2000, 4000): self.assertFalse(peewee_bloomfilter_contains('k%04d' % i, buf)) class DataTypes(TableFunction): columns = ('key', 'value') params = () name = 'data_types' def initialize(self): self.values = ( None, 1, 2., u'unicode str', b'byte str', False, True) self.idx = 0 self.n = len(self.values) def iterate(self, idx): if idx < self.n: return ('k%s' % idx, self.values[idx]) raise StopIteration @skip_unless(sqlite3.sqlite_version_info >= (3, 9), 'requires sqlite >= 3.9') class TestDataTypesTableFunction(CyDatabaseTestCase): database = db_loader('sqlite') def test_data_types_table_function(self): self.database.register_table_function(DataTypes) cursor = self.database.execute_sql('SELECT key, value ' 'FROM data_types() ORDER BY key') self.assertEqual(cursor.fetchall(), [ ('k0', None), ('k1', 1), ('k2', 2.), ('k3', u'unicode str'), ('k4', b'byte str'), ('k5', 0), ('k6', 1), ]) peewee-3.17.7/tests/dataset.py000066400000000000000000000505451470346076600162520ustar00rootroot00000000000000import csv import datetime import json import operator import os import sys import tempfile try: from StringIO import StringIO except ImportError: from io import StringIO from peewee import * from playhouse.dataset import DataSet from playhouse.dataset import Table from .base import IS_SQLITE_OLD from .base import ModelTestCase from .base import TestModel from .base import db_loader from .base import skip_if db = db_loader('sqlite') class User(TestModel): username = TextField(primary_key=True) class Note(TestModel): user = ForeignKeyField(User) content = TextField() timestamp = DateTimeField() status = IntegerField(default=1) class Category(TestModel): name = TextField() parent = ForeignKeyField('self', null=True) class TestDataSet(ModelTestCase): database = db requires = [User, Note, Category] names = ['charlie', 'huey', 'peewee', 'mickey', 'zaizee'] def setUp(self): if os.path.exists(self.database.database): os.unlink(self.database.database) super(TestDataSet, self).setUp() self.dataset = DataSet('sqlite:///%s' % self.database.database) def tearDown(self): self.dataset.close() super(TestDataSet, self).tearDown() def test_create_index(self): users = self.dataset['users'] users.insert(username='u0') users.create_index(['username'], True) with self.assertRaises(IntegrityError): users.insert(username='u0') def test_pass_database(self): db = SqliteDatabase(':memory:') dataset = DataSet(db) self.assertEqual(dataset._database_path, ':memory:') users = dataset['users'] users.insert(username='charlie') self.assertEqual(list(users), [{'id': 1, 'username': 'charlie'}]) @skip_if(IS_SQLITE_OLD) def test_with_views(self): self.dataset.query('CREATE VIEW notes_public AS ' 'SELECT content, timestamp FROM note ' 'WHERE status = 1 ORDER BY timestamp DESC') try: self.assertTrue('notes_public' in self.dataset.views) self.assertFalse('notes_public' in self.dataset.tables) users = self.dataset['user'] with self.dataset.transaction(): users.insert(username='charlie') users.insert(username='huey') notes = self.dataset['note'] for i, (ct, st) in enumerate([('n1', 1), ('n2', 2), ('n3', 1)]): notes.insert(content=ct, status=st, user_id='charlie', timestamp=datetime.datetime(2022, 1, 1 + i)) self.assertFalse('notes_public' in self.dataset) # Create a new dataset instance with views enabled. 
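# include_views=True asks DataSet to introspect views alongside tables; the
# test only ever reads from the view, which is all a SQL view supports here
# anyway (absent INSTEAD OF triggers).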
dataset = DataSet(self.dataset._database, include_views=True) self.assertTrue('notes_public' in dataset) public = dataset['notes_public'] self.assertEqual(public.columns, ['content', 'timestamp']) self.assertEqual(list(public), [ {'content': 'n3', 'timestamp': datetime.datetime(2022, 1, 3)}, {'content': 'n1', 'timestamp': datetime.datetime(2022, 1, 1)}]) finally: self.dataset.query('DROP VIEW notes_public') def test_item_apis(self): dataset = DataSet('sqlite:///:memory:') users = dataset['users'] users.insert(username='charlie') self.assertEqual(list(users), [{'id': 1, 'username': 'charlie'}]) users[2] = {'username': 'huey', 'color': 'white'} self.assertEqual(list(users), [ {'id': 1, 'username': 'charlie', 'color': None}, {'id': 2, 'username': 'huey', 'color': 'white'}]) users[2] = {'username': 'huey-x', 'kind': 'cat'} self.assertEqual(list(users), [ {'id': 1, 'username': 'charlie', 'color': None, 'kind': None}, {'id': 2, 'username': 'huey-x', 'color': 'white', 'kind': 'cat'}]) del users[2] self.assertEqual(list(users), [ {'id': 1, 'username': 'charlie', 'color': None, 'kind': None}]) users[1] = {'kind': 'person'} users[2] = {'username': 'zaizee'} users[2] = {'kind': 'cat'} self.assertEqual(list(users), [ {'id': 1, 'username': 'charlie', 'color': None, 'kind': 'person'}, {'id': 2, 'username': 'zaizee', 'color': None, 'kind': 'cat'}]) def create_users(self, n=2): user = self.dataset['user'] for i in range(min(n, len(self.names))): user.insert(username=self.names[i]) def test_special_char_table(self): self.database.execute_sql('CREATE TABLE "hello!!world" ("data" TEXT);') self.database.execute_sql('INSERT INTO "hello!!world" VALUES (?)', ('test',)) ds = DataSet('sqlite:///%s' % self.database.database) table = ds['hello!!world'] model = table.model_class self.assertEqual(model._meta.table_name, 'hello!!world') def test_column_preservation(self): ds = DataSet('sqlite:///:memory:') books = ds['books'] books.insert(book_id='BOOK1') books.insert(bookId='BOOK2') data = [(row['book_id'] or '', row['bookId'] or '') for row in books] self.assertEqual(sorted(data), [ ('', 'BOOK2'), ('BOOK1', '')]) def test_case_insensitive(self): db.execute_sql('CREATE TABLE "SomeTable" (data TEXT);') tables = sorted(self.dataset.tables) self.assertEqual(tables, ['SomeTable', 'category', 'note', 'user']) table = self.dataset['HueyMickey'] self.assertEqual(table.model_class._meta.table_name, 'HueyMickey') tables = sorted(self.dataset.tables) self.assertEqual( tables, ['HueyMickey', 'SomeTable', 'category', 'note', 'user']) # Subsequent lookup succeeds. self.dataset['HueyMickey'] def test_introspect(self): tables = sorted(self.dataset.tables) self.assertEqual(tables, ['category', 'note', 'user']) user = self.dataset['user'] columns = sorted(user.columns) self.assertEqual(columns, ['username']) note = self.dataset['note'] columns = sorted(note.columns) self.assertEqual(columns, ['content', 'id', 'status', 'timestamp', 'user_id']) category = self.dataset['category'] columns = sorted(category.columns) self.assertEqual(columns, ['id', 'name', 'parent_id']) def test_update_cache(self): self.assertEqual(sorted(self.dataset.tables), ['category', 'note', 'user']) db.execute_sql('create table "foo" (id INTEGER, data TEXT)') Foo = self.dataset['foo'] self.assertEqual(sorted(Foo.columns), ['data', 'id']) self.assertTrue('foo' in self.dataset._models) self.dataset._models['foo'].drop_table() self.dataset.update_cache() self.assertTrue('foo' not in self.database.get_tables()) # This will create the table again. 
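# DataSet creates tables lazily: indexing the dataset with an unknown name
# issues a CREATE TABLE with just an auto-incrementing id column, which the
# assertion on Foo.columns verifies.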
Foo = self.dataset['foo'] self.assertTrue('foo' in self.database.get_tables()) self.assertEqual(Foo.columns, ['id']) def assertQuery(self, query, expected, sort_key='id'): key = operator.itemgetter(sort_key) self.assertEqual( sorted(list(query), key=key), sorted(expected, key=key)) def test_insert(self): self.create_users() user = self.dataset['user'] expected = [ {'username': 'charlie'}, {'username': 'huey'}] self.assertQuery(user.all(), expected, 'username') user.insert(username='mickey', age=5) expected = [ {'username': 'charlie', 'age': None}, {'username': 'huey', 'age': None}, {'username': 'mickey', 'age': 5}] self.assertQuery(user.all(), expected, 'username') query = user.find(username='charlie') expected = [{'username': 'charlie', 'age': None}] self.assertQuery(query, expected, 'username') self.assertEqual( user.find_one(username='mickey'), {'username': 'mickey', 'age': 5}) self.assertTrue(user.find_one(username='xx') is None) def test_update(self): self.create_users() user = self.dataset['user'] self.assertEqual(user.update(favorite_color='green'), 2) expected = [ {'username': 'charlie', 'favorite_color': 'green'}, {'username': 'huey', 'favorite_color': 'green'}] self.assertQuery(user.all(), expected, 'username') res = user.update( favorite_color='blue', username='huey', columns=['username']) self.assertEqual(res, 1) expected[1]['favorite_color'] = 'blue' self.assertQuery(user.all(), expected, 'username') def test_delete(self): self.create_users() user = self.dataset['user'] self.assertEqual(user.delete(username='huey'), 1) self.assertEqual(list(user.all()), [{'username': 'charlie'}]) def test_find(self): self.create_users(5) user = self.dataset['user'] def assertUsernames(query, expected): self.assertEqual( sorted(row['username'] for row in query), sorted(expected)) assertUsernames(user.all(), self.names) assertUsernames(user.find(), self.names) assertUsernames(user.find(username='charlie'), ['charlie']) assertUsernames(user.find(username='missing'), []) user.update(favorite_color='green') for username in ['zaizee', 'huey']: user.update( favorite_color='blue', username=username, columns=['username']) assertUsernames( user.find(favorite_color='green'), ['charlie', 'mickey', 'peewee']) assertUsernames( user.find(favorite_color='blue'), ['zaizee', 'huey']) assertUsernames( user.find(favorite_color='green', username='peewee'), ['peewee']) self.assertEqual( user.find_one(username='charlie'), {'username': 'charlie', 'favorite_color': 'green'}) def test_magic_methods(self): self.create_users(5) user = self.dataset['user'] # __len__() self.assertEqual(len(user), 5) # __iter__() users = sorted([u for u in user], key=operator.itemgetter('username')) self.assertEqual(users[0], {'username': 'charlie'}) self.assertEqual(users[-1], {'username': 'zaizee'}) # __contains__() self.assertTrue('user' in self.dataset) self.assertFalse('missing' in self.dataset) def test_foreign_keys(self): user = self.dataset['user'] user.insert(username='charlie') note = self.dataset['note'] for i in range(1, 4): note.insert( content='note %s' % i, timestamp=datetime.date(2014, 1, i), status=i, user_id='charlie') notes = sorted(note.all(), key=operator.itemgetter('id')) self.assertEqual(notes[0], { 'content': 'note 1', 'id': 1, 'status': 1, 'timestamp': datetime.datetime(2014, 1, 1), 'user_id': 'charlie'}) self.assertEqual(notes[-1], { 'content': 'note 3', 'id': 3, 'status': 3, 'timestamp': datetime.datetime(2014, 1, 3), 'user_id': 'charlie'}) user.insert(username='mickey') note.update(user_id='mickey', id=3, 
columns=['id']) self.assertEqual(note.find(user_id='charlie').count(), 2) self.assertEqual(note.find(user_id='mickey').count(), 1) category = self.dataset['category'] category.insert(name='c1') c1 = category.find_one(name='c1') self.assertEqual(c1, {'id': 1, 'name': 'c1', 'parent_id': None}) category.insert(name='c2', parent_id=1) c2 = category.find_one(parent_id=1) self.assertEqual(c2, {'id': 2, 'name': 'c2', 'parent_id': 1}) self.assertEqual(category.delete(parent_id=1), 1) self.assertEqual(list(category.all()), [c1]) def test_transactions(self): user = self.dataset['user'] with self.dataset.transaction() as txn: user.insert(username='u1') with self.dataset.transaction() as txn2: user.insert(username='u2') txn2.rollback() with self.dataset.transaction() as txn3: user.insert(username='u3') with self.dataset.transaction() as txn4: user.insert(username='u4') txn3.rollback() with self.dataset.transaction() as txn5: user.insert(username='u5') with self.dataset.transaction() as txn6: with self.dataset.transaction() as txn7: user.insert(username='u6') txn7.rollback() user.insert(username='u7') user.insert(username='u8') self.assertQuery(user.all(), [ {'username': 'u1'}, {'username': 'u5'}, {'username': 'u7'}, {'username': 'u8'}, ], 'username') def test_export(self): self.create_users() user = self.dataset['user'] buf = StringIO() self.dataset.freeze(user.all(), 'json', file_obj=buf) self.assertEqual(buf.getvalue(), ( '[{"username": "charlie"}, {"username": "huey"}]')) buf = StringIO() self.dataset.freeze(user.all(), 'csv', file_obj=buf) self.assertEqual(buf.getvalue().splitlines(), [ 'username', 'charlie', 'huey']) @skip_if(sys.version_info[0] < 3, 'requires python 3.x') def test_freeze_thaw_csv_utf8(self): self._test_freeze_thaw_utf8('csv') def test_freeze_thaw_json_utf8(self): self._test_freeze_thaw_utf8('json') def _test_freeze_thaw_utf8(self, fmt): username_bytes = b'\xd0\x92obby' # Bobby with cyrillic "B". username_str = username_bytes.decode('utf8') u = User.create(username=username_str) # Freeze the data in the given format. user = self.dataset['user'] filename = tempfile.mktemp() # Get a filename. self.dataset.freeze(user.all(), fmt, filename) # Clear out the table and reload. User.delete().execute() self.assertEqual(list(user.all()), []) # Thaw the frozen data.
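# ----------------------------------------------------------------------
# Aside: a sketch of round-tripping a table through freeze()/thaw(), the
# APIs under test here. Assumes an in-memory DataSet; the filename is
# illustrative.
from playhouse.dataset import DataSet

ds = DataSet('sqlite:///:memory:')
users = ds['users']
users.insert(username='charlie')

ds.freeze(users.all(), format='json', filename='users.json')
users.delete(username='charlie')                  # Empty the table...
users.thaw(format='json', filename='users.json')  # ...and re-import it.
assert [row['username'] for row in users] == ['charlie']
# ----------------------------------------------------------------------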
n = user.thaw(format=fmt, filename=filename) self.assertEqual(n, 1) self.assertEqual(list(user.all()), [{'username': username_str}]) def test_freeze_thaw(self): user = self.dataset['user'] user.insert(username='charlie') note = self.dataset['note'] note_ts = datetime.datetime(2017, 1, 2, 3, 4, 5) note.insert(content='foo', timestamp=note_ts, user_id='charlie', status=2) buf = StringIO() self.dataset.freeze(note.all(), 'json', file_obj=buf) self.assertEqual(json.loads(buf.getvalue()), [{ 'id': 1, 'user_id': 'charlie', 'content': 'foo', 'status': 2, 'timestamp': '2017-01-02 03:04:05'}]) note.delete(id=1) self.assertEqual(list(note.all()), []) buf.seek(0) note.thaw(format='json', file_obj=buf) self.assertEqual(list(note.all()), [{ 'id': 1, 'user_id': 'charlie', 'content': 'foo', 'status': 2, 'timestamp': note_ts}]) def test_table_column_creation(self): table = self.dataset['people'] table.insert(name='charlie') self.assertEqual(table.columns, ['id', 'name']) self.assertEqual(list(table.all()), [{'id': 1, 'name': 'charlie'}]) def test_table_column_creation_field_col(self): table = self.dataset['people'] table.insert(**{'First Name': 'charlie'}) self.assertEqual(table.columns, ['id', 'First_Name']) self.assertEqual(list(table.all()), [{'id': 1, 'First_Name': 'charlie'}]) table.insert(**{'First Name': 'huey'}) self.assertEqual(table.columns, ['id', 'First_Name']) self.assertEqual(list(table.all().order_by(table.model_class.id)), [ {'id': 1, 'First_Name': 'charlie'}, {'id': 2, 'First_Name': 'huey'}]) def test_import_json(self): table = self.dataset['people'] table.insert(name='charlie') data = [ {'name': 'zaizee', 'foo': 1}, {'name': 'huey'}, {'name': 'mickey', 'foo': 2}, {'bar': None}] buf = StringIO() json.dump(data, buf) buf.seek(0) # All rows but the last will be inserted. count = self.dataset.thaw('people', 'json', file_obj=buf, strict=True) self.assertEqual(count, 3) names = [row['name'] for row in self.dataset['people'].all()] self.assertEqual( set(names), set(['charlie', 'huey', 'mickey', 'zaizee'])) # The columns have not changed. self.assertEqual(table.columns, ['id', 'name']) # No rows are inserted because no column overlap between `user` and the # provided data. buf.seek(0) count = self.dataset.thaw('user', 'json', file_obj=buf, strict=True) self.assertEqual(count, 0) # Create a new table and load all data into it. table = self.dataset['more_people'] # All rows and columns will be inserted. buf.seek(0) count = self.dataset.thaw('more_people', 'json', file_obj=buf) self.assertEqual(count, 4) self.assertEqual( set(table.columns), set(['id', 'name', 'bar', 'foo'])) self.assertEqual(sorted(table.all(), key=lambda row: row['id']), [ {'id': 1, 'name': 'zaizee', 'foo': 1, 'bar': None}, {'id': 2, 'name': 'huey', 'foo': None, 'bar': None}, {'id': 3, 'name': 'mickey', 'foo': 2, 'bar': None}, {'id': 4, 'name': None, 'foo': None, 'bar': None}, ]) def test_import_csv(self): table = self.dataset['people'] table.insert(name='charlie') data = [ ('zaizee', 1, None), ('huey', 2, 'foo'), ('mickey', 3, 'baze')] buf = StringIO() writer = csv.writer(buf) writer.writerow(['name', 'foo', 'bar']) writer.writerows(data) buf.seek(0) count = self.dataset.thaw('people', 'csv', file_obj=buf, strict=True) self.assertEqual(count, 3) names = [row['name'] for row in self.dataset['people'].all()] self.assertEqual( set(names), set(['charlie', 'huey', 'mickey', 'zaizee'])) # The columns have not changed. 
self.assertEqual(table.columns, ['id', 'name']) # No rows are inserted because no column overlap between `user` and the # provided data. buf.seek(0) count = self.dataset.thaw('user', 'csv', file_obj=buf, strict=True) self.assertEqual(count, 0) # Create a new table and load all data into it. table = self.dataset['more_people'] # All rows and columns will be inserted. buf.seek(0) count = self.dataset.thaw('more_people', 'csv', file_obj=buf) self.assertEqual(count, 3) self.assertEqual( set(table.columns), set(['id', 'name', 'bar', 'foo'])) self.assertEqual(sorted(table.all(), key=lambda row: row['id']), [ {'id': 1, 'name': 'zaizee', 'foo': '1', 'bar': ''}, {'id': 2, 'name': 'huey', 'foo': '2', 'bar': 'foo'}, {'id': 3, 'name': 'mickey', 'foo': '3', 'bar': 'baze'}, ]) def test_table_thaw(self): table = self.dataset['people'] data = json.dumps([{'name': 'charlie'}, {'name': 'huey', 'color': 'white'}]) self.assertEqual(table.thaw(file_obj=StringIO(data), format='json'), 2) self.assertEqual(list(table.all()), [ {'id': 1, 'name': 'charlie', 'color': None}, {'id': 2, 'name': 'huey', 'color': 'white'}, ]) def test_creating_tables(self): new_table = self.dataset['new_table'] new_table.insert(data='foo') ref2 = self.dataset['new_table'] self.assertEqual(list(ref2.all()), [{'id': 1, 'data': 'foo'}]) peewee-3.17.7/tests/db_tests.py000066400000000000000000001024111470346076600164220ustar00rootroot00000000000000from itertools import permutations try: from Queue import Queue except ImportError: from queue import Queue import platform import re import threading from peewee import * from peewee import Database from peewee import FIELD from peewee import attrdict from peewee import sort_models from .base import BaseTestCase from .base import DatabaseTestCase from .base import IS_CRDB from .base import IS_MYSQL from .base import IS_POSTGRESQL from .base import IS_SQLITE from .base import ModelTestCase from .base import TestModel from .base import db from .base import db_loader from .base import get_in_memory_db from .base import new_connection from .base import requires_models from .base import requires_postgresql from .base_models import Category from .base_models import Tweet from .base_models import User class TestDatabase(DatabaseTestCase): database = db_loader('sqlite3') def test_pragmas(self): self.database.cache_size = -2048 self.assertEqual(self.database.cache_size, -2048) self.database.cache_size = -4096 self.assertEqual(self.database.cache_size, -4096) self.database.foreign_keys = 'on' self.assertEqual(self.database.foreign_keys, 1) self.database.foreign_keys = 'off' self.assertEqual(self.database.foreign_keys, 0) def test_appid_user_version(self): self.assertEqual(self.database.application_id, 0) self.assertEqual(self.database.user_version, 0) self.database.application_id = 1 self.database.user_version = 2 self.assertEqual(self.database.application_id, 1) self.assertEqual(self.database.user_version, 2) self.assertTrue(self.database.close()) self.assertTrue(self.database.connect()) self.assertEqual(self.database.application_id, 1) self.assertEqual(self.database.user_version, 2) def test_timeout_semantics(self): self.assertEqual(self.database.timeout, 5) self.assertEqual(self.database.pragma('busy_timeout'), 5000) self.database.timeout = 2.5 self.assertEqual(self.database.timeout, 2.5) self.assertEqual(self.database.pragma('busy_timeout'), 2500) self.database.close() self.database.connect() self.assertEqual(self.database.timeout, 2.5) self.assertEqual(self.database.pragma('busy_timeout'), 2500) def 
test_pragmas_deferred(self): pragmas = (('journal_mode', 'wal'),) db = SqliteDatabase(None, pragmas=pragmas) self.assertEqual(db._pragmas, pragmas) # Test pragmas preserved after initializing. db.init(':memory:') self.assertEqual(db._pragmas, pragmas) db = SqliteDatabase(None) self.assertEqual(db._pragmas, ()) # Test pragmas are set and subsequently overwritten. db.init(':memory:', pragmas=pragmas) self.assertEqual(db._pragmas, pragmas) db.init(':memory:', pragmas=()) self.assertEqual(db._pragmas, ()) # Test when specified twice, the previous value is overwritten. db = SqliteDatabase(None, pragmas=pragmas) db.init(':memory:', pragmas=(('cache_size', -8000),)) self.assertEqual(db._pragmas, (('cache_size', -8000),)) def test_pragmas_as_dict(self): pragmas = {'journal_mode': 'wal'} pragma_list = [('journal_mode', 'wal')] db = SqliteDatabase(':memory:', pragmas=pragmas) self.assertEqual(db._pragmas, pragma_list) # Test deferred databases correctly handle pragma dicts. db = SqliteDatabase(None, pragmas=pragmas) self.assertEqual(db._pragmas, pragma_list) db.init(':memory:') self.assertEqual(db._pragmas, pragma_list) db.init(':memory:', pragmas={}) self.assertEqual(db._pragmas, []) def test_pragmas_permanent(self): db = SqliteDatabase(':memory:') db.execute_sql('pragma foreign_keys=0') self.assertEqual(db.foreign_keys, 0) db.pragma('foreign_keys', 1, True) self.assertEqual(db.foreign_keys, 1) db.close() db.connect() self.assertEqual(db.foreign_keys, 1) def test_context_settings(self): class TestDatabase(Database): field_types = {'BIGINT': 'TEST_BIGINT', 'TEXT': 'TEST_TEXT'} operations = {'LIKE': '~', 'NEW': '->>'} param = '$' test_db = TestDatabase(None) state = test_db.get_sql_context().state self.assertEqual(state.field_types['BIGINT'], 'TEST_BIGINT') self.assertEqual(state.field_types['TEXT'], 'TEST_TEXT') self.assertEqual(state.field_types['INT'], FIELD.INT) self.assertEqual(state.field_types['VARCHAR'], FIELD.VARCHAR) self.assertEqual(state.operations['LIKE'], '~') self.assertEqual(state.operations['NEW'], '->>') self.assertEqual(state.operations['ILIKE'], 'ILIKE') self.assertEqual(state.param, '$') self.assertEqual(state.quote, '""') test_db2 = TestDatabase(None, field_types={'BIGINT': 'XXX_BIGINT', 'INT': 'XXX_INT'}) state = test_db2.get_sql_context().state self.assertEqual(state.field_types['BIGINT'], 'XXX_BIGINT') self.assertEqual(state.field_types['TEXT'], 'TEST_TEXT') self.assertEqual(state.field_types['INT'], 'XXX_INT') self.assertEqual(state.field_types['VARCHAR'], FIELD.VARCHAR) def test_connection_state(self): conn = self.database.connection() self.assertFalse(self.database.is_closed()) self.database.close() self.assertTrue(self.database.is_closed()) conn = self.database.connection() self.assertFalse(self.database.is_closed()) def test_db_context_manager(self): self.database.close() self.assertTrue(self.database.is_closed()) with self.database: self.assertFalse(self.database.is_closed()) self.assertTrue(self.database.is_closed()) self.database.connect() self.assertFalse(self.database.is_closed()) # Enter context with an already-open db. with self.database: self.assertFalse(self.database.is_closed()) # Closed after exit. 
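# ----------------------------------------------------------------------
# Aside: a sketch of the pragma-configuration behavior tested above.
# Values are illustrative.
from peewee import SqliteDatabase

db = SqliteDatabase(':memory:', pragmas={
    'journal_mode': 'wal',      # Applied on every new connection.
    'cache_size': -1024 * 8})   # 8MB page cache.
db.connect()
db.pragma('foreign_keys', 1, permanent=True)  # Also re-run on reconnect.
db.close()
# ----------------------------------------------------------------------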
self.assertTrue(self.database.is_closed()) def test_connection_initialization(self): state = {'count': 0} class TestDatabase(SqliteDatabase): def _initialize_connection(self, conn): state['count'] += 1 db = TestDatabase(':memory:') self.assertEqual(state['count'], 0) conn = db.connection() self.assertEqual(state['count'], 1) # Since already connected, nothing happens here. conn = db.connection() self.assertEqual(state['count'], 1) def test_connect_semantics(self): state = {'count': 0} class TestDatabase(SqliteDatabase): def _initialize_connection(self, conn): state['count'] += 1 db = TestDatabase(':memory:') db.connect() self.assertEqual(state['count'], 1) self.assertRaises(OperationalError, db.connect) self.assertEqual(state['count'], 1) self.assertFalse(db.connect(reuse_if_open=True)) self.assertEqual(state['count'], 1) with db: self.assertEqual(state['count'], 1) self.assertFalse(db.is_closed()) self.assertTrue(db.is_closed()) with db: self.assertEqual(state['count'], 2) def test_execute_sql(self): self.database.execute_sql('CREATE TABLE register (val INTEGER);') self.database.execute_sql('INSERT INTO register (val) VALUES (?), (?)', (1337, 31337)) cursor = self.database.execute_sql( 'SELECT val FROM register ORDER BY val') self.assertEqual(cursor.fetchall(), [(1337,), (31337,)]) self.database.execute_sql('DROP TABLE register;') def test_bind_helpers(self): db = get_in_memory_db() alt_db = get_in_memory_db() class Base(Model): class Meta: database = db class A(Base): a = TextField() class B(Base): b = TextField() db.create_tables([A, B]) # Temporarily bind A to alt_db. with alt_db.bind_ctx([A]): self.assertFalse(A.table_exists()) self.assertTrue(B.table_exists()) self.assertTrue(A.table_exists()) self.assertTrue(B.table_exists()) alt_db.bind([A]) self.assertFalse(A.table_exists()) self.assertTrue(B.table_exists()) db.close() alt_db.close() def test_bind_regression(self): class Base(Model): class Meta: database = None class A(Base): pass class B(Base): pass class AB(Base): a = ForeignKeyField(A) b = ForeignKeyField(B) self.assertTrue(A._meta.database is None) db = get_in_memory_db() with db.bind_ctx([A, B]): self.assertEqual(A._meta.database, db) self.assertEqual(B._meta.database, db) self.assertEqual(AB._meta.database, db) self.assertTrue(A._meta.database is None) self.assertTrue(B._meta.database is None) self.assertTrue(AB._meta.database is None) class C(Base): a = ForeignKeyField(A) with db.bind_ctx([C], bind_refs=False): self.assertEqual(C._meta.database, db) self.assertTrue(A._meta.database is None) self.assertTrue(C._meta.database is None) self.assertTrue(A._meta.database is None) def test_batch_commit(self): class PatchCommitDatabase(SqliteDatabase): commits = 0 def begin(self): pass def commit(self): self.commits += 1 db = PatchCommitDatabase(':memory:') def assertBatches(n_objs, batch_size, n_commits): accum = [] source = range(n_objs) db.commits = 0 for item in db.batch_commit(source, batch_size): accum.append(item) self.assertEqual(accum, list(range(n_objs))) self.assertEqual(db.commits, n_commits) assertBatches(12, 1, 12) assertBatches(12, 2, 6) assertBatches(12, 3, 4) assertBatches(12, 4, 3) assertBatches(12, 5, 3) assertBatches(12, 6, 2) assertBatches(12, 7, 2) assertBatches(12, 11, 2) assertBatches(12, 12, 1) assertBatches(12, 13, 1) def test_server_version(self): class FakeDatabase(Database): server_version = None def _connect(self): return 1 def _close(self, conn): pass def _set_server_version(self, conn): self.server_version = (1, 33, 7) db = FakeDatabase(':memory:') 
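# ----------------------------------------------------------------------
# Aside: a sketch of Database.bind_ctx(), which the bind tests above
# exercise. Models and databases are illustrative.
from peewee import Model, SqliteDatabase, TextField

main_db = SqliteDatabase(':memory:')
other_db = SqliteDatabase(':memory:')

class Entry(Model):
    content = TextField()
    class Meta:
        database = main_db

with other_db.bind_ctx([Entry]):
    # Inside the block, Entry reads from and writes to other_db.
    other_db.create_tables([Entry])
    Entry.create(content='bound to other_db')
# Outside the block, Entry is bound to main_db again.
# ----------------------------------------------------------------------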
self.assertTrue(db.server_version is None) db.connect() self.assertEqual(db.server_version, (1, 33, 7)) db.close() self.assertEqual(db.server_version, (1, 33, 7)) db.server_version = (1, 2, 3) db.connect() self.assertEqual(db.server_version, (1, 2, 3)) db.close() def test_explicit_connect(self): db = get_in_memory_db(autoconnect=False) self.assertRaises(InterfaceError, db.execute_sql, 'pragma cache_size') with db: db.execute_sql('pragma cache_size') self.assertRaises(InterfaceError, db.cursor) class TestThreadSafety(ModelTestCase): # HACK: This workaround increases the Sqlite busy timeout when tests are # being run on certain architectures. if IS_SQLITE and platform.machine() not in ('i386', 'i686', 'x86_64'): database = new_connection(timeout=60) nthreads = 4 nrows = 10 requires = [User] def test_multiple_writers(self): def create_users(idx): for i in range(idx * self.nrows, (idx + 1) * self.nrows): User.create(username='u%d' % i) threads = [] for i in range(self.nthreads): threads.append(threading.Thread(target=create_users, args=(i,))) for t in threads: t.start() for t in threads: t.join() self.assertEqual(User.select().count(), self.nrows * self.nthreads) def test_multiple_readers(self): data = Queue() def read_user_count(n): for i in range(n): data.put(User.select().count()) threads = [] for i in range(self.nthreads): threads.append(threading.Thread(target=read_user_count, args=(self.nrows,))) for t in threads: t.start() for t in threads: t.join() self.assertEqual(data.qsize(), self.nrows * self.nthreads) def test_mt_general(self): def connect_close(): for _ in range(self.nrows): self.database.connect() with self.database.atomic() as txn: self.database.execute_sql('select 1').fetchone() self.database.close() threads = [] for i in range(self.nthreads): threads.append(threading.Thread(target=connect_close)) for t in threads: t.start() for t in threads: t.join() class TestDeferredDatabase(BaseTestCase): def test_deferred_database(self): deferred_db = SqliteDatabase(None) self.assertTrue(deferred_db.deferred) class DeferredModel(Model): class Meta: database = deferred_db self.assertRaises(Exception, deferred_db.connect) query = DeferredModel.select() self.assertRaises(Exception, query.execute) deferred_db.init(':memory:') self.assertFalse(deferred_db.deferred) conn = deferred_db.connect() self.assertFalse(deferred_db.is_closed()) DeferredModel._schema.create_all() self.assertEqual(list(DeferredModel.select()), []) deferred_db.init(None) self.assertTrue(deferred_db.deferred) # The connection was automatically closed. 
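# ----------------------------------------------------------------------
# Aside: a sketch of deferred database initialization, as covered by
# TestDeferredDatabase above. Names are illustrative.
from peewee import Model, SqliteDatabase, TextField

deferred_db = SqliteDatabase(None)  # Settings are not known yet.

class Journal(Model):
    data = TextField()
    class Meta:
        database = deferred_db

deferred_db.init(':memory:')  # Supply the settings later...
deferred_db.connect()         # ...then connect as usual.
# ----------------------------------------------------------------------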
self.assertTrue(deferred_db.is_closed()) class CatToy(TestModel): description = TextField() class Meta: schema = 'huey' @requires_postgresql class TestSchemaNamespace(ModelTestCase): requires = [CatToy] def setUp(self): with self.database: self.execute('CREATE SCHEMA huey;') super(TestSchemaNamespace, self).setUp() def tearDown(self): super(TestSchemaNamespace, self).tearDown() with self.database: self.execute('DROP SCHEMA huey;') def test_schema(self): toy = CatToy.create(description='fur mouse') toy_db = CatToy.select().where(CatToy.id == toy.id).get() self.assertEqual(toy.id, toy_db.id) self.assertEqual(toy.description, toy_db.description) class TestSqliteIsolation(ModelTestCase): database = db_loader('sqlite3') requires = [User] def test_sqlite_isolation(self): for username in ('u1', 'u2', 'u3'): User.create(username=username) new_db = db_loader('sqlite3') curs = new_db.execute_sql('SELECT COUNT(*) FROM users') self.assertEqual(curs.fetchone()[0], 3) self.assertEqual(User.select().count(), 3) self.assertEqual(User.delete().execute(), 3) with self.database.atomic(): User.create(username='u4') User.create(username='u5') # Second conn does not see the changes. curs = new_db.execute_sql('SELECT COUNT(*) FROM users') self.assertEqual(curs.fetchone()[0], 0) # Third conn does not see the changes. new_db2 = db_loader('sqlite3') curs = new_db2.execute_sql('SELECT COUNT(*) FROM users') self.assertEqual(curs.fetchone()[0], 0) # Original connection sees its own changes. self.assertEqual(User.select().count(), 2) curs = new_db.execute_sql('SELECT COUNT(*) FROM users') self.assertEqual(curs.fetchone()[0], 2) class UniqueModel(TestModel): name = CharField(unique=True) class IndexedModel(TestModel): first = CharField() last = CharField() dob = DateField() class Meta: indexes = ( (('first', 'last', 'dob'), True), (('first', 'last'), False), ) class Note(TestModel): content = TextField() ts = DateTimeField() status = IntegerField() class Meta: table_name = 'notes' class Person(TestModel): first = CharField() last = CharField() email = CharField() class Meta: indexes = ( (('last', 'first'), False), ) class TestIntrospection(ModelTestCase): requires = [Category, User, UniqueModel, IndexedModel, Person] def test_table_exists(self): self.assertTrue(self.database.table_exists(User._meta.table_name)) self.assertFalse(self.database.table_exists('nuggies')) self.assertTrue(self.database.table_exists(User)) class X(TestModel): pass self.assertFalse(self.database.table_exists(X)) def test_get_tables(self): tables = self.database.get_tables() required = set(m._meta.table_name for m in self.requires) self.assertTrue(required.issubset(set(tables))) UniqueModel._schema.drop_all() tables = self.database.get_tables() self.assertFalse(UniqueModel._meta.table_name in tables) def test_get_indexes(self): indexes = self.database.get_indexes('unique_model') data = [(index.name, index.columns, index.unique, index.table) for index in indexes if index.name not in ('unique_model_pkey', 'PRIMARY')] self.assertEqual(data, [ ('unique_model_name', ['name'], True, 'unique_model')]) indexes = self.database.get_indexes('indexed_model') data = [(index.name, index.columns, index.unique, index.table) for index in indexes if index.name not in ('indexed_model_pkey', 'PRIMARY')] self.assertEqual(sorted(data), [ ('indexed_model_first_last', ['first', 'last'], False, 'indexed_model'), ('indexed_model_first_last_dob', ['first', 'last', 'dob'], True, 'indexed_model')]) # Multi-column index where columns are in different order than declared # on 
the table. indexes = self.database.get_indexes('person') data = [(index.name, index.columns, index.unique) for index in indexes if index.name not in ('person_pkey', 'PRIMARY')] self.assertEqual(data, [ ('person_last_first', ['last', 'first'], False)]) def test_get_columns(self): columns = self.database.get_columns('indexed_model') data = [(c.name, c.null, c.primary_key, c.table) for c in columns] self.assertEqual(data, [ ('id', False, True, 'indexed_model'), ('first', False, False, 'indexed_model'), ('last', False, False, 'indexed_model'), ('dob', False, False, 'indexed_model')]) columns = self.database.get_columns('category') data = [(c.name, c.null, c.primary_key, c.table) for c in columns] self.assertEqual(data, [ ('name', False, True, 'category'), ('parent_id', True, False, 'category')]) def test_get_primary_keys(self): primary_keys = self.database.get_primary_keys('users') self.assertEqual(primary_keys, ['id']) primary_keys = self.database.get_primary_keys('category') self.assertEqual(primary_keys, ['name']) @requires_models(Note) def test_get_views(self): def normalize_view_meta(view_meta): sql_ws_norm = re.sub(r'[\n\s]+', ' ', view_meta.sql.strip('; ')) return view_meta.name, (sql_ws_norm .replace('`peewee_test`.', '') .replace('`notes`.', '') .replace('notes.', '') .replace('`', '')) def assertViews(expected): # Create two sample views. self.database.execute_sql('CREATE VIEW notes_public AS ' 'SELECT content, ts FROM notes ' 'WHERE status = 1 ORDER BY ts DESC') self.database.execute_sql('CREATE VIEW notes_deleted AS ' 'SELECT content FROM notes ' 'WHERE status = 9 ORDER BY id DESC') try: views = self.database.get_views() normalized = sorted([normalize_view_meta(v) for v in views]) self.assertEqual(normalized, expected) # Ensure that we can use get_columns to introspect views. columns = self.database.get_columns('notes_deleted') self.assertEqual([c.name for c in columns], ['content']) columns = self.database.get_columns('notes_public') self.assertEqual([c.name for c in columns], ['content', 'ts']) finally: self.database.execute_sql('DROP VIEW notes_public;') self.database.execute_sql('DROP VIEW notes_deleted;') # Unfortunately, all databases seem to represent VIEW definitions # differently internally. 
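# ----------------------------------------------------------------------
# Aside: a sketch of the introspection APIs exercised by
# TestIntrospection. The table is illustrative.
from peewee import SqliteDatabase

db = SqliteDatabase(':memory:')
db.execute_sql('CREATE TABLE person (id INTEGER PRIMARY KEY, name TEXT)')
print(db.get_tables())                             # ['person']
print(db.get_primary_keys('person'))               # ['id']
print([c.name for c in db.get_columns('person')])  # ['id', 'name']
print(db.get_indexes('person'))                    # Index metadata, if any.
# ----------------------------------------------------------------------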
if IS_SQLITE: assertViews([ ('notes_deleted', ('CREATE VIEW notes_deleted AS ' 'SELECT content FROM notes ' 'WHERE status = 9 ORDER BY id DESC')), ('notes_public', ('CREATE VIEW notes_public AS ' 'SELECT content, ts FROM notes ' 'WHERE status = 1 ORDER BY ts DESC'))]) elif IS_MYSQL: assertViews([ ('notes_deleted', ('select content AS content from notes ' 'where status = 9 order by id desc')), ('notes_public', ('select content AS content,ts AS ts from notes ' 'where status = 1 order by ts desc'))]) elif IS_POSTGRESQL: assertViews([ ('notes_deleted', ('SELECT content FROM notes ' 'WHERE (status = 9) ORDER BY id DESC')), ('notes_public', ('SELECT content, ts FROM notes ' 'WHERE (status = 1) ORDER BY ts DESC'))]) elif IS_CRDB: assertViews([ ('notes_deleted', ('SELECT content FROM peewee_test.public.notes ' 'WHERE status = 9 ORDER BY id DESC')), ('notes_public', ('SELECT content, ts FROM peewee_test.public.notes ' 'WHERE status = 1 ORDER BY ts DESC'))]) @requires_models(User, Tweet, Category) def test_get_foreign_keys(self): foreign_keys = self.database.get_foreign_keys('tweet') data = [(fk.column, fk.dest_table, fk.dest_column, fk.table) for fk in foreign_keys] self.assertEqual(data, [ ('user_id', 'users', 'id', 'tweet')]) foreign_keys = self.database.get_foreign_keys('category') data = [(fk.column, fk.dest_table, fk.dest_column, fk.table) for fk in foreign_keys] self.assertEqual(data, [ ('parent_id', 'category', 'name', 'category')]) class TestSortModels(BaseTestCase): def test_sort_models(self): class A(Model): pass class B(Model): a = ForeignKeyField(A) class C(Model): b = ForeignKeyField(B) class D(Model): c = ForeignKeyField(C) class E(Model): pass models = [A, B, C, D, E] for list_of_models in permutations(models): sorted_models = sort_models(list_of_models) self.assertEqual(sorted_models, models) def test_sort_models_multi_fk(self): class Inventory(Model): pass class Sheet(Model): inventory = ForeignKeyField(Inventory) class Program(Model): inventory = ForeignKeyField(Inventory) class ProgramSheet(Model): program = ForeignKeyField(Program) sheet = ForeignKeyField(Sheet) class ProgramPart(Model): program_sheet = ForeignKeyField(ProgramSheet) class Offal(Model): program_sheet = ForeignKeyField(ProgramSheet) sheet = ForeignKeyField(Sheet) M = [Inventory, Sheet, Program, ProgramSheet, ProgramPart, Offal] sorted_models = sort_models(M) self.assertEqual(sorted_models, [ Inventory, Program, Sheet, ProgramSheet, Offal, ProgramPart, ]) for list_of_models in permutations(M): self.assertEqual(sort_models(list_of_models), sorted_models) class TestDBProxy(BaseTestCase): def test_proxy_context_manager(self): db = Proxy() class User(Model): username = TextField() class Meta: database = db self.assertRaises(AttributeError, User.create_table) sqlite_db = SqliteDatabase(':memory:') db.initialize(sqlite_db) User.create_table() with db: self.assertFalse(db.is_closed()) self.assertTrue(db.is_closed()) def test_db_proxy(self): db = Proxy() class BaseModel(Model): class Meta: database = db class User(BaseModel): username = TextField() class Tweet(BaseModel): user = ForeignKeyField(User, backref='tweets') message = TextField() sqlite_db = SqliteDatabase(':memory:') db.initialize(sqlite_db) self.assertEqual(User._meta.database.database, ':memory:') self.assertEqual(Tweet._meta.database.database, ':memory:') self.assertTrue(User._meta.database.is_closed()) self.assertTrue(Tweet._meta.database.is_closed()) sqlite_db.connect() self.assertFalse(User._meta.database.is_closed()) 
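# ----------------------------------------------------------------------
# Aside: a sketch of deferring database selection with Proxy, the
# pattern tested by TestDBProxy. Illustrative models.
from peewee import Model, Proxy, SqliteDatabase, TextField

database_proxy = Proxy()

class BaseModel(Model):
    class Meta:
        database = database_proxy

# At startup, point the proxy at the concrete database:
database_proxy.initialize(SqliteDatabase(':memory:'))
# ----------------------------------------------------------------------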
self.assertFalse(Tweet._meta.database.is_closed()) sqlite_db.close() def test_proxy_decorator(self): db = DatabaseProxy() @db.connection_context() def with_connection(): self.assertFalse(db.is_closed()) @db.atomic() def with_transaction(): self.assertTrue(db.in_transaction()) @db.manual_commit() def with_manual_commit(): self.assertTrue(db.in_transaction()) db.initialize(SqliteDatabase(':memory:')) with_connection() self.assertTrue(db.is_closed()) with_transaction() self.assertFalse(db.in_transaction()) with_manual_commit() self.assertFalse(db.in_transaction()) def test_proxy_bind_ctx_callbacks(self): db = Proxy() class BaseModel(Model): class Meta: database = db class Hook(BaseModel): data = BlobField() # Attaches hook to configure blob-type. self.assertTrue(Hook.data._constructor is bytearray) class CustomSqliteDB(SqliteDatabase): sentinel = object() def get_binary_type(self): return self.sentinel custom_db = CustomSqliteDB(':memory:') with custom_db.bind_ctx([Hook]): self.assertTrue(Hook.data._constructor is custom_db.sentinel) self.assertTrue(Hook.data._constructor is bytearray) class Data(TestModel): key = TextField() value = TextField() class Meta: schema = 'main' class TestAttachDatabase(ModelTestCase): database = db_loader('sqlite3') requires = [Data] def test_attach(self): database = self.database Data.create(key='k1', value='v1') Data.create(key='k2', value='v2') # Attach an in-memory cache database. database.attach(':memory:', 'cache') # Clone data into the in-memory cache. class CacheData(Data): class Meta: schema = 'cache' self.assertFalse(CacheData.table_exists()) CacheData.create_table(safe=False) self.assertTrue(CacheData.table_exists()) (CacheData .insert_from(Data.select(), fields=[Data.id, Data.key, Data.value]) .execute()) # Update the source data. query = Data.update({Data.value: Data.value + '-x'}) self.assertEqual(query.execute(), 2) # Verify the source data was updated. query = Data.select(Data.key, Data.value).order_by(Data.key) self.assertSQL(query, ( 'SELECT "t1"."key", "t1"."value" ' 'FROM "main"."data" AS "t1" ' 'ORDER BY "t1"."key"'), []) self.assertEqual([v for k, v in query.tuples()], ['v1-x', 'v2-x']) # Verify the cached data reflects the original data, pre-update. query = (CacheData .select(CacheData.key, CacheData.value) .order_by(CacheData.key)) self.assertSQL(query, ( 'SELECT "t1"."key", "t1"."value" ' 'FROM "cache"."cache_data" AS "t1" ' 'ORDER BY "t1"."key"'), []) self.assertEqual([v for k, v in query.tuples()], ['v1', 'v2']) database.close() # On re-connecting, the in-memory database will be re-attached. database.connect() # CacheData table does not exist. self.assertFalse(CacheData.table_exists()) # Double-check the sqlite master table. curs = database.execute_sql('select * from cache.sqlite_master;') self.assertEqual(curs.fetchall(), []) # Because it's in-memory, the table needs to be re-created. CacheData.create_table(safe=False) self.assertEqual(CacheData.select().count(), 0) # Original data is still there. self.assertEqual(Data.select().count(), 2) def test_attach_detach(self): database = self.database Data.create(key='k1', value='v1') Data.create(key='k2', value='v2') # Attach an in-memory cache database.
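# ----------------------------------------------------------------------
# Aside: a sketch of SqliteDatabase.attach(), which registers a second
# database to be ATTACH-ed on every connection. Paths and the table are
# illustrative.
from peewee import SqliteDatabase

db = SqliteDatabase(':memory:')
db.attach(':memory:', 'cache')  # Re-attached automatically on reconnect.
db.connect()
db.execute_sql('CREATE TABLE cache.kv (k TEXT, v TEXT)')
db.detach('cache')
# ----------------------------------------------------------------------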
database.attach(':memory:', 'cache') curs = database.execute_sql('select * from cache.sqlite_master') self.assertEqual(curs.fetchall(), []) self.assertFalse(database.attach(':memory:', 'cache')) self.assertRaises(OperationalError, database.attach, 'foo.db', 'cache') self.assertTrue(database.detach('cache')) self.assertFalse(database.detach('cache')) self.assertRaises(OperationalError, database.execute_sql, 'select * from cache.sqlite_master') def test_sqlite_schema_support(self): class CacheData(Data): class Meta: schema = 'cache' # Attach an in-memory cache database and create the cache table. self.database.attach(':memory:', 'cache') CacheData.create_table() tables = self.database.get_tables() self.assertEqual(tables, ['data']) tables = self.database.get_tables(schema='cache') self.assertEqual(tables, ['cache_data']) class TestDatabaseConnection(DatabaseTestCase): def test_is_connection_usable(self): # Ensure a connection is open. conn = self.database.connection() self.assertTrue(self.database.is_connection_usable()) self.database.close() self.assertFalse(self.database.is_connection_usable()) self.database.connect() self.assertTrue(self.database.is_connection_usable()) @requires_postgresql def test_is_connection_usable_pg(self): self.database.execute_sql('drop table if exists foo') self.database.execute_sql('create table foo (data text not null)') self.assertTrue(self.database.is_connection_usable()) with self.database.atomic() as txn: with self.assertRaises(IntegrityError): self.database.execute_sql('insert into foo (data) values (NULL)') self.assertFalse(self.database.is_closed()) self.assertFalse(self.database.is_connection_usable()) txn.rollback() self.assertTrue(self.database.is_connection_usable()) curs = self.database.execute_sql('select * from foo') self.assertEqual(list(curs), []) self.database.execute_sql('drop table foo') class TestExceptionWrapper(ModelTestCase): database = get_in_memory_db() requires = [User] def test_exception_wrapper(self): exc = None try: User.create(username=None) except IntegrityError as e: exc = e if exc is None: raise Exception('expected integrity error not raised') self.assertTrue(exc.orig.__module__ != 'peewee') class TestModelPropertyHelper(BaseTestCase): def test_model_property(self): database = get_in_memory_db() class M1(database.Model): pass class M2(database.Model): pass class CM1(M1): pass for M in (M1, M2, CM1): self.assertTrue(M._meta.database is database) def test_model_property_on_proxy(self): db = DatabaseProxy() class M1(db.Model): pass class M2(db.Model): pass class CM1(M1): pass test_db = get_in_memory_db() db.initialize(test_db) for M in (M1, M2, CM1): self.assertEqual(M._meta.database.database, ':memory:') peewee-3.17.7/tests/db_url.py000066400000000000000000000060141470346076600160640ustar00rootroot00000000000000from peewee import * from playhouse.db_url import connect from playhouse.db_url import parse from .base import BaseTestCase class TestDBUrl(BaseTestCase): def test_db_url_parse(self): cfg = parse('mysql://usr:pwd@hst:123/db') self.assertEqual(cfg['user'], 'usr') self.assertEqual(cfg['passwd'], 'pwd') self.assertEqual(cfg['host'], 'hst') self.assertEqual(cfg['database'], 'db') self.assertEqual(cfg['port'], 123) cfg = parse('postgresql://usr:pwd@hst/db') self.assertEqual(cfg['password'], 'pwd') cfg = parse('mysql+pool://usr:pwd@hst:123/db' '?max_connections=42&stale_timeout=8001.2&zai=&baz=3.4.5' '&boolz=false') self.assertEqual(cfg['user'], 'usr') self.assertEqual(cfg['password'], 'pwd') self.assertEqual(cfg['host'], 'hst') 
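# ----------------------------------------------------------------------
# Aside: a sketch of playhouse.db_url, the module exercised by TestDBUrl.
# URLs are illustrative.
from playhouse.db_url import connect, parse

db = connect('sqlite:///:memory:')  # Returns a SqliteDatabase.
cfg = parse('postgresql://usr:pwd@localhost:5432/app')
# cfg includes 'user', 'password', 'host', 'port' and 'database' keys.
# ----------------------------------------------------------------------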
self.assertEqual(cfg['database'], 'db') self.assertEqual(cfg['port'], 123) self.assertEqual(cfg['max_connections'], 42) self.assertEqual(cfg['stale_timeout'], 8001.2) self.assertEqual(cfg['zai'], '') self.assertEqual(cfg['baz'], '3.4.5') self.assertEqual(cfg['boolz'], False) def test_db_url_quoted_password(self): # By default, the password is not unescaped. cfg = parse('mysql://usr:pwd%23%20@hst:123/db') self.assertEqual(cfg['user'], 'usr') self.assertEqual(cfg['passwd'], 'pwd%23%20') self.assertEqual(cfg['host'], 'hst') self.assertEqual(cfg['database'], 'db') self.assertEqual(cfg['port'], 123) cfg = parse('mysql://usr:pwd%23%20@hst:123/db', unquote_password=True) self.assertEqual(cfg['user'], 'usr') self.assertEqual(cfg['passwd'], 'pwd# ') self.assertEqual(cfg['host'], 'hst') self.assertEqual(cfg['database'], 'db') self.assertEqual(cfg['port'], 123) def test_db_url(self): db = connect('sqlite:///:memory:') self.assertTrue(isinstance(db, SqliteDatabase)) self.assertEqual(db.database, ':memory:') db = connect('sqlite:///:memory:', pragmas=( ('journal_mode', 'MEMORY'),)) self.assertTrue(('journal_mode', 'MEMORY') in db._pragmas) #db = connect('sqliteext:///foo/bar.db') #self.assertTrue(isinstance(db, SqliteExtDatabase)) #self.assertEqual(db.database, 'foo/bar.db') db = connect('sqlite:////this/is/absolute.path') self.assertEqual(db.database, '/this/is/absolute.path') db = connect('sqlite://') self.assertTrue(isinstance(db, SqliteDatabase)) self.assertEqual(db.database, ':memory:') db = connect('sqlite:///test.db?p1=1?a&p2=22&p3=xyz') self.assertTrue(isinstance(db, SqliteDatabase)) self.assertEqual(db.database, 'test.db') self.assertEqual(db.connect_params, { 'p1': '1?a', 'p2': 22, 'p3': 'xyz'}) def test_bad_scheme(self): def _test_scheme(): connect('missing:///') self.assertRaises(RuntimeError, _test_scheme) peewee-3.17.7/tests/expressions.py000066400000000000000000000154451470346076600172070ustar00rootroot00000000000000from peewee import * from .base import IS_SQLITE from .base import ModelTestCase from .base import TestModel from .base import get_in_memory_db from .base import skip_if class Person(TestModel): name = CharField() class BaseNamesTest(ModelTestCase): requires = [Person] def assertNames(self, exp, x): query = Person.select().where(exp).order_by(Person.name) self.assertEqual([p.name for p in query], x) class TestRegexp(BaseNamesTest): @skip_if(IS_SQLITE) def test_regexp_iregexp(self): people = [Person.create(name=name) for name in ('n1', 'n2', 'n3')] self.assertNames(Person.name.regexp('n[1,3]'), ['n1', 'n3']) self.assertNames(Person.name.regexp('N[1,3]'), []) self.assertNames(Person.name.iregexp('n[1,3]'), ['n1', 'n3']) self.assertNames(Person.name.iregexp('N[1,3]'), ['n1', 'n3']) class TestContains(BaseNamesTest): def test_contains_startswith_endswith(self): people = [Person.create(name=n) for n in ('huey', 'mickey', 'zaizee')] self.assertNames(Person.name.contains('ey'), ['huey', 'mickey']) self.assertNames(Person.name.contains('EY'), ['huey', 'mickey']) self.assertNames(Person.name.startswith('m'), ['mickey']) self.assertNames(Person.name.startswith('M'), ['mickey']) self.assertNames(Person.name.endswith('ey'), ['huey', 'mickey']) self.assertNames(Person.name.endswith('EY'), ['huey', 'mickey']) class UpperField(TextField): def db_value(self, value): return fn.UPPER(value) class UpperModel(TestModel): name = UpperField() class TestValueConversion(ModelTestCase): """ Test the conversion of field values using a field's db_value() function. 
It is possible that a field's `db_value()` function may return a Node subclass (e.g. a SQL function). These tests verify and document how such conversions are applied in various parts of the query. """ database = get_in_memory_db() requires = [UpperModel] def test_value_conversion(self): # Ensure value is converted on INSERT. insert = UpperModel.insert({UpperModel.name: 'huey'}) self.assertSQL(insert, ( 'INSERT INTO "upper_model" ("name") VALUES (UPPER(?))'), ['huey']) uid = insert.execute() obj = UpperModel.get(UpperModel.id == uid) self.assertEqual(obj.name, 'HUEY') # Ensure value is converted on UPDATE. update = (UpperModel .update({UpperModel.name: 'zaizee'}) .where(UpperModel.id == uid)) self.assertSQL(update, ( 'UPDATE "upper_model" SET "name" = UPPER(?) ' 'WHERE ("upper_model"."id" = ?)'), ['zaizee', uid]) update.execute() # Ensure it works with SELECT (or more generally, WHERE expressions). select = UpperModel.select().where(UpperModel.name == 'zaizee') self.assertSQL(select, ( 'SELECT "t1"."id", "t1"."name" FROM "upper_model" AS "t1" ' 'WHERE ("t1"."name" = UPPER(?))'), ['zaizee']) obj = select.get() self.assertEqual(obj.name, 'ZAIZEE') # Ensure it works with DELETE. delete = UpperModel.delete().where(UpperModel.name == 'zaizee') self.assertSQL(delete, ( 'DELETE FROM "upper_model" ' 'WHERE ("upper_model"."name" = UPPER(?))'), ['zaizee']) self.assertEqual(delete.execute(), 1) def test_value_conversion_mixed(self): um = UpperModel.create(name='huey') # If we apply a function to the field, the conversion is not applied. sq = UpperModel.select().where(fn.SUBSTR(UpperModel.name, 1, 1) == 'h') self.assertSQL(sq, ( 'SELECT "t1"."id", "t1"."name" FROM "upper_model" AS "t1" ' 'WHERE (SUBSTR("t1"."name", ?, ?) = ?)'), [1, 1, 'h']) self.assertRaises(UpperModel.DoesNotExist, sq.get) # If we encapsulate the object as a value, the conversion is applied. sq = UpperModel.select().where(UpperModel.name == Value('huey')) self.assertSQL(sq, ( 'SELECT "t1"."id", "t1"."name" FROM "upper_model" AS "t1" ' 'WHERE ("t1"."name" = UPPER(?))'), ['huey']) self.assertEqual(sq.get().id, um.id) # Unless we explicitly pass converter=False. sq = UpperModel.select().where(UpperModel.name == Value('huey', False)) self.assertSQL(sq, ( 'SELECT "t1"."id", "t1"."name" FROM "upper_model" AS "t1" ' 'WHERE ("t1"."name" = ?)'), ['huey']) self.assertRaises(UpperModel.DoesNotExist, sq.get) # If we specify explicit SQL on the rhs, the conversion is not applied. sq = UpperModel.select().where(UpperModel.name == SQL('?', ['huey'])) self.assertSQL(sq, ( 'SELECT "t1"."id", "t1"."name" FROM "upper_model" AS "t1" ' 'WHERE ("t1"."name" = ?)'), ['huey']) self.assertRaises(UpperModel.DoesNotExist, sq.get) # Function arguments are not coerced. sq = UpperModel.select().where(UpperModel.name == fn.LOWER('huey')) self.assertSQL(sq, ( 'SELECT "t1"."id", "t1"."name" FROM "upper_model" AS "t1" ' 'WHERE ("t1"."name" = LOWER(?))'), ['huey']) self.assertRaises(UpperModel.DoesNotExist, sq.get) def test_value_conversion_query(self): um = UpperModel.create(name='huey') UM = UpperModel.alias() subq = UM.select(UM.name).where(UM.name == 'huey') # Select from WHERE ... IN (subquery). query = UpperModel.select().where(UpperModel.name.in_(subq)) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."name" FROM "upper_model" AS "t1" ' 'WHERE ("t1"."name" IN (' 'SELECT "t2"."name" FROM "upper_model" AS "t2" ' 'WHERE ("t2"."name" = UPPER(?))))'), ['huey']) self.assertEqual(query.get().id, um.id) # Join on sub-query.
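# ----------------------------------------------------------------------
# Aside: a minimal custom-field sketch showing the two conversion hooks
# these tests revolve around. CSVField is hypothetical (it does not ship
# with peewee).
from peewee import TextField

class CSVField(TextField):
    def db_value(self, value):
        # Python list -> database text.
        return ','.join(value) if value is not None else None

    def python_value(self, value):
        # Database text -> Python list.
        return value.split(',') if value else []
# ----------------------------------------------------------------------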
query = (UpperModel .select() .join(subq, on=(UpperModel.name == subq.c.name))) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."name" FROM "upper_model" AS "t1" ' 'INNER JOIN (SELECT "t2"."name" FROM "upper_model" AS "t2" ' 'WHERE ("t2"."name" = UPPER(?))) AS "t3" ' 'ON ("t1"."name" = "t3"."name")'), ['huey']) row = query.tuples().get() self.assertEqual(row, (um.id, 'HUEY')) def test_having_clause(self): query = (UpperModel .select(UpperModel.name, fn.COUNT(UpperModel.id).alias('ct')) .group_by(UpperModel.name) .having(UpperModel.name == 'huey')) self.assertSQL(query, ( 'SELECT "t1"."name", COUNT("t1"."id") AS "ct" ' 'FROM "upper_model" AS "t1" ' 'GROUP BY "t1"."name" ' 'HAVING ("t1"."name" = UPPER(?))'), ['huey']) peewee-3.17.7/tests/extra_fields.py000066400000000000000000000027201470346076600172660ustar00rootroot00000000000000from peewee import * from playhouse.fields import CompressedField from playhouse.fields import PickleField from .base import db from .base import ModelTestCase from .base import TestModel class Comp(TestModel): key = TextField() data = CompressedField() class Pickled(TestModel): key = TextField() data = PickleField() class TestCompressedField(ModelTestCase): requires = [Comp] def test_compressed_field(self): a = b'a' * 1024 b = b'b' * 1024 Comp.create(data=a, key='a') Comp.create(data=b, key='b') a_db = Comp.get(Comp.key == 'a') self.assertEqual(a_db.data, a) b_db = Comp.get(Comp.key == 'b') self.assertEqual(b_db.data, b) # Get at the underlying data. CompTbl = Table('comp', ('id', 'data', 'key')).bind(self.database) obj = CompTbl.select().where(CompTbl.key == 'a').get() self.assertEqual(obj['key'], 'a') # Ensure that the data actually was compressed. self.assertTrue(len(obj['data']) < 1024) class TestPickleField(ModelTestCase): requires = [Pickled] def test_pickle_field(self): a = {'k1': 'v1', 'k2': [0, 1, 2], 'k3': None} b = 'just a string' Pickled.create(data=a, key='a') Pickled.create(data=b, key='b') a_db = Pickled.get(Pickled.key == 'a') self.assertEqual(a_db.data, a) b_db = Pickled.get(Pickled.key == 'b') self.assertEqual(b_db.data, b) peewee-3.17.7/tests/fields.py000066400000000000000000001441611470346076600160710ustar00rootroot00000000000000import calendar import datetime import sqlite3 import time import uuid from decimal import Decimal as D from decimal import ROUND_UP from peewee import bytes_type from peewee import NodeList from peewee import * from .base import BaseTestCase from .base import IS_CRDB from .base import IS_MYSQL from .base import IS_POSTGRESQL from .base import IS_SQLITE from .base import ModelTestCase from .base import TestModel from .base import db from .base import get_in_memory_db from .base import requires_models from .base import requires_mysql from .base import requires_pglike from .base import requires_sqlite from .base import skip_if from .base_models import Tweet from .base_models import User class IntModel(TestModel): value = IntegerField() value_null = IntegerField(null=True) class TestCoerce(ModelTestCase): requires = [IntModel] def test_coerce(self): i = IntModel.create(value='1337', value_null=3.14159) i_db = IntModel.get(IntModel.id == i.id) self.assertEqual(i_db.value, 1337) self.assertEqual(i_db.value_null, 3) class DefaultValues(TestModel): data = IntegerField(default=17) data_callable = IntegerField(default=lambda: 1337) class TestTextField(TextField): def first_char(self): return fn.SUBSTR(self, 1, 1) class PhoneBook(TestModel): name = TestTextField() class Bits(TestModel): F_STICKY = 1 F_FAVORITE = 2 F_MINIMIZED = 4 
flags = BitField() is_sticky = flags.flag(F_STICKY) is_favorite = flags.flag(F_FAVORITE) is_minimized = flags.flag(F_MINIMIZED) data = BigBitField() class TestDefaultValues(ModelTestCase): requires = [DefaultValues] def test_default_values(self): d = DefaultValues() self.assertEqual(d.data, 17) self.assertEqual(d.data_callable, 1337) d.save() d_db = DefaultValues.get(DefaultValues.id == d.id) self.assertEqual(d_db.data, 17) self.assertEqual(d_db.data_callable, 1337) def test_defaults_create(self): d = DefaultValues.create() self.assertEqual(d.data, 17) self.assertEqual(d.data_callable, 1337) d_db = DefaultValues.get(DefaultValues.id == d.id) self.assertEqual(d_db.data, 17) self.assertEqual(d_db.data_callable, 1337) class TestNullConstraint(ModelTestCase): requires = [IntModel] def test_null(self): i = IntModel.create(value=1) i_db = IntModel.get(IntModel.value == 1) self.assertIsNone(i_db.value_null) def test_empty_value(self): with self.database.atomic(): with self.assertRaisesCtx(IntegrityError): IntModel.create(value=None) class TestIntegerField(ModelTestCase): requires = [IntModel] def test_integer_field(self): i1 = IntModel.create(value=1) i2 = IntModel.create(value=2, value_null=20) vals = [(i.value, i.value_null) for i in IntModel.select().order_by(IntModel.value)] self.assertEqual(vals, [ (1, None), (2, 20)]) class FloatModel(TestModel): value = FloatField() value_null = FloatField(null=True) class TestFloatField(ModelTestCase): requires = [FloatModel] def test_float_field(self): f1 = FloatModel.create(value=1.23) f2 = FloatModel.create(value=3.14, value_null=0.12) query = FloatModel.select().order_by(FloatModel.id) self.assertEqual([(f.value, f.value_null) for f in query], [(1.23, None), (3.14, 0.12)]) class DecimalModel(TestModel): value = DecimalField(decimal_places=2, auto_round=True) value_up = DecimalField(decimal_places=2, auto_round=True, rounding=ROUND_UP, null=True) class TestDecimalField(ModelTestCase): requires = [DecimalModel] def test_decimal_field(self): d1 = DecimalModel.create(value=D('3')) d2 = DecimalModel.create(value=D('100.33')) self.assertEqual(sorted(d.value for d in DecimalModel.select()), [D('3'), D('100.33')]) def test_decimal_rounding(self): d = DecimalModel.create(value=D('1.2345'), value_up=D('1.2345')) d_db = DecimalModel.get(DecimalModel.id == d.id) self.assertEqual(d_db.value, D('1.23')) self.assertEqual(d_db.value_up, D('1.24')) class BoolModel(TestModel): value = BooleanField(null=True) name = CharField() class TestBooleanField(ModelTestCase): requires = [BoolModel] def test_boolean_field(self): BoolModel.create(value=True, name='t') BoolModel.create(value=False, name='f') BoolModel.create(value=None, name='n') vals = sorted((b.name, b.value) for b in BoolModel.select()) self.assertEqual(vals, [ ('f', False), ('n', None), ('t', True)]) class DateModel(TestModel): date = DateField(null=True) time = TimeField(null=True) date_time = DateTimeField(null=True) class CustomDateTimeModel(TestModel): date_time = DateTimeField(formats=[ '%m/%d/%Y %I:%M %p', '%Y-%m-%d %H:%M:%S']) class TestDateFields(ModelTestCase): requires = [DateModel] @requires_models(CustomDateTimeModel) def test_date_time_custom_format(self): cdtm = CustomDateTimeModel.create(date_time='01/02/2003 01:37 PM') cdtm_db = CustomDateTimeModel[cdtm.id] self.assertEqual(cdtm_db.date_time, datetime.datetime(2003, 1, 2, 13, 37, 0)) def test_date_fields(self): dt1 = datetime.datetime(2011, 1, 2, 11, 12, 13, 54321) dt2 = datetime.datetime(2011, 1, 2, 11, 12, 13) d1 = datetime.date(2011, 1, 3) t1 
= datetime.time(11, 12, 13, 54321) t2 = datetime.time(11, 12, 13) if isinstance(self.database, MySQLDatabase): dt1 = dt1.replace(microsecond=0) t1 = t1.replace(microsecond=0) dm1 = DateModel.create(date_time=dt1, date=d1, time=t1) dm2 = DateModel.create(date_time=dt2, time=t2) dm1_db = DateModel.get(DateModel.id == dm1.id) self.assertEqual(dm1_db.date, d1) self.assertEqual(dm1_db.date_time, dt1) self.assertEqual(dm1_db.time, t1) dm2_db = DateModel.get(DateModel.id == dm2.id) self.assertEqual(dm2_db.date, None) self.assertEqual(dm2_db.date_time, dt2) self.assertEqual(dm2_db.time, t2) def test_extract_parts(self): dm = DateModel.create( date_time=datetime.datetime(2011, 1, 2, 11, 12, 13, 54321), date=datetime.date(2012, 2, 3), time=datetime.time(3, 13, 37)) query = (DateModel .select(DateModel.date_time.year, DateModel.date_time.month, DateModel.date_time.day, DateModel.date_time.hour, DateModel.date_time.minute, DateModel.date_time.second, DateModel.date.year, DateModel.date.month, DateModel.date.day, DateModel.time.hour, DateModel.time.minute, DateModel.time.second) .tuples()) row, = query if IS_SQLITE or IS_MYSQL: self.assertEqual(row, (2011, 1, 2, 11, 12, 13, 2012, 2, 3, 3, 13, 37)) else: self.assertTrue(row in [ (2011., 1., 2., 11., 12., 13.054321, 2012., 2., 3., 3., 13., 37.), (D('2011'), D('1'), D('2'), D('11'), D('12'), D('13.054321'), D('2012'), D('2'), D('3'), D('3'), D('13'), D('37'))]) def test_truncate_date(self): dm = DateModel.create( date_time=datetime.datetime(2001, 2, 3, 4, 5, 6, 7), date=datetime.date(2002, 3, 4)) accum = [] for p in ('year', 'month', 'day', 'hour', 'minute', 'second'): accum.append(DateModel.date_time.truncate(p)) for p in ('year', 'month', 'day'): accum.append(DateModel.date.truncate(p)) query = DateModel.select(*accum).tuples() data = list(query[0]) # Postgres includes timezone info, so strip that for comparison. 
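# ----------------------------------------------------------------------
# Aside: a sketch of the date-part helpers exercised above. Event is a
# hypothetical model.
from peewee import DateTimeField, Model, SqliteDatabase

db = SqliteDatabase(':memory:')

class Event(Model):
    timestamp = DateTimeField()
    class Meta:
        database = db

# Extract parts, truncate to a bucket, or convert to a unix timestamp:
q1 = Event.select(Event.timestamp.year, Event.timestamp.month)
q2 = Event.select(Event.timestamp.truncate('month'))
q3 = Event.select(Event.timestamp.to_timestamp())
# ----------------------------------------------------------------------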
if IS_POSTGRESQL or IS_CRDB: data = [dt.replace(tzinfo=None) for dt in data] self.assertEqual(data, [ datetime.datetime(2001, 1, 1, 0, 0, 0), datetime.datetime(2001, 2, 1, 0, 0, 0), datetime.datetime(2001, 2, 3, 0, 0, 0), datetime.datetime(2001, 2, 3, 4, 0, 0), datetime.datetime(2001, 2, 3, 4, 5, 0), datetime.datetime(2001, 2, 3, 4, 5, 6), datetime.datetime(2002, 1, 1, 0, 0, 0), datetime.datetime(2002, 3, 1, 0, 0, 0), datetime.datetime(2002, 3, 4, 0, 0, 0)]) def test_to_timestamp(self): dt = datetime.datetime(2019, 1, 2, 3, 4, 5) ts = calendar.timegm(dt.utctimetuple()) dt2 = datetime.datetime(2019, 1, 3) ts2 = calendar.timegm(dt2.utctimetuple()) DateModel.create(date_time=dt, date=dt2.date()) query = DateModel.select( DateModel.id, DateModel.date_time.to_timestamp().alias('dt_ts'), DateModel.date.to_timestamp().alias('dt2_ts')) obj = query.get() self.assertEqual(obj.dt_ts, ts) self.assertEqual(obj.dt2_ts, ts2) ts3 = ts + 86400 query = (DateModel.select() .where((DateModel.date_time.to_timestamp() + 86400) < ts3)) self.assertRaises(DateModel.DoesNotExist, query.get) query = (DateModel.select() .where((DateModel.date.to_timestamp() + 86400) > ts3)) self.assertEqual(query.get().id, obj.id) def test_distinct_date_part(self): years = (1980, 1990, 2000, 2010) for i, year in enumerate(years): for j in range(i + 1): DateModel.create(date=datetime.date(year, i + 1, 1)) query = (DateModel .select(DateModel.date.year.distinct()) .order_by(DateModel.date.year)) self.assertEqual([year for year, in query.tuples()], [1980, 1990, 2000, 2010]) class U2(TestModel): username = TextField() class T2(TestModel): user = ForeignKeyField(U2, backref='tweets', on_delete='CASCADE') content = TextField() class TestForeignKeyField(ModelTestCase): requires = [User, Tweet] def test_set_fk(self): huey = User.create(username='huey') zaizee = User.create(username='zaizee') # Test resolution of attributes after creation does not trigger SELECT. with self.assertQueryCount(1): tweet = Tweet.create(content='meow', user=huey) self.assertEqual(tweet.user.username, 'huey') # Test we can set to an integer, in which case a query will occur. with self.assertQueryCount(2): tweet = Tweet.create(content='purr', user=zaizee.id) self.assertEqual(tweet.user.username, 'zaizee') # Test we can set the ID accessor directly. with self.assertQueryCount(2): tweet = Tweet.create(content='hiss', user_id=huey.id) self.assertEqual(tweet.user.username, 'huey') def test_follow_attributes(self): huey = User.create(username='huey') Tweet.create(content='meow', user=huey) Tweet.create(content='hiss', user=huey) with self.assertQueryCount(1): query = (Tweet .select(Tweet.content, Tweet.user.username) .join(User) .order_by(Tweet.content)) self.assertEqual([(tweet.content, tweet.user.username) for tweet in query], [('hiss', 'huey'), ('meow', 'huey')]) self.assertRaises(AttributeError, lambda: Tweet.user.foo) def test_disable_backref(self): class Person(TestModel): pass class Pet(TestModel): owner = ForeignKeyField(Person, backref='!') self.assertEqual(Pet.owner.backref, '!') # No attribute/accessor is added to the related model. self.assertRaises(AttributeError, lambda: Person.pet_set) # We still preserve the metadata about the relationship. 
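# ----------------------------------------------------------------------
# Aside: a sketch of ForeignKeyField with ON DELETE CASCADE, mirroring
# the U2/T2 models above. Models are illustrative; note that SQLite only
# enforces foreign keys when the foreign_keys pragma is enabled.
from peewee import ForeignKeyField, Model, SqliteDatabase, TextField

db = SqliteDatabase(':memory:', pragmas={'foreign_keys': 1})

class Author(Model):
    name = TextField()
    class Meta:
        database = db

class Book(Model):
    author = ForeignKeyField(Author, backref='books', on_delete='CASCADE')
    title = TextField()
    class Meta:
        database = db

db.create_tables([Author, Book])
a = Author.create(name='a1')
Book.create(author=a, title='b1')
Author.delete().where(Author.name == 'a1').execute()
assert Book.select().count() == 0  # The book was removed by the cascade.
# ----------------------------------------------------------------------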
self.assertTrue(Pet.owner in Person._meta.backrefs) @requires_models(U2, T2) def test_on_delete_behavior(self): if IS_SQLITE: self.database.foreign_keys = 1 with self.database.atomic(): for username in ('u1', 'u2', 'u3'): user = U2.create(username=username) for i in range(3): T2.create(user=user, content='%s-%s' % (username, i)) self.assertEqual(T2.select().count(), 9) U2.delete().where(U2.username == 'u2').execute() self.assertEqual(T2.select().count(), 6) query = (U2 .select(U2.username, fn.COUNT(T2.id).alias('ct')) .join(T2, JOIN.LEFT_OUTER) .group_by(U2.username) .order_by(U2.username)) self.assertEqual([(u.username, u.ct) for u in query], [ ('u1', 3), ('u3', 3)]) class M1(TestModel): name = CharField(primary_key=True) m2 = DeferredForeignKey('M2', deferrable='INITIALLY DEFERRED', on_delete='CASCADE') class M2(TestModel): name = CharField(primary_key=True) m1 = ForeignKeyField(M1, deferrable='INITIALLY DEFERRED', on_delete='CASCADE') @skip_if(IS_MYSQL) @skip_if(IS_CRDB, 'crdb does not support deferred foreign-key constraints') class TestDeferredForeignKey(ModelTestCase): requires = [M1, M2] def test_deferred_foreign_key(self): with self.database.atomic(): m1 = M1.create(name='m1', m2='m2') m2 = M2.create(name='m2', m1='m1') m1_db = M1.get(M1.name == 'm1') self.assertEqual(m1_db.m2.name, 'm2') m2_db = M2.get(M2.name == 'm2') self.assertEqual(m2_db.m1.name, 'm1') class TestDeferredForeignKeyResolution(ModelTestCase): def test_unresolved_deferred_fk(self): class Photo(Model): album = DeferredForeignKey('Album', column_name='id_album') class Meta: database = get_in_memory_db() self.assertSQL(Photo.select(), ( 'SELECT "t1"."id", "t1"."id_album" FROM "photo" AS "t1"'), []) def test_deferred_foreign_key_resolution(self): class Base(Model): class Meta: database = get_in_memory_db() class Photo(Base): album = DeferredForeignKey('Album', column_name='id_album', null=False, backref='pictures') alt_album = DeferredForeignKey('Album', column_name='id_Alt_album', field='alt_id', backref='alt_pix', null=True) class Album(Base): name = TextField() alt_id = IntegerField(column_name='_Alt_id') self.assertTrue(Photo.album.rel_model is Album) self.assertTrue(Photo.album.rel_field is Album.id) self.assertEqual(Photo.album.column_name, 'id_album') self.assertFalse(Photo.album.null) self.assertTrue(Photo.alt_album.rel_model is Album) self.assertTrue(Photo.alt_album.rel_field is Album.alt_id) self.assertEqual(Photo.alt_album.column_name, 'id_Alt_album') self.assertTrue(Photo.alt_album.null) self.assertSQL(Photo._schema._create_table(), ( 'CREATE TABLE IF NOT EXISTS "photo" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"id_album" INTEGER NOT NULL, ' '"id_Alt_album" INTEGER)'), []) self.assertSQL(Photo._schema._create_foreign_key(Photo.album), ( 'ALTER TABLE "photo" ADD CONSTRAINT "fk_photo_id_album_refs_album"' ' FOREIGN KEY ("id_album") REFERENCES "album" ("id")')) self.assertSQL(Photo._schema._create_foreign_key(Photo.alt_album), ( 'ALTER TABLE "photo" ADD CONSTRAINT ' '"fk_photo_id_Alt_album_refs_album"' ' FOREIGN KEY ("id_Alt_album") REFERENCES "album" ("_Alt_id")')) self.assertSQL(Photo.select(), ( 'SELECT "t1"."id", "t1"."id_album", "t1"."id_Alt_album" ' 'FROM "photo" AS "t1"'), []) a = Album(id=3, alt_id=4) self.assertSQL(a.pictures, ( 'SELECT "t1"."id", "t1"."id_album", "t1"."id_Alt_album" ' 'FROM "photo" AS "t1" WHERE ("t1"."id_album" = ?)'), [3]) self.assertSQL(a.alt_pix, ( 'SELECT "t1"."id", "t1"."id_album", "t1"."id_Alt_album" ' 'FROM "photo" AS "t1" WHERE ("t1"."id_Alt_album" = ?)'), [4]) class 
Composite(TestModel): first = CharField() last = CharField() data = TextField() class Meta: primary_key = CompositeKey('first', 'last') class TestCompositePrimaryKeyField(ModelTestCase): requires = [Composite] def test_composite_primary_key(self): pass class TestFieldFunction(ModelTestCase): requires = [PhoneBook] def setUp(self): super(TestFieldFunction, self).setUp() names = ('huey', 'mickey', 'zaizee', 'beanie', 'scout', 'hallee') for name in names: PhoneBook.create(name=name) def _test_field_function(self, PB): query = (PB .select() .where(PB.name.first_char() == 'h') .order_by(PB.name)) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."name" ' 'FROM "phone_book" AS "t1" ' 'WHERE (SUBSTR("t1"."name", ?, ?) = ?) ' 'ORDER BY "t1"."name"'), [1, 1, 'h']) self.assertEqual([pb.name for pb in query], ['hallee', 'huey']) def test_field_function(self): self._test_field_function(PhoneBook) def test_field_function_alias(self): self._test_field_function(PhoneBook.alias()) class IPModel(TestModel): ip = IPField() ip_null = IPField(null=True) class TestIPField(ModelTestCase): requires = [IPModel] def test_ip_field(self): ips = ('0.0.0.0', '255.255.255.255', '192.168.1.1') for ip in ips: i = IPModel.create(ip=ip) i_db = IPModel.get(ip=ip) self.assertEqual(i_db.ip, ip) self.assertEqual(i_db.ip_null, None) class TestBitFields(ModelTestCase): requires = [Bits] def test_bit_field_update(self): def assertFlags(expected): query = Bits.select().order_by(Bits.id) self.assertEqual([b.flags for b in query], expected) # Bits - flags (1=sticky, 2=favorite, 4=minimized) for i in range(1, 5): Bits.create(flags=i) Bits.update(flags=Bits.flags & ~2).execute() assertFlags([1, 0, 1, 4]) Bits.update(flags=Bits.flags | 2).execute() assertFlags([3, 2, 3, 6]) Bits.update(flags=Bits.is_favorite.clear()).execute() assertFlags([1, 0, 1, 4]) Bits.update(flags=Bits.is_favorite.set()).execute() assertFlags([3, 2, 3, 6]) # Clear multiple bits in one operation. Bits.update(flags=Bits.flags & ~(1 | 4)).execute() assertFlags([2, 2, 2, 2]) def test_bit_field_auto_flag(self): class Bits2(TestModel): flags = BitField() f1 = flags.flag() # Automatically gets 1. f2 = flags.flag() # 2 f4 = flags.flag() # 4 f16 = flags.flag(16) f32 = flags.flag() # 32 b = Bits2() self.assertEqual(b.flags, 0) b.f1 = True self.assertEqual(b.flags, 1) b.f4 = True self.assertEqual(b.flags, 5) b.f32 = True self.assertEqual(b.flags, 37) def test_bit_field_instance_flags(self): b = Bits() self.assertEqual(b.flags, 0) self.assertFalse(b.is_sticky) self.assertFalse(b.is_favorite) self.assertFalse(b.is_minimized) b.is_sticky = True b.is_minimized = True self.assertEqual(b.flags, 5) # 1 | 4 self.assertTrue(b.is_sticky) self.assertFalse(b.is_favorite) self.assertTrue(b.is_minimized) b.flags = 3 self.assertTrue(b.is_sticky) self.assertTrue(b.is_favorite) self.assertFalse(b.is_minimized) def test_bit_field(self): b1 = Bits.create(flags=1) b2 = Bits.create(flags=2) b3 = Bits.create(flags=3) query = Bits.select().where(Bits.is_sticky).order_by(Bits.id) self.assertEqual([x.id for x in query], [b1.id, b3.id]) query = Bits.select().where(Bits.is_favorite).order_by(Bits.id) self.assertEqual([x.id for x in query], [b2.id, b3.id]) query = Bits.select().where(~Bits.is_favorite).order_by(Bits.id) self.assertEqual([x.id for x in query], [b1.id]) # "&" operator does bitwise and for BitField. query = Bits.select().where((Bits.flags & 1) == 1).order_by(Bits.id) self.assertEqual([x.id for x in query], [b1.id, b3.id]) # Test combining multiple bit expressions. 
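
# How the BitField flags exercised above are typically declared and used in
# application code. A self-contained sketch; the FlagPost model is
# hypothetical:

from peewee import Model, BitField, SqliteDatabase

bits_db = SqliteDatabase(':memory:')

class FlagPost(Model):
    flags = BitField()
    is_sticky = flags.flag(1)
    is_favorite = flags.flag(2)
    is_minimized = flags.flag(4)
    class Meta:
        database = bits_db

bits_db.create_tables([FlagPost])
p = FlagPost.create(flags=0)
p.is_sticky = True   # Flips bit 1 on the instance.
p.save()

sticky = FlagPost.select().where(FlagPost.is_sticky)   # Bit-test in SQL.
FlagPost.update(flags=FlagPost.flags | 2).execute()    # Set bit 2 everywhere.
assert all(q.is_favorite for q in FlagPost.select())
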
query = Bits.select().where(Bits.is_sticky & Bits.is_favorite) self.assertEqual([x.id for x in query], [b3.id]) query = Bits.select().where(Bits.is_sticky & ~Bits.is_favorite) self.assertEqual([x.id for x in query], [b1.id]) def test_bigbit_field_instance_data(self): b = Bits() values_to_set = (1, 11, 63, 31, 55, 48, 100, 99) for value in values_to_set: b.data.set_bit(value) for i in range(128): self.assertEqual(b.data.is_set(i), i in values_to_set) for i in range(128): b.data.clear_bit(i) buf = bytes_type(b.data._buffer) self.assertEqual(len(buf), 16) self.assertEqual(bytes_type(buf), b'\x00' * 16) def test_bigbit_zero_idx(self): b = Bits() b.data.set_bit(0) self.assertTrue(b.data.is_set(0)) b.data.clear_bit(0) self.assertFalse(b.data.is_set(0)) # Out-of-bounds returns False and does not extend data. self.assertFalse(b.data.is_set(1000)) self.assertEqual(len(b.data), 1) def test_bigbit_item_methods(self): b = Bits() idxs = [0, 1, 4, 7, 8, 15, 16, 31, 32, 63] for i in idxs: b.data[i] = True for i in range(64): self.assertEqual(b.data[i], i in idxs) data = list(b.data) self.assertEqual(data, [1 if i in idxs else 0 for i in range(64)]) for i in range(64): del b.data[i] self.assertEqual(len(b.data), 8) self.assertEqual(b.data._buffer, b'\x00' * 8) def test_bigbit_set_clear(self): b = Bits() b.data = b'\x01' for i in range(8): self.assertEqual(b.data[i], i == 0) b.data.clear() self.assertEqual(len(b.data), 0) def test_bigbit_field(self): b = Bits.create() b.data.set_bit(1) b.data.set_bit(3) b.data.set_bit(5) b.save() b_db = Bits.get(Bits.id == b.id) for x in range(7): if x % 2 == 1: self.assertTrue(b_db.data.is_set(x)) else: self.assertFalse(b_db.data.is_set(x)) def test_bigbit_field_bitwise(self): b1 = Bits(data=b'\x11') b2 = Bits(data=b'\x12') b3 = Bits(data=b'\x99') self.assertEqual(b1.data & b2.data, b'\x10') self.assertEqual(b1.data | b2.data, b'\x13') self.assertEqual(b1.data ^ b2.data, b'\x03') self.assertEqual(b1.data & b3.data, b'\x11') self.assertEqual(b1.data | b3.data, b'\x99') self.assertEqual(b1.data ^ b3.data, b'\x88') b1.data &= b2.data self.assertEqual(b1.data._buffer, b'\x10') b1.data |= b2.data self.assertEqual(b1.data._buffer, b'\x12') b1.data ^= b3.data self.assertEqual(b1.data._buffer, b'\x8b') b1.data = b'\x11' self.assertEqual(b1.data & b'\xff\xff', b'\x11\x00') self.assertEqual(b1.data | b'\xff\xff', b'\xff\xff') self.assertEqual(b1.data ^ b'\xff\xff', b'\xee\xff') b1.data = b'\x11\x11' self.assertEqual(b1.data & b'\xff', b'\x11\x00') self.assertEqual(b1.data | b'\xff', b'\xff\x11') self.assertEqual(b1.data ^ b'\xff', b'\xee\x11') def test_bigbit_field_bulk_create(self): b1, b2, b3 = Bits(), Bits(), Bits() b1.data.set_bit(1) b2.data.set_bit(2) b3.data.set_bit(3) Bits.bulk_create([b1, b2, b3]) self.assertEqual(len(Bits), 3) for b in Bits.select(): self.assertEqual(sum(1 if b.data.is_set(i) else 0 for i in (1, 2, 3)), 1) def test_bigbit_field_bulk_update(self): b1, b2, b3 = Bits.create(), Bits.create(), Bits.create() b1.data.set_bit(11) b2.data.set_bit(12) b3.data.set_bit(13) Bits.bulk_update([b1, b2, b3], fields=[Bits.data]) mapping = {b1.id: 11, b2.id: 12, b3.id: 13} for b in Bits.select(): bit = mapping[b.id] self.assertTrue(b.data.is_set(bit)) class BlobModel(TestModel): data = BlobField() class TestBlobField(ModelTestCase): requires = [BlobModel] def test_blob_field(self): b = BlobModel.create(data=b'\xff\x01') b_db = BlobModel.get(BlobModel.data == b'\xff\x01') self.assertEqual(b.id, b_db.id) data = b_db.data if isinstance(data, memoryview): data = data.tobytes()
elif not isinstance(data, bytes): data = bytes(data) self.assertEqual(data, b'\xff\x01') def test_blob_on_proxy(self): db = Proxy() class NewBlobModel(Model): data = BlobField() class Meta: database = db db_obj = SqliteDatabase(':memory:') db.initialize(db_obj) self.assertTrue(NewBlobModel.data._constructor is sqlite3.Binary) def test_blob_db_hook(self): sentinel = object() class FakeDatabase(Database): def get_binary_type(self): return sentinel class B(Model): b1 = BlobField() b2 = BlobField() B._meta.set_database(FakeDatabase(None)) self.assertTrue(B.b1._constructor is sentinel) self.assertTrue(B.b2._constructor is sentinel) alt_db = SqliteDatabase(':memory:') with alt_db.bind_ctx([B]): # The constructor has been changed. self.assertTrue(B.b1._constructor is sqlite3.Binary) self.assertTrue(B.b2._constructor is sqlite3.Binary) # The constructor has been restored. self.assertTrue(B.b1._constructor is sentinel) self.assertTrue(B.b2._constructor is sentinel) class BigModel(TestModel): pk = BigAutoField() data = TextField() class TestBigAutoField(ModelTestCase): requires = [BigModel] def test_big_auto_field(self): b1 = BigModel.create(data='b1') b2 = BigModel.create(data='b2') b1_db = BigModel.get(BigModel.pk == b1.pk) b2_db = BigModel.get(BigModel.pk == b2.pk) self.assertTrue(b1_db.pk < b2_db.pk) self.assertEqual(b1_db.data, 'b1') self.assertEqual(b2_db.data, 'b2') class Item(TestModel): price = IntegerField() multiplier = FloatField(default=1.) class Bare(TestModel): key = BareField() value = BareField(adapt=int, null=True) class TestFieldValueHandling(ModelTestCase): requires = [Item] @skip_if(IS_CRDB, 'crdb requires cast to multiply int and float') def test_int_float_multi(self): i = Item.create(price=10, multiplier=0.75) query = (Item .select(Item, (Item.price * Item.multiplier).alias('total')) .where(Item.id == i.id)) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."price", "t1"."multiplier", ' '("t1"."price" * "t1"."multiplier") AS "total" ' 'FROM "item" AS "t1" ' 'WHERE ("t1"."id" = ?)'), [i.id]) i_db = query.get() self.assertEqual(i_db.price, 10) self.assertEqual(i_db.multiplier, .75) self.assertEqual(i_db.total, 7.5) # By default, Peewee will use the price field's (integer) converter to # coerce the value of its right-hand operand (converting 0.75 to 0). query = (Item .select(Item, (Item.price * 0.75).alias('total')) .where(Item.id == i.id)) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."price", "t1"."multiplier", ' '("t1"."price" * ?) AS "total" ' 'FROM "item" AS "t1" ' 'WHERE ("t1"."id" = ?)'), [0, i.id]) # We can explicitly pass converter=False and the value will not be converted. exp = Item.price * Value(0.75, False) query = (Item .select(Item, exp.alias('total')) .where(Item.id == i.id)) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."price", "t1"."multiplier", ' '("t1"."price" * ?) 
AS "total" ' 'FROM "item" AS "t1" ' 'WHERE ("t1"."id" = ?)'), [0.75, i.id]) i_db = query.get() self.assertEqual(i_db.price, 10) self.assertEqual(i_db.multiplier, .75) self.assertEqual(i_db.total, 7.5) def test_explicit_cast(self): prices = ((10, 1.1), (5, .5)) for price, multiplier in prices: Item.create(price=price, multiplier=multiplier) text = 'CHAR' if IS_MYSQL else 'TEXT' query = (Item .select(Item.price.cast(text).alias('price_text'), Item.multiplier.cast(text).alias('multiplier_text')) .order_by(Item.id) .dicts()) self.assertEqual(list(query), [ {'price_text': '10', 'multiplier_text': '1.1'}, {'price_text': '5', 'multiplier_text': '0.5'}, ]) item = (Item .select(Item.price.cast(text).alias('price'), Item.multiplier.cast(text).alias('multiplier')) .where(Item.price == 10) .get()) self.assertEqual(item.price, '10') self.assertEqual(item.multiplier, '1.1') @requires_sqlite @requires_models(Bare) def test_bare_model_adapt(self): b1 = Bare.create(key='k1', value=1) b2 = Bare.create(key='k2', value='2') b3 = Bare.create(key='k3', value=None) b1_db = Bare.get(Bare.id == b1.id) self.assertEqual(b1_db.key, 'k1') self.assertEqual(b1_db.value, 1) b2_db = Bare.get(Bare.id == b2.id) self.assertEqual(b2_db.key, 'k2') self.assertEqual(b2_db.value, 2) b3_db = Bare.get(Bare.id == b3.id) self.assertEqual(b3_db.key, 'k3') self.assertTrue(b3_db.value is None) class UUIDModel(TestModel): data = UUIDField(null=True) bdata = BinaryUUIDField(null=True) class TestUUIDField(ModelTestCase): requires = [UUIDModel] def test_uuid_field(self): uu = uuid.uuid4() u = UUIDModel.create(data=uu) u_db = UUIDModel.get(UUIDModel.id == u.id) self.assertEqual(u_db.data, uu) self.assertTrue(u_db.bdata is None) u_db2 = UUIDModel.get(UUIDModel.data == uu) self.assertEqual(u_db2.id, u.id) # Verify we can use hex string. uu = uuid.uuid4() u = UUIDModel.create(data=uu.hex) u_db = UUIDModel.get(UUIDModel.data == uu.hex) self.assertEqual(u.id, u_db.id) self.assertEqual(u_db.data, uu) # Verify we can use raw binary representation. uu = uuid.uuid4() u = UUIDModel.create(data=uu.bytes) u_db = UUIDModel.get(UUIDModel.data == uu.bytes) self.assertEqual(u.id, u_db.id) self.assertEqual(u_db.data, uu) def test_binary_uuid_field(self): uu = uuid.uuid4() u = UUIDModel.create(bdata=uu) u_db = UUIDModel.get(UUIDModel.id == u.id) self.assertEqual(u_db.bdata, uu) self.assertTrue(u_db.data is None) u_db2 = UUIDModel.get(UUIDModel.bdata == uu) self.assertEqual(u_db2.id, u.id) # Verify we can use hex string. uu = uuid.uuid4() u = UUIDModel.create(bdata=uu.hex) u_db = UUIDModel.get(UUIDModel.bdata == uu.hex) self.assertEqual(u.id, u_db.id) self.assertEqual(u_db.bdata, uu) # Verify we can use raw binary representation. uu = uuid.uuid4() u = UUIDModel.create(bdata=uu.bytes) u_db = UUIDModel.get(UUIDModel.bdata == uu.bytes) self.assertEqual(u.id, u_db.id) self.assertEqual(u_db.bdata, uu) class UU1(TestModel): id = UUIDField(default=uuid.uuid4, primary_key=True) name = TextField() class UU2(TestModel): id = UUIDField(default=uuid.uuid4, primary_key=True) u1 = ForeignKeyField(UU1) name = TextField() class TestForeignKeyUUIDField(ModelTestCase): requires = [UU1, UU2] def test_bulk_insert(self): # Create three UU1 instances. UU1.insert_many([{UU1.name: name} for name in 'abc'], fields=[UU1.id, UU1.name]).execute() ua, ub, uc = UU1.select().order_by(UU1.name) # Create several UU2 instances. 
data = ( ('a1', ua), ('b1', ub), ('b2', ub), ('c1', uc)) iq = UU2.insert_many([{UU2.name: name, UU2.u1: u} for name, u in data], fields=[UU2.id, UU2.name, UU2.u1]) iq.execute() query = UU2.select().order_by(UU2.name) for (name, u1), u2 in zip(data, query): self.assertEqual(u2.name, name) self.assertEqual(u2.u1.id, u1.id) class TSModel(TestModel): ts_s = TimestampField() ts_us = TimestampField(resolution=10 ** 6) ts_ms = TimestampField(resolution=3) # Milliseconds. ts_u = TimestampField(null=True, utc=True) class TSR(TestModel): ts_0 = TimestampField(resolution=0) ts_1 = TimestampField(resolution=1) ts_10 = TimestampField(resolution=10) ts_2 = TimestampField(resolution=2) class TestTimestampField(ModelTestCase): requires = [TSModel] @requires_models(TSR) def test_timestamp_field_resolutions(self): dt = datetime.datetime(2018, 3, 1, 3, 3, 7).replace(microsecond=123456) ts = TSR.create(ts_0=dt, ts_1=dt, ts_10=dt, ts_2=dt) ts_db = TSR[ts.id] # Zero and one are both treated as "seconds" resolution. self.assertEqual(ts_db.ts_0, dt.replace(microsecond=0)) self.assertEqual(ts_db.ts_1, dt.replace(microsecond=0)) self.assertEqual(ts_db.ts_10, dt.replace(microsecond=100000)) self.assertEqual(ts_db.ts_2, dt.replace(microsecond=120000)) def test_timestamp_field(self): dt = datetime.datetime(2018, 3, 1, 3, 3, 7) dt = dt.replace(microsecond=31337) # us=031_337, ms=031. ts = TSModel.create(ts_s=dt, ts_us=dt, ts_ms=dt, ts_u=dt) ts_db = TSModel.get(TSModel.id == ts.id) self.assertEqual(ts_db.ts_s, dt.replace(microsecond=0)) self.assertEqual(ts_db.ts_ms, dt.replace(microsecond=31000)) self.assertEqual(ts_db.ts_us, dt) self.assertEqual(ts_db.ts_u, dt.replace(microsecond=0)) self.assertEqual(TSModel.get(TSModel.ts_s == dt).id, ts.id) self.assertEqual(TSModel.get(TSModel.ts_ms == dt).id, ts.id) self.assertEqual(TSModel.get(TSModel.ts_us == dt).id, ts.id) self.assertEqual(TSModel.get(TSModel.ts_u == dt).id, ts.id) def test_timestamp_field_math(self): dt = datetime.datetime(2019, 1, 2, 3, 4, 5, 31337) ts = TSModel.create(ts_s=dt, ts_us=dt, ts_ms=dt) # Although these fields use different scales for storing the # timestamps, adding "1" has the effect of adding a single second - # the value will be multiplied by the correct scale via the converter. TSModel.update( ts_s=TSModel.ts_s + 1, ts_us=TSModel.ts_us + 1, ts_ms=TSModel.ts_ms + 1).execute() ts_db = TSModel.get(TSModel.id == ts.id) dt2 = dt + datetime.timedelta(seconds=1) self.assertEqual(ts_db.ts_s, dt2.replace(microsecond=0)) self.assertEqual(ts_db.ts_us, dt2) self.assertEqual(ts_db.ts_ms, dt2.replace(microsecond=31000)) def test_timestamp_field_value_as_ts(self): dt = datetime.datetime(2018, 3, 1, 3, 3, 7, 31337) unix_ts = time.mktime(dt.timetuple()) + 0.031337 ts = TSModel.create(ts_s=unix_ts, ts_us=unix_ts, ts_ms=unix_ts, ts_u=unix_ts) # Fetch from the DB and validate the values were stored correctly. ts_db = TSModel[ts.id] self.assertEqual(ts_db.ts_s, dt.replace(microsecond=0)) self.assertEqual(ts_db.ts_ms, dt.replace(microsecond=31000)) self.assertEqual(ts_db.ts_us, dt) utc_dt = TimestampField().local_to_utc(dt) self.assertEqual(ts_db.ts_u, utc_dt) # Verify we can query using a timestamp. 
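
# The scaling rule being verified here, restated as a standalone sketch.
# Field instances work on their own; no model or database is required:

from peewee import TimestampField
import datetime

ms_field = TimestampField(resolution=3)   # 10**3, i.e. milliseconds.
dt_in = datetime.datetime(2018, 3, 1, 3, 3, 7, 31337)

# db_value() scales the unix timestamp up to an integer; python_value()
# reverses it, flooring the microseconds to the field's resolution.
stored = ms_field.db_value(dt_in)
assert isinstance(stored, int)
assert ms_field.python_value(stored) == dt_in.replace(microsecond=31000)
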
self.assertEqual(TSModel.get(TSModel.ts_s == unix_ts).id, ts.id) self.assertEqual(TSModel.get(TSModel.ts_ms == unix_ts).id, ts.id) self.assertEqual(TSModel.get(TSModel.ts_us == unix_ts).id, ts.id) self.assertEqual(TSModel.get(TSModel.ts_u == unix_ts).id, ts.id) def test_timestamp_utc_vs_localtime(self): local_field = TimestampField() utc_field = TimestampField(utc=True) dt = datetime.datetime(2019, 1, 1, 12) unix_ts = int(local_field.get_timestamp(dt)) utc_ts = int(utc_field.get_timestamp(dt)) # Local timestamp is unmodified. Verify that when utc=True, the # timestamp is converted from local time to UTC. self.assertEqual(local_field.db_value(dt), unix_ts) self.assertEqual(utc_field.db_value(dt), utc_ts) self.assertEqual(local_field.python_value(unix_ts), dt) self.assertEqual(utc_field.python_value(utc_ts), dt) # Convert back-and-forth several times. dbv, pyv = local_field.db_value, local_field.python_value self.assertEqual(pyv(dbv(pyv(dbv(dt)))), dt) dbv, pyv = utc_field.db_value, utc_field.python_value self.assertEqual(pyv(dbv(pyv(dbv(dt)))), dt) def test_timestamp_field_parts(self): dt = datetime.datetime(2019, 1, 2, 3, 4, 5) dt_utc = TimestampField().local_to_utc(dt) ts = TSModel.create(ts_s=dt, ts_us=dt, ts_ms=dt, ts_u=dt_utc) fields = (TSModel.ts_s, TSModel.ts_us, TSModel.ts_ms, TSModel.ts_u) attrs = ('year', 'month', 'day', 'hour', 'minute', 'second') selection = [] for field in fields: for attr in attrs: selection.append(getattr(field, attr)) row = TSModel.select(*selection).tuples()[0] # First ensure that all 3 fields are returning the same data. ts_s, ts_us, ts_ms, ts_u = row[:6], row[6:12], row[12:18], row[18:] self.assertEqual(ts_s, ts_us) self.assertEqual(ts_s, ts_ms) self.assertEqual(ts_s, ts_u) # Now validate that the data is correct. We will receive the data back # as a UTC unix timestamp, however! y, m, d, H, M, S = ts_s self.assertEqual(y, 2019) self.assertEqual(m, 1) self.assertEqual(d, dt_utc.day) self.assertEqual(H, dt_utc.hour) self.assertEqual(M, 4) self.assertEqual(S, 5) def test_timestamp_field_from_ts(self): dt = datetime.datetime(2019, 1, 2, 3, 4, 5) dt_utc = TimestampField().local_to_utc(dt) ts = TSModel.create(ts_s=dt, ts_us=dt, ts_ms=dt, ts_u=dt_utc) query = TSModel.select( TSModel.ts_s.from_timestamp().alias('dt_s'), TSModel.ts_us.from_timestamp().alias('dt_us'), TSModel.ts_ms.from_timestamp().alias('dt_ms'), TSModel.ts_u.from_timestamp().alias('dt_u')) # Get row and unpack into variables corresponding to the fields. row = query.tuples()[0] dt_s, dt_us, dt_ms, dt_u = row # Ensure the timestamp values for all 4 fields are the same. self.assertEqual(dt_s, dt_us) self.assertEqual(dt_s, dt_ms) self.assertEqual(dt_s, dt_u) if IS_SQLITE: expected = dt_utc.strftime('%Y-%m-%d %H:%M:%S') self.assertEqual(dt_s, expected) elif IS_POSTGRESQL or IS_CRDB: # Postgres returns an aware UTC datetime. Strip this to compare # against our naive UTC datetime. 
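
# The conversion logic above, restated standalone: utc=True changes only the
# serialization. Naive local datetimes are shifted to UTC on the way in and
# back to local time on the way out, so round-trips are lossless either way:

from peewee import TimestampField
import datetime

local_f = TimestampField()
utc_f = TimestampField(utc=True)
noon = datetime.datetime(2019, 1, 1, 12)
assert local_f.python_value(local_f.db_value(noon)) == noon
assert utc_f.python_value(utc_f.db_value(noon)) == noon
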
self.assertEqual(dt_s.replace(tzinfo=None), dt_utc) def test_invalid_resolution(self): self.assertRaises(ValueError, TimestampField, resolution=7) self.assertRaises(ValueError, TimestampField, resolution=20) self.assertRaises(ValueError, TimestampField, resolution=10**7) class ListField(TextField): def db_value(self, value): return ','.join(value) if value else '' def python_value(self, value): return value.split(',') if value else [] class Todo(TestModel): content = TextField() tags = ListField() class TestCustomField(ModelTestCase): requires = [Todo] def test_custom_field(self): t1 = Todo.create(content='t1', tags=['t1-a', 't1-b']) t2 = Todo.create(content='t2', tags=[]) t1_db = Todo.get(Todo.id == t1.id) self.assertEqual(t1_db.tags, ['t1-a', 't1-b']) t2_db = Todo.get(Todo.id == t2.id) self.assertEqual(t2_db.tags, []) t1_db = Todo.get(Todo.tags == AsIs(['t1-a', 't1-b'])) self.assertEqual(t1_db.id, t1.id) t2_db = Todo.get(Todo.tags == AsIs([])) self.assertEqual(t2_db.id, t2.id) class UpperField(TextField): def db_value(self, value): return fn.UPPER(value) class UpperModel(TestModel): name = UpperField() class TestSQLFunctionDBValue(ModelTestCase): database = get_in_memory_db() requires = [UpperModel] def test_sql_function_db_value(self): # Verify that the db function is applied as part of an INSERT. um = UpperModel.create(name='huey') um_db = UpperModel.get(UpperModel.id == um.id) self.assertEqual(um_db.name, 'HUEY') # Verify that the db function is applied as part of an UPDATE. um_db.name = 'zaizee' um_db.save() # Ensure that the name was updated correctly. um_db2 = UpperModel.get(UpperModel.id == um.id) self.assertEqual(um_db2.name, 'ZAIZEE') # Verify that the db function is applied in a WHERE expression. um_db3 = UpperModel.get(UpperModel.name == 'zaiZee') self.assertEqual(um_db3.id, um.id) # If we nest the field in a function, the conversion is not applied. expr = fn.SUBSTR(UpperModel.name, 1, 1) == 'z' self.assertRaises(UpperModel.DoesNotExist, UpperModel.get, expr) class Schedule(TestModel): interval = IntegerField() class Task(TestModel): schedule = ForeignKeyField(Schedule) name = TextField() last_run = DateTimeField() class TestDateTimeMath(ModelTestCase): offset_to_names = ( (-10, ()), (5, ('s1',)), (10, ('s1', 's10')), (11, ('s1', 's10')), (60, ('s1', 's10', 's60')), (61, ('s1', 's10', 's60'))) requires = [Schedule, Task] def setUp(self): super(TestDateTimeMath, self).setUp() with self.database.atomic(): s1 = Schedule.create(interval=1) s10 = Schedule.create(interval=10) s60 = Schedule.create(interval=60) self.dt = datetime.datetime(2019, 1, 1, 12) for s, n in ((s1, 's1'), (s10, 's10'), (s60, 's60')): Task.create(schedule=s, name=n, last_run=self.dt) def _do_test_date_time_math(self, next_occurrence_expression): for offset, names in self.offset_to_names: dt = Value(self.dt + datetime.timedelta(seconds=offset)) query = (Task .select(Task, Schedule) .join(Schedule) .where(dt >= next_occurrence_expression) .order_by(Schedule.interval)) tnames = [task.name for task in query] self.assertEqual(list(names), tnames) @requires_pglike def test_date_time_math_pg(self): second = SQL("INTERVAL '1 second'") next_occurrence = Task.last_run + (Schedule.interval * second) self._do_test_date_time_math(next_occurrence) @requires_sqlite def test_date_time_math_sqlite(self): # Convert to a timestamp, add the scheduled seconds, then convert back # to a datetime string for comparison with the last occurrence. 
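# (Roughly, the expression built below renders to SQL along the lines of:
#      datetime(CAST(STRFTIME('%s', "last_run") AS INTEGER) + "interval",
#               'unixepoch')
#  -- epoch seconds plus the interval, turned back into a datetime string
#  that compares lexicographically with other datetime strings. The SQL
#  shown is illustrative only and is not asserted by this test.)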
next_ts = Task.last_run.to_timestamp() + Schedule.interval next_occurrence = fn.datetime(next_ts, 'unixepoch') self._do_test_date_time_math(next_occurrence) @requires_mysql def test_date_time_math_mysql(self): nl = NodeList((SQL('INTERVAL'), Schedule.interval, SQL('SECOND'))) next_occurrence = fn.date_add(Task.last_run, nl) self._do_test_date_time_math(next_occurrence) class NQ(TestModel): name = TextField() class NQItem(TestModel): nq = ForeignKeyField(NQ, backref='items') nq_null = ForeignKeyField(NQ, backref='null_items', null=True) nq_lazy = ForeignKeyField(NQ, lazy_load=False, backref='lazy_items') nq_lazy_null = ForeignKeyField(NQ, lazy_load=False, backref='lazy_null_items', null=True) class TestForeignKeyLazyLoad(ModelTestCase): requires = [NQ, NQItem] def setUp(self): super(TestForeignKeyLazyLoad, self).setUp() with self.database.atomic(): a1, a2, a3, a4 = [NQ.create(name='a%s' % i) for i in range(1, 5)] ai = NQItem.create(nq=a1, nq_null=a2, nq_lazy=a3, nq_lazy_null=a4) b = NQ.create(name='b') bi = NQItem.create(nq=b, nq_lazy=b) def test_doesnotexist_lazy_load(self): n = NQ.create(name='n1') i = NQItem.create(nq=n, nq_null=n, nq_lazy=n, nq_lazy_null=n) i_db = NQItem.select(NQItem.id).where(NQItem.nq == n).get() with self.assertQueryCount(0): # Only raise DoesNotExist for non-nullable *and* lazy-load=True. # Otherwise we just return None. self.assertRaises(NQ.DoesNotExist, lambda: i_db.nq) self.assertTrue(i_db.nq_null is None) self.assertTrue(i_db.nq_lazy is None) self.assertTrue(i_db.nq_lazy_null is None) def test_foreign_key_lazy_load(self): a1, a2, a3, a4 = (NQ.select() .where(NQ.name.startswith('a')) .order_by(NQ.name)) b = NQ.get(NQ.name == 'b') ai = NQItem.get(NQItem.nq_id == a1.id) bi = NQItem.get(NQItem.nq_id == b.id) # Accessing the lazy foreign-key fields will not result in any queries # being executed. with self.assertQueryCount(0): self.assertEqual(ai.nq_lazy, a3.id) self.assertEqual(ai.nq_lazy_null, a4.id) self.assertEqual(bi.nq_lazy, b.id) self.assertTrue(bi.nq_lazy_null is None) self.assertTrue(bi.nq_null is None) # Accessing the regular foreign-key fields uses a query to get the # related model instance. with self.assertQueryCount(2): self.assertEqual(ai.nq.id, a1.id) self.assertEqual(ai.nq_null.id, a2.id) with self.assertQueryCount(1): self.assertEqual(bi.nq.id, b.id) def test_fk_lazy_load_related_instance(self): nq = NQ(name='b1') nqi = NQItem(nq=nq, nq_null=nq, nq_lazy=nq, nq_lazy_null=nq) nq.save() nqi.save() with self.assertQueryCount(1): nqi_db = NQItem.get(NQItem.id == nqi.id) self.assertEqual(nqi_db.nq_lazy, nq.id) self.assertEqual(nqi_db.nq_lazy_null, nq.id) def test_fk_lazy_select_related(self): NA, NB, NC, ND = [NQ.alias(a) for a in ('na', 'nb', 'nc', 'nd')] LO = JOIN.LEFT_OUTER query = (NQItem.select(NQItem, NA, NB, NC, ND) .join_from(NQItem, NA, LO, on=NQItem.nq) .join_from(NQItem, NB, LO, on=NQItem.nq_null) .join_from(NQItem, NC, LO, on=NQItem.nq_lazy) .join_from(NQItem, ND, LO, on=NQItem.nq_lazy_null) .order_by(NQItem.id)) # If we explicitly / eagerly select lazy foreign-key models, they # behave just like regular foreign keys. 
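
# Declaring a lazy foreign key, as a self-contained sketch with hypothetical
# Author/Book models. With lazy_load=False, plain attribute access returns
# the stored id rather than issuing a SELECT for the related row:

from peewee import Model, ForeignKeyField, SqliteDatabase, TextField

lazy_db = SqliteDatabase(':memory:')

class Author(Model):
    name = TextField()
    class Meta:
        database = lazy_db

class Book(Model):
    author = ForeignKeyField(Author, lazy_load=False)
    class Meta:
        database = lazy_db

lazy_db.create_tables([Author, Book])
a = Author.create(name='a')
b = Book.create(author=a)
assert Book.get_by_id(b.id).author == a.id   # The raw id; no extra query.
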
with self.assertQueryCount(1): ai, bi = [ni for ni in query] self.assertEqual(ai.nq.name, 'a1') self.assertEqual(ai.nq_null.name, 'a2') self.assertEqual(ai.nq_lazy.name, 'a3') self.assertEqual(ai.nq_lazy_null.name, 'a4') self.assertEqual(bi.nq.name, 'b') self.assertEqual(bi.nq_lazy.name, 'b') self.assertTrue(bi.nq_null is None) self.assertTrue(bi.nq_lazy_null is None) class SM(TestModel): text_field = TextField() char_field = CharField() class TestStringFields(ModelTestCase): requires = [SM] def test_string_fields(self): bdata = b'b1' udata = b'u1'.decode('utf8') sb = SM.create(text_field=bdata, char_field=bdata) su = SM.create(text_field=udata, char_field=udata) sb_db = SM.get(SM.id == sb.id) self.assertEqual(sb_db.text_field, 'b1') self.assertEqual(sb_db.char_field, 'b1') su_db = SM.get(SM.id == su.id) self.assertEqual(su_db.text_field, 'u1') self.assertEqual(su_db.char_field, 'u1') bvals = (b'b1', u'b1') uvals = (b'u1', u'u1') for field in (SM.text_field, SM.char_field): for bval in bvals: sb_db = SM.get(field == bval) self.assertEqual(sb.id, sb_db.id) for uval in uvals: su_db = SM.get(field == uval) self.assertEqual(su.id, su_db.id) class InvalidTypes(TestModel): tfield = TextField() ifield = IntegerField() ffield = FloatField() class TestSqliteInvalidDataTypes(ModelTestCase): database = get_in_memory_db() requires = [InvalidTypes] def test_invalid_data_types(self): it = InvalidTypes.create(tfield=100, ifield='five', ffield='pi') it_db1 = InvalidTypes.get(InvalidTypes.tfield == 100) it_db2 = InvalidTypes.get(InvalidTypes.ifield == 'five') it_db3 = InvalidTypes.get(InvalidTypes.ffield == 'pi') self.assertTrue(it.id == it_db1.id == it_db2.id == it_db3.id) self.assertEqual(it_db1.tfield, '100') self.assertEqual(it_db1.ifield, 'five') self.assertEqual(it_db1.ffield, 'pi') peewee-3.17.7/tests/hybrid.py000066400000000000000000000121761470346076600161040ustar00rootroot00000000000000from peewee import * from playhouse.hybrid import * from .base import ModelTestCase from .base import TestModel from .base import get_in_memory_db from .base import requires_models class Interval(TestModel): start = IntegerField() end = IntegerField() @hybrid_property def length(self): return self.end - self.start @hybrid_method def contains(self, point): return (self.start <= point) & (point < self.end) @hybrid_property def radius(self): return int(abs(self.length) / 2) @radius.expression def radius(cls): return fn.ABS(cls.length) / 2 class Person(TestModel): first = TextField() last = TextField() @hybrid_property def full_name(self): return self.first + ' ' + self.last class SubPerson(Person): pass class TestHybridProperties(ModelTestCase): database = get_in_memory_db() requires = [Interval, Person] def setUp(self): super(TestHybridProperties, self).setUp() intervals = ( (1, 5), (2, 6), (3, 5), (2, 5)) for start, end in intervals: Interval.create(start=start, end=end) def test_hybrid_property(self): query = Interval.select().where(Interval.length == 4) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."start", "t1"."end" ' 'FROM "interval" AS "t1" ' 'WHERE (("t1"."end" - "t1"."start") = ?)'), [4]) results = sorted((i.start, i.end) for i in query) self.assertEqual(results, [(1, 5), (2, 6)]) query = Interval.select().order_by(Interval.id) self.assertEqual([i.length for i in query], [4, 4, 2, 3]) def test_hybrid_method(self): query = Interval.select().where(Interval.contains(2)) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."start", "t1"."end" ' 'FROM "interval" AS "t1" ' 'WHERE (("t1"."start" <= ?) 
AND ("t1"."end" > ?))'), [2, 2]) results = sorted((i.start, i.end) for i in query) self.assertEqual(results, [(1, 5), (2, 5), (2, 6)]) query = Interval.select().order_by(Interval.id) self.assertEqual([i.contains(2) for i in query], [1, 1, 0, 1]) def test_expression(self): query = Interval.select().where(Interval.radius == 2) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."start", "t1"."end" ' 'FROM "interval" AS "t1" ' 'WHERE ((ABS("t1"."end" - "t1"."start") / ?) = ?)'), [2, 2]) self.assertEqual(sorted((i.start, i.end) for i in query), [(1, 5), (2, 6)]) query = Interval.select().order_by(Interval.id) self.assertEqual([i.radius for i in query], [2, 2, 1, 1]) def test_string_fields(self): huey = Person.create(first='huey', last='cat') zaizee = Person.create(first='zaizee', last='kitten') self.assertEqual(huey.full_name, 'huey cat') self.assertEqual(zaizee.full_name, 'zaizee kitten') query = Person.select().where(Person.full_name.startswith('huey c')) huey_db = query.get() self.assertEqual(huey_db.id, huey.id) def test_hybrid_model_alias(self): Person.create(first='huey', last='cat') PA = Person.alias() query = PA.select(PA.full_name).where(PA.last == 'cat') self.assertSQL(query, ( 'SELECT (("t1"."first" || ?) || "t1"."last") ' 'FROM "person" AS "t1" WHERE ("t1"."last" = ?)'), [' ', 'cat']) self.assertEqual(query.tuples()[0], ('huey cat',)) @requires_models(SubPerson) def test_hybrid_subclass_model_alias(self): SubPerson.create(first='huey', last='cat') SA = SubPerson.alias() query = SA.select(SA.full_name).where(SA.last == 'cat') self.assertSQL(query, ( 'SELECT (("t1"."first" || ?) || "t1"."last") ' 'FROM "sub_person" AS "t1" WHERE ("t1"."last" = ?)'), [' ', 'cat']) self.assertEqual(query.tuples()[0], ('huey cat',)) class Order(TestModel): name = TextField() @hybrid_property def quantity(self): return sum([item.qt for item in self.items]) @quantity.expression def quantity(cls): return fn.SUM(Item.qt).alias('quantity') class Item(TestModel): order = ForeignKeyField(Order, backref='items') qt = IntegerField() class TestHybridWithRelationship(ModelTestCase): database = get_in_memory_db() requires = [Order, Item] def test_hybrid_with_relationship(self): data = ( ('a', (4, 3, 2, 1)), ('b', (1000, 300, 30, 7)), ('c', ())) for name, qts in data: o = Order.create(name=name) for qt in qts: Item.create(order=o, qt=qt) query = Order.select().order_by(Order.name) self.assertEqual([o.quantity for o in query], [10, 1337, 0]) query = (Order .select(Order.name, Order.quantity.alias('sql_qt')) .join(Item, JOIN.LEFT_OUTER) .group_by(Order.name) .order_by(Order.name)) self.assertEqual([o.sql_qt for o in query], [10, 1337, None]) peewee-3.17.7/tests/keys.py000066400000000000000000000430641470346076600155760ustar00rootroot00000000000000from peewee import * from .base import IS_MYSQL from .base import IS_SQLITE from .base import ModelTestCase from .base import TestModel from .base import db from .base import get_in_memory_db from .base import requires_sqlite class Package(TestModel): barcode = CharField(unique=True) class PackageItem(TestModel): title = CharField() package = ForeignKeyField(Package, Package.barcode, backref='items') class Manufacturer(TestModel): name = CharField() class Component(TestModel): name = CharField() manufacturer = ForeignKeyField(Manufacturer, null=True) class Computer(TestModel): hard_drive = ForeignKeyField(Component, backref='c1') memory = ForeignKeyField(Component, backref='c2') processor = ForeignKeyField(Component, backref='c3') class User(TestModel): username = CharField() 
class Meta: table_name = 'users' class Relationship(TestModel): from_user = ForeignKeyField(User, backref='relationships') to_user = ForeignKeyField(User, backref='related_to') class Note(TestModel): user = ForeignKeyField(User, backref='notes') content = TextField() class CompositeKeyModel(TestModel): f1 = CharField() f2 = IntegerField() f3 = FloatField() class Meta: primary_key = CompositeKey('f1', 'f2') class UserThing(TestModel): thing = CharField() user = ForeignKeyField(User, backref='things') class Meta: primary_key = CompositeKey('thing', 'user') class Post(TestModel): title = CharField() class Tag(TestModel): tag = CharField() class TagPostThrough(TestModel): tag = ForeignKeyField(Tag, backref='posts') post = ForeignKeyField(Post, backref='tags') class Meta: primary_key = CompositeKey('tag', 'post') class TagPostThroughAlt(TestModel): tag = ForeignKeyField(Tag, backref='posts_alt') post = ForeignKeyField(Post, backref='tags_alt') class TestForeignKeyToNonPrimaryKey(ModelTestCase): requires = [Package, PackageItem] def setUp(self): super(TestForeignKeyToNonPrimaryKey, self).setUp() for barcode in ['101', '102']: Package.create(barcode=barcode) for i in range(2): PackageItem.create( package=barcode, title='%s-%s' % (barcode, i)) def test_fk_resolution(self): pi = PackageItem.get(PackageItem.title == '101-0') self.assertEqual(pi.__data__['package'], '101') self.assertEqual(pi.package, Package.get(Package.barcode == '101')) def test_select_generation(self): p = Package.get(Package.barcode == '101') self.assertEqual( [item.title for item in p.items.order_by(PackageItem.title)], ['101-0', '101-1']) class TestMultipleForeignKey(ModelTestCase): requires = [Manufacturer, Component, Computer] test_values = [ ['3TB', '16GB', 'i7'], ['128GB', '1GB', 'ARM'], ] def setUp(self): super(TestMultipleForeignKey, self).setUp() intel = Manufacturer.create(name='Intel') amd = Manufacturer.create(name='AMD') kingston = Manufacturer.create(name='Kingston') for hard_drive, memory, processor in self.test_values: c = Computer.create( hard_drive=Component.create(name=hard_drive), memory=Component.create(name=memory, manufacturer=kingston), processor=Component.create(name=processor, manufacturer=intel)) # The 2nd computer has an AMD processor. 
c.processor.manufacturer = amd c.processor.save() def test_multi_join(self): HDD = Component.alias('hdd') HDDMf = Manufacturer.alias('hddm') Memory = Component.alias('mem') MemoryMf = Manufacturer.alias('memm') Processor = Component.alias('proc') ProcessorMf = Manufacturer.alias('procm') query = (Computer .select( Computer, HDD, Memory, Processor, HDDMf, MemoryMf, ProcessorMf) .join(HDD, on=( Computer.hard_drive_id == HDD.id).alias('hard_drive')) .join( HDDMf, JOIN.LEFT_OUTER, on=(HDD.manufacturer_id == HDDMf.id)) .switch(Computer) .join(Memory, on=( Computer.memory_id == Memory.id).alias('memory')) .join( MemoryMf, JOIN.LEFT_OUTER, on=(Memory.manufacturer_id == MemoryMf.id)) .switch(Computer) .join(Processor, on=( Computer.processor_id == Processor.id).alias('processor')) .join( ProcessorMf, JOIN.LEFT_OUTER, on=(Processor.manufacturer_id == ProcessorMf.id)) .order_by(Computer.id)) with self.assertQueryCount(1): vals = [] manufacturers = [] for computer in query: components = [ computer.hard_drive, computer.memory, computer.processor] vals.append([component.name for component in components]) for component in components: if component.manufacturer: manufacturers.append(component.manufacturer.name) else: manufacturers.append(None) self.assertEqual(vals, self.test_values) self.assertEqual(manufacturers, [ None, 'Kingston', 'Intel', None, 'Kingston', 'AMD', ]) class TestMultipleForeignKeysJoining(ModelTestCase): requires = [User, Relationship] def test_multiple_fks(self): a = User.create(username='a') b = User.create(username='b') c = User.create(username='c') self.assertEqual(list(a.relationships), []) self.assertEqual(list(a.related_to), []) r_ab = Relationship.create(from_user=a, to_user=b) self.assertEqual(list(a.relationships), [r_ab]) self.assertEqual(list(a.related_to), []) self.assertEqual(list(b.relationships), []) self.assertEqual(list(b.related_to), [r_ab]) r_bc = Relationship.create(from_user=b, to_user=c) following = User.select().join( Relationship, on=Relationship.to_user ).where(Relationship.from_user == a) self.assertEqual(list(following), [b]) followers = User.select().join( Relationship, on=Relationship.from_user ).where(Relationship.to_user == a.id) self.assertEqual(list(followers), []) following = User.select().join( Relationship, on=Relationship.to_user ).where(Relationship.from_user == b.id) self.assertEqual(list(following), [c]) followers = User.select().join( Relationship, on=Relationship.from_user ).where(Relationship.to_user == b.id) self.assertEqual(list(followers), [a]) following = User.select().join( Relationship, on=Relationship.to_user ).where(Relationship.from_user == c.id) self.assertEqual(list(following), []) followers = User.select().join( Relationship, on=Relationship.from_user ).where(Relationship.to_user == c.id) self.assertEqual(list(followers), [b]) class TestCompositePrimaryKey(ModelTestCase): requires = [Tag, Post, TagPostThrough, CompositeKeyModel, User, UserThing] def setUp(self): super(TestCompositePrimaryKey, self).setUp() tags = [Tag.create(tag='t%d' % i) for i in range(1, 4)] posts = [Post.create(title='p%d' % i) for i in range(1, 4)] p12 = Post.create(title='p12') for t, p in zip(tags, posts): TagPostThrough.create(tag=t, post=p) TagPostThrough.create(tag=tags[0], post=p12) TagPostThrough.create(tag=tags[1], post=p12) def test_create_table_query(self): query, params = TagPostThrough._schema._create_table().query() sql = ('CREATE TABLE IF NOT EXISTS "tag_post_through" (' '"tag_id" INTEGER NOT NULL, ' '"post_id" INTEGER NOT NULL, ' 'PRIMARY KEY 
("tag_id", "post_id"), ' 'FOREIGN KEY ("tag_id") REFERENCES "tag" ("id"), ' 'FOREIGN KEY ("post_id") REFERENCES "post" ("id"))') if IS_MYSQL: sql = sql.replace('"', '`') self.assertEqual(query, sql) def test_get_set_id(self): tpt = (TagPostThrough .select() .join(Tag) .switch(TagPostThrough) .join(Post) .order_by(Tag.tag, Post.title)).get() # Sanity check. self.assertEqual(tpt.tag.tag, 't1') self.assertEqual(tpt.post.title, 'p1') tag = Tag.select().where(Tag.tag == 't1').get() post = Post.select().where(Post.title == 'p1').get() self.assertEqual(tpt._pk, (tag.id, post.id)) # set_id is a no-op. with self.assertRaisesCtx(TypeError): tpt._pk = None self.assertEqual(tpt._pk, (tag.id, post.id)) t3 = Tag.get(Tag.tag == 't3') p3 = Post.get(Post.title == 'p3') tpt._pk = (t3, p3) self.assertEqual(tpt.tag.tag, 't3') self.assertEqual(tpt.post.title, 'p3') def test_querying(self): posts = (Post.select() .join(TagPostThrough) .join(Tag) .where(Tag.tag == 't1') .order_by(Post.title)) self.assertEqual([p.title for p in posts], ['p1', 'p12']) tags = (Tag.select() .join(TagPostThrough) .join(Post) .where(Post.title == 'p12') .order_by(Tag.tag)) self.assertEqual([t.tag for t in tags], ['t1', 't2']) def test_composite_key_model(self): CKM = CompositeKeyModel values = [ ('a', 1, 1.0), ('a', 2, 2.0), ('b', 1, 1.0), ('b', 2, 2.0)] c1, c2, c3, c4 = [ CKM.create(f1=f1, f2=f2, f3=f3) for f1, f2, f3 in values] # Update a single row, giving it a new value for `f3`. CKM.update(f3=3.0).where((CKM.f1 == 'a') & (CKM.f2 == 2)).execute() c = CKM.get((CKM.f1 == 'a') & (CKM.f2 == 2)) self.assertEqual(c.f3, 3.0) # Update the `f3` value and call `save()`, triggering an update. c3.f3 = 4.0 c3.save() c = CKM.get((CKM.f1 == 'b') & (CKM.f2 == 1)) self.assertEqual(c.f3, 4.0) # Only 1 row updated. query = CKM.select().where(CKM.f3 == 4.0) self.assertEqual(query.count(), 1) # Unfortunately this does not work since the original value of the # PK is lost (and hence cannot be used to update). 
c4.f1 = 'c' c4.save() self.assertRaises( CKM.DoesNotExist, lambda: CKM.get((CKM.f1 == 'c') & (CKM.f2 == 2))) def test_count_composite_key(self): CKM = CompositeKeyModel values = [ ('a', 1, 1.0), ('a', 2, 2.0), ('b', 1, 1.0), ('b', 2, 1.0)] for f1, f2, f3 in values: CKM.create(f1=f1, f2=f2, f3=f3) self.assertEqual(CKM.select().count(), 4) self.assertTrue(CKM.select().where( (CKM.f1 == 'a') & (CKM.f2 == 1)).exists()) self.assertFalse(CKM.select().where( (CKM.f1 == 'a') & (CKM.f2 == 3)).exists()) def test_delete_instance(self): u1, u2 = [User.create(username='u%s' % i) for i in range(2)] ut1 = UserThing.create(thing='t1', user=u1) ut2 = UserThing.create(thing='t2', user=u1) ut3 = UserThing.create(thing='t1', user=u2) ut4 = UserThing.create(thing='t3', user=u2) res = ut1.delete_instance() self.assertEqual(res, 1) self.assertEqual( [x.thing for x in UserThing.select().order_by(UserThing.thing)], ['t1', 't2', 't3']) def test_composite_key_inheritance(self): class Person(TestModel): first = TextField() last = TextField() class Meta: primary_key = CompositeKey('first', 'last') self.assertTrue(isinstance(Person._meta.primary_key, CompositeKey)) self.assertEqual(Person._meta.primary_key.field_names, ('first', 'last')) class Employee(Person): title = TextField() self.assertTrue(isinstance(Employee._meta.primary_key, CompositeKey)) self.assertEqual(Employee._meta.primary_key.field_names, ('first', 'last')) sql = ('CREATE TABLE IF NOT EXISTS "employee" (' '"first" TEXT NOT NULL, "last" TEXT NOT NULL, ' '"title" TEXT NOT NULL, PRIMARY KEY ("first", "last"))') if IS_MYSQL: sql = sql.replace('"', '`') self.assertEqual(Employee._schema._create_table().query(), (sql, [])) class TestForeignKeyConstraints(ModelTestCase): requires = [User, Note] def setUp(self): super(TestForeignKeyConstraints, self).setUp() self.set_foreign_key_pragma(True) def tearDown(self): self.set_foreign_key_pragma(False) super(TestForeignKeyConstraints, self).tearDown() def set_foreign_key_pragma(self, is_enabled): if IS_SQLITE: self.database.foreign_keys = 'on' if is_enabled else 'off' def test_constraint_exists(self): max_id = User.select(fn.MAX(User.id)).scalar() or 0 with self.assertRaisesCtx(IntegrityError): with self.database.atomic(): Note.create(user=max_id + 1, content='test') @requires_sqlite def test_disable_constraint(self): self.set_foreign_key_pragma(False) Note.create(user=0, content='test') class FK_A(TestModel): key = CharField(max_length=16, unique=True) class FK_B(TestModel): fk_a = ForeignKeyField(FK_A, field='key') class TestFKtoNonPKField(ModelTestCase): requires = [FK_A, FK_B] def test_fk_to_non_pk_field(self): a1 = FK_A.create(key='a1') a2 = FK_A.create(key='a2') b1 = FK_B.create(fk_a=a1) b2 = FK_B.create(fk_a=a2) args = (b1.fk_a, b1.fk_a_id, a1, a1.key) for arg in args: query = FK_B.select().where(FK_B.fk_a == arg) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."fk_a_id" FROM "fk_b" AS "t1" ' 'WHERE ("t1"."fk_a_id" = ?)'), ['a1']) b1_db = query.get() self.assertEqual(b1_db.id, b1.id) def test_fk_to_non_pk_insert_update(self): a1 = FK_A.create(key='a1') b1 = FK_B.create(fk_a=a1) self.assertEqual(FK_B.select().where(FK_B.fk_a == a1).count(), 1) exprs = ( {FK_B.fk_a: a1}, {'fk_a': a1}, {FK_B.fk_a: a1.key}, {'fk_a': a1.key}) for n, expr in enumerate(exprs, 2): self.assertTrue(FK_B.insert(expr).execute()) self.assertEqual(FK_B.select().where(FK_B.fk_a == a1).count(), n) a2 = FK_A.create(key='a2') exprs = ( {FK_B.fk_a: a2}, {'fk_a': a2}, {FK_B.fk_a: a2.key}, {'fk_a': a2.key}) b_list = 
list(FK_B.select().where(FK_B.fk_a == a1)) for i, (b, expr) in enumerate(zip(b_list[1:], exprs), 1): self.assertTrue(FK_B.update(expr).where(FK_B.id == b.id).execute()) self.assertEqual(FK_B.select().where(FK_B.fk_a == a2).count(), i) class TestDeferredForeignKeyIntegration(ModelTestCase): database = get_in_memory_db() def test_deferred_fk_simple(self): class Base(TestModel): class Meta: database = self.database class DFFk(Base): fk = DeferredForeignKey('DFPk') # Deferred key not bound yet. self.assertTrue(isinstance(DFFk.fk, DeferredForeignKey)) class DFPk(Base): pass # Deferred key is bound correctly. self.assertTrue(isinstance(DFFk.fk, ForeignKeyField)) self.assertEqual(DFFk.fk.rel_model, DFPk) self.assertEqual(DFFk._meta.refs, {DFFk.fk: DFPk}) self.assertEqual(DFFk._meta.backrefs, {}) self.assertEqual(DFPk._meta.refs, {}) self.assertEqual(DFPk._meta.backrefs, {DFFk.fk: DFFk}) self.assertSQL(DFFk._schema._create_table(False), ( 'CREATE TABLE "df_fk" ("id" INTEGER NOT NULL PRIMARY KEY, ' '"fk_id" INTEGER NOT NULL)'), []) def test_deferred_fk_as_pk(self): class Base(TestModel): class Meta: database = self.database class DFFk(Base): fk = DeferredForeignKey('DFPk', primary_key=True) # Deferred key not bound yet. self.assertTrue(isinstance(DFFk.fk, DeferredForeignKey)) self.assertTrue(DFFk._meta.primary_key is DFFk.fk) class DFPk(Base): pass # Resolved and primary-key set correctly. self.assertTrue(isinstance(DFFk.fk, ForeignKeyField)) self.assertTrue(DFFk._meta.primary_key is DFFk.fk) self.assertEqual(DFFk.fk.rel_model, DFPk) self.assertEqual(DFFk._meta.refs, {DFFk.fk: DFPk}) self.assertEqual(DFFk._meta.backrefs, {}) self.assertEqual(DFPk._meta.refs, {}) self.assertEqual(DFPk._meta.backrefs, {DFFk.fk: DFFk}) self.assertSQL(DFFk._schema._create_table(False), ( 'CREATE TABLE "df_fk" ("fk_id" INTEGER NOT NULL PRIMARY KEY)'), []) peewee-3.17.7/tests/kv.py000066400000000000000000000073541470346076600152450ustar00rootroot00000000000000from peewee import IntegerField from playhouse.kv import KeyValue from .base import DatabaseTestCase from .base import db class TestKeyValue(DatabaseTestCase): def setUp(self): super(TestKeyValue, self).setUp() self._kvs = [] def tearDown(self): if self._kvs: self.database.drop_tables([kv.model for kv in self._kvs]) super(TestKeyValue, self).tearDown() def create_kv(self, **kwargs): kv = KeyValue(database=self.database, **kwargs) self._kvs.append(kv) return kv def test_basic_apis(self): KV = self.create_kv() KV['k1'] = 'v1' KV['k2'] = [0, 1, 2] self.assertEqual(KV['k1'], 'v1') self.assertEqual(KV['k2'], [0, 1, 2]) self.assertRaises(KeyError, lambda: KV['k3']) self.assertTrue((KV.key < 'k2') in KV) self.assertFalse((KV.key > 'k2') in KV) del KV['k1'] KV['k3'] = 'v3' self.assertFalse('k1' in KV) self.assertTrue('k3' in KV) self.assertEqual(sorted(KV.keys()), ['k2', 'k3']) self.assertEqual(len(KV), 2) data = dict(KV) self.assertEqual(data, { 'k2': [0, 1, 2], 'k3': 'v3'}) self.assertEqual(dict(KV), dict(KV.items())) self.assertEqual(KV.pop('k2'), [0, 1, 2]) self.assertRaises(KeyError, lambda: KV['k2']) self.assertRaises(KeyError, KV.pop, 'k2') self.assertEqual(KV.get('k3'), 'v3') self.assertTrue(KV.get('kx') is None) self.assertEqual(KV.get('kx', 'vx'), 'vx') self.assertTrue(KV.get('k4') is None) self.assertEqual(KV.setdefault('k4', 'v4'), 'v4') self.assertEqual(KV.get('k4'), 'v4') self.assertEqual(KV.get('k4', 'v5'), 'v4') KV.clear() self.assertEqual(len(KV), 0) def test_update(self): KV = self.create_kv() with self.assertQueryCount(1): KV.update(k1='v1', k2='v2', 
k3='v3') self.assertEqual(len(KV), 3) with self.assertQueryCount(1): KV.update(k1='v1-x', k3='v3-x', k4='v4') self.assertEqual(len(KV), 4) self.assertEqual(dict(KV), { 'k1': 'v1-x', 'k2': 'v2', 'k3': 'v3-x', 'k4': 'v4'}) KV['k1'] = 'v1-y' self.assertEqual(len(KV), 4) self.assertEqual(dict(KV), { 'k1': 'v1-y', 'k2': 'v2', 'k3': 'v3-x', 'k4': 'v4'}) def test_expressions(self): KV = self.create_kv(value_field=IntegerField(), ordered=True) with self.database.atomic(): for i in range(1, 11): KV['k%d' % i] = i self.assertEqual(KV[KV.key < 'k2'], [1, 10]) self.assertEqual(KV[KV.value > 7], [10, 8, 9]) self.assertEqual(KV[(KV.key > 'k2') & (KV.key < 'k6')], [3, 4, 5]) self.assertEqual(KV[KV.key == 'kx'], []) del KV[KV.key > 'k3'] self.assertEqual(dict(KV), { 'k1': 1, 'k2': 2, 'k3': 3, 'k10': 10}) KV[KV.value > 2] = 99 self.assertEqual(dict(KV), { 'k1': 1, 'k2': 2, 'k3': 99, 'k10': 99}) def test_integer_keys(self): KV = self.create_kv(key_field=IntegerField(primary_key=True), ordered=True) KV[1] = 'v1' KV[2] = 'v2' KV[10] = 'v10' self.assertEqual(list(KV), [(1, 'v1'), (2, 'v2'), (10, 'v10')]) self.assertEqual(list(KV.keys()), [1, 2, 10]) self.assertEqual(list(KV.values()), ['v1', 'v2', 'v10']) del KV[2] KV[1] = 'v1-x' KV[3] = 'v3' self.assertEqual(dict(KV), { 1: 'v1-x', 3: 'v3', 10: 'v10'}) peewee-3.17.7/tests/libs/000077500000000000000000000000001470346076600151735ustar00rootroot00000000000000peewee-3.17.7/tests/libs/__init__.py000066400000000000000000000000001470346076600172720ustar00rootroot00000000000000peewee-3.17.7/tests/libs/mock.py000066400000000000000000002234071470346076600165060ustar00rootroot00000000000000# mock.py # Test tools for mocking and patching. # Copyright (C) 2007-2012 Michael Foord & the mock team # E-mail: fuzzyman AT voidspace DOT org DOT uk # mock 1.0 # http://www.voidspace.org.uk/python/mock/ # Released subject to the BSD License # Please see http://www.voidspace.org.uk/python/license.shtml # Scripts maintained at http://www.voidspace.org.uk/python/index.shtml # Comments, suggestions and bug reports welcome. 
__all__ = ( 'Mock', 'MagicMock', 'patch', 'sentinel', 'DEFAULT', 'ANY', 'call', 'create_autospec', 'FILTER_DIR', 'NonCallableMock', 'NonCallableMagicMock', 'mock_open', 'PropertyMock', ) __version__ = '1.0.1' import pprint import sys try: import inspect except ImportError: # for alternative platforms that # may not have inspect inspect = None try: from functools import wraps as original_wraps except ImportError: # Python 2.4 compatibility def wraps(original): def inner(f): f.__name__ = original.__name__ f.__doc__ = original.__doc__ f.__module__ = original.__module__ f.__wrapped__ = original return f return inner else: if sys.version_info[:2] >= (3, 3): wraps = original_wraps else: def wraps(func): def inner(f): f = original_wraps(func)(f) f.__wrapped__ = func return f return inner try: unicode except NameError: # Python 3 basestring = unicode = str try: long except NameError: # Python 3 long = int try: BaseException except NameError: # Python 2.4 compatibility BaseException = Exception try: next except NameError: def next(obj): return obj.next() BaseExceptions = (BaseException,) if 'java' in sys.platform: # jython import java BaseExceptions = (BaseException, java.lang.Throwable) try: _isidentifier = str.isidentifier except AttributeError: # Python 2.X import keyword import re regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I) def _isidentifier(string): if string in keyword.kwlist: return False return regex.match(string) inPy3k = sys.version_info[0] == 3 # Needed to work around Python 3 bug where use of "super" interferes with # defining __class__ as a descriptor _super = super self = 'im_self' builtin = '__builtin__' if inPy3k: self = '__self__' builtin = 'builtins' FILTER_DIR = True def _is_instance_mock(obj): # can't use isinstance on Mock objects because they override __class__ # The base class for all mocks is NonCallableMock return issubclass(type(obj), NonCallableMock) def _is_exception(obj): return ( isinstance(obj, BaseExceptions) or isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions) ) class _slotted(object): __slots__ = ['a'] DescriptorTypes = ( type(_slotted.a), property, ) def _getsignature(func, skipfirst, instance=False): if inspect is None: raise ImportError('inspect module not available') if isinstance(func, ClassTypes) and not instance: try: func = func.__init__ except AttributeError: return skipfirst = True elif not isinstance(func, FunctionTypes): # for classes where instance is True we end up here too try: func = func.__call__ except AttributeError: return if inPy3k: try: argspec = inspect.getfullargspec(func) except TypeError: # C function / method, possibly inherited object().__init__ return regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann = argspec else: try: regargs, varargs, varkwargs, defaults = inspect.getargspec(func) except TypeError: # C function / method, possibly inherited object().__init__ return # instance methods and classmethods need to lose the self argument if getattr(func, self, None) is not None: regargs = regargs[1:] if skipfirst: # this condition and the above one are never both True - why? 
regargs = regargs[1:] if inPy3k: signature = inspect.formatargspec( regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann, formatvalue=lambda value: "") else: signature = inspect.formatargspec( regargs, varargs, varkwargs, defaults, formatvalue=lambda value: "") return signature[1:-1], func def _check_signature(func, mock, skipfirst, instance=False): if not _callable(func): return result = _getsignature(func, skipfirst, instance) if result is None: return signature, func = result # can't use self because "self" is common as an argument name # unfortunately even not in the first place src = "lambda _mock_self, %s: None" % signature checksig = eval(src, {}) _copy_func_details(func, checksig) type(mock)._mock_check_sig = checksig def _copy_func_details(func, funcopy): funcopy.__name__ = func.__name__ funcopy.__doc__ = func.__doc__ #funcopy.__dict__.update(func.__dict__) funcopy.__module__ = func.__module__ if not inPy3k: funcopy.func_defaults = func.func_defaults return funcopy.__defaults__ = func.__defaults__ funcopy.__kwdefaults__ = func.__kwdefaults__ def _callable(obj): if isinstance(obj, ClassTypes): return True if getattr(obj, '__call__', None) is not None: return True return False def _is_list(obj): # checks for list or tuples # XXXX badly named! return type(obj) in (list, tuple) def _instance_callable(obj): """Given an object, return True if the object is callable. For classes, return True if instances would be callable.""" if not isinstance(obj, ClassTypes): # already an instance return getattr(obj, '__call__', None) is not None klass = obj # uses __bases__ instead of __mro__ so that we work with old style classes if klass.__dict__.get('__call__') is not None: return True for base in klass.__bases__: if _instance_callable(base): return True return False def _set_signature(mock, original, instance=False): # creates a function with signature (*args, **kwargs) that delegates to a # mock. It still does signature checking by calling a lambda with the same # signature as the original. if not _callable(original): return skipfirst = isinstance(original, ClassTypes) result = _getsignature(original, skipfirst, instance) if result is None: # was a C function (e.g. 
object().__init__ ) that can't be mocked return signature, func = result src = "lambda %s: None" % signature checksig = eval(src, {}) _copy_func_details(func, checksig) name = original.__name__ if not _isidentifier(name): name = 'funcopy' context = {'_checksig_': checksig, 'mock': mock} src = """def %s(*args, **kwargs): _checksig_(*args, **kwargs) return mock(*args, **kwargs)""" % name exec (src, context) funcopy = context[name] _setup_func(funcopy, mock) return funcopy def _setup_func(funcopy, mock): funcopy.mock = mock # can't use isinstance with mocks if not _is_instance_mock(mock): return def assert_called_with(*args, **kwargs): return mock.assert_called_with(*args, **kwargs) def assert_called_once_with(*args, **kwargs): return mock.assert_called_once_with(*args, **kwargs) def assert_has_calls(*args, **kwargs): return mock.assert_has_calls(*args, **kwargs) def assert_any_call(*args, **kwargs): return mock.assert_any_call(*args, **kwargs) def reset_mock(): funcopy.method_calls = _CallList() funcopy.mock_calls = _CallList() mock.reset_mock() ret = funcopy.return_value if _is_instance_mock(ret) and not ret is mock: ret.reset_mock() funcopy.called = False funcopy.call_count = 0 funcopy.call_args = None funcopy.call_args_list = _CallList() funcopy.method_calls = _CallList() funcopy.mock_calls = _CallList() funcopy.return_value = mock.return_value funcopy.side_effect = mock.side_effect funcopy._mock_children = mock._mock_children funcopy.assert_called_with = assert_called_with funcopy.assert_called_once_with = assert_called_once_with funcopy.assert_has_calls = assert_has_calls funcopy.assert_any_call = assert_any_call funcopy.reset_mock = reset_mock mock._mock_delegate = funcopy def _is_magic(name): return '__%s__' % name[2:-2] == name class _SentinelObject(object): "A unique, named, sentinel object." 
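# Illustrative note: the module-level `sentinel` object defined below hands
# out one _SentinelObject per attribute name and caches it, so
# `sentinel.missing is sentinel.missing` holds and repr(sentinel.missing)
# gives 'sentinel.missing'.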
def __init__(self, name): self.name = name def __repr__(self): return 'sentinel.%s' % self.name class _Sentinel(object): """Access attributes to return a named object, usable as a sentinel.""" def __init__(self): self._sentinels = {} def __getattr__(self, name): if name == '__bases__': # Without this help(mock) raises an exception raise AttributeError return self._sentinels.setdefault(name, _SentinelObject(name)) sentinel = _Sentinel() DEFAULT = sentinel.DEFAULT _missing = sentinel.MISSING _deleted = sentinel.DELETED class OldStyleClass: pass ClassType = type(OldStyleClass) def _copy(value): if type(value) in (dict, list, tuple, set): return type(value)(value) return value ClassTypes = (type,) if not inPy3k: ClassTypes = (type, ClassType) _allowed_names = set( [ 'return_value', '_mock_return_value', 'side_effect', '_mock_side_effect', '_mock_parent', '_mock_new_parent', '_mock_name', '_mock_new_name' ] ) def _delegating_property(name): _allowed_names.add(name) _the_name = '_mock_' + name def _get(self, name=name, _the_name=_the_name): sig = self._mock_delegate if sig is None: return getattr(self, _the_name) return getattr(sig, name) def _set(self, value, name=name, _the_name=_the_name): sig = self._mock_delegate if sig is None: self.__dict__[_the_name] = value else: setattr(sig, name, value) return property(_get, _set) class _CallList(list): def __contains__(self, value): if not isinstance(value, list): return list.__contains__(self, value) len_value = len(value) len_self = len(self) if len_value > len_self: return False for i in range(0, len_self - len_value + 1): sub_list = self[i:i+len_value] if sub_list == value: return True return False def __repr__(self): return pprint.pformat(list(self)) def _check_and_set_parent(parent, value, name, new_name): if not _is_instance_mock(value): return False if ((value._mock_name or value._mock_new_name) or (value._mock_parent is not None) or (value._mock_new_parent is not None)): return False _parent = parent while _parent is not None: # setting a mock (value) as a child or return value of itself # should not modify the mock if _parent is value: return False _parent = _parent._mock_new_parent if new_name: value._mock_new_parent = parent value._mock_new_name = new_name if name: value._mock_parent = parent value._mock_name = name return True class Base(object): _mock_return_value = DEFAULT _mock_side_effect = None def __init__(self, *args, **kwargs): pass class NonCallableMock(Base): """A non-callable version of `Mock`""" def __new__(cls, *args, **kw): # every instance has its own class # so we can create magic methods on the # class without stomping on other mocks new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__}) instance = object.__new__(new) return instance def __init__( self, spec=None, wraps=None, name=None, spec_set=None, parent=None, _spec_state=None, _new_name='', _new_parent=None, **kwargs ): if _new_parent is None: _new_parent = parent __dict__ = self.__dict__ __dict__['_mock_parent'] = parent __dict__['_mock_name'] = name __dict__['_mock_new_name'] = _new_name __dict__['_mock_new_parent'] = _new_parent if spec_set is not None: spec = spec_set spec_set = True self._mock_add_spec(spec, spec_set) __dict__['_mock_children'] = {} __dict__['_mock_wraps'] = wraps __dict__['_mock_delegate'] = None __dict__['_mock_called'] = False __dict__['_mock_call_args'] = None __dict__['_mock_call_count'] = 0 __dict__['_mock_call_args_list'] = _CallList() __dict__['_mock_mock_calls'] = _CallList() __dict__['method_calls'] = _CallList() if kwargs: 
self.configure_mock(**kwargs) _super(NonCallableMock, self).__init__( spec, wraps, name, spec_set, parent, _spec_state ) def attach_mock(self, mock, attribute): """ Attach a mock as an attribute of this one, replacing its name and parent. Calls to the attached mock will be recorded in the `method_calls` and `mock_calls` attributes of this one.""" mock._mock_parent = None mock._mock_new_parent = None mock._mock_name = '' mock._mock_new_name = None setattr(self, attribute, mock) def mock_add_spec(self, spec, spec_set=False): """Add a spec to a mock. `spec` can either be an object or a list of strings. Only attributes on the `spec` can be fetched as attributes from the mock. If `spec_set` is True then only attributes on the spec can be set.""" self._mock_add_spec(spec, spec_set) def _mock_add_spec(self, spec, spec_set): _spec_class = None if spec is not None and not _is_list(spec): if isinstance(spec, ClassTypes): _spec_class = spec else: _spec_class = _get_class(spec) spec = dir(spec) __dict__ = self.__dict__ __dict__['_spec_class'] = _spec_class __dict__['_spec_set'] = spec_set __dict__['_mock_methods'] = spec def __get_return_value(self): ret = self._mock_return_value if self._mock_delegate is not None: ret = self._mock_delegate.return_value if ret is DEFAULT: ret = self._get_child_mock( _new_parent=self, _new_name='()' ) self.return_value = ret return ret def __set_return_value(self, value): if self._mock_delegate is not None: self._mock_delegate.return_value = value else: self._mock_return_value = value _check_and_set_parent(self, value, None, '()') __return_value_doc = "The value to be returned when the mock is called." return_value = property(__get_return_value, __set_return_value, __return_value_doc) @property def __class__(self): if self._spec_class is None: return type(self) return self._spec_class called = _delegating_property('called') call_count = _delegating_property('call_count') call_args = _delegating_property('call_args') call_args_list = _delegating_property('call_args_list') mock_calls = _delegating_property('mock_calls') def __get_side_effect(self): sig = self._mock_delegate if sig is None: return self._mock_side_effect return sig.side_effect def __set_side_effect(self, value): value = _try_iter(value) sig = self._mock_delegate if sig is None: self._mock_side_effect = value else: sig.side_effect = value side_effect = property(__get_side_effect, __set_side_effect) def reset_mock(self): "Restore the mock object to its initial state." self.called = False self.call_args = None self.call_count = 0 self.mock_calls = _CallList() self.call_args_list = _CallList() self.method_calls = _CallList() for child in self._mock_children.values(): if isinstance(child, _SpecState): continue child.reset_mock() ret = self._mock_return_value if _is_instance_mock(ret) and ret is not self: ret.reset_mock() def configure_mock(self, **kwargs): """Set attributes on the mock through keyword arguments. 
Attributes plus return values and side effects can be set on child mocks using standard dot notation and unpacking a dictionary in the method call: >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError} >>> mock.configure_mock(**attrs)""" for arg, val in sorted(kwargs.items(), # we sort on the number of dots so that # attributes are set before we set attributes on # attributes key=lambda entry: entry[0].count('.')): args = arg.split('.') final = args.pop() obj = self for entry in args: obj = getattr(obj, entry) setattr(obj, final, val) def __getattr__(self, name): if name == '_mock_methods': raise AttributeError(name) elif self._mock_methods is not None: if name not in self._mock_methods or name in _all_magics: raise AttributeError("Mock object has no attribute %r" % name) elif _is_magic(name): raise AttributeError(name) result = self._mock_children.get(name) if result is _deleted: raise AttributeError(name) elif result is None: wraps = None if self._mock_wraps is not None: # XXXX should we get the attribute without triggering code # execution? wraps = getattr(self._mock_wraps, name) result = self._get_child_mock( parent=self, name=name, wraps=wraps, _new_name=name, _new_parent=self ) self._mock_children[name] = result elif isinstance(result, _SpecState): result = create_autospec( result.spec, result.spec_set, result.instance, result.parent, result.name ) self._mock_children[name] = result return result def __repr__(self): _name_list = [self._mock_new_name] _parent = self._mock_new_parent last = self dot = '.' if _name_list == ['()']: dot = '' seen = set() while _parent is not None: last = _parent _name_list.append(_parent._mock_new_name + dot) dot = '.' if _parent._mock_new_name == '()': dot = '' _parent = _parent._mock_new_parent # use ids here so as not to call __hash__ on the mocks if id(_parent) in seen: break seen.add(id(_parent)) _name_list = list(reversed(_name_list)) _first = last._mock_name or 'mock' if len(_name_list) > 1: if _name_list[1] not in ('()', '().'): _first += '.' _name_list[0] = _first name = ''.join(_name_list) name_string = '' if name not in ('mock', 'mock.'): name_string = ' name=%r' % name spec_string = '' if self._spec_class is not None: spec_string = ' spec=%r' if self._spec_set: spec_string = ' spec_set=%r' spec_string = spec_string % self._spec_class.__name__ return "<%s%s%s id='%s'>" % ( type(self).__name__, name_string, spec_string, id(self) ) def __dir__(self): """Filter the output of `dir(mock)` to only useful members. XXXX """ extras = self._mock_methods or [] from_type = dir(type(self)) from_dict = list(self.__dict__) if FILTER_DIR: from_type = [e for e in from_type if not e.startswith('_')] from_dict = [e for e in from_dict if not e.startswith('_') or _is_magic(e)] return sorted(set(extras + from_type + from_dict + list(self._mock_children))) def __setattr__(self, name, value): if name in _allowed_names: # property setters go through here return object.__setattr__(self, name, value) elif (self._spec_set and self._mock_methods is not None and name not in self._mock_methods and name not in self.__dict__): raise AttributeError("Mock object has no attribute '%s'" % name) elif name in _unsupported_magics: msg = 'Attempting to set unsupported magic method %r.' 
% name raise AttributeError(msg) elif name in _all_magics: if self._mock_methods is not None and name not in self._mock_methods: raise AttributeError("Mock object has no attribute '%s'" % name) if not _is_instance_mock(value): setattr(type(self), name, _get_method(name, value)) original = value value = lambda *args, **kw: original(self, *args, **kw) else: # only set _new_name and not name so that mock_calls is tracked # but not method calls _check_and_set_parent(self, value, None, name) setattr(type(self), name, value) self._mock_children[name] = value elif name == '__class__': self._spec_class = value return else: if _check_and_set_parent(self, value, name, name): self._mock_children[name] = value return object.__setattr__(self, name, value) def __delattr__(self, name): if name in _all_magics and name in type(self).__dict__: delattr(type(self), name) if name not in self.__dict__: # for magic methods that are still MagicProxy objects and # not set on the instance itself return if name in self.__dict__: object.__delattr__(self, name) obj = self._mock_children.get(name, _missing) if obj is _deleted: raise AttributeError(name) if obj is not _missing: del self._mock_children[name] self._mock_children[name] = _deleted def _format_mock_call_signature(self, args, kwargs): name = self._mock_name or 'mock' return _format_call_signature(name, args, kwargs) def _format_mock_failure_message(self, args, kwargs): message = 'Expected call: %s\nActual call: %s' expected_string = self._format_mock_call_signature(args, kwargs) call_args = self.call_args if len(call_args) == 3: call_args = call_args[1:] actual_string = self._format_mock_call_signature(*call_args) return message % (expected_string, actual_string) def assert_called_with(_mock_self, *args, **kwargs): """assert that the mock was called with the specified arguments. Raises an AssertionError if the args and keyword args passed in are different to the last call to the mock.""" self = _mock_self if self.call_args is None: expected = self._format_mock_call_signature(args, kwargs) raise AssertionError('Expected call: %s\nNot called' % (expected,)) if self.call_args != (args, kwargs): msg = self._format_mock_failure_message(args, kwargs) raise AssertionError(msg) def assert_called_once_with(_mock_self, *args, **kwargs): """assert that the mock was called exactly once and with the specified arguments.""" self = _mock_self if not self.call_count == 1: msg = ("Expected to be called once. Called %s times." % self.call_count) raise AssertionError(msg) return self.assert_called_with(*args, **kwargs) def assert_has_calls(self, calls, any_order=False): """assert the mock has been called with the specified calls. The `mock_calls` list is checked for the calls. If `any_order` is False (the default) then the calls must be sequential. There can be extra calls before or after the specified calls. If `any_order` is True then the calls can be in any order, but they must all appear in `mock_calls`.""" if not any_order: if calls not in self.mock_calls: raise AssertionError( 'Calls not found.\nExpected: %r\n' 'Actual: %r' % (calls, self.mock_calls) ) return all_calls = list(self.mock_calls) not_found = [] for kall in calls: try: all_calls.remove(kall) except ValueError: not_found.append(kall) if not_found: raise AssertionError( '%r not all found in call list' % (tuple(not_found),) ) def assert_any_call(self, *args, **kwargs): """assert the mock has been called with the specified arguments. 
The assert passes if the mock has *ever* been called, unlike `assert_called_with` and `assert_called_once_with` that only pass if the call is the most recent one.""" kall = call(*args, **kwargs) if kall not in self.call_args_list: expected_string = self._format_mock_call_signature(args, kwargs) raise AssertionError( '%s call not found' % expected_string ) def _get_child_mock(self, **kw): """Create the child mocks for attributes and return value. By default child mocks will be the same type as the parent. Subclasses of Mock may want to override this to customize the way child mocks are made. For non-callable mocks the callable variant will be used (rather than any custom subclass).""" _type = type(self) if not issubclass(_type, CallableMixin): if issubclass(_type, NonCallableMagicMock): klass = MagicMock elif issubclass(_type, NonCallableMock) : klass = Mock else: klass = _type.__mro__[1] return klass(**kw) def _try_iter(obj): if obj is None: return obj if _is_exception(obj): return obj if _callable(obj): return obj try: return iter(obj) except TypeError: # XXXX backwards compatibility # but this will blow up on first call - so maybe we should fail early? return obj class CallableMixin(Base): def __init__(self, spec=None, side_effect=None, return_value=DEFAULT, wraps=None, name=None, spec_set=None, parent=None, _spec_state=None, _new_name='', _new_parent=None, **kwargs): self.__dict__['_mock_return_value'] = return_value _super(CallableMixin, self).__init__( spec, wraps, name, spec_set, parent, _spec_state, _new_name, _new_parent, **kwargs ) self.side_effect = side_effect def _mock_check_sig(self, *args, **kwargs): # stub method that can be replaced with one with a specific signature pass def __call__(_mock_self, *args, **kwargs): # can't use self in-case a function / method we are mocking uses self # in the signature _mock_self._mock_check_sig(*args, **kwargs) return _mock_self._mock_call(*args, **kwargs) def _mock_call(_mock_self, *args, **kwargs): self = _mock_self self.called = True self.call_count += 1 self.call_args = _Call((args, kwargs), two=True) self.call_args_list.append(_Call((args, kwargs), two=True)) _new_name = self._mock_new_name _new_parent = self._mock_new_parent self.mock_calls.append(_Call(('', args, kwargs))) seen = set() skip_next_dot = _new_name == '()' do_method_calls = self._mock_parent is not None name = self._mock_name while _new_parent is not None: this_mock_call = _Call((_new_name, args, kwargs)) if _new_parent._mock_new_name: dot = '.' if skip_next_dot: dot = '' skip_next_dot = False if _new_parent._mock_new_name == '()': skip_next_dot = True _new_name = _new_parent._mock_new_name + dot + _new_name if do_method_calls: if _new_name == name: this_method_call = this_mock_call else: this_method_call = _Call((name, args, kwargs)) _new_parent.method_calls.append(this_method_call) do_method_calls = _new_parent._mock_parent is not None if do_method_calls: name = _new_parent._mock_name + '.' 
+ name _new_parent.mock_calls.append(this_mock_call) _new_parent = _new_parent._mock_new_parent # use ids here so as not to call __hash__ on the mocks _new_parent_id = id(_new_parent) if _new_parent_id in seen: break seen.add(_new_parent_id) ret_val = DEFAULT effect = self.side_effect if effect is not None: if _is_exception(effect): raise effect if not _callable(effect): result = next(effect) if _is_exception(result): raise result return result ret_val = effect(*args, **kwargs) if ret_val is DEFAULT: ret_val = self.return_value if (self._mock_wraps is not None and self._mock_return_value is DEFAULT): return self._mock_wraps(*args, **kwargs) if ret_val is DEFAULT: ret_val = self.return_value return ret_val class Mock(CallableMixin, NonCallableMock): """ Create a new `Mock` object. `Mock` takes several optional arguments that specify the behaviour of the Mock object: * `spec`: This can be either a list of strings or an existing object (a class or instance) that acts as the specification for the mock object. If you pass in an object then a list of strings is formed by calling dir on the object (excluding unsupported magic attributes and methods). Accessing any attribute not in this list will raise an `AttributeError`. If `spec` is an object (rather than a list of strings) then `mock.__class__` returns the class of the spec object. This allows mocks to pass `isinstance` tests. * `spec_set`: A stricter variant of `spec`. If used, attempting to *set* or get an attribute on the mock that isn't on the object passed as `spec_set` will raise an `AttributeError`. * `side_effect`: A function to be called whenever the Mock is called. See the `side_effect` attribute. Useful for raising exceptions or dynamically changing return values. The function is called with the same arguments as the mock, and unless it returns `DEFAULT`, the return value of this function is used as the return value. Alternatively `side_effect` can be an exception class or instance. In this case the exception will be raised when the mock is called. If `side_effect` is an iterable then each call to the mock will return the next value from the iterable. If any of the members of the iterable are exceptions they will be raised instead of returned. * `return_value`: The value returned when the mock is called. By default this is a new Mock (created on first access). See the `return_value` attribute. * `wraps`: Item for the mock object to wrap. If `wraps` is not None then calling the Mock will pass the call through to the wrapped object (returning the real result). Attribute access on the mock will return a Mock object that wraps the corresponding attribute of the wrapped object (so attempting to access an attribute that doesn't exist will raise an `AttributeError`). If the mock has an explicit `return_value` set then calls are not passed to the wrapped object and the `return_value` is returned instead. * `name`: If the mock has a name then it will be used in the repr of the mock. This can be useful for debugging. The name is propagated to child mocks. Mocks can also be called with arbitrary keyword arguments. These will be used to set attributes on the mock after it is created. 
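A minimal usage sketch (the name `m` and the values used here are
purely illustrative):

>>> m = Mock(return_value=3)
>>> m(1, 2)
3
>>> m.assert_called_with(1, 2)
>>> m.side_effect = KeyError('boom')
>>> m()
Traceback (most recent call last):
    ...
KeyError: 'boom'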
""" def _dot_lookup(thing, comp, import_path): try: return getattr(thing, comp) except AttributeError: __import__(import_path) return getattr(thing, comp) def _importer(target): components = target.split('.') import_path = components.pop(0) thing = __import__(import_path) for comp in components: import_path += ".%s" % comp thing = _dot_lookup(thing, comp, import_path) return thing def _is_started(patcher): # XXXX horrible return hasattr(patcher, 'is_local') class _patch(object): attribute_name = None _active_patches = set() def __init__( self, getter, attribute, new, spec, create, spec_set, autospec, new_callable, kwargs ): if new_callable is not None: if new is not DEFAULT: raise ValueError( "Cannot use 'new' and 'new_callable' together" ) if autospec is not None: raise ValueError( "Cannot use 'autospec' and 'new_callable' together" ) self.getter = getter self.attribute = attribute self.new = new self.new_callable = new_callable self.spec = spec self.create = create self.has_local = False self.spec_set = spec_set self.autospec = autospec self.kwargs = kwargs self.additional_patchers = [] def copy(self): patcher = _patch( self.getter, self.attribute, self.new, self.spec, self.create, self.spec_set, self.autospec, self.new_callable, self.kwargs ) patcher.attribute_name = self.attribute_name patcher.additional_patchers = [ p.copy() for p in self.additional_patchers ] return patcher def __call__(self, func): if isinstance(func, ClassTypes): return self.decorate_class(func) return self.decorate_callable(func) def decorate_class(self, klass): for attr in dir(klass): if not attr.startswith(patch.TEST_PREFIX): continue attr_value = getattr(klass, attr) if not hasattr(attr_value, "__call__"): continue patcher = self.copy() setattr(klass, attr, patcher(attr_value)) return klass def decorate_callable(self, func): if hasattr(func, 'patchings'): func.patchings.append(self) return func @wraps(func) def patched(*args, **keywargs): # don't use a with here (backwards compatibility with Python 2.4) extra_args = [] entered_patchers = [] # can't use try...except...finally because of Python 2.4 # compatibility exc_info = tuple() try: try: for patching in patched.patchings: arg = patching.__enter__() entered_patchers.append(patching) if patching.attribute_name is not None: keywargs.update(arg) elif patching.new is DEFAULT: extra_args.append(arg) args += tuple(extra_args) return func(*args, **keywargs) except: if (patching not in entered_patchers and _is_started(patching)): # the patcher may have been started, but an exception # raised whilst entering one of its additional_patchers entered_patchers.append(patching) # Pass the exception to __exit__ exc_info = sys.exc_info() # re-raise the exception raise finally: for patching in reversed(entered_patchers): patching.__exit__(*exc_info) patched.patchings = [self] if hasattr(func, 'func_code'): # not in Python 3 patched.compat_co_firstlineno = getattr( func, "compat_co_firstlineno", func.func_code.co_firstlineno ) return patched def get_original(self): target = self.getter() name = self.attribute original = DEFAULT local = False try: original = target.__dict__[name] except (AttributeError, KeyError): original = getattr(target, name, DEFAULT) else: local = True if not self.create and original is DEFAULT: raise AttributeError( "%s does not have the attribute %r" % (target, name) ) return original, local def __enter__(self): """Perform the patch.""" new, spec, spec_set = self.new, self.spec, self.spec_set autospec, kwargs = self.autospec, self.kwargs new_callable = 
self.new_callable self.target = self.getter() # normalise False to None if spec is False: spec = None if spec_set is False: spec_set = None if autospec is False: autospec = None if spec is not None and autospec is not None: raise TypeError("Can't specify spec and autospec") if ((spec is not None or autospec is not None) and spec_set not in (True, None)): raise TypeError("Can't provide explicit spec_set *and* spec or autospec") original, local = self.get_original() if new is DEFAULT and autospec is None: inherit = False if spec is True: # set spec to the object we are replacing spec = original if spec_set is True: spec_set = original spec = None elif spec is not None: if spec_set is True: spec_set = spec spec = None elif spec_set is True: spec_set = original if spec is not None or spec_set is not None: if original is DEFAULT: raise TypeError("Can't use 'spec' with create=True") if isinstance(original, ClassTypes): # If we're patching out a class and there is a spec inherit = True Klass = MagicMock _kwargs = {} if new_callable is not None: Klass = new_callable elif spec is not None or spec_set is not None: this_spec = spec if spec_set is not None: this_spec = spec_set if _is_list(this_spec): not_callable = '__call__' not in this_spec else: not_callable = not _callable(this_spec) if not_callable: Klass = NonCallableMagicMock if spec is not None: _kwargs['spec'] = spec if spec_set is not None: _kwargs['spec_set'] = spec_set # add a name to mocks if (isinstance(Klass, type) and issubclass(Klass, NonCallableMock) and self.attribute): _kwargs['name'] = self.attribute _kwargs.update(kwargs) new = Klass(**_kwargs) if inherit and _is_instance_mock(new): # we can only tell if the instance should be callable if the # spec is not a list this_spec = spec if spec_set is not None: this_spec = spec_set if (not _is_list(this_spec) and not _instance_callable(this_spec)): Klass = NonCallableMagicMock _kwargs.pop('name') new.return_value = Klass(_new_parent=new, _new_name='()', **_kwargs) elif autospec is not None: # spec is ignored, new *must* be default, spec_set is treated # as a boolean. Should we check spec is not None and that spec_set # is a bool? if new is not DEFAULT: raise TypeError( "autospec creates the mock for you. Can't specify " "autospec and new." 
) if original is DEFAULT: raise TypeError("Can't use 'autospec' with create=True") spec_set = bool(spec_set) if autospec is True: autospec = original new = create_autospec(autospec, spec_set=spec_set, _name=self.attribute, **kwargs) elif kwargs: # can't set keyword args when we aren't creating the mock # XXXX If new is a Mock we could call new.configure_mock(**kwargs) raise TypeError("Can't pass kwargs to a mock we aren't creating") new_attr = new self.temp_original = original self.is_local = local setattr(self.target, self.attribute, new_attr) if self.attribute_name is not None: extra_args = {} if self.new is DEFAULT: extra_args[self.attribute_name] = new for patching in self.additional_patchers: arg = patching.__enter__() if patching.new is DEFAULT: extra_args.update(arg) return extra_args return new def __exit__(self, *exc_info): """Undo the patch.""" if not _is_started(self): raise RuntimeError('stop called on unstarted patcher') if self.is_local and self.temp_original is not DEFAULT: setattr(self.target, self.attribute, self.temp_original) else: delattr(self.target, self.attribute) if not self.create and not hasattr(self.target, self.attribute): # needed for proxy objects like django settings setattr(self.target, self.attribute, self.temp_original) del self.temp_original del self.is_local del self.target for patcher in reversed(self.additional_patchers): if _is_started(patcher): patcher.__exit__(*exc_info) def start(self): """Activate a patch, returning any created mock.""" result = self.__enter__() self._active_patches.add(self) return result def stop(self): """Stop an active patch.""" self._active_patches.discard(self) return self.__exit__() def _get_target(target): try: target, attribute = target.rsplit('.', 1) except (TypeError, ValueError): raise TypeError("Need a valid target to patch. You supplied: %r" % (target,)) getter = lambda: _importer(target) return getter, attribute def _patch_object( target, attribute, new=DEFAULT, spec=None, create=False, spec_set=None, autospec=None, new_callable=None, **kwargs ): """ patch.object(target, attribute, new=DEFAULT, spec=None, create=False, spec_set=None, autospec=None, new_callable=None, **kwargs) patch the named member (`attribute`) on an object (`target`) with a mock object. `patch.object` can be used as a decorator, class decorator or a context manager. Arguments `new`, `spec`, `create`, `spec_set`, `autospec` and `new_callable` have the same meaning as for `patch`. Like `patch`, `patch.object` takes arbitrary keyword arguments for configuring the mock object it creates. When used as a class decorator `patch.object` honours `patch.TEST_PREFIX` for choosing which methods to wrap. """ getter = lambda: target return _patch( getter, attribute, new, spec, create, spec_set, autospec, new_callable, kwargs ) def _patch_multiple(target, spec=None, create=False, spec_set=None, autospec=None, new_callable=None, **kwargs): """Perform multiple patches in a single call. It takes the object to be patched (either as an object or a string to fetch the object by importing) and keyword arguments for the patches:: with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'): ... Use `DEFAULT` as the value if you want `patch.multiple` to create mocks for you. In this case the created mocks are passed into a decorated function by keyword, and a dictionary is returned when `patch.multiple` is used as a context manager. `patch.multiple` can be used as a decorator, class decorator or a context manager. 
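For example, used as a context manager on a module object (an
illustrative sketch; `sys` is just a convenient target to patch)::

    with patch.multiple(sys, platform='test', maxsize=10):
        assert sys.platform == 'test'
        assert sys.maxsize == 10

Both attributes are restored when the block exits.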
The arguments `spec`, `spec_set`, `create`, `autospec` and `new_callable` have the same meaning as for `patch`. These arguments will be applied to *all* patches done by `patch.multiple`. When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX` for choosing which methods to wrap. """ if type(target) in (unicode, str): getter = lambda: _importer(target) else: getter = lambda: target if not kwargs: raise ValueError( 'Must supply at least one keyword argument with patch.multiple' ) # need to wrap in a list for python 3, where items is a view items = list(kwargs.items()) attribute, new = items[0] patcher = _patch( getter, attribute, new, spec, create, spec_set, autospec, new_callable, {} ) patcher.attribute_name = attribute for attribute, new in items[1:]: this_patcher = _patch( getter, attribute, new, spec, create, spec_set, autospec, new_callable, {} ) this_patcher.attribute_name = attribute patcher.additional_patchers.append(this_patcher) return patcher def patch( target, new=DEFAULT, spec=None, create=False, spec_set=None, autospec=None, new_callable=None, **kwargs ): """ `patch` acts as a function decorator, class decorator or a context manager. Inside the body of the function or with statement, the `target` is patched with a `new` object. When the function/with statement exits the patch is undone. If `new` is omitted, then the target is replaced with a `MagicMock`. If `patch` is used as a decorator and `new` is omitted, the created mock is passed in as an extra argument to the decorated function. If `patch` is used as a context manager the created mock is returned by the context manager. `target` should be a string in the form `'package.module.ClassName'`. The `target` is imported and the specified object replaced with the `new` object, so the `target` must be importable from the environment you are calling `patch` from. The target is imported when the decorated function is executed, not at decoration time. The `spec` and `spec_set` keyword arguments are passed to the `MagicMock` if patch is creating one for you. In addition you can pass `spec=True` or `spec_set=True`, which causes patch to pass in the object being mocked as the spec/spec_set object. `new_callable` allows you to specify a different class, or callable object, that will be called to create the `new` object. By default `MagicMock` is used. A more powerful form of `spec` is `autospec`. If you set `autospec=True` then the mock will be created with a spec from the object being replaced. All attributes of the mock will also have the spec of the corresponding attribute of the object being replaced. Methods and functions being mocked will have their arguments checked and will raise a `TypeError` if they are called with the wrong signature. For mocks replacing a class, their return value (the 'instance') will have the same spec as the class. Instead of `autospec=True` you can pass `autospec=some_object` to use an arbitrary object as the spec instead of the one being replaced. By default `patch` will fail to replace attributes that don't exist. If you pass in `create=True`, and the attribute doesn't exist, patch will create the attribute for you when the patched function is called, and delete it again afterwards. This is useful for writing tests against attributes that your production code creates at runtime. It is off by default because it can be dangerous. With it switched on you can write passing tests against APIs that don't actually exist! Patch can be used as a `TestCase` class decorator.
It works by decorating each test method in the class. This reduces the boilerplate code when your test methods share a common patchings set. `patch` finds tests by looking for method names that start with `patch.TEST_PREFIX`. By default this is `test`, which matches the way `unittest` finds tests. You can specify an alternative prefix by setting `patch.TEST_PREFIX`. Patch can be used as a context manager, with the with statement. Here the patching applies to the indented block after the with statement. If you use "as" then the patched object will be bound to the name after the "as"; very useful if `patch` is creating a mock object for you. `patch` takes arbitrary keyword arguments. These will be passed to the `Mock` (or `new_callable`) on construction. `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are available for alternate use-cases. """ getter, attribute = _get_target(target) return _patch( getter, attribute, new, spec, create, spec_set, autospec, new_callable, kwargs ) class _patch_dict(object): """ Patch a dictionary, or dictionary like object, and restore the dictionary to its original state after the test. `in_dict` can be a dictionary or a mapping like container. If it is a mapping then it must at least support getting, setting and deleting items plus iterating over keys. `in_dict` can also be a string specifying the name of the dictionary, which will then be fetched by importing it. `values` can be a dictionary of values to set in the dictionary. `values` can also be an iterable of `(key, value)` pairs. If `clear` is True then the dictionary will be cleared before the new values are set. `patch.dict` can also be called with arbitrary keyword arguments to set values in the dictionary:: with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()): ... `patch.dict` can be used as a context manager, decorator or class decorator. When used as a class decorator `patch.dict` honours `patch.TEST_PREFIX` for choosing which methods to wrap. """ def __init__(self, in_dict, values=(), clear=False, **kwargs): if isinstance(in_dict, basestring): in_dict = _importer(in_dict) self.in_dict = in_dict # support any argument supported by dict(...) 
constructor self.values = dict(values) self.values.update(kwargs) self.clear = clear self._original = None def __call__(self, f): if isinstance(f, ClassTypes): return self.decorate_class(f) @wraps(f) def _inner(*args, **kw): self._patch_dict() try: return f(*args, **kw) finally: self._unpatch_dict() return _inner def decorate_class(self, klass): for attr in dir(klass): attr_value = getattr(klass, attr) if (attr.startswith(patch.TEST_PREFIX) and hasattr(attr_value, "__call__")): decorator = _patch_dict(self.in_dict, self.values, self.clear) decorated = decorator(attr_value) setattr(klass, attr, decorated) return klass def __enter__(self): """Patch the dict.""" self._patch_dict() def _patch_dict(self): values = self.values in_dict = self.in_dict clear = self.clear try: original = in_dict.copy() except AttributeError: # dict like object with no copy method # must support iteration over keys original = {} for key in in_dict: original[key] = in_dict[key] self._original = original if clear: _clear_dict(in_dict) try: in_dict.update(values) except AttributeError: # dict like object with no update method for key in values: in_dict[key] = values[key] def _unpatch_dict(self): in_dict = self.in_dict original = self._original _clear_dict(in_dict) try: in_dict.update(original) except AttributeError: for key in original: in_dict[key] = original[key] def __exit__(self, *args): """Unpatch the dict.""" self._unpatch_dict() return False start = __enter__ stop = __exit__ def _clear_dict(in_dict): try: in_dict.clear() except AttributeError: keys = list(in_dict) for key in keys: del in_dict[key] def _patch_stopall(): """Stop all active patches.""" for patch in list(_patch._active_patches): patch.stop() patch.object = _patch_object patch.dict = _patch_dict patch.multiple = _patch_multiple patch.stopall = _patch_stopall patch.TEST_PREFIX = 'test' magic_methods = ( "lt le gt ge eq ne " "getitem setitem delitem " "len contains iter " "hash str sizeof " "enter exit " "divmod neg pos abs invert " "complex int float index " "trunc floor ceil " ) numerics = "add sub mul div floordiv mod lshift rshift and xor or pow " inplace = ' '.join('i%s' % n for n in numerics.split()) right = ' '.join('r%s' % n for n in numerics.split()) extra = '' if inPy3k: extra = 'bool next ' else: extra = 'unicode long nonzero oct hex truediv rtruediv ' # not including __prepare__, __instancecheck__, __subclasscheck__ # (as they are metaclass methods) # __del__ is not supported at all as it causes problems if it exists _non_defaults = set('__%s__' % method for method in [ 'cmp', 'getslice', 'setslice', 'coerce', 'subclasses', 'format', 'get', 'set', 'delete', 'reversed', 'missing', 'reduce', 'reduce_ex', 'getinitargs', 'getnewargs', 'getstate', 'setstate', 'getformat', 'setformat', 'repr', 'dir' ]) def _get_method(name, func): "Turns a callable object (like a mock) into a real function" def method(self, *args, **kw): return func(self, *args, **kw) method.__name__ = name return method _magics = set( '__%s__' % method for method in ' '.join([magic_methods, numerics, inplace, right, extra]).split() ) _all_magics = _magics | _non_defaults _unsupported_magics = set([ '__getattr__', '__setattr__', '__init__', '__new__', '__prepare__', '__instancecheck__', '__subclasscheck__', '__del__' ]) _calculate_return_value = { '__hash__': lambda self: object.__hash__(self), '__str__': lambda self: object.__str__(self), '__sizeof__': lambda self: object.__sizeof__(self), '__unicode__': lambda self: unicode(object.__str__(self)), } _return_values = { '__lt__':
NotImplemented, '__gt__': NotImplemented, '__le__': NotImplemented, '__ge__': NotImplemented, '__int__': 1, '__contains__': False, '__len__': 0, '__exit__': False, '__complex__': 1j, '__float__': 1.0, '__bool__': True, '__nonzero__': True, '__oct__': '1', '__hex__': '0x1', '__long__': long(1), '__index__': 1, } def _get_eq(self): def __eq__(other): ret_val = self.__eq__._mock_return_value if ret_val is not DEFAULT: return ret_val return self is other return __eq__ def _get_ne(self): def __ne__(other): if self.__ne__._mock_return_value is not DEFAULT: # returning DEFAULT makes _mock_call fall through to the # configured return_value return DEFAULT return self is not other return __ne__ def _get_iter(self): def __iter__(): ret_val = self.__iter__._mock_return_value if ret_val is DEFAULT: return iter([]) # if ret_val was already an iterator, then calling iter on it should # return the iterator unchanged return iter(ret_val) return __iter__ _side_effect_methods = { '__eq__': _get_eq, '__ne__': _get_ne, '__iter__': _get_iter, } def _set_return_value(mock, method, name): fixed = _return_values.get(name, DEFAULT) if fixed is not DEFAULT: method.return_value = fixed return return_calculator = _calculate_return_value.get(name) if return_calculator is not None: try: return_value = return_calculator(mock) except AttributeError: # XXXX why do we return AttributeError here? # set it as a side_effect instead? return_value = AttributeError(name) method.return_value = return_value return side_effector = _side_effect_methods.get(name) if side_effector is not None: method.side_effect = side_effector(mock) class MagicMixin(object): def __init__(self, *args, **kw): _super(MagicMixin, self).__init__(*args, **kw) self._mock_set_magics() def _mock_set_magics(self): these_magics = _magics if self._mock_methods is not None: these_magics = _magics.intersection(self._mock_methods) remove_magics = set() remove_magics = _magics - these_magics for entry in remove_magics: if entry in type(self).__dict__: # remove unneeded magic methods delattr(self, entry) # don't overwrite existing attributes if called a second time these_magics = these_magics - set(type(self).__dict__) _type = type(self) for entry in these_magics: setattr(_type, entry, MagicProxy(entry, self)) class NonCallableMagicMock(MagicMixin, NonCallableMock): """A version of `MagicMock` that isn't callable.""" def mock_add_spec(self, spec, spec_set=False): """Add a spec to a mock. `spec` can either be an object or a list of strings. Only attributes on the `spec` can be fetched as attributes from the mock. If `spec_set` is True then only attributes on the spec can be set.""" self._mock_add_spec(spec, spec_set) self._mock_set_magics() class MagicMock(MagicMixin, Mock): """ MagicMock is a subclass of Mock with default implementations of most of the magic methods. You can use MagicMock without having to configure the magic methods yourself. If you use the `spec` or `spec_set` arguments then *only* magic methods that exist in the spec will be created. Attributes and the return value of a `MagicMock` will also be `MagicMocks`. """ def mock_add_spec(self, spec, spec_set=False): """Add a spec to a mock. `spec` can either be an object or a list of strings. Only attributes on the `spec` can be fetched as attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set.""" self._mock_add_spec(spec, spec_set) self._mock_set_magics() class MagicProxy(object): def __init__(self, name, parent): self.name = name self.parent = parent def __call__(self, *args, **kwargs): m = self.create_mock() return m(*args, **kwargs) def create_mock(self): entry = self.name parent = self.parent m = parent._get_child_mock(name=entry, _new_name=entry, _new_parent=parent) setattr(parent, entry, m) _set_return_value(parent, m, entry) return m def __get__(self, obj, _type=None): return self.create_mock() class _ANY(object): "A helper object that compares equal to everything." def __eq__(self, other): return True def __ne__(self, other): return False def __repr__(self): return '<ANY>' ANY = _ANY() def _format_call_signature(name, args, kwargs): message = '%s(%%s)' % name formatted_args = '' args_string = ', '.join([repr(arg) for arg in args]) kwargs_string = ', '.join([ '%s=%r' % (key, value) for key, value in kwargs.items() ]) if args_string: formatted_args = args_string if kwargs_string: if formatted_args: formatted_args += ', ' formatted_args += kwargs_string return message % formatted_args class _Call(tuple): """ A tuple for holding the results of a call to a mock, either in the form `(args, kwargs)` or `(name, args, kwargs)`. If args or kwargs are empty then a call tuple will compare equal to a tuple without those values. This makes comparisons less verbose:: _Call(('name', (), {})) == ('name',) _Call(('name', (1,), {})) == ('name', (1,)) _Call(((), {'a': 'b'})) == ({'a': 'b'},) The `_Call` object provides a useful shortcut for comparing with call:: _Call(((1, 2), {'a': 3})) == call(1, 2, a=3) _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3) If the _Call has no name then it will match any name. """ def __new__(cls, value=(), name=None, parent=None, two=False, from_kall=True): name = '' args = () kwargs = {} _len = len(value) if _len == 3: name, args, kwargs = value elif _len == 2: first, second = value if isinstance(first, basestring): name = first if isinstance(second, tuple): args = second else: kwargs = second else: args, kwargs = first, second elif _len == 1: value, = value if isinstance(value, basestring): name = value elif isinstance(value, tuple): args = value else: kwargs = value if two: return tuple.__new__(cls, (args, kwargs)) return tuple.__new__(cls, (name, args, kwargs)) def __init__(self, value=(), name=None, parent=None, two=False, from_kall=True): self.name = name self.parent = parent self.from_kall = from_kall def __eq__(self, other): if other is ANY: return True try: len_other = len(other) except TypeError: return False self_name = '' if len(self) == 2: self_args, self_kwargs = self else: self_name, self_args, self_kwargs = self other_name = '' if len_other == 0: other_args, other_kwargs = (), {} elif len_other == 3: other_name, other_args, other_kwargs = other elif len_other == 1: value, = other if isinstance(value, tuple): other_args = value other_kwargs = {} elif isinstance(value, basestring): other_name = value other_args, other_kwargs = (), {} else: other_args = () other_kwargs = value else: # len 2 # could be (name, args) or (name, kwargs) or (args, kwargs) first, second = other if isinstance(first, basestring): other_name = first if isinstance(second, tuple): other_args, other_kwargs = second, {} else: other_args, other_kwargs = (), second else: other_args, other_kwargs = first, second if self_name and other_name != self_name: return False # this order is important for ANY to work!
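# (the expected call's arguments end up on the left of the elementwise
# comparison, so an ANY placed in the expected args has its __eq__
# consulted first and matches regardless of the other value's __eq__)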
return (other_args, other_kwargs) == (self_args, self_kwargs) def __ne__(self, other): return not self.__eq__(other) def __call__(self, *args, **kwargs): if self.name is None: return _Call(('', args, kwargs), name='()') name = self.name + '()' return _Call((self.name, args, kwargs), name=name, parent=self) def __getattr__(self, attr): if self.name is None: return _Call(name=attr, from_kall=False) name = '%s.%s' % (self.name, attr) return _Call(name=name, parent=self, from_kall=False) def __repr__(self): if not self.from_kall: name = self.name or 'call' if name.startswith('()'): name = 'call%s' % name return name if len(self) == 2: name = 'call' args, kwargs = self else: name, args, kwargs = self if not name: name = 'call' elif not name.startswith('()'): name = 'call.%s' % name else: name = 'call%s' % name return _format_call_signature(name, args, kwargs) def call_list(self): """For a call object that represents multiple calls, `call_list` returns a list of all the intermediate calls as well as the final call.""" vals = [] thing = self while thing is not None: if thing.from_kall: vals.append(thing) thing = thing.parent return _CallList(reversed(vals)) call = _Call(from_kall=False) def create_autospec(spec, spec_set=False, instance=False, _parent=None, _name=None, **kwargs): """Create a mock object using another object as a spec. Attributes on the mock will use the corresponding attribute on the `spec` object as their spec. Functions or methods being mocked will have their arguments checked to check that they are called with the correct signature. If `spec_set` is True then attempting to set attributes that don't exist on the spec object will raise an `AttributeError`. If a class is used as a spec then the return value of the mock (the instance of the class) will have the same spec. You can use a class as the spec for an instance object by passing `instance=True`. The returned mock will only be callable if instances of the mock are callable. 
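A short sketch of the signature checking (the function `add` here is
illustrative only):

>>> def add(a, b): return a + b
>>> mock_add = create_autospec(add, return_value=3)
>>> mock_add(1, 2)
3
>>> mock_add.assert_called_once_with(1, 2)

Calling `mock_add(1)` raises a `TypeError` because the signature of
`add` is enforced.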
`create_autospec` also takes arbitrary keyword arguments that are passed to the constructor of the created mock.""" if _is_list(spec): # can't pass a list instance to the mock constructor as it will be # interpreted as a list of strings spec = type(spec) is_type = isinstance(spec, ClassTypes) _kwargs = {'spec': spec} if spec_set: _kwargs = {'spec_set': spec} elif spec is None: # None we mock with a normal mock without a spec _kwargs = {} _kwargs.update(kwargs) Klass = MagicMock if type(spec) in DescriptorTypes: # descriptors don't have a spec # because we don't know what type they return _kwargs = {} elif not _callable(spec): Klass = NonCallableMagicMock elif is_type and instance and not _instance_callable(spec): Klass = NonCallableMagicMock _new_name = _name if _parent is None: # for a top level object no _new_name should be set _new_name = '' mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name, name=_name, **_kwargs) if isinstance(spec, FunctionTypes): # should only happen at the top level because we don't # recurse for functions mock = _set_signature(mock, spec) else: _check_signature(spec, mock, is_type, instance) if _parent is not None and not instance: _parent._mock_children[_name] = mock if is_type and not instance and 'return_value' not in kwargs: mock.return_value = create_autospec(spec, spec_set, instance=True, _name='()', _parent=mock) for entry in dir(spec): if _is_magic(entry): # MagicMock already does the useful magic methods for us continue if isinstance(spec, FunctionTypes) and entry in FunctionAttributes: # allow a mock to actually be a function continue # XXXX do we need a better way of getting attributes without # triggering code execution (?) Probably not - we need the actual # object to mock it so we would rather trigger a property than mock # the property descriptor. Likewise we want to mock out dynamically # provided attributes. # XXXX what about attributes that raise exceptions other than # AttributeError on being fetched? # we could be resilient against it, or catch and propagate the # exception when the attribute is fetched from the mock try: original = getattr(spec, entry) except AttributeError: continue kwargs = {'spec': original} if spec_set: kwargs = {'spec_set': original} if not isinstance(original, FunctionTypes): new = _SpecState(original, spec_set, mock, entry, instance) mock._mock_children[entry] = new else: parent = mock if isinstance(spec, FunctionTypes): parent = mock.mock new = MagicMock(parent=parent, name=entry, _new_name=entry, _new_parent=parent, **kwargs) mock._mock_children[entry] = new skipfirst = _must_skip(spec, entry, is_type) _check_signature(original, new, skipfirst=skipfirst) # so functions created with _set_signature become instance attributes, # *plus* their underlying mock exists in _mock_children of the parent # mock. Adding to _mock_children may be unnecessary where we are also # setting as an instance attribute? 
if isinstance(new, FunctionTypes): setattr(mock, entry, new) return mock def _must_skip(spec, entry, is_type): if not isinstance(spec, ClassTypes): if entry in getattr(spec, '__dict__', {}): # instance attribute - shouldn't skip return False spec = spec.__class__ if not hasattr(spec, '__mro__'): # old style class: can't have descriptors anyway return is_type for klass in spec.__mro__: result = klass.__dict__.get(entry, DEFAULT) if result is DEFAULT: continue if isinstance(result, (staticmethod, classmethod)): return False return is_type # shouldn't get here unless function is a dynamically provided attribute # XXXX untested behaviour return is_type def _get_class(obj): try: return obj.__class__ except AttributeError: # in Python 2, _sre.SRE_Pattern objects have no __class__ return type(obj) class _SpecState(object): def __init__(self, spec, spec_set=False, parent=None, name=None, ids=None, instance=False): self.spec = spec self.ids = ids self.spec_set = spec_set self.parent = parent self.instance = instance self.name = name FunctionTypes = ( # python function type(create_autospec), # instance method type(ANY.__eq__), # unbound method type(_ANY.__eq__), ) FunctionAttributes = set([ 'func_closure', 'func_code', 'func_defaults', 'func_dict', 'func_doc', 'func_globals', 'func_name', ]) file_spec = None def mock_open(mock=None, read_data=''): """ A helper function to create a mock to replace the use of `open`. It works for `open` called directly or used as a context manager. The `mock` argument is the mock object to configure. If `None` (the default) then a `MagicMock` will be created for you, with the API limited to methods or attributes available on standard file handles. `read_data` is a string for the `read` method of the file handle to return. This is an empty string by default. """ global file_spec if file_spec is None: # set on first use if inPy3k: import _io file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) else: file_spec = file if mock is None: mock = MagicMock(name='open', spec=open) handle = MagicMock(spec=file_spec) handle.write.return_value = None handle.__enter__.return_value = handle handle.read.return_value = read_data mock.return_value = handle return mock class PropertyMock(Mock): """ A mock intended to be used as a property, or other descriptor, on a class. `PropertyMock` provides `__get__` and `__set__` methods so you can specify a return value when it is fetched. Fetching a `PropertyMock` instance from an object calls the mock, with no args. Setting it calls the mock with the value being set. 
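A minimal sketch (the class `Foo` and its attribute are illustrative):

>>> class Foo(object):
...     foo = 'original'
>>> with patch.object(Foo, 'foo', new_callable=PropertyMock) as m:
...     m.return_value = 'patched'
...     Foo().foo
'patched'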
""" def _get_child_mock(self, **kwargs): return MagicMock(**kwargs) def __get__(self, obj, obj_type): return self() def __set__(self, obj, val): self(val) peewee-3.17.7/tests/manytomany.py000066400000000000000000000541621470346076600170200ustar00rootroot00000000000000from peewee import * from .base import ModelTestCase from .base import TestModel from .base import get_in_memory_db from .base import requires_models from .base_models import Tweet from .base_models import User class User(TestModel): username = TextField(unique=True) class Note(TestModel): text = TextField() users = ManyToManyField(User) NoteUserThrough = Note.users.get_through_model() AltThroughDeferred = DeferredThroughModel() class AltNote(TestModel): text = TextField() users = ManyToManyField(User, through_model=AltThroughDeferred) class AltThroughModel(TestModel): user = ForeignKeyField(User, backref='_xx_rel') note = ForeignKeyField(AltNote, backref='_xx_rel') class Meta: primary_key = CompositeKey('user', 'note') AltThroughDeferred.set_model(AltThroughModel) class Student(TestModel): name = TextField() CourseStudentDeferred = DeferredThroughModel() class Course(TestModel): name = TextField() students = ManyToManyField(Student, backref='+') students2 = ManyToManyField(Student, through_model=CourseStudentDeferred) CourseStudent = Course.students.get_through_model() class CourseStudent2(TestModel): course = ForeignKeyField(Course, backref='+') student = ForeignKeyField(Student, backref='+') CourseStudentDeferred.set_model(CourseStudent2) class Color(TestModel): name = TextField(unique=True) LogoColorDeferred = DeferredThroughModel() class Logo(TestModel): name = TextField(unique=True) colors = ManyToManyField(Color, through_model=LogoColorDeferred) class LogoColor(TestModel): logo = ForeignKeyField(Logo, field=Logo.name) color = ForeignKeyField(Color, field=Color.name) # FK to non-PK column. LogoColorDeferred.set_model(LogoColor) class TestManyToManyFKtoNonPK(ModelTestCase): database = get_in_memory_db() requires = [Color, Logo, LogoColor] def test_manytomany_fk_to_non_pk(self): red = Color.create(name='red') green = Color.create(name='green') blue = Color.create(name='blue') lrg = Logo.create(name='logo-rg') lrb = Logo.create(name='logo-rb') lrgb = Logo.create(name='logo-rgb') lrg.colors.add([red, green]) lrb.colors.add([red, blue]) lrgb.colors.add([red, green, blue]) def assertColors(logo, expected): colors = [c.name for c in logo.colors.order_by(Color.name)] self.assertEqual(colors, expected) assertColors(lrg, ['green', 'red']) assertColors(lrb, ['blue', 'red']) assertColors(lrgb, ['blue', 'green', 'red']) def assertLogos(color, expected): logos = [l.name for l in color.logos.order_by(Logo.name)] self.assertEqual(logos, expected) assertLogos(red, ['logo-rb', 'logo-rg', 'logo-rgb']) assertLogos(green, ['logo-rg', 'logo-rgb']) assertLogos(blue, ['logo-rb', 'logo-rgb']) # Verify we can delete data as well. lrg.colors.remove(red) self.assertEqual([c.name for c in lrg.colors], ['green']) blue.logos.remove(lrb) self.assertEqual([c.name for c in lrb.colors], ['red']) # Verify we can insert using a SELECT query. lrg.colors.add(Color.select().where(Color.name != 'blue'), True) assertColors(lrg, ['green', 'red']) lrb.colors.add(Color.select().where(Color.name == 'blue')) assertColors(lrb, ['blue', 'red']) # Verify we can insert logos using a SELECT query. 
black = Color.create(name='black') black.logos.add(Logo.select().where(Logo.name != 'logo-rgb')) assertLogos(black, ['logo-rb', 'logo-rg']) assertColors(lrb, ['black', 'blue', 'red']) assertColors(lrg, ['black', 'green', 'red']) assertColors(lrgb, ['blue', 'green', 'red']) # Verify we can delete using a SELECT query. lrg.colors.remove(Color.select().where(Color.name == 'red')) assertColors(lrg, ['black', 'green']) black.logos.remove(Logo.select().where(Logo.name == 'logo-rg')) assertLogos(black, ['logo-rb']) # Verify we can clear. lrg.colors.clear() assertColors(lrg, []) assertColors(lrb, ['black', 'blue', 'red']) # Not affected. black.logos.clear() assertLogos(black, []) assertLogos(red, ['logo-rb', 'logo-rgb']) class TestManyToManyBackrefBehavior(ModelTestCase): database = get_in_memory_db() requires = [Student, Course, CourseStudent, CourseStudent2] def setUp(self): super(TestManyToManyBackrefBehavior, self).setUp() math = Course.create(name='math') engl = Course.create(name='engl') huey, mickey, zaizee = [Student.create(name=name) for name in ('huey', 'mickey', 'zaizee')] # Set up relationships. math.students.add([huey, zaizee]) engl.students.add([mickey]) math.students2.add([mickey]) engl.students2.add([huey, zaizee]) def test_manytomanyfield_disabled_backref(self): math = Course.get(name='math') query = math.students.order_by(Student.name) self.assertEqual([s.name for s in query], ['huey', 'zaizee']) huey = Student.get(name='huey') math.students.remove(huey) self.assertEqual([s.name for s in math.students], ['zaizee']) # The backref is via the CourseStudent2 through-model. self.assertEqual([c.name for c in huey.courses], ['engl']) def test_through_model_disabled_backrefs(self): # Here we're testing the case where the many-to-many field does not # explicitly disable back-references, but the foreign-keys on the # through model have disabled back-references. engl = Course.get(name='engl') query = engl.students2.order_by(Student.name) self.assertEqual([s.name for s in query], ['huey', 'zaizee']) zaizee = Student.get(Student.name == 'zaizee') engl.students2.remove(zaizee) self.assertEqual([s.name for s in engl.students2], ['huey']) math = Course.get(name='math') self.assertEqual([s.name for s in math.students2], ['mickey']) class TestManyToManyInheritance(ModelTestCase): def test_manytomany_inheritance(self): class BaseModel(TestModel): class Meta: database = self.database class User(BaseModel): username = TextField() class Project(BaseModel): name = TextField() users = ManyToManyField(User, backref='projects') def subclass_project(): class VProject(Project): pass # We cannot subclass Project, because the many-to-many field "users" # will be inherited, but the through-model does not contain a # foreign-key to VProject. The through-model in this case is # ProjectUsers, which has foreign-keys to project and user. 
self.assertRaises(ValueError, subclass_project) PThrough = Project.users.through_model self.assertTrue(PThrough.project.rel_model is Project) self.assertTrue(PThrough.user.rel_model is User) class TestManyToMany(ModelTestCase): database = get_in_memory_db() requires = [User, Note, NoteUserThrough, AltNote, AltThroughModel] user_to_note = { 'gargie': [1, 2], 'huey': [2, 3], 'mickey': [3, 4], 'zaizee': [4, 5], } def setUp(self): super(TestManyToMany, self).setUp() for username in sorted(self.user_to_note): User.create(username=username) for i in range(5): Note.create(text='note-%s' % (i + 1)) def test_through_model(self): self.assertEqual(len(NoteUserThrough._meta.fields), 3) fields = NoteUserThrough._meta.fields self.assertEqual(sorted(fields), ['id', 'note', 'user']) note_field = fields['note'] self.assertEqual(note_field.rel_model, Note) self.assertFalse(note_field.null) user_field = fields['user'] self.assertEqual(user_field.rel_model, User) self.assertFalse(user_field.null) def _set_data(self): for username, notes in self.user_to_note.items(): user = User.get(User.username == username) for note in notes: NoteUserThrough.create( note=Note.get(Note.text == 'note-%s' % note), user=user) def assertNotes(self, query, expected): notes = [note.text for note in query] self.assertEqual(sorted(notes), ['note-%s' % i for i in sorted(expected)]) def assertUsers(self, query, expected): usernames = [user.username for user in query] self.assertEqual(sorted(usernames), sorted(expected)) def test_accessor_query(self): self._set_data() gargie, huey, mickey, zaizee = User.select().order_by(User.username) with self.assertQueryCount(1): self.assertNotes(gargie.notes, [1, 2]) with self.assertQueryCount(1): self.assertNotes(zaizee.notes, [4, 5]) with self.assertQueryCount(2): self.assertNotes(User.create(username='x').notes, []) n1, n2, n3, n4, n5 = Note.select().order_by(Note.text) with self.assertQueryCount(1): self.assertUsers(n1.users, ['gargie']) with self.assertQueryCount(1): self.assertUsers(n2.users, ['gargie', 'huey']) with self.assertQueryCount(1): self.assertUsers(n5.users, ['zaizee']) with self.assertQueryCount(2): self.assertUsers(Note.create(text='x').users, []) def test_prefetch_notes(self): self._set_data() for pt in PREFETCH_TYPE.values(): with self.assertQueryCount(3): gargie, huey, mickey, zaizee = prefetch( User.select().order_by(User.username), NoteUserThrough, Note, prefetch_type=pt) with self.assertQueryCount(0): self.assertNotes(gargie.notes, [1, 2]) with self.assertQueryCount(0): self.assertNotes(zaizee.notes, [4, 5]) with self.assertQueryCount(2): self.assertNotes(User.create(username='x').notes, []) def test_prefetch_users(self): self._set_data() for pt in PREFETCH_TYPE.values(): with self.assertQueryCount(3): n1, n2, n3, n4, n5 = prefetch( Note.select().order_by(Note.text), NoteUserThrough, User, prefetch_type=pt) with self.assertQueryCount(0): self.assertUsers(n1.users, ['gargie']) with self.assertQueryCount(0): self.assertUsers(n2.users, ['gargie', 'huey']) with self.assertQueryCount(0): self.assertUsers(n5.users, ['zaizee']) with self.assertQueryCount(2): self.assertUsers(Note.create(text='x').users, []) def test_query_filtering(self): self._set_data() gargie, huey, mickey, zaizee = User.select().order_by(User.username) with self.assertQueryCount(1): notes = gargie.notes.where(Note.text != 'note-2') self.assertNotes(notes, [1]) def test_set_value(self): self._set_data() gargie = User.get(User.username == 'gargie') huey = User.get(User.username == 'huey') n1, n2, n3, n4, n5 = 
Note.select().order_by(Note.text) with self.assertQueryCount(2): gargie.notes = n3 self.assertNotes(gargie.notes, [3]) self.assertUsers(n3.users, ['gargie', 'huey', 'mickey']) self.assertUsers(n1.users, []) gargie.notes = [n3, n4] self.assertNotes(gargie.notes, [3, 4]) self.assertUsers(n3.users, ['gargie', 'huey', 'mickey']) self.assertUsers(n4.users, ['gargie', 'mickey', 'zaizee']) def test_set_query(self): huey = User.get(User.username == 'huey') with self.assertQueryCount(2): huey.notes = Note.select().where(~Note.text.endswith('4')) self.assertNotes(huey.notes, [1, 2, 3, 5]) def test_add(self): gargie = User.get(User.username == 'gargie') huey = User.get(User.username == 'huey') n1, n2, n3, n4, n5 = Note.select().order_by(Note.text) gargie.notes.add([n1, n2]) self.assertNotes(gargie.notes, [1, 2]) self.assertUsers(n1.users, ['gargie']) self.assertUsers(n2.users, ['gargie']) for note in [n3, n4, n5]: self.assertUsers(note.users, []) with self.assertQueryCount(1): huey.notes.add(Note.select().where( fn.substr(Note.text, 6, 1) << ['1', '3', '5'])) self.assertNotes(huey.notes, [1, 3, 5]) self.assertUsers(n1.users, ['gargie', 'huey']) self.assertUsers(n2.users, ['gargie']) self.assertUsers(n3.users, ['huey']) self.assertUsers(n4.users, []) self.assertUsers(n5.users, ['huey']) with self.assertQueryCount(1): gargie.notes.add(n4) self.assertNotes(gargie.notes, [1, 2, 4]) with self.assertQueryCount(2): n3.users.add( User.select().where(User.username != 'gargie'), clear_existing=True) self.assertUsers(n3.users, ['huey', 'mickey', 'zaizee']) def test_add_by_pk(self): huey = User.get(User.username == 'huey') n1, n2, n3 = Note.select().order_by(Note.text).limit(3) huey.notes.add([n1.id, n2.id]) self.assertNotes(huey.notes, [1, 2]) self.assertUsers(n1.users, ['huey']) self.assertUsers(n2.users, ['huey']) self.assertUsers(n3.users, []) def test_unique(self): n1 = Note.get(Note.text == 'note-1') huey = User.get(User.username == 'huey') def add_user(note, user): with self.assertQueryCount(1): note.users.add(user) add_user(n1, huey) self.assertRaises(IntegrityError, add_user, n1, huey) add_user(n1, User.get(User.username == 'zaizee')) self.assertUsers(n1.users, ['huey', 'zaizee']) def test_remove(self): self._set_data() gargie, huey, mickey, zaizee = User.select().order_by(User.username) n1, n2, n3, n4, n5 = Note.select().order_by(Note.text) with self.assertQueryCount(1): gargie.notes.remove([n1, n2, n3]) self.assertNotes(gargie.notes, []) self.assertNotes(huey.notes, [2, 3]) with self.assertQueryCount(1): huey.notes.remove(Note.select().where( Note.text << ['note-2', 'note-4', 'note-5'])) self.assertNotes(huey.notes, [3]) self.assertNotes(mickey.notes, [3, 4]) self.assertNotes(zaizee.notes, [4, 5]) with self.assertQueryCount(1): n4.users.remove([gargie, mickey]) self.assertUsers(n4.users, ['zaizee']) with self.assertQueryCount(1): n5.users.remove(User.select()) self.assertUsers(n5.users, []) def test_remove_by_id(self): self._set_data() gargie, huey = User.select().order_by(User.username).limit(2) n1, n2, n3, n4 = Note.select().order_by(Note.text).limit(4) gargie.notes.add([n3, n4]) with self.assertQueryCount(1): gargie.notes.remove([n1.id, n3.id]) self.assertNotes(gargie.notes, [2, 4]) self.assertNotes(huey.notes, [2, 3]) def test_clear(self): gargie = User.get(User.username == 'gargie') huey = User.get(User.username == 'huey') gargie.notes = Note.select() huey.notes = Note.select() self.assertEqual(gargie.notes.count(), 5) self.assertEqual(huey.notes.count(), 5) gargie.notes.clear() 
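        # Clearing removes only gargie's through-model rows; huey's
        # associations are untouched, as verified below.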
self.assertEqual(gargie.notes.count(), 0) self.assertEqual(huey.notes.count(), 5) n1 = Note.get(Note.text == 'note-1') n2 = Note.get(Note.text == 'note-2') n1.users = User.select() n2.users = User.select() self.assertEqual(n1.users.count(), 4) self.assertEqual(n2.users.count(), 4) n1.users.clear() self.assertEqual(n1.users.count(), 0) self.assertEqual(n2.users.count(), 4) def test_manual_through(self): gargie, huey, mickey, zaizee = User.select().order_by(User.username) alt_notes = [] for i in range(5): alt_notes.append(AltNote.create(text='note-%s' % (i + 1))) self.assertNotes(gargie.altnotes, []) for alt_note in alt_notes: self.assertUsers(alt_note.users, []) n1, n2, n3, n4, n5 = alt_notes # Test adding relationships by setting the descriptor. gargie.altnotes = [n1, n2] with self.assertQueryCount(2): huey.altnotes = AltNote.select().where( fn.substr(AltNote.text, 6, 1) << ['1', '3', '5']) mickey.altnotes.add([n1, n4]) with self.assertQueryCount(2): zaizee.altnotes = AltNote.select() # Test that the notes were added correctly. with self.assertQueryCount(1): self.assertNotes(gargie.altnotes, [1, 2]) with self.assertQueryCount(1): self.assertNotes(huey.altnotes, [1, 3, 5]) with self.assertQueryCount(1): self.assertNotes(mickey.altnotes, [1, 4]) with self.assertQueryCount(1): self.assertNotes(zaizee.altnotes, [1, 2, 3, 4, 5]) # Test removing notes. with self.assertQueryCount(1): gargie.altnotes.remove(n1) self.assertNotes(gargie.altnotes, [2]) with self.assertQueryCount(1): huey.altnotes.remove([n1, n2, n3]) self.assertNotes(huey.altnotes, [5]) with self.assertQueryCount(1): sq = (AltNote .select() .where(fn.SUBSTR(AltNote.text, 6, 1) << ['1', '2', '4'])) zaizee.altnotes.remove(sq) self.assertNotes(zaizee.altnotes, [3, 5]) # Test the backside of the relationship. 
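        # (That is, assign and remove User rows from the AltNote side; the
        # same through-model rows are affected as when working from the
        # User side above.)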
n1.users = User.select().where(User.username != 'gargie') with self.assertQueryCount(1): self.assertUsers(n1.users, ['huey', 'mickey', 'zaizee']) with self.assertQueryCount(1): self.assertUsers(n2.users, ['gargie']) with self.assertQueryCount(1): self.assertUsers(n3.users, ['zaizee']) with self.assertQueryCount(1): self.assertUsers(n4.users, ['mickey']) with self.assertQueryCount(1): self.assertUsers(n5.users, ['huey', 'zaizee']) with self.assertQueryCount(1): n1.users.remove(User.select()) with self.assertQueryCount(1): n5.users.remove([gargie, huey]) with self.assertQueryCount(1): self.assertUsers(n1.users, []) with self.assertQueryCount(1): self.assertUsers(n5.users, ['zaizee']) class Person(TestModel): name = CharField() class Account(TestModel): person = ForeignKeyField(Person, primary_key=True) class AccountList(TestModel): name = CharField() accounts = ManyToManyField(Account, backref='lists') AccountListThrough = AccountList.accounts.get_through_model() class TestForeignKeyPrimaryKeyManyToMany(ModelTestCase): database = get_in_memory_db() requires = [Person, Account, AccountList, AccountListThrough] test_data = ( ('huey', ('cats', 'evil')), ('zaizee', ('cats', 'good')), ('mickey', ('dogs', 'good')), ('zombie', ()), ) def setUp(self): super(TestForeignKeyPrimaryKeyManyToMany, self).setUp() name2list = {} for name, lists in self.test_data: p = Person.create(name=name) a = Account.create(person=p) for l in lists: if l not in name2list: name2list[l] = AccountList.create(name=l) name2list[l].accounts.add(a) def account_for(self, name): return Account.select().join(Person).where(Person.name == name).get() def assertLists(self, l1, l2): self.assertEqual(sorted(list(l1)), sorted(list(l2))) def test_pk_is_fk(self): list2names = {} for name, lists in self.test_data: account = self.account_for(name) self.assertLists([l.name for l in account.lists], lists) for l in lists: list2names.setdefault(l, []) list2names[l].append(name) for list_name, names in list2names.items(): account_list = AccountList.get(AccountList.name == list_name) self.assertLists([s.person.name for s in account_list.accounts], names) def test_empty(self): al = AccountList.create(name='empty') self.assertEqual(list(al.accounts), []) class Permission(TestModel): name = TextField() DeniedThroughDeferred = DeferredThroughModel() class Visitor(TestModel): name = TextField() allowed = ManyToManyField(Permission) denied = ManyToManyField(Permission, through_model=DeniedThroughDeferred) class DeniedThrough(TestModel): permission = ForeignKeyField(Permission) visitor = ForeignKeyField(Visitor) DeniedThroughDeferred.set_model(DeniedThrough) class TestMultipleManyToManySameTables(ModelTestCase): database = get_in_memory_db() requires = [Permission, Visitor, Visitor.allowed.through_model, Visitor.denied.through_model] def test_multiple_manytomany_same_tables(self): p1, p2, p3 = [Permission.create(name=n) for n in ('p1', 'p2', 'p3')] v1, v2, v3 = [Visitor.create(name=n) for n in ('v1', 'v2', 'v3')] v1.allowed.add([p1, p2, p3]) v2.allowed.add(p2) v2.denied.add([p1, p3]) v3.allowed.add(p3) v3.denied.add(p1) accum = [] for v in Visitor.select().order_by(Visitor.name): allowed, denied = [], [] for p in v.allowed.order_by(Permission.name): allowed.append(p.name) for p in v.denied.order_by(Permission.name): denied.append(p.name) accum.append((v.name, allowed, denied)) self.assertEqual(accum, [ ('v1', ['p1', 'p2', 'p3'], []), ('v2', ['p2'], ['p1', 'p3']), ('v3', ['p3'], ['p1'])]) 
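# For reference, get_through_model() builds a through model roughly
# equivalent to the following sketch (field and table names are derived
# from the two models; test_through_model above asserts the fields are
# exactly ['id', 'note', 'user']):
#
#   class NoteUserThrough(Model):
#       note = ForeignKeyField(Note)
#       user = ForeignKeyField(User)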
peewee-3.17.7/tests/migrations.py000066400000000000000000001075771470346076600170110ustar00rootroot00000000000000import datetime import os from functools import partial from peewee import * from playhouse.migrate import * from .base import BaseTestCase from .base import IS_CRDB from .base import IS_MYSQL from .base import IS_POSTGRESQL from .base import IS_PSYCOPG3 from .base import IS_SQLITE from .base import IS_SQLITE_25 from .base import IS_SQLITE_35 from .base import ModelTestCase from .base import TestModel from .base import db from .base import get_in_memory_db from .base import requires_models from .base import requires_pglike from .base import requires_postgresql from .base import requires_sqlite from .base import skip_if from .base import skip_unless try: from psycopg2cffi import compat compat.register() except ImportError: pass class Tag(TestModel): tag = CharField() class Person(TestModel): first_name = CharField() last_name = CharField() dob = DateField(null=True) class User(TestModel): id = CharField(primary_key=True, max_length=20) password = CharField(default='secret') class Meta: table_name = 'users' class Page(TestModel): name = CharField(max_length=100, unique=True, null=True) user = ForeignKeyField(User, null=True, backref='pages') class Session(TestModel): user = ForeignKeyField(User, unique=True, backref='sessions') updated_at = DateField(null=True) class IndexModel(TestModel): first_name = CharField() last_name = CharField() data = IntegerField(unique=True) class Meta: indexes = ( (('first_name', 'last_name'), True), ) class Category(TestModel): name = TextField() class TestSchemaMigration(ModelTestCase): requires = [Person, Tag, User, Page, Session] # Each database behaves slightly differently. _exception_add_not_null = not IS_MYSQL _person_data = [ ('Charlie', 'Leifer', None), ('Huey', 'Kitty', datetime.date(2011, 5, 1)), ('Mickey', 'Dog', datetime.date(2008, 6, 1)), ] def setUp(self): super(TestSchemaMigration, self).setUp() self.migrator = SchemaMigrator.from_database(self.database) def tearDown(self): try: super(TestSchemaMigration, self).tearDown() finally: self.database.close() @requires_pglike def test_add_table_constraint(self): price = FloatField(default=0.) migrate(self.migrator.add_column('tag', 'price', price), self.migrator.add_constraint('tag', 'price_check', Check('price >= 0'))) class Tag2(Model): tag = CharField() price = FloatField(default=0.) class Meta: database = self.database table_name = Tag._meta.table_name with self.database.atomic(): self.assertRaises(IntegrityError, Tag2.create, tag='t1', price=-1) Tag2.create(tag='t1', price=1.0) t1_db = Tag2.get(Tag2.tag == 't1') self.assertEqual(t1_db.price, 1.0) @skip_if(IS_SQLITE) def test_add_unique(self): alt_id = IntegerField(default=0) migrate( self.migrator.add_column('tag', 'alt_id', alt_id), self.migrator.add_unique('tag', 'alt_id')) class Tag2(Model): tag = CharField() alt_id = IntegerField(default=0) class Meta: database = self.database table_name = Tag._meta.table_name Tag2.create(tag='t1', alt_id=1) with self.database.atomic(): self.assertRaises(IntegrityError, Tag2.create, tag='t2', alt_id=1) @requires_pglike def test_drop_table_constraint(self): price = FloatField(default=0.) migrate( self.migrator.add_column('tag', 'price', price), self.migrator.add_constraint('tag', 'price_check', Check('price >= 0'))) class Tag2(Model): tag = CharField() price = FloatField(default=0.) 
class Meta: database = self.database table_name = Tag._meta.table_name with self.database.atomic(): self.assertRaises(IntegrityError, Tag2.create, tag='t1', price=-1) migrate(self.migrator.drop_constraint('tag', 'price_check')) Tag2.create(tag='t1', price=-1) t1_db = Tag2.get(Tag2.tag == 't1') self.assertEqual(t1_db.price, -1.0) def test_add_column(self): # Create some fields with a variety of NULL / default values. df = DateTimeField(null=True) df_def = DateTimeField(default=datetime.datetime(2012, 1, 1)) cf = CharField(max_length=200, default='') bf = BooleanField(default=True) ff = FloatField(default=0) # Create two rows in the Tag table to test the handling of adding # non-null fields. t1 = Tag.create(tag='t1') t2 = Tag.create(tag='t2') # Convenience function for generating `add_column` migrations. add_column = partial(self.migrator.add_column, 'tag') # Run the migration. migrate( add_column('pub_date', df), add_column('modified_date', df_def), add_column('comment', cf), add_column('is_public', bf), add_column('popularity', ff)) # Create a new tag model to represent the fields we added. class NewTag(Model): tag = CharField() pub_date = df modified_date = df_def comment = cf is_public = bf popularity = ff class Meta: database = self.database table_name = Tag._meta.table_name query = (NewTag .select( NewTag.id, NewTag.tag, NewTag.pub_date, NewTag.modified_date, NewTag.comment, NewTag.is_public, NewTag.popularity) .order_by(NewTag.tag.asc())) # Verify the resulting rows are correct. self.assertEqual(list(query.tuples()), [ (t1.id, 't1', None, datetime.datetime(2012, 1, 1), '', True, 0.0), (t2.id, 't2', None, datetime.datetime(2012, 1, 1), '', True, 0.0), ]) @skip_if(IS_MYSQL, 'mysql does not support CHECK()') def test_add_column_constraint(self): cf = CharField(null=True, constraints=[SQL('default \'foo\'')]) ff = FloatField(default=0., constraints=[Check('val < 1.0')]) t1 = Tag.create(tag='t1') migrate( self.migrator.add_column('tag', 'misc', cf), self.migrator.add_column('tag', 'val', ff)) class NewTag(Model): tag = CharField() misc = CharField() val = FloatField() class Meta: database = self.database table_name = Tag._meta.table_name t1_db = NewTag.get(NewTag.tag == 't1') self.assertEqual(t1_db.misc, 'foo') self.assertEqual(t1_db.val, 0.) with self.database.atomic(): self.assertRaises(IntegrityError, NewTag.create, tag='t2', misc='bar', val=2.) 
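        # A row that satisfies the CHECK constraint (val < 1.0) inserts
        # without error: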
NewTag.create(tag='t3', misc='baz', val=0.9) t3_db = NewTag.get(NewTag.tag == 't3') self.assertEqual(t3_db.misc, 'baz') self.assertEqual(t3_db.val, 0.9) def _create_people(self): for first, last, dob in self._person_data: Person.create(first_name=first, last_name=last, dob=dob) def get_column_names(self, tbl): cursor = self.database.execute_sql('select * from %s limit 1' % tbl) return set([col[0] for col in cursor.description]) def test_drop_column(self, legacy=False): kw = {'legacy': legacy} if IS_SQLITE else {} self._create_people() migrate( self.migrator.drop_column('person', 'last_name', **kw), self.migrator.drop_column('person', 'dob', **kw)) column_names = self.get_column_names('person') self.assertEqual(column_names, set(['id', 'first_name'])) User.create(id='charlie', password='12345') User.create(id='huey', password='meow') migrate(self.migrator.drop_column('users', 'password', **kw)) column_names = self.get_column_names('users') self.assertEqual(column_names, set(['id'])) data = [row for row in User.select(User.id).order_by(User.id).tuples()] self.assertEqual(data, [ ('charlie',), ('huey',),]) @skip_unless(IS_SQLITE_35, 'Requires sqlite 3.35 or newer') def test_drop_column_sqlite_legacy(self): self.test_drop_column(legacy=True) def test_rename_column(self, legacy=False): kw = {'legacy': legacy} if IS_SQLITE else {} self._create_people() migrate( self.migrator.rename_column('person', 'first_name', 'first', **kw), self.migrator.rename_column('person', 'last_name', 'last', **kw)) column_names = self.get_column_names('person') self.assertEqual(column_names, set(['id', 'first', 'last', 'dob'])) class NewPerson(Model): first = CharField() last = CharField() dob = DateField() class Meta: database = self.database table_name = Person._meta.table_name query = (NewPerson .select( NewPerson.first, NewPerson.last, NewPerson.dob) .order_by(NewPerson.first)) self.assertEqual(list(query.tuples()), self._person_data) @skip_unless(IS_SQLITE_25, 'Requires sqlite 3.25 or newer') def test_rename_column_sqlite_legacy(self): self.test_rename_column(legacy=True) def test_rename_gh380(self, legacy=False): kw = {'legacy': legacy} if IS_SQLITE else {} u1 = User.create(id='charlie') u2 = User.create(id='huey') p1 = Page.create(name='p1-1', user=u1) p2 = Page.create(name='p2-1', user=u1) p3 = Page.create(name='p3-2', user=u2) migrate(self.migrator.rename_column('page', 'name', 'title', **kw)) column_names = self.get_column_names('page') self.assertEqual(column_names, set(['id', 'title', 'user_id'])) class NewPage(Model): title = CharField(max_length=100, unique=True, null=True) user = ForeignKeyField(User, null=True, backref='newpages') class Meta: database = self.database table_name = Page._meta.table_name query = (NewPage .select( NewPage.title, NewPage.user) .order_by(NewPage.title)) self.assertEqual( [(np.title, np.user.id) for np in query], [('p1-1', 'charlie'), ('p2-1', 'charlie'), ('p3-2', 'huey')]) @skip_unless(IS_SQLITE_25, 'Requires sqlite 3.25 or newer') def test_rename_gh380_sqlite_legacy(self): self.test_rename_gh380(legacy=True) @skip_if(IS_PSYCOPG3, 'Psycopg3 chokes on the default value.') def test_add_default_drop_default(self): with self.database.transaction(): migrate(self.migrator.add_column_default('person', 'first_name', default='x')) p = Person.create(last_name='Last') p_db = Person.get(Person.last_name == 'Last') self.assertEqual(p_db.first_name, 'x') with self.database.transaction(): migrate(self.migrator.drop_column_default('person', 'first_name')) if IS_MYSQL: # MySQL, even though 
the column is NOT NULL, does not seem to be # enforcing the constraint(?). Person.create(last_name='Last2') p_db = Person.get(Person.last_name == 'Last2') self.assertEqual(p_db.first_name, '') else: with self.assertRaises(IntegrityError): with self.database.transaction(): Person.create(last_name='Last2') def test_add_not_null(self): self._create_people() def addNotNull(): with self.database.transaction(): migrate(self.migrator.add_not_null('person', 'dob')) # We cannot make the `dob` field not null because there is currently # a null value there. if self._exception_add_not_null: with self.assertRaisesCtx((IntegrityError, InternalError)): addNotNull() (Person .update(dob=datetime.date(2000, 1, 2)) .where(Person.dob >> None) .execute()) # Now we can make the column not null. addNotNull() # And attempting to insert a null value results in an integrity error. with self.database.transaction(): with self.assertRaisesCtx((IntegrityError, OperationalError)): Person.create( first_name='Kirby', last_name='Snazebrauer', dob=None) def test_drop_not_null(self): self._create_people() migrate( self.migrator.drop_not_null('person', 'first_name'), self.migrator.drop_not_null('person', 'last_name')) p = Person.create(first_name=None, last_name=None) query = (Person .select() .where( (Person.first_name >> None) & (Person.last_name >> None))) self.assertEqual(query.count(), 1) def test_modify_not_null_foreign_key(self): user = User.create(id='charlie') Page.create(name='null user') Page.create(name='charlie', user=user) def addNotNull(): with self.database.transaction(): migrate(self.migrator.add_not_null('page', 'user_id')) if self._exception_add_not_null: with self.assertRaisesCtx((IntegrityError, InternalError)): addNotNull() with self.database.transaction(): Page.update(user=user).where(Page.user.is_null()).execute() addNotNull() # And attempting to insert a null value results in an integrity error. with self.database.transaction(): with self.assertRaisesCtx((OperationalError, IntegrityError)): Page.create( name='fails', user=None) # Now we will drop it. with self.database.transaction(): migrate(self.migrator.drop_not_null('page', 'user_id')) self.assertEqual(Page.select().where(Page.user.is_null()).count(), 0) Page.create(name='succeeds', user=None) self.assertEqual(Page.select().where(Page.user.is_null()).count(), 1) def test_rename_table(self): t1 = Tag.create(tag='t1') t2 = Tag.create(tag='t2') # Move the tag data into a new model/table. class Tag_asdf(Tag): pass self.assertEqual(Tag_asdf._meta.table_name, 'tag_asdf') # Drop the new table just to be safe. Tag_asdf._schema.drop_all(True) # Rename the tag table. migrate(self.migrator.rename_table('tag', 'tag_asdf')) # Verify the data was moved. query = (Tag_asdf .select() .order_by(Tag_asdf.tag)) self.assertEqual([t.tag for t in query], ['t1', 't2']) # Verify the old table is gone. with self.database.transaction(): self.assertRaises( DatabaseError, Tag.create, tag='t3') self.database.execute_sql('drop table tag_asdf') def test_add_index(self): # Create a unique index on first and last names. columns = ('first_name', 'last_name') migrate(self.migrator.add_index('person', columns, True)) Person.create(first_name='first', last_name='last') with self.database.transaction(): with self.assertRaisesCtx((IntegrityError, InternalError)): Person.create(first_name='first', last_name='last') def test_add_unique_column(self): uf = CharField(default='', unique=True) # Run the migration. 
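        # (Adding a unique column is roughly two operations under the hood:
        # ALTER TABLE ... ADD COLUMN followed by CREATE UNIQUE INDEX, since
        # e.g. SQLite cannot add a UNIQUE column in a single ALTER.)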
migrate(self.migrator.add_column('tag', 'unique_field', uf)) # Create a new tag model to represent the fields we added. class NewTag(Model): tag = CharField() unique_field = uf class Meta: database = self.database table_name = Tag._meta.table_name NewTag.create(tag='t1', unique_field='u1') NewTag.create(tag='t2', unique_field='u2') with self.database.atomic(): self.assertRaises(IntegrityError, NewTag.create, tag='t3', unique_field='u1') def test_drop_index(self): # Create a unique index. self.test_add_index() # Now drop the unique index. migrate( self.migrator.drop_index('person', 'person_first_name_last_name')) Person.create(first_name='first', last_name='last') query = (Person .select() .where( (Person.first_name == 'first') & (Person.last_name == 'last'))) self.assertEqual(query.count(), 2) def test_add_and_remove(self): operations = [] field = CharField(default='foo') for i in range(10): operations.append(self.migrator.add_column('tag', 'foo', field)) operations.append(self.migrator.drop_column('tag', 'foo')) migrate(*operations) col_names = self.get_column_names('tag') self.assertEqual(col_names, set(['id', 'tag'])) def test_multiple_operations(self): self.database.execute_sql('drop table if exists person_baze;') self.database.execute_sql('drop table if exists person_nugg;') self._create_people() field_n = CharField(null=True) field_d = CharField(default='test') operations = [ self.migrator.add_column('person', 'field_null', field_n), self.migrator.drop_column('person', 'first_name'), self.migrator.add_column('person', 'field_default', field_d), self.migrator.rename_table('person', 'person_baze'), self.migrator.rename_table('person_baze', 'person_nugg'), self.migrator.rename_column('person_nugg', 'last_name', 'last'), self.migrator.add_index('person_nugg', ('last',), True), ] migrate(*operations) class PersonNugg(Model): field_null = field_n field_default = field_d last = CharField() dob = DateField(null=True) class Meta: database = self.database table_name = 'person_nugg' people = (PersonNugg .select( PersonNugg.field_null, PersonNugg.field_default, PersonNugg.last, PersonNugg.dob) .order_by(PersonNugg.last) .tuples()) expected = [ (None, 'test', 'Dog', datetime.date(2008, 6, 1)), (None, 'test', 'Kitty', datetime.date(2011, 5, 1)), (None, 'test', 'Leifer', None), ] self.assertEqual(list(people), expected) with self.database.transaction(): self.assertRaises( IntegrityError, PersonNugg.create, last='Leifer', field_default='bazer') self.database.execute_sql('drop table person_nugg;') def test_add_foreign_key(self): if hasattr(Person, 'newtag_set'): delattr(Person, 'newtag_set') # Ensure no foreign keys are present at the beginning of the test. 
self.assertEqual(self.database.get_foreign_keys('tag'), []) field = ForeignKeyField(Person, field=Person.id, null=True) migrate(self.migrator.add_column('tag', 'person_id', field)) class NewTag(Tag): person = field class Meta: table_name = 'tag' p = Person.create(first_name='First', last_name='Last') t1 = NewTag.create(tag='t1', person=p) t2 = NewTag.create(tag='t2') t1_db = NewTag.get(NewTag.tag == 't1') self.assertEqual(t1_db.person, p) t2_db = NewTag.get(NewTag.tag == 't2') self.assertIsNone(t2_db.person) foreign_keys = self.database.get_foreign_keys('tag') self.assertEqual(len(foreign_keys), 1) foreign_key = foreign_keys[0] self.assertEqual(foreign_key.column, 'person_id') self.assertEqual(foreign_key.dest_column, 'id') self.assertEqual(foreign_key.dest_table, 'person') def test_drop_foreign_key(self): kw = {'legacy': True} if IS_SQLITE else {} migrate(self.migrator.drop_column('page', 'user_id', **kw)) columns = self.database.get_columns('page') self.assertEqual( sorted(column.name for column in columns), ['id', 'name']) self.assertEqual(self.database.get_foreign_keys('page'), []) def test_rename_foreign_key(self): migrate(self.migrator.rename_column('page', 'user_id', 'huey_id')) columns = self.database.get_columns('page') self.assertEqual( sorted(column.name for column in columns), ['huey_id', 'id', 'name']) foreign_keys = self.database.get_foreign_keys('page') self.assertEqual(len(foreign_keys), 1) foreign_key = foreign_keys[0] self.assertEqual(foreign_key.column, 'huey_id') self.assertEqual(foreign_key.dest_column, 'id') self.assertEqual(foreign_key.dest_table, 'users') def test_rename_unique_foreign_key(self): migrate(self.migrator.rename_column('session', 'user_id', 'huey_id')) columns = self.database.get_columns('session') self.assertEqual( sorted(column.name for column in columns), ['huey_id', 'id', 'updated_at']) foreign_keys = self.database.get_foreign_keys('session') self.assertEqual(len(foreign_keys), 1) foreign_key = foreign_keys[0] self.assertEqual(foreign_key.column, 'huey_id') self.assertEqual(foreign_key.dest_column, 'id') self.assertEqual(foreign_key.dest_table, 'users') @requires_pglike @requires_models(Tag) def test_add_column_with_index_type(self): from playhouse.postgres_ext import BinaryJSONField self.reset_sql_history() field = BinaryJSONField(default=dict, index=True, null=True) migrate(self.migrator.add_column('tag', 'metadata', field)) queries = [x.msg for x in self.history] self.assertEqual(queries, [ ('ALTER TABLE "tag" ADD COLUMN "metadata" JSONB', []), ('CREATE INDEX "tag_metadata" ON "tag" USING GIN ("metadata")', []), ]) @skip_if(IS_CRDB, 'crdb is still finicky about changing types.') def test_alter_column_type(self): # Convert varchar to text. field = TextField() migrate(self.migrator.alter_column_type('tag', 'tag', field)) _, tag = self.database.get_columns('tag') # name, type, null?, primary-key?, table, default. data_type = 'TEXT' if IS_SQLITE else 'text' self.assertEqual(tag, ('tag', data_type, False, False, 'tag', None)) # Convert date to datetime. field = DateTimeField() migrate(self.migrator.alter_column_type('person', 'dob', field)) _, _, _, dob = self.database.get_columns('person') if IS_POSTGRESQL or IS_CRDB: self.assertTrue(dob.data_type.startswith('timestamp')) else: self.assertEqual(dob.data_type.lower(), 'datetime') # Convert text to integer.
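        # (The Postgres family requires an explicit cast for text ->
        # integer, supplied via the `cast` argument below; SQLite and MySQL
        # convert implicitly, so no cast is passed for them.)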
field = IntegerField() cast = '(tag::integer)' if IS_POSTGRESQL or IS_CRDB else None migrate(self.migrator.alter_column_type('tag', 'tag', field, cast)) _, tag = self.database.get_columns('tag') if IS_SQLITE: data_type = 'INTEGER' elif IS_MYSQL: data_type = 'int' else: data_type = 'integer' self.assertEqual(tag, ('tag', data_type, False, False, 'tag', None)) @requires_sqlite def test_valid_column_required(self): self.assertRaises( (OperationalError, ValueError), migrate, self.migrator.drop_column('page', 'column_does_not_exist')) self.assertRaises( (OperationalError, ValueError), migrate, self.migrator.rename_column('page', 'xx', 'yy')) @requires_sqlite @requires_models(IndexModel) def test_table_case_insensitive(self): migrate(self.migrator.drop_column('PaGe', 'name', legacy=True)) column_names = self.get_column_names('page') self.assertEqual(column_names, set(['id', 'user_id'])) testing_field = CharField(default='xx') migrate(self.migrator.add_column('pAGE', 'testing', testing_field)) column_names = self.get_column_names('page') self.assertEqual(column_names, set(['id', 'user_id', 'testing'])) migrate(self.migrator.drop_column('indeX_mOdel', 'first_name', legacy=True)) indexes = self.migrator.database.get_indexes('index_model') self.assertEqual(len(indexes), 1) self.assertEqual(indexes[0].name, 'index_model_data') @requires_sqlite @requires_models(IndexModel) def test_add_column_indexed_table(self): # Ensure that columns can be added to tables that have indexes. field = CharField(default='') migrate(self.migrator.add_column('index_model', 'foo', field)) db = self.migrator.database columns = db.get_columns('index_model') self.assertEqual(sorted(column.name for column in columns), ['data', 'first_name', 'foo', 'id', 'last_name']) indexes = db.get_indexes('index_model') self.assertEqual( sorted((index.name, index.columns) for index in indexes), [('index_model_data', ['data']), ('index_model_first_name_last_name', ['first_name', 'last_name'])]) @requires_sqlite def test_rename_column_to_table_name(self): db = self.migrator.database columns = lambda: sorted(col.name for col in db.get_columns('page')) indexes = lambda: sorted((idx.name, idx.columns) for idx in db.get_indexes('page')) orig_columns = columns() orig_indexes = indexes() # Rename "page"."name" to "page"."page". migrate(self.migrator.rename_column('page', 'name', 'page')) # Ensure that the index on "name" is preserved, and that the index on # the user_id foreign key is also preserved. 
self.assertEqual(columns(), ['id', 'page', 'user_id']) self.assertEqual(indexes(), [ ('page_name', ['page']), ('page_user_id', ['user_id'])]) # Revert the operation and verify migrate(self.migrator.rename_column('page', 'page', 'name')) self.assertEqual(columns(), orig_columns) self.assertEqual(indexes(), orig_indexes) @requires_sqlite @requires_models(Category) def test_add_fk_with_constraints(self): self.reset_sql_history() field = ForeignKeyField(Category, Category.id, backref='children', null=True, on_delete='SET NULL') migrate(self.migrator.add_column( Category._meta.table_name, 'parent_id', field)) queries = [x.msg for x in self.history] self.assertEqual(queries, [ ('ALTER TABLE "category" ADD COLUMN "parent_id" ' 'INTEGER REFERENCES "category" ("id") ON DELETE SET NULL', []), ('CREATE INDEX "category_parent_id" ON "category" ("parent_id")', []), ]) @requires_sqlite @requires_models(IndexModel) def test_index_preservation(self): self.reset_sql_history() migrate(self.migrator.rename_column( 'index_model', 'first_name', 'first', legacy=True)) queries = [x.msg for x in self.history] self.assertEqual(queries, [ # Get all the columns. ('PRAGMA "main".table_info("index_model")', None), # Get the table definition. ('select name, sql from sqlite_master ' 'where type=? and LOWER(name)=?', ['table', 'index_model']), # Get the indexes and indexed columns for the table. ('SELECT name, sql FROM "main".sqlite_master ' 'WHERE tbl_name = ? AND type = ? ORDER BY name', ('index_model', 'index')), ('PRAGMA "main".index_list("index_model")', None), ('PRAGMA "main".index_info("index_model_data")', None), ('PRAGMA "main".index_info("index_model_first_name_last_name")', None), # Get foreign keys. ('PRAGMA "main".foreign_key_list("index_model")', None), # Drop any temporary table, if it exists. ('DROP TABLE IF EXISTS "index_model__tmp__"', []), # Create a temporary table with the renamed column. ('CREATE TABLE "index_model__tmp__" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"first" VARCHAR(255) NOT NULL, ' '"last_name" VARCHAR(255) NOT NULL, ' '"data" INTEGER NOT NULL)', []), # Copy data from original table into temporary table. ('INSERT INTO "index_model__tmp__" ' '("id", "first", "last_name", "data") ' 'SELECT "id", "first_name", "last_name", "data" ' 'FROM "index_model"', []), # Drop the original table. ('DROP TABLE "index_model"', []), # Rename the temporary table, replacing the original. ('ALTER TABLE "index_model__tmp__" RENAME TO "index_model"', []), # Re-create the indexes. ('CREATE UNIQUE INDEX "index_model_data" ' 'ON "index_model" ("data")', []), ('CREATE UNIQUE INDEX "index_model_first_name_last_name" ' 'ON "index_model" ("first", "last_name")', []) ]) @requires_sqlite @requires_models(User, Page) def test_modify_fk_constraint(self): self.reset_sql_history() new_fk = ForeignKeyField(User, User.id, null=True, on_delete='CASCADE') migrate( self.migrator.drop_column('page', 'user_id', legacy=True), self.migrator.add_column('page', 'user_id', new_fk)) queries = [x.msg for x in self.history] self.assertEqual(queries, [ # Get all columns for table. ('PRAGMA "main".table_info("page")', None), # Get the SQL used to generate the table and indexes. ('select name, sql from sqlite_master ' 'where type=? and LOWER(name)=?', ['table', 'page']), ('SELECT name, sql FROM "main".sqlite_master ' 'WHERE tbl_name = ? AND type = ? ORDER BY name', ('page', 'index')), # Get the indexes and indexed columns for the table. 
('PRAGMA "main".index_list("page")', None), ('PRAGMA "main".index_info("page_name")', None), ('PRAGMA "main".index_info("page_user_id")', None), ('PRAGMA "main".foreign_key_list("page")', None), # Clear out a temp table and create it w/o the user_id FK. ('DROP TABLE IF EXISTS "page__tmp__"', []), ('CREATE TABLE "page__tmp__" (' '"id" INTEGER NOT NULL PRIMARY KEY, "name" VARCHAR(100))', []), # Copy data into the temp table, drop the original and rename # the temp -> original. Recreate index(es). ('INSERT INTO "page__tmp__" ("id", "name") ' 'SELECT "id", "name" FROM "page"', []), ('DROP TABLE "page"', []), ('ALTER TABLE "page__tmp__" RENAME TO "page"', []), ('CREATE UNIQUE INDEX "page_name" ON "page" ("name")', []), # Add new foreign-key field with appropriate constraint. ('ALTER TABLE "page" ADD COLUMN "user_id" VARCHAR(20) ' 'REFERENCES "users" ("id") ON DELETE CASCADE', []), ('CREATE INDEX "page_user_id" ON "page" ("user_id")', []), ]) self.database.pragma('foreign_keys', 1) huey = User.create(id='huey') huey_page = Page.create(user=huey, name='huey page') self.assertEqual(Page.select().count(), 1) # Deleting the user will cascade to the associated page. User.delete().where(User.id == 'huey').execute() self.assertEqual(Page.select().count(), 0) def test_make_index_name(self): self.assertEqual(make_index_name('table', ['column']), 'table_column') def test_make_index_name_long(self): columns = [ 'very_long_column_name_number_1', 'very_long_column_name_number_2', 'very_long_column_name_number_3', 'very_long_column_name_number_4' ] name = make_index_name('very_long_table_name', columns) self.assertEqual(len(name), 64) class BadNames(TestModel): primary_data = TextField() foreign_data = TextField() data = TextField() class Meta: constraints = [ SQL('CONSTRAINT const1 UNIQUE (primary_data)'), SQL('CONSTRAINT const2 UNIQUE (foreign_data)')] class HasChecks(TestModel): key = TextField() value = IntegerField() class Meta: constraints = [ SQL("CHECK (key != '')"), SQL('CHECK (value > 0)')] class TestSqliteColumnNameRegression(ModelTestCase): database = get_in_memory_db() requires = [BadNames, HasChecks] def test_sqlite_check_constraints(self): HasChecks.create(key='k1', value=1) migrator = SchemaMigrator.from_database(self.database) extra = TextField(default='') migrate(migrator.add_column('has_checks', 'extra', extra)) columns = self.database.get_columns('has_checks') self.assertEqual([c.name for c in columns], ['id', 'key', 'value', 'extra']) HC = Table('has_checks', ('id', 'key', 'value', 'extra')) HC = HC.bind(self.database) # Sanity-check: ensure we can create a new row. data = {'key': 'k2', 'value': 2, 'extra': 'x2'} self.assertTrue(HC.insert(data).execute()) # Check constraints preserved. 
data = {'key': 'k0', 'value': 0, 'extra': 'x0'} self.assertRaises(IntegrityError, HC.insert(data).execute) data = {'key': '', 'value': 3, 'extra': 'x3'} self.assertRaises(IntegrityError, HC.insert(data).execute) def test_sqlite_column_name_constraint_regression(self): BadNames.create(primary_data='pd', foreign_data='fd', data='d') migrator = SchemaMigrator.from_database(self.database) new_data = TextField(default='foo') migrate(migrator.add_column('bad_names', 'new_data', new_data), migrator.drop_column('bad_names', 'data')) columns = self.database.get_columns('bad_names') column_names = [column.name for column in columns] self.assertEqual(column_names, ['id', 'primary_data', 'foreign_data', 'new_data']) BNT = Table('bad_names', ('id', 'primary_data', 'foreign_data', 'new_data')).bind(self.database) self.assertEqual([row for row in BNT.select()], [{ 'id': 1, 'primary_data': 'pd', 'foreign_data': 'fd', 'new_data': 'foo'}]) # Verify constraints were carried over. data = {'primary_data': 'pd', 'foreign_data': 'xx', 'new_data': 'd'} self.assertRaises(IntegrityError, BNT.insert(data).execute) data.update(primary_data='px', foreign_data='fd') self.assertRaises(IntegrityError, BNT.insert(data).execute) data.update(foreign_data='fx') self.assertTrue(BNT.insert(data).execute()) peewee-3.17.7/tests/model_save.py000066400000000000000000000117551470346076600167430ustar00rootroot00000000000000from peewee import * from .base import ModelTestCase from .base import TestModel from .base import requires_pglike class T1(TestModel): pk = AutoField() value = IntegerField() class T2(TestModel): pk = IntegerField(constraints=[SQL('DEFAULT 3')], primary_key=True) value = IntegerField() class T3(TestModel): pk = IntegerField(primary_key=True) value = IntegerField() class T4(TestModel): pk1 = IntegerField() pk2 = IntegerField() value = IntegerField() class Meta: primary_key = CompositeKey('pk1', 'pk2') class T5(TestModel): val = IntegerField(null=True) class TestPrimaryKeySaveHandling(ModelTestCase): requires = [T1, T2, T3, T4] def test_auto_field(self): # AutoField will be inserted if the PK is not set, after which the new # ID will be populated. t11 = T1(value=1) self.assertEqual(t11.save(), 1) self.assertTrue(t11.pk is not None) # Calling save() a second time will issue an update. t11.value = 100 self.assertEqual(t11.save(), 1) # Verify the record was updated. t11_db = T1[t11.pk] self.assertEqual(t11_db.value, 100) # We can explicitly specify the value of an auto-incrementing # primary-key, but we must be sure to call save(force_insert=True), # otherwise peewee will attempt to do an update. t12 = T1(pk=1337, value=2) self.assertEqual(t12.save(), 0) self.assertEqual(T1.select().count(), 1) self.assertEqual(t12.save(force_insert=True), 1) # Attempting to force-insert an already-existing PK will fail with an # integrity error. with self.database.atomic(): with self.assertRaises(IntegrityError): t12.value = 3 t12.save(force_insert=True) query = T1.select().order_by(T1.value).tuples() self.assertEqual(list(query), [(1337, 2), (t11.pk, 100)]) @requires_pglike def test_server_default_pk(self): # The new value of the primary-key will be returned to us, since # postgres supports RETURNING. t2 = T2(value=1) self.assertEqual(t2.save(), 1) self.assertEqual(t2.pk, 3) # Saving after the PK is set will issue an update. t2.value = 100 self.assertEqual(t2.save(), 1) t2_db = T2[3] self.assertEqual(t2_db.value, 100) # If we just set the pk and try to save, peewee issues an update which # doesn't have any effect. 
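        # (save() chooses INSERT vs. UPDATE solely by whether the primary
        # key is set; force_insert=True overrides that, as below.)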
t22 = T2(pk=2, value=20) self.assertEqual(t22.save(), 0) self.assertEqual(T2.select().count(), 1) # We can force-insert the value we specify explicitly. self.assertEqual(t22.save(force_insert=True), 1) self.assertEqual(T2[2].value, 20) def test_integer_field_pk(self): # For a non-auto-incrementing primary key, we have to use force_insert. t3 = T3(pk=2, value=1) self.assertEqual(t3.save(), 0) # Oops, attempts to do an update. self.assertEqual(T3.select().count(), 0) # Force to be an insert. self.assertEqual(t3.save(force_insert=True), 1) # Now we can update the value and call save() to issue an update. t3.value = 100 self.assertEqual(t3.save(), 1) # Verify data is correct. t3_db = T3[2] self.assertEqual(t3_db.value, 100) def test_composite_pk(self): t4 = T4(pk1=1, pk2=2, value=10) # Will attempt to do an update on non-existent rows. self.assertEqual(t4.save(), 0) self.assertEqual(t4.save(force_insert=True), 1) # Modifying part of the composite PK and attempting an update will fail. t4.pk2 = 3 t4.value = 30 self.assertEqual(t4.save(), 0) t4.pk2 = 2 self.assertEqual(t4.save(), 1) t4_db = T4[1, 2] self.assertEqual(t4_db.value, 30) @requires_pglike def test_returning_object(self): query = T2.insert(value=10).returning(T2).objects() t2_db, = list(query) self.assertEqual(t2_db.pk, 3) self.assertEqual(t2_db.value, 10) class TestSaveNoData(ModelTestCase): requires = [T5] def test_save_no_data(self): t5 = T5.create() self.assertTrue(t5.id >= 1) t5.val = 3 t5.save() t5_db = T5.get(T5.id == t5.id) self.assertEqual(t5_db.val, 3) t5.val = None t5.save() t5_db = T5.get(T5.id == t5.id) self.assertTrue(t5_db.val is None) def test_save_no_data2(self): t5 = T5.create() t5_db = T5.get(T5.id == t5.id) t5_db.save() t5_db = T5.get(T5.id == t5.id) self.assertTrue(t5_db.val is None) def test_save_no_data3(self): t5 = T5.create() self.assertRaises(ValueError, t5.save) def test_save_only_no_data(self): t5 = T5.create(val=1) t5.val = 2 self.assertRaises(ValueError, t5.save, only=[]) t5_db = T5.get(T5.id == t5.id) self.assertEqual(t5_db.val, 1) peewee-3.17.7/tests/model_sql.py000066400000000000000000001344061470346076600166020ustar00rootroot00000000000000import datetime from peewee import * from peewee import Alias from peewee import Database from peewee import ModelIndex from .base import get_in_memory_db from .base import requires_pglike from .base import BaseTestCase from .base import ModelDatabaseTestCase from .base import TestModel from .base import __sql__ from .base_models import * class CKM(TestModel): category = CharField() key = CharField() value = IntegerField() class Meta: primary_key = CompositeKey('category', 'key') class TestModelSQL(ModelDatabaseTestCase): database = get_in_memory_db() requires = [Category, CKM, Note, Person, Relationship, Sample, User, DfltM] def test_select(self): query = (Person .select( Person.first, Person.last, fn.COUNT(Note.id).alias('ct')) .join(Note) .where((Person.last == 'Leifer') & (Person.id < 4))) self.assertSQL(query, ( 'SELECT "t1"."first", "t1"."last", COUNT("t2"."id") AS "ct" ' 'FROM "person" AS "t1" ' 'INNER JOIN "note" AS "t2" ON ("t2"."author_id" = "t1"."id") ' 'WHERE (' '("t1"."last" = ?)
AND ' '("t1"."id" < ?))'), ['Leifer', 4]) def test_reselect(self): sql = 'SELECT "t1"."name", "t1"."parent_id" FROM "category" AS "t1"' query = Category.select() self.assertSQL(query, sql, []) query2 = query.select() self.assertSQL(query2, sql, []) query = Category.select(Category.name, Category.parent) self.assertSQL(query, sql, []) query2 = query.select() self.assertSQL(query2, 'SELECT FROM "category" AS "t1"', []) query = query2.select(Category.name) self.assertSQL(query, 'SELECT "t1"."name" FROM "category" AS "t1"', []) def test_select_extend(self): query = Note.select() ext = query.join(Person).select_extend(Person) self.assertSQL(ext, ( 'SELECT "t1"."id", "t1"."author_id", "t1"."content", "t2"."id", ' '"t2"."first", "t2"."last", "t2"."dob" ' 'FROM "note" AS "t1" INNER JOIN "person" AS "t2" ' 'ON ("t1"."author_id" = "t2"."id")'), []) def test_selected_columns(self): query = (Person .select( Person.first, Person.last, fn.COUNT(Note.id).alias('ct')) .join(Note)) f_first, f_last, f_ct = query.selected_columns self.assertEqual(f_first.name, 'first') self.assertTrue(f_first.model is Person) self.assertEqual(f_last.name, 'last') self.assertTrue(f_last.model is Person) self.assertTrue(isinstance(f_ct, Alias)) f_ct = f_ct.unwrap() self.assertEqual(f_ct.name, 'COUNT') f_nid, = f_ct.arguments self.assertEqual(f_nid.name, 'id') self.assertTrue(f_nid.model is Note) query.selected_columns = (Person.first,) f_first, = query.selected_columns self.assertEqual(f_first.name, 'first') self.assertTrue(f_first.model is Person) def test_where_coerce(self): query = Person.select(Person.last).where(Person.id == '1337') self.assertSQL(query, ( 'SELECT "t1"."last" FROM "person" AS "t1" ' 'WHERE ("t1"."id" = ?)'), [1337]) query = Person.select(Person.last).where(Person.id < (Person.id - '5')) self.assertSQL(query, ( 'SELECT "t1"."last" FROM "person" AS "t1" ' 'WHERE ("t1"."id" < ("t1"."id" - ?))'), [5]) query = Person.select(Person.last).where(Person.first == b'foo') self.assertSQL(query, ( 'SELECT "t1"."last" FROM "person" AS "t1" ' 'WHERE ("t1"."first" = ?)'), ['foo']) def test_group_by(self): query = (User .select(User, fn.COUNT(Tweet.id).alias('tweet_count')) .join(Tweet, JOIN.LEFT_OUTER) .group_by(User)) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username", ' 'COUNT("t2"."id") AS "tweet_count" ' 'FROM "users" AS "t1" ' 'LEFT OUTER JOIN "tweet" AS "t2" ON ("t2"."user_id" = "t1"."id") ' 'GROUP BY "t1"."id", "t1"."username"'), []) def test_group_by_extend(self): query = (User .select(User, fn.COUNT(Tweet.id).alias('tweet_count')) .join(Tweet, JOIN.LEFT_OUTER) .group_by_extend(User.id).group_by_extend(User.username)) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username", ' 'COUNT("t2"."id") AS "tweet_count" ' 'FROM "users" AS "t1" ' 'LEFT OUTER JOIN "tweet" AS "t2" ON ("t2"."user_id" = "t1"."id") ' 'GROUP BY "t1"."id", "t1"."username"'), []) def test_order_by(self): query = (User .select() .order_by(User.username.desc(), User.id)) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" ' 'ORDER BY "t1"."username" DESC, "t1"."id"'), []) def test_order_by_extend(self): query = (User .select() .order_by_extend(User.username.desc()) .order_by_extend(User.id)) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" ' 'ORDER BY "t1"."username" DESC, "t1"."id"'), []) def test_paginate(self): # Get the first page, default is limit of 20. query = User.select().paginate(1) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" ' 'LIMIT ? 
OFFSET ?'), [20, 0]) # Page 3 contains rows 31-45. query = User.select().paginate(3, 15) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" ' 'LIMIT ? OFFSET ?'), [15, 30]) def test_subquery_correction(self): users = User.select().where(User.username.in_(['foo', 'bar'])) query = Tweet.select().where(Tweet.user.in_(users)) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."user_id", "t1"."content", ' '"t1"."timestamp" ' 'FROM "tweet" AS "t1" ' 'WHERE ("t1"."user_id" IN (' 'SELECT "t2"."id" FROM "users" AS "t2" ' 'WHERE ("t2"."username" IN (?, ?))))'), ['foo', 'bar']) def test_value_flattening(self): sql = ('SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" ' 'WHERE ("t1"."username" IN (?, ?))') expected = (sql, ['foo', 'bar']) users = User.select().where(User.username.in_(['foo', 'bar'])) self.assertSQL(users, *expected) users = User.select().where(User.username.in_(('foo', 'bar'))) self.assertSQL(users, *expected) users = User.select().where(User.username.in_(set(['foo', 'bar']))) # Sets are unordered so params may be in either order: sql, params = __sql__(users) self.assertEqual(sql, expected[0]) self.assertTrue(params in (['foo', 'bar'], ['bar', 'foo'])) def test_join_ctx(self): query = Tweet.select(Tweet.id).join(Favorite).switch(Tweet).join(User) self.assertSQL(query, ( 'SELECT "t1"."id" FROM "tweet" AS "t1" ' 'INNER JOIN "favorite" AS "t2" ON ("t2"."tweet_id" = "t1"."id") ' 'INNER JOIN "users" AS "t3" ON ("t1"."user_id" = "t3"."id")'), []) query = Tweet.select(Tweet.id).join(User).switch(Tweet).join(Favorite) self.assertSQL(query, ( 'SELECT "t1"."id" FROM "tweet" AS "t1" ' 'INNER JOIN "users" AS "t2" ON ("t1"."user_id" = "t2"."id") ' 'INNER JOIN "favorite" AS "t3" ON ("t3"."tweet_id" = "t1"."id")'), []) query = Tweet.select(Tweet.id).left_outer_join(Favorite).switch(Tweet).left_outer_join(User) self.assertSQL(query, ( 'SELECT "t1"."id" FROM "tweet" AS "t1" ' 'LEFT OUTER JOIN "favorite" AS "t2" ON ("t2"."tweet_id" = "t1"."id") ' 'LEFT OUTER JOIN "users" AS "t3" ON ("t1"."user_id" = "t3"."id")'), []) query = Tweet.select(Tweet.id).left_outer_join(User).switch(Tweet).left_outer_join(Favorite) self.assertSQL(query, ( 'SELECT "t1"."id" FROM "tweet" AS "t1" ' 'LEFT OUTER JOIN "users" AS "t2" ON ("t1"."user_id" = "t2"."id") ' 'LEFT OUTER JOIN "favorite" AS "t3" ON ("t3"."tweet_id" = "t1"."id")'), []) def test_model_alias(self): TA = Tweet.alias() query = (User .select(User, fn.COUNT(TA.id).alias('tc')) .join(TA, on=(User.id == TA.user)) .group_by(User)) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username", COUNT("t2"."id") AS "tc" ' 'FROM "users" AS "t1" ' 'INNER JOIN "tweet" AS "t2" ON ("t1"."id" = "t2"."user_id") ' 'GROUP BY "t1"."id", "t1"."username"'), []) def test_model_alias_with_schema(self): class Note(TestModel): content = TextField() class Meta: schema = 'notes' query = Note.alias().select() self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."content" ' 'FROM "notes"."note" AS "t1"'), []) def test_filter_simple(self): query = User.filter(username='huey') self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" ' 'WHERE ("t1"."username" = ?)'), ['huey']) query = User.filter(username='huey', id__gte=1, id__lt=5) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" ' 'WHERE ((("t1"."id" >= ?) 
AND ("t1"."id" < ?)) AND ' '("t1"."username" = ?))'), [1, 5, 'huey']) query = User.filter(~DQ(id=1), username__in=('foo', 'bar')) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" ' 'WHERE (NOT ("t1"."id" = ?) AND ("t1"."username" IN (?, ?)))'), [1, 'foo', 'bar']) query = User.filter((DQ(id=1) | DQ(id=2)), username__in=('foo', 'bar')) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" ' 'WHERE ((("t1"."id" = ?) OR ("t1"."id" = ?)) AND ' '("t1"."username" IN (?, ?)))'), [1, 2, 'foo', 'bar']) def test_filter_expressions(self): query = User.filter( DQ(username__in=['huey', 'zaizee']) | (DQ(id__gt=2) & DQ(id__lt=4))) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username" ' 'FROM "users" AS "t1" ' 'WHERE (("t1"."username" IN (?, ?)) OR ' '(("t1"."id" > ?) AND ("t1"."id" < ?)))'), ['huey', 'zaizee', 2, 4]) def test_filter_join(self): query = Tweet.select(Tweet.content).filter(user__username='huey') self.assertSQL(query, ( 'SELECT "t1"."content" FROM "tweet" AS "t1" ' 'INNER JOIN "users" AS "t2" ON ("t1"."user_id" = "t2"."id") ' 'WHERE ("t2"."username" = ?)'), ['huey']) UA = User.alias('ua') query = (Tweet .select(Tweet.content) .join(UA) .filter(ua__username='huey')) self.assertSQL(query, ( 'SELECT "t1"."content" FROM "tweet" AS "t1" ' 'INNER JOIN "users" AS "ua" ON ("t1"."user_id" = "ua"."id") ' 'WHERE ("ua"."username" = ?)'), ['huey']) def test_filter_join_combine_models(self): query = (Tweet .select(Tweet.content) .filter(user__username='huey') .filter(DQ(user__id__gte=1) | DQ(id__lt=5))) self.assertSQL(query, ( 'SELECT "t1"."content" FROM "tweet" AS "t1" ' 'INNER JOIN "users" AS "t2" ON ("t1"."user_id" = "t2"."id") ' 'WHERE (("t2"."username" = ?) AND ' '(("t2"."id" >= ?) OR ("t1"."id" < ?)))'), ['huey', 1, 5]) def test_mix_filter_methods(self): query = (User .select(User, fn.COUNT(Tweet.id).alias('count')) .filter(username__in=('huey', 'zaizee')) .join(Tweet, JOIN.LEFT_OUTER) .group_by(User.id, User.username) .order_by(fn.COUNT(Tweet.id).desc())) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username", COUNT("t2"."id") AS "count" ' 'FROM "users" AS "t1" ' 'LEFT OUTER JOIN "tweet" AS "t2" ON ("t2"."user_id" = "t1"."id") ' 'WHERE ("t1"."username" IN (?, ?)) ' 'GROUP BY "t1"."id", "t1"."username" ' 'ORDER BY COUNT("t2"."id") DESC'), ['huey', 'zaizee']) def test_join_parent(self): query = (Category .select() .where(Category.parent == 'test')) self.assertSQL(query, ( 'SELECT "t1"."name", "t1"."parent_id" FROM "category" AS "t1" ' 'WHERE ("t1"."parent_id" = ?)'), ['test']) query = Category.filter(parent='test') self.assertSQL(query, ( 'SELECT "t1"."name", "t1"."parent_id" FROM "category" AS "t1" ' 'WHERE ("t1"."parent_id" = ?)'), ['test']) def test_cross_join(self): class A(TestModel): id = IntegerField(primary_key=True) class B(TestModel): id = IntegerField(primary_key=True) query = (A .select(A.id.alias('aid'), B.id.alias('bid')) .join(B, JOIN.CROSS) .order_by(A.id, B.id)) self.assertSQL(query, ( 'SELECT "t1"."id" AS "aid", "t2"."id" AS "bid" ' 'FROM "a" AS "t1" ' 'CROSS JOIN "b" AS "t2" ' 'ORDER BY "t1"."id", "t2"."id"'), []) def test_join_expr(self): class User(TestModel): username = TextField(primary_key=True) class Tweet(TestModel): user = ForeignKeyField(User, backref='tweets') content = TextField() sql = ('SELECT "t1"."id", "t1"."user_id", "t1"."content", ' '"t2"."username" FROM "tweet" AS "t1" ' 'INNER JOIN "user" AS "t2" ' 'ON ("t1"."user_id" = "t2"."username")') query = Tweet.select(Tweet, User).join(User) 
self.assertSQL(query, sql, []) query = (Tweet .select(Tweet, User) .join(User, on=(Tweet.user == User.username))) self.assertSQL(query, sql, []) join_expr = ((Tweet.user == User.username) & (Value(1) == 1)) query = Tweet.select().join(User, on=join_expr) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."user_id", "t1"."content" ' 'FROM "tweet" AS "t1" ' 'INNER JOIN "user" AS "t2" ' 'ON (("t1"."user_id" = "t2"."username") AND (? = ?))'), [1, 1]) def test_join_multiple_fks(self): class A(TestModel): name = TextField() class B(TestModel): name = TextField(primary_key=True) a1 = ForeignKeyField(A, backref='b_set1') a2 = ForeignKeyField(A, field=A.name, backref='b_set2') A1 = A.alias('a1') A2 = A.alias('a2') sql = ('SELECT "t1"."name", "t1"."a1_id", "t1"."a2_id", ' '"a1"."id", "a1"."name", "a2"."id", "a2"."name" ' 'FROM "b" AS "t1" ' 'INNER JOIN "a" AS "a1" ON ("t1"."a1_id" = "a1"."id") ' 'INNER JOIN "a" AS "a2" ON ("t1"."a2_id" = "a2"."name")') query = (B.select(B, A1, A2) .join_from(B, A1, on=B.a1) .join_from(B, A2, on=B.a2)) self.assertSQL(query, sql, []) query = (B.select(B, A1, A2) .join(A1, on=(B.a1 == A1.id)).switch(B) .join(A2, on=(B.a2 == A2.name))) self.assertSQL(query, sql, []) jx1 = (B.a1 == A1.id) & (Value(1) == 1) jx2 = (Value(1) == 1) & (B.a2 == A2.name) query = (B.select() .join(A1, on=jx1).switch(B) .join(A2, on=jx2)) self.assertSQL(query, ( 'SELECT "t1"."name", "t1"."a1_id", "t1"."a2_id" ' 'FROM "b" AS "t1" ' 'INNER JOIN "a" AS "a1" ' 'ON (("t1"."a1_id" = "a1"."id") AND (? = ?)) ' 'INNER JOIN "a" AS "a2" ' 'ON ((? = ?) AND ("t1"."a2_id" = "a2"."name"))'), [1, 1, 1, 1]) def test_raw(self): query = (Person .raw('SELECT first, last, dob FROM person ' 'WHERE first = ? AND substr(last, 1, 1) = ? ' 'ORDER BY last', 'huey', 'l')) self.assertSQL(query, ( 'SELECT first, last, dob FROM person ' 'WHERE first = ? AND substr(last, 1, 1) = ? ' 'ORDER BY last'), ['huey', 'l']) def test_insert(self): query = (Person .insert({Person.first: 'huey', Person.last: 'cat', Person.dob: datetime.date(2011, 1, 1)})) self.assertSQL(query, ( 'INSERT INTO "person" ("first", "last", "dob") ' 'VALUES (?, ?, ?)'), ['huey', 'cat', datetime.date(2011, 1, 1)]) query = (Note .insert({Note.author: Person(id=1337), Note.content: 'leet'})) self.assertSQL(query, ( 'INSERT INTO "note" ("author_id", "content") ' 'VALUES (?, ?)'), [1337, 'leet']) query = Person.insert(first='huey', last='cat') self.assertSQL(query, ( 'INSERT INTO "person" ("first", "last") VALUES (?, ?)'), ['huey', 'cat']) def test_replace(self): query = (Person .replace({Person.first: 'huey', Person.last: 'cat'})) self.assertSQL(query, ( 'INSERT OR REPLACE INTO "person" ("first", "last") ' 'VALUES (?, ?)'), ['huey', 'cat']) def test_insert_many(self): query = (Note .insert_many(( {Note.author: Person(id=1), Note.content: 'note-1'}, {Note.author: Person(id=2), Note.content: 'note-2'}, {Note.author: Person(id=3), Note.content: 'note-3'}))) self.assertSQL(query, ( 'INSERT INTO "note" ("author_id", "content") ' 'VALUES (?, ?), (?, ?), (?, ?)'), [1, 'note-1', 2, 'note-2', 3, 'note-3']) query = (Note .insert_many(( {'author': Person(id=1), 'content': 'note-1'}, {'author': Person(id=2), 'content': 'note-2'}))) self.assertSQL(query, ( 'INSERT INTO "note" ("author_id", "content") ' 'VALUES (?, ?), (?, ?)'), [1, 'note-1', 2, 'note-2']) def test_insert_many_defaults(self): # Verify fields are inferred and values are read correctly, when # partial data is given and a field has default values. 
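        # Sample.value carries a default (1., judging by the expected
        # parameters below), so rows that omit "value" (s3 here) have the
        # default filled in when the INSERT is compiled.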
s2 = {'counter': 2, 'value': 2.} s3 = {'counter': 3} self.assertSQL(Sample.insert_many([s2, s3]), ( 'INSERT INTO "sample" ("counter", "value") VALUES (?, ?), (?, ?)'), [2, 2., 3, 1.]) self.assertSQL(Sample.insert_many([s3, s2]), ( 'INSERT INTO "sample" ("counter", "value") VALUES (?, ?), (?, ?)'), [3, 1., 2, 2.]) def test_insert_many_defaults_nulls(self): data = [ {'name': 'd1'}, {'name': 'd2', 'dflt1': 10}, {'name': 'd3', 'dflt2': 30}, {'name': 'd4', 'dfltn': 40}] fields = [DfltM.name, DfltM.dflt1, DfltM.dflt2, DfltM.dfltn] self.assertSQL(DfltM.insert_many(data, fields=fields), ( 'INSERT INTO "dflt_m" ("name", "dflt1", "dflt2", "dfltn") VALUES ' '(?, ?, ?, ?), (?, ?, ?, ?), (?, ?, ?, ?), (?, ?, ?, ?)'), ['d1', 1, 2, None, 'd2', 10, 2, None, 'd3', 1, 30, None, 'd4', 1, 2, 40]) def test_insert_many_list_with_fields(self): data = [(i,) for i in ('charlie', 'huey', 'zaizee')] query = User.insert_many(data, fields=[User.username]) self.assertSQL(query, ( 'INSERT INTO "users" ("username") VALUES (?), (?), (?)'), ['charlie', 'huey', 'zaizee']) # Use field name instead of field obj. query = User.insert_many(data, fields=['username']) self.assertSQL(query, ( 'INSERT INTO "users" ("username") VALUES (?), (?), (?)'), ['charlie', 'huey', 'zaizee']) def test_insert_many_infer_fields(self): data = [('f1', 'l1', '1980-01-01'), ('f2', 'l2', '1980-02-02')] self.assertSQL(Person.insert_many(data), ( 'INSERT INTO "person" ("first", "last", "dob") ' 'VALUES (?, ?, ?), (?, ?, ?)'), ['f1', 'l1', datetime.date(1980, 1, 1), 'f2', 'l2', datetime.date(1980, 2, 2)]) # When primary key is not auto-increment, PKs are included. data = [('c1', 'k1', 1), ('c2', 'k2', 2)] self.assertSQL(CKM.insert_many(data), ( 'INSERT INTO "ckm" ("category", "key", "value") ' 'VALUES (?, ?, ?), (?, ?, ?)'), ['c1', 'k1', 1, 'c2', 'k2', 2]) def test_insert_query(self): select = (Person .select(Person.id, Person.first) .where(Person.last == 'cat')) query = Note.insert_from(select, (Note.author, Note.content)) self.assertSQL(query, ('INSERT INTO "note" ("author_id", "content") ' 'SELECT "t1"."id", "t1"."first" ' 'FROM "person" AS "t1" ' 'WHERE ("t1"."last" = ?)'), ['cat']) query = Note.insert_from(select, ('author', 'content')) self.assertSQL(query, ('INSERT INTO "note" ("author_id", "content") ' 'SELECT "t1"."id", "t1"."first" ' 'FROM "person" AS "t1" ' 'WHERE ("t1"."last" = ?)'), ['cat']) def test_insert_returning(self): class TestDB(Database): returning_clause = True class User(Model): username = CharField() class Meta: database = TestDB(None) query = User.insert({User.username: 'zaizee'}) self.assertSQL(query, ( 'INSERT INTO "user" ("username") ' 'VALUES (?) RETURNING "user"."id"'), ['zaizee']) class Person(Model): name = CharField() ssn = CharField(primary_key=True) class Meta: database = TestDB(None) query = Person.insert({Person.name: 'charlie', Person.ssn: '123'}) self.assertSQL(query, ( 'INSERT INTO "person" ("ssn", "name") VALUES (?, ?) ' 'RETURNING "person"."ssn"'), ['123', 'charlie']) query = Person.insert({Person.name: 'huey'}).returning() self.assertSQL(query, ( 'INSERT INTO "person" ("name") VALUES (?)'), ['huey']) query = (Person .insert({Person.name: 'foo'}) .returning(Person.ssn.alias('new_ssn'))) self.assertSQL(query, ( 'INSERT INTO "person" ("name") VALUES (?) 
' 'RETURNING "person"."ssn" AS "new_ssn"'), ['foo']) def test_insert_get_field_values(self): class User(TestModel): username = TextField(primary_key=True) class Meta: database = self.database class Tweet(TestModel): user = ForeignKeyField(User) content = TextField() class Meta: database = self.database queries = ( User.insert(username='a'), User.insert({'username': 'a'}), User.insert({User.username: 'a'})) for query in queries: self.assertSQL(query, ('INSERT INTO "user" ("username") ' 'VALUES (?)'), ['a']) # Verify that we can provide all kinds of combinations to the # constructor to INSERT and it will map the parameters correctly # without losing values. a = User(username='a') queries = ( Tweet.insert(user=a, content='ca'), Tweet.insert({'user': a, 'content': 'ca'}), Tweet.insert({Tweet.user: a, 'content': 'ca'}), Tweet.insert({'user': a, Tweet.content: 'ca'}), Tweet.insert({Tweet.user: a, Tweet.content: 'ca'}), Tweet.insert({Tweet.user: a}, content='ca'), Tweet.insert({Tweet.content: 'ca'}, user=a), Tweet.insert({'user': a}, content='ca'), Tweet.insert({'content': 'ca'}, user=a), # Also test using the foreign-key descriptor and column name. Tweet.insert({Tweet.user_id: a, Tweet.content: 'ca'}), Tweet.insert(user_id=a, content='ca'), Tweet.insert({'user_id': a, 'content': 'ca'})) for query in queries: self.assertSQL(query, ('INSERT INTO "tweet" ("user_id", "content")' ' VALUES (?, ?)'), ['a', 'ca']) def test_insert_many_get_field_values(self): class User(TestModel): username = TextField(primary_key=True) class Meta: database = self.database class Tweet(TestModel): user = ForeignKeyField(User) content = TextField() class Meta: database = self.database # Ensure we can handle any combination of insert-data key and field # list value. pairs = ((User.username, 'username'), ('username', User.username), ('username', 'username'), (User.username, User.username)) for dict_key, fields_key in pairs: iq = User.insert_many([{dict_key: u} for u in 'abc'], fields=[fields_key]) self.assertSQL(iq, ( 'INSERT INTO "user" ("username") VALUES (?), (?), (?)'), ['a', 'b', 'c']) a, b = User(username='a'), User(username='b') user_content = ( (a, 'ca1'), (a, 'ca2'), (b, 'cb1'), ('a', 'ca3')) # Specify user id directly. # Ensure we can mix-and-match key type within insert-data. pairs = (('user', 'content'), (Tweet.user, Tweet.content), (Tweet.user, 'content'), ('user', Tweet.content), ('user_id', 'content'), (Tweet.user_id, Tweet.content)) for ukey, ckey in pairs: iq = Tweet.insert_many([{ukey: u, ckey: c} for u, c in user_content]) self.assertSQL(iq, ( 'INSERT INTO "tweet" ("user_id", "content") VALUES ' '(?, ?), (?, ?), (?, ?), (?, ?)'), ['a', 'ca1', 'a', 'ca2', 'b', 'cb1', 'a', 'ca3']) def test_insert_many_dict_and_list(self): class R(TestModel): k = TextField(column_name='key') v = IntegerField(column_name='value', default=0) class Meta: database = self.database data = ( {'k': 'k1', 'v': 1}, {R.k: 'k2', R.v: 2}, {'key': 'k3', 'value': 3}, ('k4', 4), ('k5', '5'), # Will be converted properly. 
{R.k: 'k6', R.v: '6'}, {'key': 'k7', 'value': '7'}, {'k': 'kx'}, ('ky',)) param_str = ', '.join('(?, ?)' for _ in range(len(data))) queries = ( R.insert_many(data), R.insert_many(data, fields=[R.k, R.v]), R.insert_many(data, fields=['k', 'v'])) for query in queries: self.assertSQL(query, ( 'INSERT INTO "r" ("key", "value") VALUES %s' % param_str), ['k1', 1, 'k2', 2, 'k3', 3, 'k4', 4, 'k5', 5, 'k6', 6, 'k7', 7, 'kx', 0, 'ky', 0]) def test_insert_modelalias(self): UA = User.alias('ua') self.assertSQL(UA.insert({UA.username: 'huey'}), ( 'INSERT INTO "users" ("username") VALUES (?)'), ['huey']) self.assertSQL(UA.insert(username='huey'), ( 'INSERT INTO "users" ("username") VALUES (?)'), ['huey']) def test_update(self): class Stat(TestModel): url = TextField() count = IntegerField() timestamp = TimestampField(utc=True) query = (Stat .update({Stat.count: Stat.count + 1, Stat.timestamp: datetime.datetime(2017, 1, 1)}) .where(Stat.url == '/peewee')) self.assertSQL(query, ( 'UPDATE "stat" SET "count" = ("stat"."count" + ?), ' '"timestamp" = ? ' 'WHERE ("stat"."url" = ?)'), [1, 1483228800, '/peewee']) query = (Stat .update(count=Stat.count + 1) .where(Stat.url == '/peewee')) self.assertSQL(query, ( 'UPDATE "stat" SET "count" = ("stat"."count" + ?) ' 'WHERE ("stat"."url" = ?)'), [1, '/peewee']) def test_update_subquery(self): class U(TestModel): username = TextField() flood_count = IntegerField() class T(TestModel): user = ForeignKeyField(U) ctq = T.select(fn.COUNT(T.id) / 100).where(T.user == U.id) subq = (T .select(T.user) .group_by(T.user) .having(fn.COUNT(T.id) > 100)) query = (U .update({U.flood_count: ctq}) .where(U.id.in_(subq))) self.assertSQL(query, ( 'UPDATE "u" SET "flood_count" = (' 'SELECT (COUNT("t1"."id") / ?) FROM "t" AS "t1" ' 'WHERE ("t1"."user_id" = "u"."id")) ' 'WHERE ("u"."id" IN (' 'SELECT "t1"."user_id" FROM "t" AS "t1" ' 'GROUP BY "t1"."user_id" ' 'HAVING (COUNT("t1"."id") > ?)))'), [100, 100]) def test_update_from(self): class SalesPerson(TestModel): first = TextField() last = TextField() class Account(TestModel): contact_first = TextField() contact_last = TextField() sales = ForeignKeyField(SalesPerson) query = (Account .update(contact_first=SalesPerson.first, contact_last=SalesPerson.last) .from_(SalesPerson) .where(Account.sales == SalesPerson.id)) self.assertSQL(query, ( 'UPDATE "account" SET ' '"contact_first" = "t1"."first", ' '"contact_last" = "t1"."last" ' 'FROM "sales_person" AS "t1" ' 'WHERE ("account"."sales_id" = "t1"."id")'), []) query = (User .update({User.username: Tweet.content}) .from_(Tweet) .where(Tweet.content == 'tx')) self.assertSQL(query, ( 'UPDATE "users" SET "username" = "t1"."content" ' 'FROM "tweet" AS "t1" WHERE ("t1"."content" = ?)'), ['tx']) def test_update_from_qualnames(self): data = [(1, 'u1-x'), (2, 'u2-x')] vl = ValuesList(data, columns=('id', 'username'), alias='tmp') query = (User .update({User.username: vl.c.username}) .from_(vl) .where(User.id == vl.c.id)) self.assertSQL(query, ( 'UPDATE "users" SET "username" = "tmp"."username" ' 'FROM (VALUES (?, ?), (?, ?)) AS "tmp"("id", "username") ' 'WHERE ("users"."id" = "tmp"."id")'), [1, 'u1-x', 2, 'u2-x']) def test_update_from_subselect(self): data = [(1, 'u1-x'), (2, 'u2-x')] vl = ValuesList(data, columns=('id', 'username'), alias='tmp') subq = vl.select(vl.c.id, vl.c.username) query = (User .update({User.username: subq.c.username}) .from_(subq) .where(User.id == subq.c.id)) self.assertSQL(query, ( 'UPDATE "users" SET "username" = "t1"."username" FROM (' 'SELECT "tmp"."id", "tmp"."username" ' 
'FROM (VALUES (?, ?), (?, ?)) AS "tmp"("id", "username")) AS "t1" ' 'WHERE ("users"."id" = "t1"."id")'), [1, 'u1-x', 2, 'u2-x']) def test_delete(self): query = (Note .delete() .where(Note.author << (Person.select(Person.id) .where(Person.last == 'cat')))) self.assertSQL(query, ('DELETE FROM "note" ' 'WHERE ("note"."author_id" IN (' 'SELECT "t1"."id" FROM "person" AS "t1" ' 'WHERE ("t1"."last" = ?)))'), ['cat']) query = Note.delete().where(Note.author == Person(id=123)) self.assertSQL(query, ( 'DELETE FROM "note" WHERE ("note"."author_id" = ?)'), [123]) def test_delete_recursive(self): class User(TestModel): username = CharField() class Tweet(TestModel): user = ForeignKeyField(User, backref='tweets') content = TextField() class Relationship(TestModel): from_user = ForeignKeyField(User, backref='relationships') to_user = ForeignKeyField(User, backref='related_to') class Like(TestModel): user = ForeignKeyField(User) tweet = ForeignKeyField(Tweet) queries = list(User(id=1).dependencies()) accum = [] for expr, fk in list(queries): query = fk.model.delete().where(expr) accum.append(__sql__(query)) self.assertEqual(sorted(accum), [ ('DELETE FROM "like" WHERE (' '"like"."tweet_id" IN (' 'SELECT "t1"."id" FROM "tweet" AS "t1" WHERE (' '"t1"."user_id" = ?)))', [1]), ('DELETE FROM "like" WHERE ("like"."user_id" = ?)', [1]), ('DELETE FROM "relationship" ' 'WHERE ("relationship"."from_user_id" = ?)', [1]), ('DELETE FROM "relationship" ' 'WHERE ("relationship"."to_user_id" = ?)', [1]), ('DELETE FROM "tweet" WHERE ("tweet"."user_id" = ?)', [1]), ]) def test_aliases(self): class A(TestModel): a = CharField() class B(TestModel): b = CharField() a_link = ForeignKeyField(A) class C(TestModel): c = CharField() b_link = ForeignKeyField(B) class D(TestModel): d = CharField() c_link = ForeignKeyField(C) query = (D .select(D.d, C.c) .join(C) .where(C.b_link << ( B.select(B.id).join(A).where(A.a == 'a')))) self.assertSQL(query, ( 'SELECT "t1"."d", "t2"."c" ' 'FROM "d" AS "t1" ' 'INNER JOIN "c" AS "t2" ON ("t1"."c_link_id" = "t2"."id") ' 'WHERE ("t2"."b_link_id" IN (' 'SELECT "t3"."id" FROM "b" AS "t3" ' 'INNER JOIN "a" AS "t4" ON ("t3"."a_link_id" = "t4"."id") ' 'WHERE ("t4"."a" = ?)))'), ['a']) def test_schema(self): class WithSchema(TestModel): data = CharField(primary_key=True) class Meta: schema = 'huey' query = WithSchema.select().where(WithSchema.data == 'zaizee') self.assertSQL(query, ( 'SELECT "t1"."data" ' 'FROM "huey"."with_schema" AS "t1" ' 'WHERE ("t1"."data" = ?)'), ['zaizee']) @requires_pglike class TestOnConflictSQL(ModelDatabaseTestCase): requires = [Emp, OCTest, UKVP] def test_atomic_update(self): query = OCTest.insert(a='foo', b=1).on_conflict( conflict_target=(OCTest.a,), update={OCTest.b: OCTest.b + 2}) self.assertSQL(query, ( 'INSERT INTO "oc_test" ("a", "b", "c") VALUES (?, ?, ?) ' 'ON CONFLICT ("a") ' 'DO UPDATE SET "b" = ("oc_test"."b" + ?) ' 'RETURNING "oc_test"."id"'), ['foo', 1, 0, 2]) def test_on_conflict_do_nothing(self): query = OCTest.insert(a='foo', b=1).on_conflict(action='IGNORE') self.assertSQL(query, ( 'INSERT INTO "oc_test" ("a", "b", "c") VALUES (?, ?, ?) ' 'ON CONFLICT DO NOTHING ' 'RETURNING "oc_test"."id"'), ['foo', 1, 0]) query = OCTest.insert(a='foo', b=1).on_conflict( conflict_target=(OCTest.a,), action='IGNORE') self.assertSQL(query, ( 'INSERT INTO "oc_test" ("a", "b", "c") VALUES (?, ?, ?) ' 'ON CONFLICT ("a") DO NOTHING ' 'RETURNING "oc_test"."id"'), ['foo', 1, 0]) def test_update_where_clause(self): # Add a new row with the given "a" value. 
If a conflict occurs, # re-insert with b=b+2 so long as the original b < 3. query = OCTest.insert(a='foo', b=1).on_conflict( conflict_target=(OCTest.a,), update={OCTest.b: OCTest.b + 2}, where=(OCTest.b < 3)) self.assertSQL(query, ( 'INSERT INTO "oc_test" ("a", "b", "c") VALUES (?, ?, ?) ' 'ON CONFLICT ("a") DO UPDATE SET "b" = ("oc_test"."b" + ?) ' 'WHERE ("oc_test"."b" < ?) ' 'RETURNING "oc_test"."id"'), ['foo', 1, 0, 2, 3]) def test_conflict_target_constraint_where(self): fields = [UKVP.key, UKVP.value, UKVP.extra] data = [('k1', 1, 2), ('k2', 2, 3)] query = (UKVP.insert_many(data, fields) .on_conflict(conflict_target=(UKVP.key, UKVP.value), conflict_where=(UKVP.extra > 1), preserve=(UKVP.extra,), where=(UKVP.key != 'kx'))) self.assertSQL(query, ( 'INSERT INTO "ukvp" ("key", "value", "extra") ' 'VALUES (?, ?, ?), (?, ?, ?) ' 'ON CONFLICT ("key", "value") WHERE ("extra" > ?) ' 'DO UPDATE SET "extra" = EXCLUDED."extra" ' 'WHERE ("ukvp"."key" != ?) RETURNING "ukvp"."id"'), ['k1', 1, 2, 'k2', 2, 3, 1, 'kx']) class TestStringsForFields(ModelDatabaseTestCase): database = get_in_memory_db() requires = [Note, Person, Relationship] def test_insert(self): qkwargs = Person.insert(first='huey', last='kitty') qliteral = Person.insert({'first': 'huey', 'last': 'kitty'}) for query in (qkwargs, qliteral): self.assertSQL(query, ( 'INSERT INTO "person" ("first", "last") VALUES (?, ?)'), ['huey', 'kitty']) def test_insert_many(self): data = [ {'first': 'huey', 'last': 'cat'}, {'first': 'zaizee', 'last': 'cat'}, {'first': 'mickey', 'last': 'dog'}] query = Person.insert_many(data) self.assertSQL(query, ( 'INSERT INTO "person" ("first", "last") VALUES (?, ?), (?, ?), ' '(?, ?)'), ['huey', 'cat', 'zaizee', 'cat', 'mickey', 'dog']) def test_update(self): qkwargs = Person.update(last='kitty').where(Person.last == 'cat') qliteral = Person.update({'last': 'kitty'}).where(Person.last == 'cat') for query in (qkwargs, qliteral): self.assertSQL(query, ( 'UPDATE "person" SET "last" = ? 
WHERE ("person"."last" = ?)'), ['kitty', 'cat']) compound_db = get_in_memory_db() class CompoundTestModel(Model): class Meta: database = compound_db class Alpha(CompoundTestModel): alpha = IntegerField() class Beta(CompoundTestModel): beta = IntegerField() other = IntegerField(default=0) class Gamma(CompoundTestModel): gamma = IntegerField() other = IntegerField(default=1) class TestModelCompoundSelect(BaseTestCase): def test_unions(self): lhs = Alpha.select(Alpha.alpha) rhs = Beta.select(Beta.beta) self.assertSQL((lhs | rhs), ( 'SELECT "t1"."alpha" FROM "alpha" AS "t1" UNION ' 'SELECT "t2"."beta" FROM "beta" AS "t2"'), []) rrhs = Gamma.select(Gamma.gamma) query = (lhs | (rhs | rrhs)) self.assertSQL(query, ( 'SELECT "t1"."alpha" FROM "alpha" AS "t1" UNION ' 'SELECT "t2"."beta" FROM "beta" AS "t2" UNION ' 'SELECT "t3"."gamma" FROM "gamma" AS "t3"'), []) def test_union_same_model(self): q1 = Alpha.select(Alpha.alpha) q2 = Alpha.select(Alpha.alpha) q3 = Alpha.select(Alpha.alpha) compound = (q1 | q2) | q3 self.assertSQL(compound, ( 'SELECT "t1"."alpha" FROM "alpha" AS "t1" UNION ' 'SELECT "t2"."alpha" FROM "alpha" AS "t2" UNION ' 'SELECT "t2"."alpha" FROM "alpha" AS "t2"'), []) compound = q1 | (q2 | q3) self.assertSQL(compound, ( 'SELECT "t1"."alpha" FROM "alpha" AS "t1" UNION ' 'SELECT "t2"."alpha" FROM "alpha" AS "t2" UNION ' 'SELECT "t3"."alpha" FROM "alpha" AS "t3"'), []) def test_where(self): q1 = Alpha.select(Alpha.alpha).where(Alpha.alpha < 2) q2 = Alpha.select(Alpha.alpha).where(Alpha.alpha > 5) compound = q1 | q2 self.assertSQL(compound, ( 'SELECT "t1"."alpha" FROM "alpha" AS "t1" ' 'WHERE ("t1"."alpha" < ?) ' 'UNION ' 'SELECT "t2"."alpha" FROM "alpha" AS "t2" ' 'WHERE ("t2"."alpha" > ?)'), [2, 5]) q3 = Beta.select(Beta.beta).where(Beta.beta < 3) q4 = Beta.select(Beta.beta).where(Beta.beta > 4) compound = q1 | q3 self.assertSQL(compound, ( 'SELECT "t1"."alpha" FROM "alpha" AS "t1" ' 'WHERE ("t1"."alpha" < ?) ' 'UNION ' 'SELECT "t2"."beta" FROM "beta" AS "t2" ' 'WHERE ("t2"."beta" < ?)'), [2, 3]) compound = q1 | q3 | q2 | q4 self.assertSQL(compound, ( 'SELECT "t1"."alpha" FROM "alpha" AS "t1" ' 'WHERE ("t1"."alpha" < ?) ' 'UNION ' 'SELECT "t2"."beta" FROM "beta" AS "t2" ' 'WHERE ("t2"."beta" < ?) ' 'UNION ' 'SELECT "t3"."alpha" FROM "alpha" AS "t3" ' 'WHERE ("t3"."alpha" > ?) ' 'UNION ' 'SELECT "t2"."beta" FROM "beta" AS "t2" ' 'WHERE ("t2"."beta" > ?)'), [2, 3, 5, 4]) def test_limit(self): lhs = Alpha.select(Alpha.alpha).order_by(Alpha.alpha).limit(3) rhs = Beta.select(Beta.beta).order_by(Beta.beta).limit(4) compound = (lhs | rhs).limit(5) # This may be invalid SQL, but this at least documents the behavior. self.assertSQL(compound, ( 'SELECT "t1"."alpha" FROM "alpha" AS "t1" ' 'ORDER BY "t1"."alpha" LIMIT ? UNION ' 'SELECT "t2"."beta" FROM "beta" AS "t2" ' 'ORDER BY "t2"."beta" LIMIT ? LIMIT ?'), [3, 4, 5]) def test_union_from(self): lhs = Alpha.select(Alpha.alpha).where(Alpha.alpha < 2) rhs = Alpha.select(Alpha.alpha).where(Alpha.alpha > 5) compound = (lhs | rhs).alias('cq') query = Alpha.select(compound.c.alpha).from_(compound) self.assertSQL(query, ( 'SELECT "cq"."alpha" FROM (' 'SELECT "t1"."alpha" FROM "alpha" AS "t1" ' 'WHERE ("t1"."alpha" < ?) 
' 'UNION ' 'SELECT "t2"."alpha" FROM "alpha" AS "t2" ' 'WHERE ("t2"."alpha" > ?)) AS "cq"'), [2, 5]) b = Beta.select(Beta.beta).where(Beta.beta < 3) g = Gamma.select(Gamma.gamma).where(Gamma.gamma < 0) compound = (lhs | b | g).alias('cq') query = Alpha.select(SQL('1')).from_(compound) self.assertSQL(query, ( 'SELECT 1 FROM (' 'SELECT "t1"."alpha" FROM "alpha" AS "t1" ' 'WHERE ("t1"."alpha" < ?) ' 'UNION SELECT "t2"."beta" FROM "beta" AS "t2" ' 'WHERE ("t2"."beta" < ?) ' 'UNION SELECT "t3"."gamma" FROM "gamma" AS "t3" ' 'WHERE ("t3"."gamma" < ?)) AS "cq"'), [2, 3, 0]) def test_parentheses(self): query = (Alpha.select().where(Alpha.alpha < 2) | Beta.select(Beta.id, Beta.beta).where(Beta.beta > 3)) self.assertSQL(query, ( '(SELECT "t1"."id", "t1"."alpha" FROM "alpha" AS "t1" ' 'WHERE ("t1"."alpha" < ?)) ' 'UNION ' '(SELECT "t2"."id", "t2"."beta" FROM "beta" AS "t2" ' 'WHERE ("t2"."beta" > ?))'), [2, 3], compound_select_parentheses=True) def test_where_in(self): union = (Alpha.select(Alpha.alpha) | Beta.select(Beta.beta)) query = Alpha.select().where(Alpha.alpha << union) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."alpha" ' 'FROM "alpha" AS "t1" ' 'WHERE ("t1"."alpha" IN ' '(SELECT "t1"."alpha" FROM "alpha" AS "t1" ' 'UNION ' 'SELECT "t2"."beta" FROM "beta" AS "t2"))'), []) class TestModelIndex(BaseTestCase): database = SqliteDatabase(None) def test_model_index(self): class Article(Model): name = TextField() timestamp = TimestampField() status = IntegerField() flags = IntegerField() aidx = ModelIndex(Article, (Article.name, Article.timestamp),) self.assertSQL(aidx, ( 'CREATE INDEX IF NOT EXISTS "article_name_timestamp" ON "article" ' '("name", "timestamp")'), []) aidx = aidx.where(Article.status == 1) self.assertSQL(aidx, ( 'CREATE INDEX IF NOT EXISTS "article_name_timestamp" ON "article" ' '("name", "timestamp") ' 'WHERE ("status" = ?)'), [1]) aidx = ModelIndex(Article, (Article.timestamp.desc(), Article.flags.bin_and(4)), unique=True) self.assertSQL(aidx, ( 'CREATE UNIQUE INDEX IF NOT EXISTS "article_timestamp" ' 'ON "article" ("timestamp" DESC, ("flags" & ?))'), [4]) class TestModelArgument(BaseTestCase): database = SqliteDatabase(None) def test_model_as_argument(self): class Post(TestModel): content = TextField() timestamp = DateTimeField() class Meta: database = self.database query = (Post .select(Post.id, fn.score(Post).alias('score')) .order_by(Post.timestamp)) self.assertSQL(query, ( 'SELECT "t1"."id", score("t1") AS "score" ' 'FROM "post" AS "t1" ORDER BY "t1"."timestamp"'), []) peewee-3.17.7/tests/models.py000066400000000000000000005466611470346076600161210ustar00rootroot00000000000000import datetime import sys import threading import time import unittest from peewee import * from peewee import Entity from peewee import NodeList from peewee import SubclassAwareMetadata from peewee import sort_models from .base import db from .base import get_in_memory_db from .base import mock from .base import new_connection from .base import requires_models from .base import requires_mysql from .base import requires_pglike from .base import requires_postgresql from .base import requires_sqlite from .base import skip_if from .base import skip_unless from .base import BaseTestCase from .base import IS_CRDB from .base import IS_MYSQL from .base import IS_MYSQL_ADVANCED_FEATURES from .base import IS_POSTGRESQL from .base import IS_SQLITE from .base import IS_SQLITE_OLD from .base import IS_SQLITE_15 # Row-values. from .base import IS_SQLITE_24 # Upsert. 
from .base import IS_SQLITE_25 # Window functions. from .base import IS_SQLITE_30 # FILTER clause functions. from .base import IS_SQLITE_9 from .base import ModelTestCase from .base import TestModel from .base_models import * if sys.version_info[0] >= 3: long = int class Color(TestModel): name = CharField(primary_key=True) is_neutral = BooleanField(default=False) class Post(TestModel): content = TextField(column_name='Content') timestamp = DateTimeField(column_name='TimeStamp', default=datetime.datetime.now) class PostNote(TestModel): post = ForeignKeyField(Post, backref='notes', primary_key=True) note = TextField() class Point(TestModel): x = IntegerField() y = IntegerField() class Meta: primary_key = False class CPK(TestModel): key = CharField() value = IntegerField() extra = IntegerField() class Meta: primary_key = CompositeKey('key', 'value') class City(TestModel): name = CharField() class Venue(TestModel): name = CharField() city = ForeignKeyField(City, backref='venues') city_n = ForeignKeyField(City, backref='venues_n', null=True) class Event(TestModel): name = CharField() venue = ForeignKeyField(Venue, backref='events', null=True) class TestModelAPIs(ModelTestCase): def add_user(self, username): return User.create(username=username) def add_tweets(self, user, *tweets): accum = [] for tweet in tweets: accum.append(Tweet.create(user=user, content=tweet)) return accum @requires_models(Point) def test_no_primary_key(self): p11 = Point.create(x=1, y=1) p33 = Point.create(x=3, y=3) p_db = Point.get((Point.x == 3) & (Point.y == 3)) self.assertEqual(p_db.x, 3) self.assertEqual(p_db.y, 3) @requires_models(Post, PostNote) def test_pk_is_fk(self): with self.database.atomic(): p1 = Post.create(content='p1') p2 = Post.create(content='p2') p1n = PostNote.create(post=p1, note='p1n') p2n = PostNote.create(post=p2, note='p2n') with self.assertQueryCount(2): pn = PostNote.get(PostNote.note == 'p1n') self.assertEqual(pn.post.content, 'p1') with self.assertQueryCount(1): pn = (PostNote .select(PostNote, Post) .join(Post) .where(PostNote.note == 'p2n') .get()) self.assertEqual(pn.post.content, 'p2') if not IS_SQLITE: exc_class = ProgrammingError if IS_CRDB else IntegrityError with self.database.atomic() as txn: self.assertRaises(exc_class, PostNote.create, note='pxn') txn.rollback() @requires_models(User, Tweet) def test_assertQueryCount(self): self.add_tweets(self.add_user('charlie'), 'foo', 'bar', 'baz') def do_test(n): with self.assertQueryCount(n): authors = [tweet.user.username for tweet in Tweet.select()] self.assertRaises(AssertionError, do_test, 1) self.assertRaises(AssertionError, do_test, 3) do_test(4) self.assertRaises(AssertionError, do_test, 5) @requires_models(Post) def test_column_field_translation(self): ts = datetime.datetime(2017, 2, 1, 13, 37) ts2 = datetime.datetime(2017, 2, 2, 13, 37) p = Post.create(content='p1', timestamp=ts) p2 = Post.create(content='p2', timestamp=ts2) p_db = Post.get(Post.content == 'p1') self.assertEqual(p_db.content, 'p1') self.assertEqual(p_db.timestamp, ts) pd1, pd2 = Post.select().order_by(Post.id).dicts() self.assertEqual(pd1['content'], 'p1') self.assertEqual(pd1['timestamp'], ts) self.assertEqual(pd2['content'], 'p2') self.assertEqual(pd2['timestamp'], ts2) @requires_models(User) def test_insert_many(self): data = [('u%02d' % i,) for i in range(100)] with self.database.atomic(): for chunk in chunked(data, 10): User.insert_many(chunk).execute() self.assertEqual(User.select().count(), 100) names = [u.username for u in 
User.select().order_by(User.username)] self.assertEqual(names, ['u%02d' % i for i in range(100)]) @requires_models(DfltM) def test_insert_many_defaults_nullable(self): data = [ {'name': 'd1'}, {'name': 'd2', 'dflt1': 10}, {'name': 'd3', 'dflt2': 30}, {'name': 'd4', 'dfltn': 40}] fields = [DfltM.name, DfltM.dflt1, DfltM.dflt2, DfltM.dfltn] DfltM.insert_many(data, fields).execute() expected = [ ('d1', 1, 2, None), ('d2', 10, 2, None), ('d3', 1, 30, None), ('d4', 1, 2, 40)] query = DfltM.select().order_by(DfltM.name) actual = [(d.name, d.dflt1, d.dflt2, d.dfltn) for d in query] self.assertEqual(actual, expected) @requires_models(User, Tweet) def test_create(self): with self.assertQueryCount(1): huey = self.add_user('huey') self.assertEqual(huey.username, 'huey') self.assertTrue(isinstance(huey.id, (int, long))) self.assertTrue(huey.id > 0) with self.assertQueryCount(1): tweet = Tweet.create(user=huey, content='meow') self.assertEqual(tweet.user.id, huey.id) self.assertEqual(tweet.user.username, 'huey') self.assertEqual(tweet.content, 'meow') self.assertTrue(isinstance(tweet.id, int)) self.assertTrue(tweet.id > 0) @requires_models(User) def test_bulk_create(self): users = [User(username='u%s' % i) for i in range(5)] self.assertEqual(User.select().count(), 0) with self.assertQueryCount(1): User.bulk_create(users) self.assertEqual(User.select().count(), 5) self.assertEqual([u.username for u in User.select().order_by(User.id)], ['u0', 'u1', 'u2', 'u3', 'u4']) if IS_POSTGRESQL: self.assertEqual([u.id for u in User.select().order_by(User.id)], [user.id for user in users]) @requires_models(User) def test_bulk_create_empty(self): self.assertEqual(User.select().count(), 0) User.bulk_create([]) @requires_models(User) def test_bulk_create_batching(self): users = [User(username=str(i)) for i in range(10)] with self.assertQueryCount(4): User.bulk_create(users, 3) self.assertEqual(User.select().count(), 10) self.assertEqual([u.username for u in User.select().order_by(User.id)], list('0123456789')) if IS_POSTGRESQL: self.assertEqual([u.id for u in User.select().order_by(User.id)], [user.id for user in users]) @requires_models(Person) def test_bulk_create_error(self): people = [Person(first='a', last='b'), Person(first='b', last='c'), Person(first='a', last='b')] with self.assertRaises(IntegrityError): with self.database.atomic(): Person.bulk_create(people) self.assertEqual(Person.select().count(), 0) @requires_models(CPK) def test_bulk_create_composite_key(self): self.assertEqual(CPK.select().count(), 0) items = [CPK(key='k1', value=1, extra=1), CPK(key='k2', value=2, extra=2)] CPK.bulk_create(items) self.assertEqual([(c.key, c.value, c.extra) for c in items], [('k1', 1, 1), ('k2', 2, 2)]) query = CPK.select().order_by(CPK.key).tuples() self.assertEqual(list(query), [('k1', 1, 1), ('k2', 2, 2)]) @requires_models(Person) def test_bulk_update(self): data = [('f%s' % i, 'l%s' % i, datetime.date(1980, i, i)) for i in range(1, 5)] Person.insert_many(data).execute() p1, p2, p3, p4 = list(Person.select().order_by(Person.id)) p1.first = 'f1-x' p1.last = 'l1-x' p2.first = 'f2-y' p3.last = 'l3-z' with self.assertQueryCount(1): n = Person.bulk_update([p1, p2, p3, p4], ['first', 'last']) self.assertEqual(n, 3 if IS_MYSQL else 4) query = Person.select().order_by(Person.id) self.assertEqual([(p.first, p.last) for p in query], [ ('f1-x', 'l1-x'), ('f2-y', 'l2'), ('f3', 'l3-z'), ('f4', 'l4')]) # Modify multiple fields, but only update "first". 
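        # bulk_update() compiles one UPDATE per batch, using a CASE
        # expression keyed on the primary key for each named column; fields
        # not listed (here, "last") are never sent to the database, so those
        # staged changes are silently discarded.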
p1.first = 'f1-x2' p1.last = 'l1-x2' p2.first = 'f2-y2' p3.last = 'l3-z2' with self.assertQueryCount(2): # Two batches, so two queries. n = Person.bulk_update([p1, p2, p3, p4], [Person.first], 2) self.assertEqual(n, 2 if IS_MYSQL else 4) query = Person.select().order_by(Person.id) self.assertEqual([(p.first, p.last) for p in query], [ ('f1-x2', 'l1-x'), ('f2-y2', 'l2'), ('f3', 'l3-z'), ('f4', 'l4')]) @requires_models(User, Tweet) def test_bulk_update_foreign_key(self): for username in ('charlie', 'huey', 'zaizee'): user = User.create(username=username) for i in range(2): Tweet.create(user=user, content='%s-%s' % (username, i)) c, h, z = list(User.select().order_by(User.id)) c0, c1, h0, h1, z0, z1 = list(Tweet.select().order_by(Tweet.id)) c0.content = 'charlie-0x' c1.user = h h0.user = z h1.content = 'huey-1x' z0.user = c z0.content = 'zaizee-0x' with self.assertQueryCount(1): Tweet.bulk_update([c0, c1, h0, h1, z0, z1], ['user', 'content']) query = (Tweet .select(Tweet.content, User.username) .join(User) .order_by(Tweet.id) .objects()) self.assertEqual([(t.username, t.content) for t in query], [ ('charlie', 'charlie-0x'), ('huey', 'charlie-1'), ('zaizee', 'huey-0'), ('huey', 'huey-1x'), ('charlie', 'zaizee-0x'), ('zaizee', 'zaizee-1')]) @requires_models(Person) def test_bulk_update_integrityerror(self): people = [Person(first='f%s' % i, last='l%s' % i, dob='1980-01-01') for i in range(10)] Person.bulk_create(people) # Get list of people w/the IDs populated. They will not be set if the # underlying DB is Sqlite or MySQL. people = list(Person.select().order_by(Person.id)) # First we'll just modify all the first and last names. for person in people: person.first += '-x' person.last += '-x' # Now we'll introduce an issue that will cause an integrity error. p3, p7 = people[3], people[7] p3.first = p7.first = 'fx' p3.last = p7.last = 'lx' with self.assertRaises(IntegrityError): with self.assertQueryCount(1): with self.database.atomic(): Person.bulk_update(people, fields=['first', 'last']) with self.assertRaises(IntegrityError): # 10 objects, batch size=4, so 0-3, 4-7, 8&9. But we never get to 8 # and 9 because of the integrity error processing the 2nd batch. with self.assertQueryCount(2): with self.database.atomic(): Person.bulk_update(people, ['first', 'last'], 4) # Ensure no changes were made. vals = [(p.first, p.last) for p in Person.select().order_by(Person.id)] self.assertEqual(vals, [('f%s' % i, 'l%s' % i) for i in range(10)]) @requires_models(User, Tweet) def test_bulk_update_apply_dbvalue(self): u = User.create(username='u') t1, t2, t3 = [Tweet.create(user=u, content=str(i)) for i in (1, 2, 3)] # If we don't end up applying the field's db_value() to these timestamp # values, then we will end up with bad data or an error when attempting # to do the update. t1.timestamp = datetime.datetime(2019, 1, 2, 3, 4, 5) t2.timestamp = datetime.date(2019, 1, 3) t3.timestamp = 1337133700 # 2012-05-15T21:01:40 (local time). t3_dt = datetime.datetime.fromtimestamp(1337133700) Tweet.bulk_update([t1, t2, t3], fields=['timestamp']) # Ensure that the values were handled appropriately. 
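        # (Tweet.timestamp is a TimestampField, which stores an integer, so
        # each of the three python values above must round-trip through the
        # field's db_value()/python_value() conversions.)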
t1, t2, t3 = list(Tweet.select().order_by(Tweet.id)) self.assertEqual(t1.timestamp, datetime.datetime(2019, 1, 2, 3, 4, 5)) self.assertEqual(t2.timestamp, datetime.datetime(2019, 1, 3, 0, 0, 0)) self.assertEqual(t3.timestamp, t3_dt) @skip_if(IS_SQLITE_OLD or IS_MYSQL or IS_CRDB) @requires_models(CPK) def test_bulk_update_cte(self): CPK.insert_many([('k1', 1, 1), ('k2', 2, 2), ('k3', 3, 3)]).execute() # We can also do a bulk-update using ValuesList when the primary-key of # the model is a composite-pk. new_values = [('k1', 1, 10), ('k3', 3, 30)] cte = ValuesList(new_values).cte('new_values', columns=('k', 'v', 'x')) # We have to use a subquery to update the individual column, as SQLite # does not support UPDATE/FROM syntax. subq = (cte .select(cte.c.x) .where(CPK._meta.primary_key == (cte.c.k, cte.c.v))) # Perform the update, assigning extra the new value from the values # list, and restricting the overall update using the composite pk. res = (CPK .update(extra=subq) .where(CPK._meta.primary_key.in_(cte.select(cte.c.k, cte.c.v))) .with_cte(cte) .execute()) self.assertEqual(list(sorted(CPK.select().tuples())), [ ('k1', 1, 10), ('k2', 2, 2), ('k3', 3, 30)]) @requires_models(User) def test_insert_rowcount(self): User.create(username='u0') # Ensure that last insert ID != rowcount. iq = User.insert_many([(u,) for u in ('u1', 'u2', 'u3')]) self.assertEqual(iq.as_rowcount().execute(), 3) # Now explicitly specify empty returning() for all DBs. iq = User.insert_many([(u,) for u in ('u4', 'u5')]).returning() self.assertEqual(iq.as_rowcount().execute(), 2) query = (User .select(User.username.concat('-x')) .where(User.username.in_(['u1', 'u2']))) iq = User.insert_from(query, ['username']) self.assertEqual(iq.as_rowcount().execute(), 2) query = (User .select(User.username.concat('-y')) .where(User.username.in_(['u3', 'u4']))) iq = User.insert_from(query, ['username']).returning() self.assertEqual(iq.as_rowcount().execute(), 2) query = User.insert({'username': 'u5'}) self.assertEqual(query.as_rowcount().execute(), 1) @skip_if(IS_POSTGRESQL or IS_CRDB, 'requires sqlite or mysql') @requires_models(Emp) def test_replace_rowcount(self): Emp.create(first='beanie', last='cat', empno='998') data = [ ('beanie', 'cat', '999'), ('mickey', 'dog', '123')] fields = (Emp.first, Emp.last, Emp.empno) # MySQL returns 3, Sqlite 2. However, older stdlib sqlite3 does not # work properly, so we don't assert a result count here. Emp.replace_many(data, fields=fields).execute() query = Emp.select(Emp.first, Emp.last, Emp.empno).order_by(Emp.last) self.assertEqual(list(query.tuples()), [ ('beanie', 'cat', '999'), ('mickey', 'dog', '123')]) @requires_models(User, Tweet) def test_get_shortcut(self): huey = self.add_user('huey') self.add_tweets(huey, 'meow', 'purr', 'wheeze') mickey = self.add_user('mickey') self.add_tweets(mickey, 'woof', 'yip') # Lookup using just the ID. huey_db = User.get(huey.id) self.assertEqual(huey.id, huey_db.id) # Lookup using an expression. huey_db = User.get(User.username == 'huey') self.assertEqual(huey.id, huey_db.id) mickey_db = User.get(User.username == 'mickey') self.assertEqual(mickey.id, mickey_db.id) self.assertEqual(User.get(username='mickey').id, mickey.id) # No results is an exception. self.assertRaises(User.DoesNotExist, User.get, User.username == 'x') # Multiple results is OK. tweet = Tweet.get(Tweet.user == huey_db) self.assertTrue(tweet.content in ('meow', 'purr', 'wheeze')) # We cannot traverse a join like this. 
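        # (Tweet.get() has no join context here, so the reference to
        # User.username cannot be resolved against the FROM clause and the
        # database raises an error at execution time.)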
@self.database.atomic() def has_error(): Tweet.get(User.username == 'huey') self.assertRaises(Exception, has_error) # This is OK, though. tweet = Tweet.get(user__username='mickey') self.assertTrue(tweet.content in ('woof', 'yip')) tweet = Tweet.get(content__ilike='w%', user__username__ilike='%ck%') self.assertEqual(tweet.content, 'woof') @requires_models(User) def test_get_with_alias(self): huey = self.add_user('huey') query = (User .select(User.username.alias('name')) .where(User.username == 'huey')) obj = query.dicts().get() self.assertEqual(obj, {'name': 'huey'}) obj = query.objects().get() self.assertEqual(obj.name, 'huey') @requires_models(User, Tweet) def test_get_or_none(self): huey = self.add_user('huey') self.assertEqual(User.get_or_none(User.username == 'huey').username, 'huey') self.assertIsNone(User.get_or_none(User.username == 'foo')) @requires_models(User, Tweet) def test_model_select_get_or_none(self): huey = self.add_user('huey') huey_db = User.select().where(User.username == 'huey').get_or_none() self.assertEqual(huey_db.username, 'huey') self.assertIsNone( User.select().where(User.username == 'foo').get_or_none()) @requires_models(User, Color) def test_get_by_id(self): huey = self.add_user('huey') self.assertEqual(User.get_by_id(huey.id).username, 'huey') Color.insert_many([ {'name': 'red', 'is_neutral': False}, {'name': 'blue', 'is_neutral': False}]).execute() self.assertEqual(Color.get_by_id('red').name, 'red') self.assertRaises(Color.DoesNotExist, Color.get_by_id, 'green') self.assertEqual(Color['red'].name, 'red') self.assertRaises(Color.DoesNotExist, lambda: Color['green']) @requires_models(User, Color) def test_get_set_item(self): huey = self.add_user('huey') huey_db = User[huey.id] self.assertEqual(huey_db.username, 'huey') User[huey.id] = {'username': 'huey-x'} huey_db = User[huey.id] self.assertEqual(huey_db.username, 'huey-x') del User[huey.id] self.assertEqual(len(User), 0) # Allow creation by specifying None for key. User[None] = {'username': 'zaizee'} User.get(User.username == 'zaizee') @requires_models(User) def test_get_or_create(self): huey, created = User.get_or_create(username='huey') self.assertTrue(created) huey2, created2 = User.get_or_create(username='huey') self.assertFalse(created2) self.assertEqual(huey.id, huey2.id) @requires_models(Category) def test_get_or_create_self_referential_fk(self): parent = Category.create(name='parent') child, created = Category.get_or_create(parent=parent, name='child') child_db = Category.get(Category.parent == parent) self.assertEqual(child_db.parent.name, 'parent') self.assertEqual(child_db.name, 'child') @requires_models(Person) def test_get_or_create_defaults(self): p, created = Person.get_or_create(first='huey', defaults={ 'last': 'cat', 'dob': datetime.date(2010, 7, 1)}) self.assertTrue(created) p_db = Person.get(Person.first == 'huey') self.assertEqual(p_db.first, 'huey') self.assertEqual(p_db.last, 'cat') self.assertEqual(p_db.dob, datetime.date(2010, 7, 1)) p2, created = Person.get_or_create(first='huey', defaults={ 'last': 'kitten', 'dob': datetime.date(2020, 1, 1)}) self.assertFalse(created) self.assertEqual(p2.first, 'huey') self.assertEqual(p2.last, 'cat') self.assertEqual(p2.dob, datetime.date(2010, 7, 1)) @requires_models(Person) def test_save(self): huey = Person(first='huey', last='cat', dob=datetime.date(2010, 7, 1)) self.assertTrue(huey.save() > 0) self.assertTrue(huey.id is not None) # Ensure PK is set. orig_id = huey.id # Test initial save (INSERT) worked and data is all present. 
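        # save() saw no primary key, so it issued an INSERT and back-filled
        # huey.id; the re-fetch below verifies every field round-tripped.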
huey_db = Person.get(first='huey', last='cat') self.assertEqual(huey_db.id, huey.id) self.assertEqual(huey_db.first, 'huey') self.assertEqual(huey_db.last, 'cat') self.assertEqual(huey_db.dob, datetime.date(2010, 7, 1)) # Make a change and do a second save (UPDATE). huey.dob = datetime.date(2010, 7, 2) self.assertTrue(huey.save() > 0) self.assertEqual(huey.id, orig_id) # Test UPDATE worked correctly. huey_db = Person.get(first='huey', last='cat') self.assertEqual(huey_db.id, huey.id) self.assertEqual(huey_db.first, 'huey') self.assertEqual(huey_db.last, 'cat') self.assertEqual(huey_db.dob, datetime.date(2010, 7, 2)) self.assertEqual(Person.select().count(), 1) @requires_models(Person) def test_save_only(self): huey = Person(first='huey', last='cat', dob=datetime.date(2010, 7, 1)) huey.save() huey.first = 'huker' huey.last = 'kitten' self.assertTrue(huey.save(only=('first',)) > 0) huey_db = Person.get_by_id(huey.id) self.assertEqual(huey_db.first, 'huker') self.assertEqual(huey_db.last, 'cat') self.assertEqual(huey_db.dob, datetime.date(2010, 7, 1)) huey.first = 'hubie' self.assertTrue(huey.save(only=[Person.last]) > 0) huey_db = Person.get_by_id(huey.id) self.assertEqual(huey_db.first, 'huker') self.assertEqual(huey_db.last, 'kitten') self.assertEqual(huey_db.dob, datetime.date(2010, 7, 1)) self.assertEqual(Person.select().count(), 1) @requires_models(Color, User) def test_save_force(self): huey = User(username='huey') self.assertTrue(huey.save() > 0) huey_id = huey.id huey.username = 'zaizee' self.assertTrue(huey.save(force_insert=True, only=('username',)) > 0) zaizee_id = huey.id self.assertTrue(huey_id != zaizee_id) query = User.select().order_by(User.username) self.assertEqual([user.username for user in query], ['huey', 'zaizee']) color = Color(name='red') self.assertFalse(bool(color.save())) self.assertEqual(Color.select().count(), 0) color = Color(name='blue') color.save(force_insert=True) self.assertEqual(Color.select().count(), 1) with self.database.atomic(): self.assertRaises(IntegrityError, color.save, force_insert=True) @requires_models(User, Tweet) def test_populate_unsaved_relations(self): user = User(username='charlie') tweet = Tweet(user=user, content='foo') self.assertTrue(user.save()) self.assertTrue(user.id is not None) with self.assertQueryCount(1): self.assertEqual(tweet.user_id, user.id) self.assertTrue(tweet.save()) self.assertEqual(tweet.user_id, user.id) tweet_db = Tweet.get(Tweet.content == 'foo') self.assertEqual(tweet_db.user.username, 'charlie') @requires_models(User, Tweet) def test_model_select(self): huey = self.add_user('huey') mickey = self.add_user('mickey') zaizee = self.add_user('zaizee') self.add_tweets(huey, 'meow', 'hiss', 'purr') self.add_tweets(mickey, 'woof', 'whine') with self.assertQueryCount(1): query = (Tweet .select(Tweet.content, User.username) .join(User) .order_by(User.username, Tweet.content)) self.assertSQL(query, ( 'SELECT "t1"."content", "t2"."username" ' 'FROM "tweet" AS "t1" ' 'INNER JOIN "users" AS "t2" ' 'ON ("t1"."user_id" = "t2"."id") ' 'ORDER BY "t2"."username", "t1"."content"'), []) tweets = list(query) self.assertEqual([(t.content, t.user.username) for t in tweets], [ ('hiss', 'huey'), ('meow', 'huey'), ('purr', 'huey'), ('whine', 'mickey'), ('woof', 'mickey')]) @requires_models(User, Tweet, Favorite) def test_join_two_fks(self): with self.database.atomic(): huey = self.add_user('huey') mickey = self.add_user('mickey') h_m, h_p, h_h = self.add_tweets(huey, 'meow', 'purr', 'hiss') m_w, m_b = self.add_tweets(mickey, 'woof', 
'bark') Favorite.create(user=huey, tweet=m_w) Favorite.create(user=mickey, tweet=h_m) Favorite.create(user=mickey, tweet=h_p) with self.assertQueryCount(1): UA = User.alias() query = (Favorite .select(Favorite, Tweet, User, UA) .join(Tweet) .join(User) .switch(Favorite) .join(UA, on=Favorite.user) .order_by(Favorite.id)) accum = [(f.tweet.user.username, f.tweet.content, f.user.username) for f in query] self.assertEqual(accum, [ ('mickey', 'woof', 'huey'), ('huey', 'meow', 'mickey'), ('huey', 'purr', 'mickey')]) with self.assertQueryCount(5): # Test intermediate models not selected. query = (Favorite .select() .join(Tweet) .switch(Favorite) .join(User) .where(User.username == 'mickey') .order_by(Favorite.id)) accum = [(f.user.username, f.tweet.content) for f in query] self.assertEqual(accum, [('mickey', 'meow'), ('mickey', 'purr')]) @requires_models(A, B, C) def test_join_issue_1482(self): a1 = A.create(a='a1') b1 = B.create(a=a1, b='b1') c1 = C.create(b=b1, c='c1') with self.assertQueryCount(3): query = C.select().join(B).join(A).where(A.a == 'a1') accum = [(c.c, c.b.b, c.b.a.a) for c in query] self.assertEqual(accum, [('c1', 'b1', 'a1')]) @requires_models(A, B, C) def test_join_empty_intermediate_model(self): a1 = A.create(a='a1') a2 = A.create(a='a2') b11 = B.create(a=a1, b='b11') b12 = B.create(a=a1, b='b12') b21 = B.create(a=a2, b='b21') c111 = C.create(b=b11, c='c111') c112 = C.create(b=b11, c='c112') c211 = C.create(b=b21, c='c211') with self.assertQueryCount(1): query = C.select(C, A.a).join(B).join(A).order_by(C.c) accum = [(c.c, c.b.a.a) for c in query] self.assertEqual(accum, [ ('c111', 'a1'), ('c112', 'a1'), ('c211', 'a2')]) with self.assertQueryCount(1): query = C.select(C, B, A).join(B).join(A).order_by(C.c) accum = [(c.c, c.b.b, c.b.a.a) for c in query] self.assertEqual(accum, [ ('c111', 'b11', 'a1'), ('c112', 'b11', 'a1'), ('c211', 'b21', 'a2')]) @requires_models(City, Venue, Event) def test_join_empty_relations(self): with self.database.atomic(): city = City.create(name='Topeka') venue1 = Venue.create(name='House', city=city, city_n=city) venue2 = Venue.create(name='Nowhere', city=city, city_n=None) event1 = Event.create(name='House Party', venue=venue1) event2 = Event.create(name='Holiday') event3 = Event.create(name='Nowhere Party', venue=venue2) with self.assertQueryCount(1): query = (Event .select(Event, Venue, City) .join(Venue, JOIN.LEFT_OUTER) .join(City, JOIN.LEFT_OUTER, on=Venue.city) .order_by(Event.id)) # Here we have two left-outer joins, and the second Event # ("Holiday"), does not have an associated Venue (hence, no City). # Peewee would attach an empty Venue() model to the event, however. # It did this since we are selecting from Venue/City and Venue is # an intermediary model. It is more correct for Event.venue to be # None in this case. This is now patched / fixed. r = [(e.name, e.venue and e.venue.city.name or None) for e in query] self.assertEqual(r, [ ('House Party', 'Topeka'), ('Holiday', None), ('Nowhere Party', 'Topeka')]) with self.assertQueryCount(1): query = (Event .select(Event, Venue, City) .join(Venue, JOIN.INNER) .join(City, JOIN.LEFT_OUTER, on=Venue.city_n) .order_by(Event.id)) # Here we have an inner join and a left-outer join. The furthest # object (City) will be NULL for the "Nowhere Party". Make sure # that the object is left as None and not populated with an empty # City instance. 
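            # event.venue is always present here (INNER JOIN), but city_n
            # may legitimately be None, hence the guard in the loop below.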
accum = [] for event in query: city_name = event.venue.city_n and event.venue.city_n.name accum.append((event.name, event.venue.name, city_name)) self.assertEqual(accum, [ ('House Party', 'House', 'Topeka'), ('Nowhere Party', 'Nowhere', None)]) @requires_models(Relationship, Person) def test_join_same_model_twice(self): d = datetime.date(2010, 1, 1) huey = Person.create(first='huey', last='cat', dob=d) zaizee = Person.create(first='zaizee', last='cat', dob=d) mickey = Person.create(first='mickey', last='dog', dob=d) relationships = ( (huey, zaizee), (zaizee, huey), (mickey, huey), ) for src, dest in relationships: Relationship.create(from_person=src, to_person=dest) PA = Person.alias() with self.assertQueryCount(1): query = (Relationship .select(Relationship, Person, PA) .join(Person, on=Relationship.from_person) .switch(Relationship) .join(PA, on=Relationship.to_person) .order_by(Relationship.id)) results = [(r.from_person.first, r.to_person.first) for r in query] self.assertEqual(results, [ ('huey', 'zaizee'), ('zaizee', 'huey'), ('mickey', 'huey')]) @requires_models(User) def test_peek(self): for username in ('huey', 'mickey', 'zaizee'): self.add_user(username) with self.assertQueryCount(1): query = User.select(User.username).order_by(User.username).dicts() self.assertEqual(query.peek(n=1), {'username': 'huey'}) self.assertEqual(query.peek(n=2), [{'username': 'huey'}, {'username': 'mickey'}]) @requires_models(User, Tweet, Favorite) def test_multi_join(self): u1 = User.create(username='u1') u2 = User.create(username='u2') u3 = User.create(username='u3') t1_1 = Tweet.create(user=u1, content='t1-1') t1_2 = Tweet.create(user=u1, content='t1-2') t2_1 = Tweet.create(user=u2, content='t2-1') t2_2 = Tweet.create(user=u2, content='t2-2') favorites = ((u1, t2_1), (u1, t2_2), (u2, t1_1), (u3, t1_2), (u3, t2_2)) for user, tweet in favorites: Favorite.create(user=user, tweet=tweet) TweetUser = User.alias('u2') with self.assertQueryCount(1): query = (Favorite .select(Favorite.id, Tweet.content, User.username, TweetUser.username) .join(Tweet) .join(TweetUser, on=(Tweet.user == TweetUser.id)) .switch(Favorite) .join(User) .order_by(Tweet.content, Favorite.id)) self.assertSQL(query, ( 'SELECT ' '"t1"."id", "t2"."content", "t3"."username", "u2"."username" ' 'FROM "favorite" AS "t1" ' 'INNER JOIN "tweet" AS "t2" ON ("t1"."tweet_id" = "t2"."id") ' 'INNER JOIN "users" AS "u2" ON ("t2"."user_id" = "u2"."id") ' 'INNER JOIN "users" AS "t3" ON ("t1"."user_id" = "t3"."id") ' 'ORDER BY "t2"."content", "t1"."id"'), []) accum = [(f.tweet.user.username, f.tweet.content, f.user.username) for f in query] self.assertEqual(accum, [ ('u1', 't1-1', 'u2'), ('u1', 't1-2', 'u3'), ('u2', 't2-1', 'u1'), ('u2', 't2-2', 'u1'), ('u2', 't2-2', 'u3')]) res = query.count() self.assertEqual(res, 5) def _create_user_tweets(self): data = (('huey', ('meow', 'purr', 'hiss')), ('zaizee', ()), ('mickey', ('woof', 'grr'))) with self.database.atomic(): ts = int(time.time()) for username, tweets in data: user = User.create(username=username) for tweet in tweets: Tweet.create(user=user, content=tweet, timestamp=ts) ts += 1 @requires_models(User, Tweet) def test_join_subquery(self): self._create_user_tweets() # Select each user plus the content and timestamp of their most recent tweet. 
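        # This is the classic greatest-row-per-group pattern: an aggregated
        # subquery computes MAX(timestamp) per user, and tweets are joined
        # back against (user_id, max_ts). Roughly (schematic SQL, not the
        # exact compiled query):
        #   SELECT ... FROM tweet
        #   JOIN (SELECT user_id, MAX(timestamp) AS max_ts
        #         FROM tweet GROUP BY user_id) AS max_q
        #     ON tweet.user_id = max_q.user_id
        #    AND tweet.timestamp = max_q.max_ts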
with self.assertQueryCount(1): TA = Tweet.alias() max_q = (TA .select(TA.user, fn.MAX(TA.timestamp).alias('max_ts')) .group_by(TA.user) .alias('max_q')) predicate = ((Tweet.user == max_q.c.user_id) & (Tweet.timestamp == max_q.c.max_ts)) latest = (Tweet .select(Tweet.user, Tweet.content, Tweet.timestamp) .join(max_q, on=predicate) .alias('latest')) query = (User .select(User, latest.c.content, latest.c.timestamp) .join(latest, on=(User.id == latest.c.user_id))) data = [(user.username, user.tweet.content) for user in query] # Failing on travis-ci...old SQLite? if not IS_SQLITE_OLD: self.assertEqual(data, [ ('huey', 'hiss'), ('mickey', 'grr')]) with self.assertQueryCount(1): query = (Tweet .select(Tweet, User) .join(max_q, on=predicate) .switch(Tweet) .join(User)) data = [(note.user.username, note.content) for note in query] self.assertEqual(data, [ ('huey', 'hiss'), ('mickey', 'grr')]) @requires_models(User, Tweet) def test_join_subquery_2(self): self._create_user_tweets() with self.assertQueryCount(1): users = (User .select(User.id, User.username) .where(User.username.in_(['huey', 'zaizee']))) query = (Tweet .select(Tweet.content.alias('content'), users.c.username.alias('username')) .join(users, on=(Tweet.user == users.c.id)) .order_by(Tweet.id)) self.assertSQL(query, ( 'SELECT "t1"."content" AS "content", ' '"t2"."username" AS "username"' ' FROM "tweet" AS "t1" ' 'INNER JOIN (SELECT "t3"."id", "t3"."username" ' 'FROM "users" AS "t3" ' 'WHERE ("t3"."username" IN (?, ?))) AS "t2" ' 'ON ("t1"."user_id" = "t2"."id") ' 'ORDER BY "t1"."id"'), ['huey', 'zaizee']) results = [(t.content, t.user.username) for t in query] self.assertEqual(results, [ ('meow', 'huey'), ('purr', 'huey'), ('hiss', 'huey')]) @skip_if(IS_SQLITE_OLD or (IS_MYSQL and not IS_MYSQL_ADVANCED_FEATURES)) @requires_models(User, Tweet) def test_join_subquery_cte(self): self._create_user_tweets() cte = (User .select(User.id, User.username) .where(User.username.in_(['huey', 'zaizee']))\ .cte('cats')) with self.assertQueryCount(1): # Attempt join with subquery as common-table expression. query = (Tweet .select(Tweet.content, cte.c.username) .join(cte, on=(Tweet.user == cte.c.id)) .order_by(Tweet.id) .with_cte(cte)) self.assertSQL(query, ( 'WITH "cats" AS (' 'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" ' 'WHERE ("t1"."username" IN (?, ?))) ' 'SELECT "t2"."content", "cats"."username" FROM "tweet" AS "t2" ' 'INNER JOIN "cats" ON ("t2"."user_id" = "cats"."id") ' 'ORDER BY "t2"."id"'), ['huey', 'zaizee']) self.assertEqual([t.content for t in query], ['meow', 'purr', 'hiss']) @skip_if(IS_MYSQL) # MariaDB does not support LIMIT in subqueries! @requires_models(User) def test_subquery_emulate_window(self): # We have duplicated users. Select a maximum of 2 instances of the # username. name2count = { 'beanie': 6, 'huey': 5, 'mickey': 3, 'pipey': 1, 'zaizee': 4} names = [] for name, count in sorted(name2count.items()): names += [name] * count User.insert_many([(i, n) for i, n in enumerate(names, 1)], [User.id, User.username]).execute() # The results we are trying to obtain. expected = [ ('beanie', 1), ('beanie', 2), ('huey', 7), ('huey', 8), ('mickey', 12), ('mickey', 13), ('pipey', 15), ('zaizee', 16), ('zaizee', 17)] with self.assertQueryCount(1): # Using a self-join. 
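            # The join pairs each candidate row UA with every same-username
            # row whose id <= UA.id, so COUNT(UA.id) per (username, UA.id)
            # group equals UA's rank within its username; HAVING rank < 3
            # keeps the first two ids per name (a ROW_NUMBER() <= 2
            # emulation without window functions).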
UA = User.alias() query = (User .select(User.username, UA.id) .join(UA, on=((UA.username == User.username) & (UA.id >= User.id))) .group_by(User.username, UA.id) .having(fn.COUNT(UA.id) < 3) .order_by(User.username, UA.id)) self.assertEqual(query.tuples()[:], expected) with self.assertQueryCount(1): # Using a correlated subquery. subq = (UA .select(UA.id) .where(User.username == UA.username) .order_by(UA.id) .limit(2)) query = (User .select(User.username, User.id) .where(User.id.in_(subq.alias('subq'))) .order_by(User.username, User.id)) self.assertEqual(query.tuples()[:], expected) @requires_models(User, Tweet) def test_subquery_alias_selection(self): data = ( ('huey', ('meow', 'hiss', 'purr')), ('mickey', ('woof', 'bark')), ('zaizee', ())) with self.database.atomic(): for username, tweets in data: user = User.create(username=username) for tweet in tweets: Tweet.create(user=user, content=tweet) with self.assertQueryCount(1): subq = (Tweet .select(fn.COUNT(Tweet.id)) .where(Tweet.user == User.id)) query = (User .select(User.username, subq.alias('tweet_count')) .order_by(User.id)) self.assertEqual([(u.username, u.tweet_count) for u in query], [ ('huey', 3), ('mickey', 2), ('zaizee', 0)]) @requires_pglike @requires_models(User) def test_join_on_valueslist(self): for username in ('huey', 'mickey', 'zaizee'): User.create(username=username) vl = ValuesList([('huey',), ('zaizee',)], columns=['username']) with self.assertQueryCount(1): query = (User .select(vl.c.username) .join(vl, on=(User.username == vl.c.username)) .order_by(vl.c.username.desc())) self.assertEqual([u.username for u in query], ['zaizee', 'huey']) @skip_if(IS_SQLITE_OLD or IS_MYSQL or IS_CRDB) @requires_models(User) def test_multi_update(self): data = [(i, 'u%s' % i) for i in range(1, 4)] User.insert_many(data, fields=[User.id, User.username]).execute() data = [(i, 'u%sx' % i) for i in range(1, 3)] vl = ValuesList(data) cte = vl.select().cte('uv', columns=('id', 'username')) subq = cte.select(cte.c.username).where(cte.c.id == User.id) res = (User .update(username=subq) .where(User.id.in_(cte.select(cte.c.id))) .with_cte(cte) .execute()) query = User.select().order_by(User.id) self.assertEqual([(u.id, u.username) for u in query], [ (1, 'u1x'), (2, 'u2x'), (3, 'u3')]) @requires_models(User, Tweet) def test_insert_query_value(self): huey = self.add_user('huey') query = User.select(User.id).where(User.username == 'huey') tid = Tweet.insert(content='meow', user=query).execute() tweet = Tweet[tid] self.assertEqual(tweet.user.id, huey.id) self.assertEqual(tweet.user.username, 'huey') @skip_if(IS_SQLITE and not IS_SQLITE_9, 'requires sqlite >= 3.9') @requires_models(Register) def test_compound_select(self): for i in range(10): Register.create(value=i) q1 = Register.select().where(Register.value < 2) q2 = Register.select().where(Register.value > 7) c1 = (q1 | q2).order_by(SQL('2')) self.assertSQL(c1, ( 'SELECT "t1"."id", "t1"."value" FROM "register" AS "t1" ' 'WHERE ("t1"."value" < ?) UNION ' 'SELECT "t2"."id", "t2"."value" FROM "register" AS "t2" ' 'WHERE ("t2"."value" > ?) ORDER BY 2'), [2, 7]) self.assertEqual([row.value for row in c1], [0, 1, 8, 9], [row.__data__ for row in c1]) self.assertEqual(c1.count(), 4) q3 = Register.select().where(Register.value == 5) c2 = (c1.order_by() | q3).order_by(SQL('2')) self.assertSQL(c2, ( 'SELECT "t1"."id", "t1"."value" FROM "register" AS "t1" ' 'WHERE ("t1"."value" < ?) UNION ' 'SELECT "t2"."id", "t2"."value" FROM "register" AS "t2" ' 'WHERE ("t2"."value" > ?) 
UNION ' 'SELECT "t2"."id", "t2"."value" FROM "register" AS "t2" ' 'WHERE ("t2"."value" = ?) ORDER BY 2'), [2, 7, 5]) self.assertEqual([row.value for row in c2], [0, 1, 5, 8, 9]) self.assertEqual(c2.count(), 5) @requires_models(User, Tweet) def test_union_column_resolution(self): u1 = User.create(id=1, username='u1') u2 = User.create(id=2, username='u2') q1 = User.select().where(User.id == 1) q2 = User.select() union = q1 | q2 self.assertSQL(union, ( 'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" ' 'WHERE ("t1"."id" = ?) ' 'UNION ' 'SELECT "t2"."id", "t2"."username" FROM "users" AS "t2"'), [1]) results = [(user.id, user.username) for user in union] self.assertEqual(sorted(results), [ (1, 'u1'), (2, 'u2')]) t1_1 = Tweet.create(id=1, user=u1, content='u1-t1') t1_2 = Tweet.create(id=2, user=u1, content='u1-t2') t2_1 = Tweet.create(id=3, user=u2, content='u2-t1') with self.assertQueryCount(1): q1 = Tweet.select(Tweet, User).join(User).where(User.id == 1) q2 = Tweet.select(Tweet, User).join(User) union = q1 | q2 self.assertSQL(union, ( 'SELECT "t1"."id", "t1"."user_id", "t1"."content", ' '"t1"."timestamp", "t2"."id", "t2"."username" ' 'FROM "tweet" AS "t1" ' 'INNER JOIN "users" AS "t2" ON ("t1"."user_id" = "t2"."id") ' 'WHERE ("t2"."id" = ?) ' 'UNION ' 'SELECT "t3"."id", "t3"."user_id", "t3"."content", ' '"t3"."timestamp", "t4"."id", "t4"."username" ' 'FROM "tweet" AS "t3" ' 'INNER JOIN "users" AS "t4" ON ("t3"."user_id" = "t4"."id")'), [1]) results = [(t.id, t.content, t.user.username) for t in union] self.assertEqual(sorted(results), [ (1, 'u1-t1', 'u1'), (2, 'u1-t2', 'u1'), (3, 'u2-t1', 'u2')]) with self.assertQueryCount(1): union_flat = (q1 | q2).objects() results = [(t.id, t.content, t.username) for t in union_flat] self.assertEqual(sorted(results), [ (1, 'u1-t1', 'u1'), (2, 'u1-t2', 'u1'), (3, 'u2-t1', 'u2')]) @requires_models(User, Tweet) def test_compound_select_as_subquery(self): with self.database.atomic(): for i in range(5): user = User.create(username='u%s' % i) for j in range(i * 2): Tweet.create(user=user, content='t%s-%s' % (i, j)) q1 = (Tweet .select(Tweet.id, Tweet.content, User.username) .join(User) .where(User.username == 'u3')) q2 = (Tweet .select(Tweet.id, Tweet.content, User.username) .join(User) .where(User.username.in_(['u2', 'u4']))) union = (q1 | q2) q = (union .select_from(union.c.username, fn.COUNT(union.c.id).alias('ct')) .group_by(union.c.username) .order_by(fn.COUNT(union.c.id).desc()) .dicts()) self.assertEqual(list(q), [ {'username': 'u4', 'ct': 8}, {'username': 'u3', 'ct': 6}, {'username': 'u2', 'ct': 4}]) @requires_models(User, Tweet) def test_union_with_join(self): u1, u2 = [User.create(username='u%s' % i) for i in (1, 2)] for u, ts in ((u1, ('t1', 't2')), (u2, ('t1',))): for t in ts: Tweet.create(user=u, content='%s-%s' % (u.username, t)) with self.assertQueryCount(1): q1 = (User .select(User, Tweet) .join(Tweet, on=(Tweet.user == User.id).alias('foo'))) q2 = (User .select(User, Tweet) .join(Tweet, on=(Tweet.user == User.id).alias('foo'))) self.assertEqual( sorted([(user.username, user.foo.content) for user in q1]), [('u1', 'u1-t1'), ('u1', 'u1-t2'), ('u2', 'u2-t1')]) with self.assertQueryCount(1): uq = q1.union_all(q2) result = [(user.username, user.foo.content) for user in uq] self.assertEqual(sorted(result), [ ('u1', 'u1-t1'), ('u1', 'u1-t1'), ('u1', 'u1-t2'), ('u1', 'u1-t2'), ('u2', 'u2-t1'), ('u2', 'u2-t1'), ]) @skip_if(IS_SQLITE_OLD or (IS_MYSQL and not IS_MYSQL_ADVANCED_FEATURES)) @requires_models(User) def test_union_cte(self): with 
self.database.atomic(): (User .insert_many({'username': 'u%s' % i} for i in range(10)) .execute()) lhs = User.select().where(User.username.in_(['u1', 'u3'])) rhs = User.select().where(User.username.in_(['u5', 'u7'])) u_cte = (lhs | rhs).cte('users_union') query = (User .select(User.username) .join(u_cte, on=(User.id == u_cte.c.id)) .where(User.username.in_(['u1', 'u7'])) .with_cte(u_cte)) self.assertEqual(sorted([u.username for u in query]), ['u1', 'u7']) @requires_models(Category) def test_self_referential_fk(self): self.assertTrue(Category.parent.rel_model is Category) root = Category.create(name='root') c1 = Category.create(parent=root, name='child-1') c2 = Category.create(parent=root, name='child-2') with self.assertQueryCount(1): Parent = Category.alias('p') query = (Category .select( Parent.name, Category.name) .where(Category.parent == root) .order_by(Category.name)) query = query.join(Parent, on=(Category.parent == Parent.name)) c1_db, c2_db = list(query) self.assertEqual(c1_db.name, 'child-1') self.assertEqual(c1_db.parent.name, 'root') self.assertEqual(c2_db.name, 'child-2') self.assertEqual(c2_db.parent.name, 'root') @requires_models(Category) def test_empty_joined_instance(self): root = Category.create(name='a') c1 = Category.create(name='c1', parent=root) c2 = Category.create(name='c2', parent=root) with self.assertQueryCount(1): Parent = Category.alias('p') query = (Category .select(Category, Parent) .join(Parent, JOIN.LEFT_OUTER, on=(Category.parent == Parent.name)) .order_by(Category.name)) result = [(category.name, category.parent is None) for category in query] self.assertEqual(result, [('a', True), ('c1', False), ('c2', False)]) @requires_models(User, Tweet) def test_from_multi_table(self): self.add_tweets(self.add_user('huey'), 'meow', 'hiss', 'purr') self.add_tweets(self.add_user('mickey'), 'woof', 'wheeze') with self.assertQueryCount(1): query = (Tweet .select(Tweet, User) .from_(Tweet, User) .where( (Tweet.user == User.id) & (User.username == 'huey')) .order_by(Tweet.id) .dicts()) self.assertEqual([t['content'] for t in query], ['meow', 'hiss', 'purr']) self.assertEqual([t['username'] for t in query], ['huey', 'huey', 'huey']) @requires_models(Point) def test_subquery_in_select_expression(self): for x, y in ((1, 1), (1, 2), (10, 10), (10, 20)): Point.create(x=x, y=y) with self.assertQueryCount(1): PA = Point.alias('pa') subq = PA.select(fn.SUM(PA.y)).where(PA.x == Point.x) query = (Point .select(Point.x, Point.y, subq.alias('sy')) .order_by(Point.x, Point.y)) self.assertEqual(list(query.tuples()), [ (1, 1, 3), (1, 2, 3), (10, 10, 30), (10, 20, 30)]) with self.assertQueryCount(1): query = (Point .select(Point.x, (Point.y + subq).alias('sy')) .order_by(Point.x, Point.y)) self.assertEqual(list(query.tuples()), [ (1, 4), (1, 5), (10, 40), (10, 50)]) @requires_models(User, Tweet) def test_filtering(self): with self.database.atomic(): huey = self.add_user('huey') mickey = self.add_user('mickey') self.add_tweets(huey, 'meow', 'hiss', 'purr') self.add_tweets(mickey, 'woof', 'wheeze') with self.assertQueryCount(1): query = Tweet.filter(user__username='huey').order_by(Tweet.content) self.assertEqual([row.content for row in query], ['hiss', 'meow', 'purr']) with self.assertQueryCount(1): query = User.filter(tweets__content__ilike='w%') self.assertEqual([user.username for user in query], ['mickey', 'mickey']) def test_deferred_fk(self): class Note(TestModel): foo = DeferredForeignKey('Foo', backref='notes') class Foo(TestModel): note = ForeignKeyField(Note) 
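        # DeferredForeignKey allows circular references: Note refers to
        # "Foo" by name before the Foo class exists, and the reference is
        # resolved once Foo is declared. The assertions below confirm
        # that both sides of the cycle were wired up.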
self.assertTrue(Note.foo.rel_model is Foo) self.assertTrue(Foo.note.rel_model is Note) f = Foo(id=1337) self.assertSQL(f.notes, ( 'SELECT "t1"."id", "t1"."foo_id" FROM "note" AS "t1" ' 'WHERE ("t1"."foo_id" = ?)'), [1337]) def test_deferred_fk_dependency_graph(self): class AUser(TestModel): foo = DeferredForeignKey('Tweet') class ZTweet(TestModel): user = ForeignKeyField(AUser, backref='ztweets') self.assertEqual(sort_models([AUser, ZTweet]), [AUser, ZTweet]) def test_table_schema(self): class Schema(TestModel): pass self.assertTrue(Schema._meta.schema is None) self.assertSQL(Schema.select(), ( 'SELECT "t1"."id" FROM "schema" AS "t1"'), []) Schema._meta.schema = 'test' self.assertSQL(Schema.select(), ( 'SELECT "t1"."id" FROM "test"."schema" AS "t1"'), []) Schema._meta.schema = 'another' self.assertSQL(Schema.select(), ( 'SELECT "t1"."id" FROM "another"."schema" AS "t1"'), []) @requires_models(User) def test_noop(self): query = User.noop() self.assertEqual(list(query), []) @requires_models(User) def test_iteration(self): self.assertEqual(list(User), []) self.assertEqual(len(User), 0) self.assertTrue(User) User.insert_many((['charlie'], ['huey']), [User.username]).execute() self.assertEqual(sorted(u.username for u in User), ['charlie', 'huey']) self.assertEqual(len(User), 2) self.assertTrue(User) @requires_models(User) def test_iterator(self): users = ['charlie', 'huey', 'zaizee'] with self.database.atomic(): for username in users: User.create(username=username) with self.assertQueryCount(1): query = User.select().order_by(User.username).iterator() self.assertEqual([u.username for u in query], users) self.assertEqual(list(query), []) @requires_models(User) def test_select_count(self): users = [self.add_user(u) for u in ('huey', 'charlie', 'mickey')] self.assertEqual(User.select().count(), 3) qr = User.select().execute() self.assertEqual(qr.count, 0) list(qr) self.assertEqual(qr.count, 3) @requires_models(User) def test_batch_commit(self): commit_method = self.database.commit def assertBatch(n_rows, batch_size, n_commits): User.delete().execute() user_data = [{'username': 'u%s' % i} for i in range(n_rows)] with mock.patch.object(self.database, 'commit') as mock_commit: mock_commit.side_effect = commit_method for row in self.database.batch_commit(user_data, batch_size): User.create(**row) self.assertEqual(mock_commit.call_count, n_commits) self.assertEqual(User.select().count(), n_rows) assertBatch(6, 1, 6) assertBatch(6, 2, 3) assertBatch(6, 3, 2) assertBatch(6, 4, 2) assertBatch(6, 6, 1) assertBatch(6, 7, 1) class TestRaw(ModelTestCase): database = get_in_memory_db() requires = [User] def test_raw(self): with self.database.atomic(): for username in ('charlie', 'chuck', 'huey', 'zaizee'): User.create(username=username) query = (User .raw('SELECT username, SUBSTR(username, 1, 1) AS first ' 'FROM users ' 'WHERE SUBSTR(username, 1, 1) = ? ' 'ORDER BY username DESC', 'c')) self.assertEqual([(row.username, row.first) for row in query], [('chuck', 'c'), ('charlie', 'c')]) def test_raw_iterator(self): (User .insert_many([('charlie',), ('huey',)], fields=[User.username]) .execute()) with self.assertQueryCount(1): query = User.raw('SELECT * FROM users ORDER BY id') results = [user.username for user in query.iterator()] self.assertEqual(results, ['charlie', 'huey']) # Since we used iterator(), the results were not cached. 
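        # iterator() streams rows without populating the result cache,
        # keeping memory usage flat for large result sets at the cost of
        # one-shot iteration: consuming the query a second time yields
        # nothing, as asserted below. Typical usage (illustrative sketch;
        # handle_row is a hypothetical callback):
        #
        #   for user in User.raw('SELECT * FROM users').iterator():
        #       handle_row(user)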
self.assertEqual([u.username for u in query], []) class TestDeleteInstance(ModelTestCase): database = get_in_memory_db() requires = [User, Account, Tweet, Favorite] def setUp(self): super(TestDeleteInstance, self).setUp() with self.database.atomic(): huey = User.create(username='huey') acct = Account.create(user=huey, email='huey@meow.com') for content in ('meow', 'purr'): Tweet.create(user=huey, content=content) mickey = User.create(username='mickey') woof = Tweet.create(user=mickey, content='woof') Favorite.create(user=huey, tweet=woof) Favorite.create(user=mickey, tweet=Tweet.create(user=huey, content='hiss')) def test_delete_instance_recursive(self): huey = User.get(User.username == 'huey') a = [] for d in huey.dependencies(): a.append(d) with self.assertQueryCount(5): huey.delete_instance(recursive=True) self.assertHistory(5, [ ('DELETE FROM "favorite" WHERE ("favorite"."user_id" = ?)', [huey.id]), ('DELETE FROM "favorite" WHERE (' '"favorite"."tweet_id" IN (' 'SELECT "t1"."id" FROM "tweet" AS "t1" WHERE (' '"t1"."user_id" = ?)))', [huey.id]), ('DELETE FROM "tweet" WHERE ("tweet"."user_id" = ?)', [huey.id]), ('UPDATE "account" SET "user_id" = ? ' 'WHERE ("account"."user_id" = ?)', [None, huey.id]), ('DELETE FROM "users" WHERE ("users"."id" = ?)', [huey.id]), ]) # Only one user left. self.assertEqual(User.select().count(), 1) # Huey's account has had the FK cleared out. acct = Account.get(Account.email == 'huey@meow.com') self.assertTrue(acct.user is None) # Huey owned a favorite and one of huey's tweets was the other fav. self.assertEqual(Favorite.select().count(), 0) # The only tweet left is mickey's. self.assertEqual(Tweet.select().count(), 1) tweet = Tweet.get() self.assertEqual(tweet.content, 'woof') def test_delete_nullable(self): huey = User.get(User.username == 'huey') # Favorite -> Tweet -> User (other users' favorites of huey's tweets) # Favorite -> User (huey's favorite tweets) # Account -> User (huey's account) # User ... for a total of 5. Favorite x2, Tweet, Account, User. with self.assertQueryCount(5): huey.delete_instance(recursive=True, delete_nullable=True) # Get the last 5 delete queries. 
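        # With delete_nullable=True the nullable Account.user dependency
        # is DELETEd outright instead of being nulled via UPDATE, so the
        # expected history below is five DELETE statements and no UPDATE
        # (compare test_delete_instance_recursive above).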
self.assertHistory(5, [ ('DELETE FROM "favorite" WHERE ("favorite"."user_id" = ?)', [huey.id]), ('DELETE FROM "favorite" WHERE (' '"favorite"."tweet_id" IN (' 'SELECT "t1"."id" FROM "tweet" AS "t1" WHERE (' '"t1"."user_id" = ?)))', [huey.id]), ('DELETE FROM "tweet" WHERE ("tweet"."user_id" = ?)', [huey.id]), ('DELETE FROM "account" WHERE ("account"."user_id" = ?)', [huey.id]), ('DELETE FROM "users" WHERE ("users"."id" = ?)', [huey.id]), ]) self.assertEqual(User.select().count(), 1) self.assertEqual(Account.select().count(), 0) self.assertEqual(Favorite.select().count(), 0) self.assertEqual(Tweet.select().count(), 1) tweet = Tweet.get() self.assertEqual(tweet.content, 'woof') def incrementer(): d = {'value': 0} def increment(): d['value'] += 1 return d['value'] return increment class AutoCounter(TestModel): counter = IntegerField(default=incrementer()) control = IntegerField(default=1) class TestDefaultDirtyBehavior(ModelTestCase): database = get_in_memory_db() requires = [AutoCounter] def tearDown(self): super(TestDefaultDirtyBehavior, self).tearDown() AutoCounter._meta.only_save_dirty = False def test_default_dirty(self): AutoCounter._meta.only_save_dirty = True ac = AutoCounter() ac.save() self.assertEqual(ac.counter, 1) self.assertEqual(ac.control, 1) ac_db = AutoCounter.get((AutoCounter.counter == 1) & (AutoCounter.control == 1)) self.assertEqual(ac_db.counter, 1) self.assertEqual(ac_db.control, 1) # No changes. self.assertFalse(ac_db.save()) ac = AutoCounter.create() self.assertEqual(ac.counter, 2) self.assertEqual(ac.control, 1) AutoCounter._meta.only_save_dirty = False ac = AutoCounter() self.assertEqual(ac.counter, 3) self.assertEqual(ac.control, 1) ac.save() ac_db = AutoCounter.get(AutoCounter.id == ac.id) self.assertEqual(ac_db.counter, 3) @requires_models(Person) def test_save_only_dirty(self): today = datetime.date.today() try: for only_save_dirty in (False, True): Person._meta.only_save_dirty = only_save_dirty p = Person.create(first='f', last='l', dob=today) p.first = 'f2' p.last = 'l2' p.save(only=[Person.first]) self.assertEqual(p.dirty_fields, [Person.last]) p_db = Person.get(Person.id == p.id) self.assertEqual((p_db.first, p_db.last), ('f2', 'l')) p.save() self.assertEqual(p.dirty_fields, []) p_db = Person.get(Person.id == p.id) self.assertEqual((p_db.first, p_db.last), ('f2', 'l2')) p.delete_instance() finally: # Reset only_save_dirty property for other tests. Person._meta.only_save_dirty = False class TestDefaultValues(ModelTestCase): database = get_in_memory_db() requires = [Sample, SampleMeta] def test_default_present_on_insert(self): # Although value is not specified, it has a default, which is included # in the INSERT. query = Sample.insert(counter=0) self.assertSQL(query, ( 'INSERT INTO "sample" ("counter", "value") ' 'VALUES (?, ?)'), [0, 1.0]) # Default values are also included when doing bulk inserts. query = Sample.insert_many([ {'counter': '0'}, {'counter': 1, 'value': 2}, {'counter': '2'}]) self.assertSQL(query, ( 'INSERT INTO "sample" ("counter", "value") ' 'VALUES (?, ?), (?, ?), (?, ?)'), [0, 1.0, 1, 2.0, 2, 1.0]) query = Sample.insert_many([(0,), (1, 2.)], fields=[Sample.counter]) self.assertSQL(query, ( 'INSERT INTO "sample" ("counter", "value") ' 'VALUES (?, ?), (?, ?)'), [0, 1.0, 1, 2.0]) def test_default_present_on_create(self): s = Sample.create(counter=3) s_db = Sample.get(Sample.counter == 3) self.assertEqual(s_db.value, 1.) def test_defaults_from_cursor(self): s = Sample.create(counter=1) sm1 = SampleMeta.create(sample=s, value=1.) 
sm2 = SampleMeta.create(sample=s, value=2.) # Defaults are not present when doing a read query. with self.assertQueryCount(1): # Simple query. query = (SampleMeta.select(SampleMeta.sample) .order_by(SampleMeta.value)) sm1_db, sm2_db = list(query) self.assertIsNone(sm1_db.value) self.assertIsNone(sm2_db.value) with self.assertQueryCount(1): # Join-graph query. query = (SampleMeta .select(SampleMeta.sample, Sample.counter) .join(Sample) .order_by(SampleMeta.value)) sm1_db, sm2_db = list(query) self.assertIsNone(sm1_db.value) self.assertIsNone(sm2_db.value) self.assertIsNone(sm1_db.sample.value) self.assertIsNone(sm2_db.sample.value) self.assertEqual(sm1_db.sample.counter, 1) self.assertEqual(sm2_db.sample.counter, 1) class TestFunctionCoerce(ModelTestCase): database = get_in_memory_db() requires = [Sample] def test_coerce(self): for i in range(3): Sample.create(counter=i, value=i) counter_group = fn.GROUP_CONCAT(Sample.counter).coerce(False) query = Sample.select(counter_group.alias('counter')) self.assertEqual(query.get().counter, '0,1,2') query = Sample.select(counter_group.alias('counter_group')) self.assertEqual(query.get().counter_group, '0,1,2') query = Sample.select(counter_group) self.assertEqual(query.scalar(), '0,1,2') def test_scalar(self): for i in range(4): Sample.create(counter=i, value=i) query = Sample.select(fn.SUM(Sample.counter).alias('total')) self.assertEqual(query.scalar(), 6) self.assertEqual(query.scalar(as_tuple=True), (6,)) self.assertEqual(query.scalar(as_dict=True), {'total': 6}) Sample.delete().execute() self.assertTrue(query.scalar() is None) self.assertEqual(query.scalar(as_tuple=True), (None,)) self.assertEqual(query.scalar(as_dict=True), {'total': None}) def test_safe_python_value(self): for i in range(3): Sample.create(counter=i, value=i) counter_group = fn.GROUP_CONCAT(Sample.counter) query = Sample.select(counter_group.alias('counter')) self.assertEqual(query.get().counter, '0,1,2') self.assertEqual(query.scalar(), '0,1,2') query = Sample.select(counter_group.alias('counter_group')) self.assertEqual(query.get().counter_group, '0,1,2') self.assertEqual(query.scalar(), '0,1,2') def test_conv_using_python_value(self): for i in range(3): Sample.create(counter=i, value=i) counter = (fn .GROUP_CONCAT(Sample.counter) .python_value(lambda x: [int(i) for i in x.split(',')])) query = Sample.select(counter.alias('counter')) self.assertEqual(query.get().counter, [0, 1, 2]) query = Sample.select(counter.alias('counter_group')) self.assertEqual(query.get().counter_group, [0, 1, 2]) query = Sample.select(counter) self.assertEqual(query.scalar(), [0, 1, 2]) @requires_models(Category, Sample) def test_no_coerce_count_avg(self): for i in range(10): Category.create(name=str(i)) # COUNT() does not result in the value being coerced. query = Category.select(fn.COUNT(Category.name)) self.assertEqual(query.scalar(), 10) # Force the value to be coerced using the field's db_value(). query = Category.select(fn.COUNT(Category.name).coerce(True)) self.assertEqual(query.scalar(), '10') # Ensure avg over an integer field is returned as a float. 
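        # Like COUNT(), AVG() is not passed back through the field's
        # python_value() converter; if it were, IntegerField coercion
        # would truncate 1.5 down to 1. For reference, the escape
        # hatches exercised elsewhere in this test class:
        #
        #   fn.GROUP_CONCAT(Sample.counter).coerce(False)   # no coercion
        #   fn.GROUP_CONCAT(Sample.counter).python_value(f) # custom conv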
Sample.insert_many([(1, 0), (2, 0)]).execute() query = Sample.select(fn.AVG(Sample.counter).alias('a')) self.assertEqual(query.get().a, 1.5) class TestJoinModelAlias(ModelTestCase): data = ( ('huey', 'meow'), ('huey', 'purr'), ('zaizee', 'hiss'), ('mickey', 'woof')) requires = [User, Tweet] def setUp(self): super(TestJoinModelAlias, self).setUp() users = {} for pk, (username, tweet) in enumerate(self.data, 1): if username not in users: user = User.create(id=len(users) + 1, username=username) users[username] = user else: user = users[username] Tweet.create(id=pk, user=user, content=tweet) def _test_query(self, alias_expr): UA = alias_expr() return (Tweet .select(Tweet, UA) .order_by(UA.username, Tweet.content)) def assertTweets(self, query, user_attr='user'): with self.assertQueryCount(1): data = [(getattr(tweet, user_attr).username, tweet.content) for tweet in query] self.assertEqual(sorted(self.data), data) def test_control(self): self.assertTweets(self._test_query(lambda: User).join(User)) def test_join_aliased_columns(self): query = (Tweet .select(Tweet.id.alias('tweet_id'), Tweet.content) .order_by(Tweet.id)) self.assertEqual([(t.tweet_id, t.content) for t in query], [ (1, 'meow'), (2, 'purr'), (3, 'hiss'), (4, 'woof')]) query = (Tweet .select(Tweet.id.alias('tweet_id'), Tweet.content) .join(User) .where(User.username == 'huey') .order_by(Tweet.id)) self.assertEqual([(t.tweet_id, t.content) for t in query], [ (1, 'meow'), (2, 'purr')]) def test_join(self): UA = User.alias('ua') query = self._test_query(lambda: UA).join(UA) self.assertTweets(query) def test_join_on(self): UA = User.alias('ua') query = self._test_query(lambda: UA).join(UA, on=(Tweet.user == UA.id)) self.assertTweets(query) def test_join_on_field(self): UA = User.alias('ua') query = self._test_query(lambda: UA) query = query.join(UA, on=Tweet.user) self.assertTweets(query) def test_join_on_alias(self): UA = User.alias('ua') query = self._test_query(lambda: UA) query = query.join(UA, on=(Tweet.user == UA.id).alias('foo')) self.assertTweets(query, 'foo') def test_join_attr(self): UA = User.alias('ua') query = self._test_query(lambda: UA).join(UA, attr='baz') self.assertTweets(query, 'baz') def test_join_on_alias_attr(self): UA = User.alias('ua') q = self._test_query(lambda: UA) q = q.join(UA, on=(Tweet.user == UA.id).alias('foo'), attr='bar') self.assertTweets(q, 'bar') def _test_query_backref(self, alias_expr): TA = alias_expr() return (User .select(User, TA) .order_by(User.username, TA.content)) def assertUsers(self, query, tweet_attr='tweet'): with self.assertQueryCount(1): data = [(user.username, getattr(user, tweet_attr).content) for user in query] self.assertEqual(sorted(self.data), data) def test_control_backref(self): self.assertUsers(self._test_query_backref(lambda: Tweet).join(Tweet)) def test_join_backref(self): TA = Tweet.alias('ta') query = self._test_query_backref(lambda: TA).join(TA) self.assertUsers(query) def test_join_on_backref(self): TA = Tweet.alias('ta') query = self._test_query_backref(lambda: TA) query = query.join(TA, on=(User.id == TA.user_id)) self.assertUsers(query) def test_join_on_field_backref(self): TA = Tweet.alias('ta') query = self._test_query_backref(lambda: TA) query = query.join(TA, on=TA.user) self.assertUsers(query) def test_join_on_alias_backref(self): TA = Tweet.alias('ta') query = self._test_query_backref(lambda: TA) query = query.join(TA, on=(User.id == TA.user_id).alias('foo')) self.assertUsers(query, 'foo') def test_join_attr_backref(self): TA = Tweet.alias('ta') query = 
self._test_query_backref(lambda: TA).join(TA, attr='baz') self.assertUsers(query, 'baz') def test_join_alias_twice(self): # Test that a model-alias can be both the source and the dest by # joining from User -> Tweet -> User (as "foo"). TA = Tweet.alias('ta') UA = User.alias('ua') with self.assertQueryCount(1): query = (User .select(User, TA, UA) .join(TA) .join(UA, on=(TA.user_id == UA.id).alias('foo')) .order_by(User.username, TA.content)) data = [(row.username, row.tweet.content, row.tweet.foo.username) for row in query] self.assertEqual(data, [ ('huey', 'meow', 'huey'), ('huey', 'purr', 'huey'), ('mickey', 'woof', 'mickey'), ('zaizee', 'hiss', 'zaizee')]) def test_alias_filter(self): UA = User.alias('ua') lookups = ({'ua__username': 'huey'}, {'user__username': 'huey'}) for lookup in lookups: with self.assertQueryCount(1): query = (Tweet .select(Tweet.content, UA.username) .join(UA) .filter(**lookup) .order_by(Tweet.content)) self.assertSQL(query, ( 'SELECT "t1"."content", "ua"."username" ' 'FROM "tweet" AS "t1" ' 'INNER JOIN "users" AS "ua" ' 'ON ("t1"."user_id" = "ua"."id") ' 'WHERE ("ua"."username" = ?) ' 'ORDER BY "t1"."content"'), ['huey']) data = [(t.content, t.user.username) for t in query] self.assertEqual(data, [('meow', 'huey'), ('purr', 'huey')]) @skip_unless( IS_POSTGRESQL or IS_MYSQL_ADVANCED_FEATURES or IS_SQLITE_25 or IS_CRDB, 'window function') class TestWindowFunctionIntegration(ModelTestCase): requires = [Sample] def setUp(self): super(TestWindowFunctionIntegration, self).setUp() values = ((1, 10), (1, 20), (2, 1), (2, 3), (3, 100)) with self.database.atomic(): for counter, value in values: Sample.create(counter=counter, value=value) def test_simple_partition(self): query = (Sample .select(Sample.counter, Sample.value, fn.AVG(Sample.value).over( partition_by=[Sample.counter])) .order_by(Sample.counter, Sample.value) .tuples()) expected = [ (1, 10., 15.), (1, 20., 15.), (2, 1., 2.), (2, 3., 2.), (3, 100., 100.)] self.assertEqual(list(query), expected) window = Window(partition_by=[Sample.counter]) query = (Sample .select(Sample.counter, Sample.value, fn.AVG(Sample.value).over(window)) .window(window) .order_by(Sample.counter, Sample.value) .tuples()) self.assertEqual(list(query), expected) def test_mixed_ordering(self): s = fn.SUM(Sample.value).over(order_by=[Sample.value]) query = (Sample .select(Sample.counter, Sample.value, s.alias('rtotal')) .order_by(Sample.id)) # We end up with window going 1., 3., 10., 20., 100.. 
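        # (With an ORDER BY and no explicit frame, the window defaults to
        # RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW, so each
        # row's running total covers every row sorting at or before its
        # own value.)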
# So: # 1 | 10 | (1 + 3 + 10) # 1 | 20 | (1 + 3 + 10 + 20) # 2 | 1 | (1) # 2 | 3 | (1 + 3) # 3 | 100 | (1 + 3 + 10 + 20 + 100) self.assertEqual([(r.counter, r.value, r.rtotal) for r in query], [ (1, 10., 14.), (1, 20., 34.), (2, 1., 1.), (2, 3., 4.), (3, 100., 134.)]) def test_reuse_window(self): w = Window(order_by=[Sample.value]) with self.database.atomic(): Sample.delete().execute() for i in range(10): Sample.create(counter=i, value=10 * i) query = (Sample .select(Sample.counter, Sample.value, fn.NTILE(4).over(w).alias('quartile'), fn.NTILE(5).over(w).alias('quintile'), fn.NTILE(100).over(w).alias('percentile')) .window(w) .order_by(Sample.id)) results = [(r.counter, r.value, r.quartile, r.quintile, r.percentile) for r in query] self.assertEqual(results, [ # ct, v, 4tile, 5tile, 100tile (0, 0., 1, 1, 1), (1, 10., 1, 1, 2), (2, 20., 1, 2, 3), (3, 30., 2, 2, 4), (4, 40., 2, 3, 5), (5, 50., 2, 3, 6), (6, 60., 3, 4, 7), (7, 70., 3, 4, 8), (8, 80., 4, 5, 9), (9, 90., 4, 5, 10), ]) def test_ordered_window(self): window = Window(partition_by=[Sample.counter], order_by=[Sample.value.desc()]) query = (Sample .select(Sample.counter, Sample.value, fn.RANK().over(window=window).alias('rank')) .window(window) .order_by(Sample.counter, fn.RANK().over(window=window)) .tuples()) self.assertEqual(list(query), [ (1, 20., 1), (1, 10., 2), (2, 3., 1), (2, 1., 2), (3, 100., 1)]) def test_two_windows(self): w1 = Window(partition_by=[Sample.counter]).alias('w1') w2 = Window(order_by=[Sample.counter]).alias('w2') query = (Sample .select(Sample.counter, Sample.value, fn.AVG(Sample.value).over(window=w1), fn.RANK().over(window=w2)) .window(w1, w2) .order_by(Sample.id) .tuples()) self.assertEqual(list(query), [ (1, 10., 15., 1), (1, 20., 15., 1), (2, 1., 2., 3), (2, 3., 2., 3), (3, 100., 100., 5)]) def test_empty_over(self): query = (Sample .select(Sample.counter, Sample.value, fn.LAG(Sample.counter, 1).over(order_by=[Sample.id])) .order_by(Sample.id) .tuples()) self.assertEqual(list(query), [ (1, 10., None), (1, 20., 1), (2, 1., 1), (2, 3., 2), (3, 100., 2)]) def test_bounds(self): query = (Sample .select(Sample.value, fn.SUM(Sample.value).over( partition_by=[Sample.counter], start=Window.preceding(), end=Window.following(1))) .order_by(Sample.id) .tuples()) self.assertEqual(list(query), [ (10., 30.), (20., 30.), (1., 4.), (3., 4.), (100., 100.)]) query = (Sample .select(Sample.counter, Sample.value, fn.SUM(Sample.value).over( order_by=[Sample.id], start=Window.preceding(2))) .order_by(Sample.id) .tuples()) self.assertEqual(list(query), [ (1, 10., 10.), (1, 20., 30.), (2, 1., 31.), (2, 3., 24.), (3, 100., 104.)]) def test_frame_types(self): Sample.create(counter=1, value=20.) Sample.create(counter=2, value=1.) # Observe logical peer handling. # Defaults to RANGE. query = (Sample .select(Sample.counter, Sample.value, fn.SUM(Sample.value).over( order_by=[Sample.counter, Sample.value])) .order_by(Sample.id)) self.assertEqual(list(query.tuples()), [ (1, 10., 10.), (1, 20., 50.), (2, 1., 52.), (2, 3., 55.), (3, 100., 155.), (1, 20., 50.), (2, 1., 52.)]) # Explicitly specify ROWS. query = (Sample .select(Sample.counter, Sample.value, fn.SUM(Sample.value).over( order_by=[Sample.counter, Sample.value], frame_type=Window.ROWS)) .order_by(Sample.counter, Sample.value)) self.assertEqual(list(query.tuples()), [ (1, 10., 10.), (1, 20., 30.), (1, 20., 50.), (2, 1., 51.), (2, 1., 52.), (2, 3., 55.), (3, 100., 155.)]) # Including a boundary results in ROWS. 
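        # When an explicit start/end boundary is supplied without a
        # frame_type, peewee emits a ROWS frame, so preceding(2) below
        # counts physical rows rather than value peers. The equivalent
        # explicit spelling would be (sketch):
        #
        #   fn.SUM(Sample.value).over(
        #       order_by=[Sample.counter, Sample.value],
        #       frame_type=Window.ROWS,
        #       start=Window.preceding(2),
        #       end=Window.CURRENT_ROW)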
query = (Sample .select(Sample.counter, Sample.value, fn.SUM(Sample.value).over( order_by=[Sample.counter, Sample.value], start=Window.preceding(2))) .order_by(Sample.counter, Sample.value)) self.assertEqual(list(query.tuples()), [ (1, 10., 10.), (1, 20., 30.), (1, 20., 50.), (2, 1., 41.), (2, 1., 22.), (2, 3., 5.), (3, 100., 104.)]) @skip_if(IS_MYSQL, 'requires OVER() with FILTER') def test_filter_clause(self): condsum = fn.SUM(Sample.value).filter(Sample.counter > 1).over( order_by=[Sample.id], start=Window.preceding(1)) query = (Sample .select(Sample.counter, Sample.value, condsum.alias('cs')) .order_by(Sample.value)) self.assertEqual(list(query.tuples()), [ (2, 1., 1.), (2, 3., 4.), (1, 10., None), (1, 20., None), (3, 100., 103.), ]) @skip_if(IS_MYSQL or (IS_SQLITE and not IS_SQLITE_30), 'requires FILTER with aggregates') def test_filter_with_aggregate(self): condsum = fn.SUM(Sample.value).filter(Sample.counter > 1) query = (Sample .select(Sample.counter, condsum.alias('cs')) .group_by(Sample.counter) .order_by(Sample.counter)) self.assertEqual(list(query.tuples()), [ (1, None), (2, 4.), (3, 100.)]) @skip_if(IS_SQLITE or (IS_MYSQL and not IS_MYSQL_ADVANCED_FEATURES)) @skip_unless(db.for_update, 'requires for update') class TestForUpdateIntegration(ModelTestCase): requires = [User, Tweet] def setUp(self): super(TestForUpdateIntegration, self).setUp() self.alt_db = new_connection() class AltUser(User): class Meta: database = self.alt_db table_name = User._meta.table_name class AltTweet(Tweet): class Meta: database = self.alt_db table_name = Tweet._meta.table_name self.AltUser = AltUser self.AltTweet = AltTweet def tearDown(self): self.alt_db.close() super(TestForUpdateIntegration, self).tearDown() @skip_if(IS_CRDB, 'crdb locks-up on this test, blocking reads') def test_for_update(self): with self.database.atomic(): User.create(username='huey') zaizee = User.create(username='zaizee') AltUser = self.AltUser with self.database.manual_commit(): self.database.begin() users = (User.select().where(User.username == 'zaizee') .for_update() .execute()) updated = (User .update(username='ziggy') .where(User.username == 'zaizee') .execute()) self.assertEqual(updated, 1) if IS_POSTGRESQL: nrows = (AltUser .update(username='huey-x') .where(AltUser.username == 'huey') .execute()) self.assertEqual(nrows, 1) query = (AltUser .select(AltUser.username) .where(AltUser.id == zaizee.id)) self.assertEqual(query.get().username, 'zaizee') self.database.commit() self.assertEqual(query.get().username, 'ziggy') def test_for_update_blocking(self): User.create(username='u1') AltUser = self.AltUser evt = threading.Event() def run_in_thread(): with self.alt_db.atomic(): evt.wait() n = (AltUser.update(username='u1-y') .where(AltUser.username == 'u1') .execute()) self.assertEqual(n, 0) t = threading.Thread(target=run_in_thread) t.daemon = True t.start() with self.database.atomic() as txn: q = (User.select() .where(User.username == 'u1') .for_update() .execute()) evt.set() n = (User.update(username='u1-x') .where(User.username == 'u1') .execute()) self.assertEqual(n, 1) t.join(timeout=5) u = User.get() self.assertEqual(u.username, 'u1-x') def test_for_update_nested(self): User.insert_many([(u,) for u in 'abc']).execute() subq = User.select().where(User.username != 'b').for_update() nrows = (User .delete() .where(User.id.in_(subq)) .execute()) self.assertEqual(nrows, 2) def test_for_update_nowait(self): User.create(username='huey') zaizee = User.create(username='zaizee') AltUser = self.AltUser with 
self.database.manual_commit(): self.database.begin() users = (User .select(User.username) .where(User.username == 'zaizee') .for_update(nowait=True) .execute()) def will_fail(): return (AltUser .select() .where(AltUser.username == 'zaizee') .for_update(nowait=True) .get()) self.assertRaises((OperationalError, InternalError), will_fail) self.database.commit() @requires_postgresql @requires_models(User, Tweet) def test_for_update_of(self): h = User.create(username='huey') z = User.create(username='zaizee') Tweet.create(user=h, content='h') Tweet.create(user=z, content='z') AltUser, AltTweet = self.AltUser, self.AltTweet with self.database.manual_commit(): self.database.begin() # Lock tweets by huey. query = (Tweet .select() .join(User) .where(User.username == 'huey') .for_update(of=Tweet, nowait=True)) qr = query.execute() # No problem updating zaizee's tweet or huey's user. nrows = (AltTweet .update(content='zx') .where(AltTweet.user == z.id) .execute()) self.assertEqual(nrows, 1) nrows = (AltUser .update(username='huey-x') .where(AltUser.username == 'huey') .execute()) self.assertEqual(nrows, 1) def will_fail(): (AltTweet .select() .where(AltTweet.user == h) .for_update(nowait=True) .get()) self.assertRaises((OperationalError, InternalError), will_fail) self.database.commit() query = Tweet.select(Tweet, User).join(User).order_by(Tweet.id) self.assertEqual([(t.content, t.user.username) for t in query], [('h', 'huey-x'), ('zx', 'zaizee')]) class ServerDefault(TestModel): timestamp = DateTimeField(constraints=[SQL('default (now())')]) @requires_postgresql class TestReturningIntegration(ModelTestCase): requires = [User] def test_simple_returning(self): query = User.insert(username='charlie') self.assertSQL(query, ( 'INSERT INTO "users" ("username") VALUES (?) ' 'RETURNING "users"."id"'), ['charlie']) self.assertEqual(query.execute(), 1) # By default returns a tuple. query = User.insert(username='huey') self.assertEqual(query.execute(), 2) self.assertEqual(list(query), [(2,)]) # If we specify a returning clause we get user instances. query = User.insert(username='snoobie').returning(User) query.execute() self.assertEqual([x.username for x in query], ['snoobie']) query = (User .insert(username='zaizee') .returning(User.id, User.username) .dicts()) self.assertSQL(query, ( 'INSERT INTO "users" ("username") VALUES (?) ' 'RETURNING "users"."id", "users"."username"'), ['zaizee']) cursor = query.execute() row, = list(cursor) self.assertEqual(row, {'id': 4, 'username': 'zaizee'}) query = (User .insert(username='mickey') .returning(User) .objects()) self.assertSQL(query, ( 'INSERT INTO "users" ("username") VALUES (?) ' 'RETURNING "users"."id", "users"."username"'), ['mickey']) cursor = query.execute() row, = list(cursor) self.assertEqual(row.id, 5) self.assertEqual(row.username, 'mickey') # Can specify aliases. query = (User .insert(username='sipp') .returning(User.username.alias('new_username'))) self.assertEqual([x.new_username for x in query.execute()], ['sipp']) # Minimal test with insert_many. query = User.insert_many([('u7',), ('u8',)]) self.assertEqual([r for r, in query.execute()], [7, 8]) # Test with insert / on conflict. 
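        # Combining ON CONFLICT with RETURNING yields one row per input
        # tuple: the conflicting row comes back with the UPDATE applied
        # ('u7' -> 'u7x') while the non-conflicting row inserts as-is.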
query = (User .insert_many([(7, 'u7',), (9, 'u9',)], [User.id, User.username]) .on_conflict(conflict_target=[User.id], update={User.username: User.username + 'x'}) .returning(User)) self.assertEqual([(x.id, x.username) for x in query], [(7, 'u7x'), (9, 'u9')]) def test_simple_returning_insert_update_delete(self): res = User.insert(username='charlie').returning(User).execute() self.assertEqual([u.username for u in res], ['charlie']) res = (User .update(username='charlie2') .where(User.id == 1) .returning(User) .execute()) # Subsequent iterations are cached. for _ in range(2): self.assertEqual([u.username for u in res], ['charlie2']) res = (User .delete() .where(User.id == 1) .returning(User) .execute()) # Subsequent iterations are cached. for _ in range(2): self.assertEqual([u.username for u in res], ['charlie2']) def test_simple_insert_update_delete_no_returning(self): query = User.insert(username='charlie') self.assertEqual(query.execute(), 1) query = User.insert(username='huey') self.assertEqual(query.execute(), 2) query = User.update(username='huey2').where(User.username == 'huey') self.assertEqual(query.execute(), 1) self.assertEqual(query.execute(), 0) # No rows updated! query = User.delete().where(User.username == 'huey2') self.assertEqual(query.execute(), 1) self.assertEqual(query.execute(), 0) # No rows updated! @requires_models(ServerDefault) def test_returning_server_defaults(self): query = (ServerDefault .insert() .returning(ServerDefault.id, ServerDefault.timestamp)) self.assertSQL(query, ( 'INSERT INTO "server_default" ' 'DEFAULT VALUES ' 'RETURNING "server_default"."id", "server_default"."timestamp"'), []) with self.assertQueryCount(1): cursor = query.dicts().execute() row, = list(cursor) self.assertTrue(row['timestamp'] is not None) obj = ServerDefault.get(ServerDefault.id == row['id']) self.assertEqual(obj.timestamp, row['timestamp']) def test_no_return(self): query = User.insert(username='huey').returning() self.assertIsNone(query.execute()) user = User.get(User.username == 'huey') self.assertEqual(user.username, 'huey') self.assertTrue(user.id >= 1) @requires_models(Category) def test_non_int_pk_returning(self): query = Category.insert(name='root') self.assertSQL(query, ( 'INSERT INTO "category" ("name") VALUES (?) ' 'RETURNING "category"."name"'), ['root']) self.assertEqual(query.execute(), 'root') def test_returning_multi(self): data = [{'username': 'huey'}, {'username': 'mickey'}] query = User.insert_many(data) self.assertSQL(query, ( 'INSERT INTO "users" ("username") VALUES (?), (?) ' 'RETURNING "users"."id"'), ['huey', 'mickey']) data = query.execute() # Check that the result wrapper is correctly set up. self.assertTrue(len(data.select) == 1 and data.select[0] is User.id) self.assertEqual(list(data), [(1,), (2,)]) query = (User .insert_many([{'username': 'foo'}, {'username': 'bar'}, {'username': 'baz'}]) .returning(User.id, User.username) .namedtuples()) data = query.execute() self.assertEqual([(row.id, row.username) for row in data], [ (3, 'foo'), (4, 'bar'), (5, 'baz')]) @requires_models(Category) def test_returning_query(self): for name in ('huey', 'mickey', 'zaizee'): Category.create(name=name) source = Category.select(Category.name).order_by(Category.name) query = User.insert_from(source, (User.username,)) self.assertSQL(query, ( 'INSERT INTO "users" ("username") ' 'SELECT "t1"."name" FROM "category" AS "t1" ORDER BY "t1"."name" ' 'RETURNING "users"."id"'), []) data = query.execute() # Check that the result wrapper is correctly set up. 
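        # The cursor wrapper returned by execute() exposes the RETURNING
        # columns via its .select attribute; insert_from() added no
        # explicit returning() clause, so on Postgres only the implicit
        # primary-key column, User.id, should be present.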
self.assertTrue(len(data.select) == 1 and data.select[0] is User.id) self.assertEqual(list(data), [(1,), (2,), (3,)]) def test_update_returning(self): id_list = User.insert_many([{'username': 'huey'}, {'username': 'zaizee'}]).execute() huey_id, zaizee_id = [pk for pk, in id_list] query = (User .update(username='ziggy') .where(User.username == 'zaizee') .returning(User.id, User.username)) self.assertSQL(query, ( 'UPDATE "users" SET "username" = ? ' 'WHERE ("users"."username" = ?) ' 'RETURNING "users"."id", "users"."username"'), ['ziggy', 'zaizee']) data = query.execute() user = data[0] self.assertEqual(user.username, 'ziggy') self.assertEqual(user.id, zaizee_id) def test_delete_returning(self): id_list = User.insert_many([{'username': 'huey'}, {'username': 'zaizee'}]).execute() huey_id, zaizee_id = [pk for pk, in id_list] query = (User .delete() .where(User.username == 'zaizee') .returning(User.id, User.username)) self.assertSQL(query, ( 'DELETE FROM "users" WHERE ("users"."username" = ?) ' 'RETURNING "users"."id", "users"."username"'), ['zaizee']) data = query.execute() user = data[0] self.assertEqual(user.username, 'zaizee') self.assertEqual(user.id, zaizee_id) class Member(TestModel): name = TextField() recommendedby = ForeignKeyField('self', null=True) class TestCTEIntegration(ModelTestCase): requires = [Category] def setUp(self): super(TestCTEIntegration, self).setUp() CC = Category.create root = CC(name='root') p1 = CC(name='p1', parent=root) p2 = CC(name='p2', parent=root) p3 = CC(name='p3', parent=root) c11 = CC(name='c11', parent=p1) c12 = CC(name='c12', parent=p1) c31 = CC(name='c31', parent=p3) @skip_if(IS_SQLITE_OLD or (IS_MYSQL and not IS_MYSQL_ADVANCED_FEATURES) or IS_CRDB) @requires_models(Member) def test_docs_example(self): f = Member.create(name='founder') gen2_1 = Member.create(name='g2-1', recommendedby=f) gen2_2 = Member.create(name='g2-2', recommendedby=f) gen2_3 = Member.create(name='g2-3', recommendedby=f) gen3_1_1 = Member.create(name='g3-1-1', recommendedby=gen2_1) gen3_1_2 = Member.create(name='g3-1-2', recommendedby=gen2_1) gen3_3_1 = Member.create(name='g3-3-1', recommendedby=gen2_3) # Get recommender chain for 331. 
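        # Recursive CTE recipe: (1) a base case selecting the starting
        # row(s), (2) a recursive term joining the model back onto the
        # CTE, (3) base.union_all(recursive) to combine the two. Here we
        # walk "up" the graph from g3-3-1's recommender, following
        # recommendedby until it is NULL.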
base = (Member .select(Member.recommendedby) .where(Member.id == gen3_3_1.id) .cte('recommenders', recursive=True, columns=('recommender',))) MA = Member.alias() recursive = (MA .select(MA.recommendedby) .join(base, on=(MA.id == base.c.recommender))) cte = base.union_all(recursive) query = (cte .select_from(cte.c.recommender, Member.name) .join(Member, on=(cte.c.recommender == Member.id)) .order_by(Member.id.desc())) self.assertEqual([m.name for m in query], ['g2-3', 'founder']) @skip_if(IS_SQLITE_OLD or (IS_MYSQL and not IS_MYSQL_ADVANCED_FEATURES)) def test_simple_cte(self): cte = (Category .select(Category.name, Category.parent) .cte('catz', columns=('name', 'parent'))) cte_sql = ('WITH "catz" ("name", "parent") AS (' 'SELECT "t1"."name", "t1"."parent_id" ' 'FROM "category" AS "t1") ' 'SELECT "catz"."name", "catz"."parent" AS "pname" ' 'FROM "catz" ' 'ORDER BY "catz"."name"') query = (Category .select(cte.c.name, cte.c.parent.alias('pname')) .from_(cte) .order_by(cte.c.name) .with_cte(cte)) self.assertSQL(query, cte_sql, []) query2 = (cte.select_from(cte.c.name, cte.c.parent.alias('pname')) .order_by(cte.c.name)) self.assertSQL(query2, cte_sql, []) self.assertEqual([(row.name, row.pname) for row in query], [ ('c11', 'p1'), ('c12', 'p1'), ('c31', 'p3'), ('p1', 'root'), ('p2', 'root'), ('p3', 'root'), ('root', None)]) self.assertEqual([(row.name, row.pname) for row in query], [(row.name, row.pname) for row in query2]) @skip_if(IS_SQLITE_OLD or (IS_MYSQL and not IS_MYSQL_ADVANCED_FEATURES)) def test_cte_join(self): cte = (Category .select(Category.name) .cte('parents', columns=('name',))) query = (Category .select(Category.name, cte.c.name.alias('pname')) .join(cte, on=(Category.parent == cte.c.name)) .order_by(Category.name) .with_cte(cte)) self.assertSQL(query, ( 'WITH "parents" ("name") AS (' 'SELECT "t1"."name" FROM "category" AS "t1") ' 'SELECT "t2"."name", "parents"."name" AS "pname" ' 'FROM "category" AS "t2" ' 'INNER JOIN "parents" ON ("t2"."parent_id" = "parents"."name") ' 'ORDER BY "t2"."name"'), []) self.assertEqual([(c.name, c.parents['pname']) for c in query], [ ('c11', 'p1'), ('c12', 'p1'), ('c31', 'p3'), ('p1', 'root'), ('p2', 'root'), ('p3', 'root'), ]) @skip_if(IS_SQLITE_OLD or IS_MYSQL or IS_CRDB, 'requires recursive cte') def test_recursive_cte(self): def get_parents(cname): C1 = Category.alias() C2 = Category.alias() level = SQL('1').cast('integer').alias('level') path = C1.name.cast('text').alias('path') base = (C1 .select(C1.name, C1.parent, level, path) .where(C1.name == cname) .cte('parents', recursive=True)) rlevel = (base.c.level + 1).alias('level') rpath = base.c.path.concat('->').concat(C2.name).alias('path') recursive = (C2 .select(C2.name, C2.parent, rlevel, rpath) .from_(base) .join(C2, on=(C2.name == base.c.parent_id))) cte = base + recursive query = (cte .select_from(cte.c.name, cte.c.level, cte.c.path) .order_by(cte.c.level)) self.assertSQL(query, ( 'WITH RECURSIVE "parents" AS (' 'SELECT "t1"."name", "t1"."parent_id", ' 'CAST(1 AS integer) AS "level", ' 'CAST("t1"."name" AS text) AS "path" ' 'FROM "category" AS "t1" ' 'WHERE ("t1"."name" = ?) ' 'UNION ALL ' 'SELECT "t2"."name", "t2"."parent_id", ' '("parents"."level" + ?) AS "level", ' '(("parents"."path" || ?) 
|| "t2"."name") AS "path" ' 'FROM "parents" ' 'INNER JOIN "category" AS "t2" ' 'ON ("t2"."name" = "parents"."parent_id")) ' 'SELECT "parents"."name", "parents"."level", "parents"."path" ' 'FROM "parents" ' 'ORDER BY "parents"."level"'), [cname, 1, '->']) return query data = [row for row in get_parents('c31').tuples()] self.assertEqual(data, [ ('c31', 1, 'c31'), ('p3', 2, 'c31->p3'), ('root', 3, 'c31->p3->root')]) data = [(c.name, c.level, c.path) for c in get_parents('c12').namedtuples()] self.assertEqual(data, [ ('c12', 1, 'c12'), ('p1', 2, 'c12->p1'), ('root', 3, 'c12->p1->root')]) query = get_parents('root') data = [(r.name, r.level, r.path) for r in query] self.assertEqual(data, [('root', 1, 'root')]) @skip_if(IS_SQLITE_OLD or IS_MYSQL or IS_CRDB, 'requires recursive cte') def test_recursive_cte2(self): hierarchy = (Category .select(Category.name, Value(0).alias('level')) .where(Category.parent.is_null(True)) .cte(name='hierarchy', recursive=True)) C = Category.alias() recursive = (C .select(C.name, (hierarchy.c.level + 1).alias('level')) .join(hierarchy, on=(C.parent == hierarchy.c.name))) cte = hierarchy.union_all(recursive) query = (cte .select_from(cte.c.name, cte.c.level) .order_by(cte.c.name)) self.assertEqual([(r.name, r.level) for r in query], [ ('c11', 2), ('c12', 2), ('c31', 2), ('p1', 1), ('p2', 1), ('p3', 1), ('root', 0)]) @skip_if(IS_SQLITE_OLD or IS_MYSQL or IS_CRDB, 'requires recursive cte') def test_recursive_cte_docs_example(self): # Define the base case of our recursive CTE. This will be categories that # have a null parent foreign-key. Base = Category.alias() level = Value(1).cast('integer').alias('level') path = Base.name.cast('text').alias('path') base_case = (Base .select(Base.name, Base.parent, level, path) .where(Base.parent.is_null()) .cte('base', recursive=True)) # Define the recursive terms. RTerm = Category.alias() rlevel = (base_case.c.level + 1).alias('level') rpath = base_case.c.path.concat('->').concat(RTerm.name).alias('path') recursive = (RTerm .select(RTerm.name, RTerm.parent, rlevel, rpath) .join(base_case, on=(RTerm.parent == base_case.c.name))) # The recursive CTE is created by taking the base case and UNION ALL with # the recursive term. cte = base_case.union_all(recursive) # We will now query from the CTE to get the categories, their levels, and # their paths. 
query = (cte .select_from(cte.c.name, cte.c.level, cte.c.path) .order_by(cte.c.path)) data = [(obj.name, obj.level, obj.path) for obj in query] self.assertEqual(data, [ ('root', 1, 'root'), ('p1', 2, 'root->p1'), ('c11', 3, 'root->p1->c11'), ('c12', 3, 'root->p1->c12'), ('p2', 2, 'root->p2'), ('p3', 2, 'root->p3'), ('c31', 3, 'root->p3->c31')]) @requires_models(Sample) @skip_if(IS_SQLITE_OLD or IS_MYSQL, 'sqlite too old for ctes, mysql flaky') def test_cte_reuse_aggregate(self): data = ( (1, (1.25, 1.5, 1.75)), (2, (2.1, 2.3, 2.5, 2.7, 2.9)), (3, (3.5, 3.5))) with self.database.atomic(): for counter, values in data: (Sample .insert_many([(counter, value) for value in values], fields=[Sample.counter, Sample.value]) .execute()) cte = (Sample .select(Sample.counter, fn.AVG(Sample.value).alias('avg_value')) .group_by(Sample.counter) .cte('count_to_avg', columns=('counter', 'avg_value'))) query = (Sample .select(Sample.counter, (Sample.value - cte.c.avg_value).alias('diff')) .join(cte, on=(Sample.counter == cte.c.counter)) .where(Sample.value > cte.c.avg_value) .order_by(Sample.value) .with_cte(cte)) self.assertEqual([(a, round(b, 2)) for a, b in query.tuples()], [ (1, .25), (2, .2), (2, .4)]) @skip_if(not IS_SQLITE_15, 'requires row-values') class TestTupleComparison(ModelTestCase): requires = [User] def test_tuples(self): ua, ub, uc = [User.create(username=username) for username in 'abc'] query = User.select().where( Tuple(User.username, User.id) == ('b', ub.id)) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" ' 'WHERE (("t1"."username", "t1"."id") = (?, ?))'), ['b', ub.id]) self.assertEqual(query.count(), 1) obj = query.get() self.assertEqual(obj, ub) def test_tuple_subquery(self): ua, ub, uc = [User.create(username=username) for username in 'abc'] UA = User.alias() subquery = (UA .select(UA.username, UA.id) .where(UA.username != 'b')) query = (User .select(User.username) .where(Tuple(User.username, User.id).in_(subquery)) .order_by(User.username)) self.assertEqual([u.username for u in query], ['a', 'c']) @requires_models(CPK) def test_row_value_composite_key(self): CPK.insert_many([('k1', 1, 1), ('k2', 2, 2), ('k3', 3, 3)]).execute() cpk = CPK.get(CPK._meta.primary_key == ('k2', 2)) self.assertEqual(cpk._pk, ('k2', 2)) cpk = CPK['k3', 3] self.assertEqual(cpk._pk, ('k3', 3)) uq = CPK.update(extra=20).where(CPK._meta.primary_key != ('k2', 2)) uq.execute() self.assertEqual(list(sorted(CPK.select().tuples())), [ ('k1', 1, 20), ('k2', 2, 2), ('k3', 3, 20)]) class TestModelGraph(BaseTestCase): def test_bind_model_database(self): class User(Model): pass class Tweet(Model): user = ForeignKeyField(User) class Relationship(Model): from_user = ForeignKeyField(User, backref='relationships') to_user = ForeignKeyField(User, backref='related_to') class Flag(Model): tweet = ForeignKeyField(Tweet) class Unrelated(Model): pass fake_db = SqliteDatabase(None) User.bind(fake_db) for model in (User, Tweet, Relationship, Flag): self.assertTrue(model._meta.database is fake_db) self.assertTrue(Unrelated._meta.database is None) User.bind(None) with User.bind_ctx(fake_db) as (FUser,): self.assertTrue(FUser._meta.database is fake_db) self.assertTrue(Unrelated._meta.database is None) self.assertTrue(User._meta.database is None) class TestFieldInheritance(BaseTestCase): def test_field_inheritance(self): class BaseModel(Model): class Meta: database = get_in_memory_db() class BasePost(BaseModel): content = TextField() timestamp = TimestampField() class Photo(BasePost): image = TextField() 
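        # Inherited fields are copied per-subclass rather than shared;
        # Photo.id and Note.id are distinct Field instances bound to
        # their own models (see the id() comparison ending this test).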
class Note(BasePost): category = TextField() self.assertEqual(BasePost._meta.sorted_field_names, ['id', 'content', 'timestamp']) self.assertEqual(BasePost._meta.sorted_fields, [ BasePost.id, BasePost.content, BasePost.timestamp]) self.assertEqual(Photo._meta.sorted_field_names, ['id', 'content', 'timestamp', 'image']) self.assertEqual(Photo._meta.sorted_fields, [ Photo.id, Photo.content, Photo.timestamp, Photo.image]) self.assertEqual(Note._meta.sorted_field_names, ['id', 'content', 'timestamp', 'category']) self.assertEqual(Note._meta.sorted_fields, [ Note.id, Note.content, Note.timestamp, Note.category]) self.assertTrue(id(Photo.id) != id(Note.id)) def test_foreign_key_field_inheritance(self): class BaseModel(Model): class Meta: database = get_in_memory_db() class Category(BaseModel): name = TextField() class BasePost(BaseModel): category = ForeignKeyField(Category) timestamp = TimestampField() class Photo(BasePost): image = TextField() class Note(BasePost): content = TextField() self.assertEqual(BasePost._meta.sorted_field_names, ['id', 'category', 'timestamp']) self.assertEqual(BasePost._meta.sorted_fields, [ BasePost.id, BasePost.category, BasePost.timestamp]) self.assertEqual(Photo._meta.sorted_field_names, ['id', 'category', 'timestamp', 'image']) self.assertEqual(Photo._meta.sorted_fields, [ Photo.id, Photo.category, Photo.timestamp, Photo.image]) self.assertEqual(Note._meta.sorted_field_names, ['id', 'category', 'timestamp', 'content']) self.assertEqual(Note._meta.sorted_fields, [ Note.id, Note.category, Note.timestamp, Note.content]) self.assertEqual(Category._meta.backrefs, { BasePost.category: BasePost, Photo.category: Photo, Note.category: Note}) self.assertEqual(BasePost._meta.refs, {BasePost.category: Category}) self.assertEqual(Photo._meta.refs, {Photo.category: Category}) self.assertEqual(Note._meta.refs, {Note.category: Category}) self.assertEqual(BasePost.category.backref, 'basepost_set') self.assertEqual(Photo.category.backref, 'photo_set') self.assertEqual(Note.category.backref, 'note_set') def test_foreign_key_pk_inheritance(self): class BaseModel(Model): class Meta: database = get_in_memory_db() class Account(BaseModel): pass class BaseUser(BaseModel): account = ForeignKeyField(Account, primary_key=True) class User(BaseUser): username = TextField() class Admin(BaseUser): role = TextField() self.assertEqual(Account._meta.backrefs, { Admin.account: Admin, User.account: User, BaseUser.account: BaseUser}) self.assertEqual(BaseUser.account.backref, 'baseuser_set') self.assertEqual(User.account.backref, 'user_set') self.assertEqual(Admin.account.backref, 'admin_set') self.assertTrue(Account.user_set.model is Account) self.assertTrue(Account.admin_set.model is Account) self.assertTrue(Account.user_set.rel_model is User) self.assertTrue(Account.admin_set.rel_model is Admin) self.assertSQL(Account._schema._create_table(), ( 'CREATE TABLE IF NOT EXISTS "account" (' '"id" INTEGER NOT NULL PRIMARY KEY)'), []) self.assertSQL(User._schema._create_table(), ( 'CREATE TABLE IF NOT EXISTS "user" (' '"account_id" INTEGER NOT NULL PRIMARY KEY, ' '"username" TEXT NOT NULL, ' 'FOREIGN KEY ("account_id") REFERENCES "account" ("id"))'), []) self.assertSQL(Admin._schema._create_table(), ( 'CREATE TABLE IF NOT EXISTS "admin" (' '"account_id" INTEGER NOT NULL PRIMARY KEY, ' '"role" TEXT NOT NULL, ' 'FOREIGN KEY ("account_id") REFERENCES "account" ("id"))'), []) def test_backref_inheritance(self): class Category(TestModel): pass def backref(fk_field): return '%ss' % fk_field.model._meta.name 
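        # When backref is callable it is invoked for each model the FK
        # is bound to, so every subclass derives its own accessor name
        # ('baseposts', 'notes', 'photos') instead of inheriting a
        # single shared string.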
class BasePost(TestModel): category = ForeignKeyField(Category, backref=backref) class Note(BasePost): pass class Photo(BasePost): pass self.assertEqual(Category._meta.backrefs, { BasePost.category: BasePost, Note.category: Note, Photo.category: Photo}) self.assertEqual(BasePost.category.backref, 'baseposts') self.assertEqual(Note.category.backref, 'notes') self.assertEqual(Photo.category.backref, 'photos') self.assertTrue(Category.baseposts.rel_model is BasePost) self.assertTrue(Category.baseposts.model is Category) self.assertTrue(Category.notes.rel_model is Note) self.assertTrue(Category.notes.model is Category) self.assertTrue(Category.photos.rel_model is Photo) self.assertTrue(Category.photos.model is Category) class BaseItem(TestModel): category = ForeignKeyField(Category, backref='items') class ItemA(BaseItem): pass class ItemB(BaseItem): pass self.assertEqual(BaseItem.category.backref, 'items') self.assertEqual(ItemA.category.backref, 'itema_set') self.assertEqual(ItemB.category.backref, 'itemb_set') self.assertTrue(Category.items.rel_model is BaseItem) self.assertTrue(Category.itema_set.rel_model is ItemA) self.assertTrue(Category.itema_set.model is Category) self.assertTrue(Category.itemb_set.rel_model is ItemB) self.assertTrue(Category.itemb_set.model is Category) @skip_if(IS_SQLITE, 'sqlite is not supported') @skip_if(IS_MYSQL, 'mysql is not raising this error(?)') @skip_if(IS_CRDB, 'crdb is not raising the error in this test(?)') def test_deferred_fk_creation(self): class B(TestModel): a = DeferredForeignKey('A', null=True) b = TextField() class A(TestModel): a = TextField() db.create_tables([A, B]) try: # Test that we can create B with null "a_id" column: a = A.create(a='a') b = B.create(b='b') # Test that we can create B that has no corresponding A: fake_a = A(id=31337) b2 = B.create(a=fake_a, b='b2') b2_db = B.get(B.a == fake_a) self.assertEqual(b2_db.b, 'b2') # Ensure error occurs trying to create_foreign_key. with db.atomic(): self.assertRaises( IntegrityError, B._schema.create_foreign_key, B.a) b2_db.delete_instance() # We can now create the foreign key. 
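            # This is the deferred-FK migration pattern: create the
            # tables without the constraint, remove orphaned rows (b2_db
            # above), then add the FOREIGN KEY afterwards; the
            # create_foreign_key() call below issues the ALTER TABLE.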
B._schema.create_foreign_key(B.a) # The foreign-key is enforced: with db.atomic(): self.assertRaises(IntegrityError, B.create, a=fake_a, b='b3') finally: db.drop_tables([A, B]) class TestMetaTableName(BaseTestCase): def test_table_name_behavior(self): def make_model(model_name, table=None): class Meta: legacy_table_names = False table_name = table return type(model_name, (Model,), {'Meta': Meta}) def assertTableName(expected, model_name, table_name=None): model_class = make_model(model_name, table_name) self.assertEqual(model_class._meta.table_name, expected) assertTableName('users', 'User', 'users') assertTableName('tweet', 'Tweet') assertTableName('user_profile', 'UserProfile') assertTableName('activity_log_status', 'ActivityLogStatus') assertTableName('camel_case', 'CamelCase') assertTableName('camel_camel_case', 'CamelCamelCase') assertTableName('camel2_camel2_case', 'Camel2Camel2Case') assertTableName('http_request', 'HTTPRequest') assertTableName('api_response', 'APIResponse') assertTableName('api_response', 'API_Response') assertTableName('web_http_request', 'WebHTTPRequest') assertTableName('get_http_response_code', 'getHTTPResponseCode') assertTableName('foo_bar', 'foo_Bar') assertTableName('foo_bar', 'Foo__Bar') class TestMetaInheritance(BaseTestCase): def test_table_name(self): class Foo(Model): class Meta: def table_function(klass): return 'xxx_%s' % klass.__name__.lower() class Bar(Foo): pass class Baze(Foo): class Meta: table_name = 'yyy_baze' class Biz(Baze): pass class Nug(Foo): class Meta: def table_function(klass): return 'zzz_%s' % klass.__name__.lower() self.assertEqual(Foo._meta.table_name, 'xxx_foo') self.assertEqual(Bar._meta.table_name, 'xxx_bar') self.assertEqual(Baze._meta.table_name, 'yyy_baze') self.assertEqual(Biz._meta.table_name, 'xxx_biz') self.assertEqual(Nug._meta.table_name, 'zzz_nug') def test_composite_key_inheritance(self): class Foo(Model): key = TextField() value = TextField() class Meta: primary_key = CompositeKey('key', 'value') class Bar(Foo): pass class Baze(Foo): value = IntegerField() foo = Foo(key='k1', value='v1') self.assertEqual(foo.__composite_key__, ('k1', 'v1')) bar = Bar(key='k2', value='v2') self.assertEqual(bar.__composite_key__, ('k2', 'v2')) baze = Baze(key='k3', value=3) self.assertEqual(baze.__composite_key__, ('k3', 3)) def test_no_primary_key_inheritable(self): class Foo(Model): data = TextField() class Meta: primary_key = False class Bar(Foo): pass class Baze(Foo): pk = AutoField() class Zai(Foo): zee = TextField(primary_key=True) self.assertFalse(Foo._meta.primary_key) self.assertEqual(Foo._meta.sorted_field_names, ['data']) self.assertFalse(Bar._meta.primary_key) self.assertEqual(Bar._meta.sorted_field_names, ['data']) self.assertTrue(Baze._meta.primary_key is Baze.pk) self.assertEqual(Baze._meta.sorted_field_names, ['pk', 'data']) self.assertTrue(Zai._meta.primary_key is Zai.zee) self.assertEqual(Zai._meta.sorted_field_names, ['zee', 'data']) def test_inheritance(self): db = SqliteDatabase(':memory:') class Base(Model): class Meta: constraints = ['c1', 'c2'] database = db indexes = ( (('username',), True), ) only_save_dirty = True options = {'key': 'value'} schema = 'magic' class Child(Base): pass class GrandChild(Child): pass for ModelClass in (Child, GrandChild): self.assertEqual(ModelClass._meta.constraints, ['c1', 'c2']) self.assertTrue(ModelClass._meta.database is db) self.assertEqual(ModelClass._meta.indexes, [(('username',), True)]) self.assertEqual(ModelClass._meta.options, {'key': 'value'}) 
self.assertTrue(ModelClass._meta.only_save_dirty) self.assertEqual(ModelClass._meta.schema, 'magic') class Overrides(Base): class Meta: constraints = None indexes = None only_save_dirty = False options = {'foo': 'bar'} schema = None self.assertTrue(Overrides._meta.constraints is None) self.assertEqual(Overrides._meta.indexes, []) self.assertFalse(Overrides._meta.only_save_dirty) self.assertEqual(Overrides._meta.options, {'foo': 'bar'}) self.assertTrue(Overrides._meta.schema is None) def test_temporary_inheritance(self): class T0(TestModel): pass class T1(TestModel): class Meta: temporary = True class T2(T1): pass class T3(T1): class Meta: temporary = False self.assertFalse(T0._meta.temporary) self.assertTrue(T1._meta.temporary) self.assertTrue(T2._meta.temporary) self.assertFalse(T3._meta.temporary) class TestModelMetadataMisc(BaseTestCase): database = get_in_memory_db() def test_subclass_aware_metadata(self): class SchemaPropagateMetadata(SubclassAwareMetadata): @property def schema(self): return self._schema @schema.setter def schema(self, value): # self.models is a singleton, essentially, shared among all # classes that use this metadata implementation. for model in self.models: model._meta._schema = value class Base(Model): class Meta: database = self.database model_metadata_class = SchemaPropagateMetadata class User(Base): username = TextField() class Tweet(Base): user = ForeignKeyField(User, backref='tweets') content = TextField() self.assertTrue(User._meta.schema is None) self.assertTrue(Tweet._meta.schema is None) Base._meta.schema = 'temp' self.assertEqual(User._meta.schema, 'temp') self.assertEqual(Tweet._meta.schema, 'temp') User._meta.schema = None for model in (Base, User, Tweet): self.assertTrue(model._meta.schema is None) class TestModelSetDatabase(BaseTestCase): def test_set_database(self): class Register(Model): value = IntegerField() db_a = get_in_memory_db() db_b = get_in_memory_db() Register._meta.set_database(db_a) Register.create_table() Register._meta.set_database(db_b) self.assertFalse(Register.table_exists()) self.assertEqual(db_a.get_tables(), ['register']) self.assertEqual(db_b.get_tables(), []) db_a.close() db_b.close() class TestForeignKeyFieldDescriptors(BaseTestCase): def test_foreign_key_field_descriptors(self): class User(Model): pass class T0(Model): user = ForeignKeyField(User) class T1(Model): user = ForeignKeyField(User, column_name='uid') class T2(Model): user = ForeignKeyField(User, object_id_name='uid') class T3(Model): user = ForeignKeyField(User, column_name='x', object_id_name='uid') class T4(Model): foo = ForeignKeyField(User, column_name='user') class T5(Model): foo = ForeignKeyField(User, object_id_name='uid') self.assertEqual(T0.user.object_id_name, 'user_id') self.assertEqual(T1.user.object_id_name, 'uid') self.assertEqual(T2.user.object_id_name, 'uid') self.assertEqual(T3.user.object_id_name, 'uid') self.assertEqual(T4.foo.object_id_name, 'user') self.assertEqual(T5.foo.object_id_name, 'uid') user = User(id=1337) self.assertEqual(T0(user=user).user_id, 1337) self.assertEqual(T1(user=user).uid, 1337) self.assertEqual(T2(user=user).uid, 1337) self.assertEqual(T3(user=user).uid, 1337) self.assertEqual(T4(foo=user).user, 1337) self.assertEqual(T5(foo=user).uid, 1337) def conflicts_with_field(): class TE(Model): user = ForeignKeyField(User, object_id_name='user') self.assertRaises(ValueError, conflicts_with_field) def test_column_name(self): class User(Model): pass class T1(Model): user = ForeignKeyField(User, column_name='user') 
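# When the explicit column_name collides with the field name, peewee exposes the # raw column value under "<field>_id" instead, so both the instance accessor # (obj.user) and the id accessor (obj.user_id) remain usable: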
self.assertEqual(T1.user.column_name, 'user') self.assertEqual(T1.user.object_id_name, 'user_id') class TestModelAliasFieldProperties(ModelTestCase): database = get_in_memory_db() def test_field_properties(self): class Person(TestModel): name = TextField() dob = DateField() class Meta: database = self.database class Job(TestModel): worker = ForeignKeyField(Person, backref='jobs') client = ForeignKeyField(Person, backref='jobs_hired') class Meta: database = self.database Worker = Person.alias() Client = Person.alias() expected_sql = ( 'SELECT "t1"."id", "t1"."worker_id", "t1"."client_id" ' 'FROM "job" AS "t1" ' 'INNER JOIN "person" AS "t2" ON ("t1"."client_id" = "t2"."id") ' 'INNER JOIN "person" AS "t3" ON ("t1"."worker_id" = "t3"."id") ' 'WHERE (date_part(?, "t2"."dob") = ?)') expected_params = ['year', 1983] query = (Job .select() .join(Client, on=(Job.client == Client.id)) .switch(Job) .join(Worker, on=(Job.worker == Worker.id)) .where(Client.dob.year == 1983)) self.assertSQL(query, expected_sql, expected_params) query = (Job .select() .join(Client, on=(Job.client == Client.id)) .switch(Job) .join(Person, on=(Job.worker == Person.id)) .where(Client.dob.year == 1983)) self.assertSQL(query, expected_sql, expected_params) query = (Job .select() .join(Person, on=(Job.client == Person.id)) .switch(Job) .join(Worker, on=(Job.worker == Worker.id)) .where(Person.dob.year == 1983)) self.assertSQL(query, expected_sql, expected_params) class OnConflictTests(object): requires = [Emp] test_data = ( ('huey', 'cat', '123'), ('zaizee', 'cat', '124'), ('mickey', 'dog', '125'), ) def setUp(self): super(OnConflictTests, self).setUp() for first, last, empno in self.test_data: Emp.create(first=first, last=last, empno=empno) def assertData(self, expected): query = (Emp .select(Emp.first, Emp.last, Emp.empno) .order_by(Emp.id) .tuples()) self.assertEqual(list(query), expected) def test_ignore(self): query = (Emp .insert(first='foo', last='bar', empno='123') .on_conflict('ignore') .execute()) self.assertData(list(self.test_data)) def requires_upsert(m): return skip_unless(IS_SQLITE_24 or IS_POSTGRESQL or IS_CRDB, 'requires upsert')(m) class KV(TestModel): key = CharField(unique=True) value = IntegerField() class PGOnConflictTests(OnConflictTests): @requires_upsert def test_update(self): # Conflict on empno - we'll preserve name and update the ID. This will # overwrite the previous row and set a new ID. res = (Emp .insert(first='foo', last='bar', empno='125') .on_conflict( conflict_target=(Emp.empno,), preserve=(Emp.first, Emp.last), update={Emp.empno: '125.1'}) .execute()) self.assertData([ ('huey', 'cat', '123'), ('zaizee', 'cat', '124'), ('foo', 'bar', '125.1')]) # Conflicts on first/last name. The first name is preserved while the # last-name is updated. The new empno is thrown out. res = (Emp .insert(first='foo', last='bar', empno='126') .on_conflict( conflict_target=(Emp.first, Emp.last), preserve=(Emp.first,), update={Emp.last: 'baze'}) .execute()) self.assertData([ ('huey', 'cat', '123'), ('zaizee', 'cat', '124'), ('foo', 'baze', '125.1')]) @requires_upsert @requires_models(OCTest) def test_update_ignore_with_conflict_target(self): query = OCTest.insert(a='foo', b=1).on_conflict( action='IGNORE', conflict_target=(OCTest.a,)) rowid1 = query.execute() self.assertTrue(rowid1 is not None) query.clone().execute() # Nothing happens, insert is ignored. 
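# (action='IGNORE' maps to ON CONFLICT ... DO NOTHING here, so re-executing the # same insert leaves the table untouched.)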
self.assertEqual(OCTest.select().count(), 1) OCTest.insert(a='foo', b=2).on_conflict_ignore().execute() self.assertEqual(OCTest.select().count(), 1) OCTest.insert(a='bar', b=1).on_conflict_ignore().execute() self.assertEqual(OCTest.select().count(), 2) @requires_upsert @requires_models(OCTest) def test_update_atomic(self): # Add a new row with the given "a" value. If a conflict occurs, # re-insert with b=b+2. query = OCTest.insert(a='foo', b=1).on_conflict( conflict_target=(OCTest.a,), update={OCTest.b: OCTest.b + 2}) # First execution returns rowid=1. Second execution hits the conflict- # resolution, and will update the value in "b" from 1 -> 3. rowid1 = query.execute() rowid2 = query.clone().execute() self.assertEqual(rowid1, rowid2) obj = OCTest.get() self.assertEqual(obj.a, 'foo') self.assertEqual(obj.b, 3) query = OCTest.insert(a='foo', b=4, c=5).on_conflict( conflict_target=[OCTest.a], preserve=[OCTest.c], update={OCTest.b: OCTest.b + 100}) self.assertEqual(query.execute(), rowid2) obj = OCTest.get() self.assertEqual(obj.a, 'foo') self.assertEqual(obj.b, 103) self.assertEqual(obj.c, 5) @requires_upsert @requires_models(OCTest) def test_update_where_clause(self): # Add a new row with the given "a" value. If a conflict occurs, # re-insert with b=b+2 so long as the original b < 3. query = OCTest.insert(a='foo', b=1).on_conflict( conflict_target=(OCTest.a,), update={OCTest.b: OCTest.b + 2}, where=(OCTest.b < 3)) # First execution returns rowid=1. Second execution hits the conflict- # resolution, and will update the value in "b" from 1 -> 3. rowid1 = query.execute() rowid2 = query.clone().execute() self.assertEqual(rowid1, rowid2) obj = OCTest.get() self.assertEqual(obj.a, 'foo') self.assertEqual(obj.b, 3) # Third execution also returns rowid=1. The WHERE clause prevents us # from updating "b" again. If this is SQLite, we get the rowid back, if # this is Postgresql we get None (since nothing happened). rowid3 = query.clone().execute() if IS_SQLITE: self.assertEqual(rowid1, rowid3) else: self.assertTrue(rowid3 is None) # Because we didn't satisfy the WHERE clause, the value in "b" is # not incremented again. obj = OCTest.get() self.assertEqual(obj.a, 'foo') self.assertEqual(obj.b, 3) @requires_upsert @requires_models(Emp) # Has unique on first/last, unique on empno. def test_conflict_update_excluded(self): e1 = Emp.create(first='huey', last='c', empno='10') e2 = Emp.create(first='zaizee', last='c', empno='20') res = (Emp.insert(first='huey', last='c', empno='30') .on_conflict(conflict_target=(Emp.first, Emp.last), update={Emp.empno: Emp.empno + EXCLUDED.empno}, where=(EXCLUDED.empno != Emp.empno)) .execute()) data = sorted(Emp.select(Emp.first, Emp.last, Emp.empno).tuples()) self.assertEqual(data, [('huey', 'c', '1030'), ('zaizee', 'c', '20')]) @requires_upsert @requires_models(KV) def test_conflict_update_excluded2(self): KV.create(key='k1', value=1) query = (KV.insert(key='k1', value=10) .on_conflict(conflict_target=[KV.key], update={KV.value: KV.value + EXCLUDED.value}, where=(EXCLUDED.value > KV.value))) query.execute() self.assertEqual(KV.select(KV.key, KV.value).tuples()[:], [('k1', 11)]) # Running it again will have no effect this time, since the new value # (10) is not greater than the pre-existing row value (11). 
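# (In the generated upsert, EXCLUDED refers to the row that was proposed for # insertion, so the guard reads roughly: DO UPDATE SET value = ("kv"."value" + # EXCLUDED."value") WHERE (EXCLUDED."value" > "kv"."value").)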
query.execute() self.assertEqual(KV.select(KV.key, KV.value).tuples()[:], [('k1', 11)]) @requires_upsert @skip_if(IS_CRDB, 'crdb does not support the WHERE clause') @requires_models(UKVP) def test_conflict_target_constraint_where(self): u1 = UKVP.create(key='k1', value=1, extra=1) u2 = UKVP.create(key='k2', value=2, extra=2) fields = [UKVP.key, UKVP.value, UKVP.extra] data = [('k1', 1, 2), ('k2', 2, 3)] # XXX: SQLite does not seem to accept parameterized values for the # conflict target WHERE clause (e.g., the partial index). So we have to # express this literally as ("extra" > 1) rather than using an # expression which will be parameterized. Hopefully SQLite's authors # decide this is a bug and fix it. if IS_SQLITE: conflict_where = UKVP.extra > SQL('1') else: conflict_where = UKVP.extra > 1 res = (UKVP.insert_many(data, fields) .on_conflict(conflict_target=(UKVP.key, UKVP.value), conflict_where=conflict_where, preserve=(UKVP.extra,)) .execute()) # How many rows exist? The first one would not have triggered the # conflict resolution, since the existing k1/1 row's "extra" value was # not greater than 1, thus it did not satisfy the index condition. # The second row (k2/2/3) would have triggered the resolution. self.assertEqual(UKVP.select().count(), 3) query = (UKVP .select(UKVP.key, UKVP.value, UKVP.extra) .order_by(UKVP.key, UKVP.value, UKVP.extra) .tuples()) self.assertEqual(list(query), [ ('k1', 1, 1), ('k1', 1, 2), ('k2', 2, 3)]) # Verify the primary-key of k2 did not change. u2_db = UKVP.get(UKVP.key == 'k2') self.assertEqual(u2_db.id, u2.id) @requires_mysql class TestUpsertMySQL(OnConflictTests, ModelTestCase): def test_replace(self): # Unique constraint on first/last would fail - replace. query = (Emp .insert(first='mickey', last='dog', empno='1337') .on_conflict('replace') .execute()) self.assertData([ ('huey', 'cat', '123'), ('zaizee', 'cat', '124'), ('mickey', 'dog', '1337')]) # Unique constraint on empno would fail - replace. query = (Emp .insert(first='nuggie', last='dog', empno='123') .on_conflict('replace') .execute()) self.assertData([ ('zaizee', 'cat', '124'), ('mickey', 'dog', '1337'), ('nuggie', 'dog', '123')]) # No problems, data added. 
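# (When nothing conflicts, REPLACE degrades to a plain INSERT. On conflict, MySQL # implements REPLACE as DELETE followed by INSERT, which is why the conflicting # rows above were re-added at the end with new ids rather than updated in place.)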
query = (Emp .insert(first='beanie', last='cat', empno='126') .on_conflict('replace') .execute()) self.assertData([ ('zaizee', 'cat', '124'), ('mickey', 'dog', '1337'), ('nuggie', 'dog', '123'), ('beanie', 'cat', '126')]) @requires_models(OCTest) def test_update(self): pk = (OCTest .insert(a='a', b=3) .on_conflict(update={OCTest.b: 1337}) .execute()) oc = OCTest.get(OCTest.a == 'a') self.assertEqual(oc.b, 3) pk2 = (OCTest .insert(a='a', b=4) .on_conflict(update={OCTest.b: OCTest.b + 10}) .execute()) self.assertEqual(pk, pk2) self.assertEqual(OCTest.select().count(), 1) oc = OCTest.get(OCTest.a == 'a') self.assertEqual(oc.b, 13) pk3 = (OCTest .insert(a='a2', b=5) .on_conflict(update={OCTest.b: 1337}) .execute()) self.assertTrue(pk3 != pk2) self.assertEqual(OCTest.select().count(), 2) oc = OCTest.get(OCTest.a == 'a2') self.assertEqual(oc.b, 5) @requires_models(OCTest) def test_update_preserve(self): OCTest.create(a='a', b=3) pk = (OCTest .insert(a='a', b=4) .on_conflict(preserve=[OCTest.b]) .execute()) oc = OCTest.get(OCTest.a == 'a') self.assertEqual(oc.b, 4) pk2 = (OCTest .insert(a='a', b=5, c=6) .on_conflict( preserve=[OCTest.c], update={OCTest.b: OCTest.b + 100}) .execute()) self.assertEqual(pk, pk2) self.assertEqual(OCTest.select().count(), 1) oc = OCTest.get(OCTest.a == 'a') self.assertEqual(oc.b, 104) self.assertEqual(oc.c, 6) class TestReplaceSqlite(OnConflictTests, ModelTestCase): database = get_in_memory_db() def test_replace(self): # Unique constraint on first/last would fail - replace. query = (Emp .insert(first='mickey', last='dog', empno='1337') .on_conflict('replace') .execute()) self.assertData([ ('huey', 'cat', '123'), ('zaizee', 'cat', '124'), ('mickey', 'dog', '1337')]) # Unique constraint on empno would fail - replace. query = (Emp .insert(first='nuggie', last='dog', empno='123') .on_conflict('replace') .execute()) self.assertData([ ('zaizee', 'cat', '124'), ('mickey', 'dog', '1337'), ('nuggie', 'dog', '123')]) # No problems, data added. query = (Emp .insert(first='beanie', last='cat', empno='126') .on_conflict('replace') .execute()) self.assertData([ ('zaizee', 'cat', '124'), ('mickey', 'dog', '1337'), ('nuggie', 'dog', '123'), ('beanie', 'cat', '126')]) def test_model_replace(self): Emp.replace(first='mickey', last='dog', empno='1337').execute() self.assertData([ ('huey', 'cat', '123'), ('zaizee', 'cat', '124'), ('mickey', 'dog', '1337')]) Emp.replace(first='beanie', last='cat', empno='999').execute() self.assertData([ ('huey', 'cat', '123'), ('zaizee', 'cat', '124'), ('mickey', 'dog', '1337'), ('beanie', 'cat', '999')]) Emp.replace_many([('h', 'cat', '123'), ('z', 'cat', '124'), ('b', 'cat', '125')], fields=[Emp.first, Emp.last, Emp.empno]).execute() self.assertData([ ('mickey', 'dog', '1337'), ('beanie', 'cat', '999'), ('h', 'cat', '123'), ('z', 'cat', '124'), ('b', 'cat', '125')]) @requires_sqlite class TestUpsertSqlite(PGOnConflictTests, ModelTestCase): database = get_in_memory_db() @skip_if(IS_SQLITE_24, 'requires sqlite < 3.24') def test_no_preserve_update_where(self): # Ensure on SQLite < 3.24 we cannot update or preserve values. 
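# (ON CONFLICT ... DO UPDATE was added in SQLite 3.24.0; older versions only # support the conflict-resolution forms like REPLACE and IGNORE, so peewee raises # ValueError rather than emit SQL the engine cannot execute.)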
base = Emp.insert(first='foo', last='bar', empno='125') preserve = base.on_conflict(preserve=[Emp.last]) self.assertRaises(ValueError, preserve.execute) update = base.on_conflict(update={Emp.empno: 'xxx'}) self.assertRaises(ValueError, update.execute) where = base.on_conflict(where=(Emp.id > 10)) self.assertRaises(ValueError, where.execute) @skip_unless(IS_SQLITE_24, 'requires sqlite >= 3.24') def test_update_meets_requirements(self): # Ensure that on >= 3.24 any updates meet the minimum criteria. base = Emp.insert(first='foo', last='bar', empno='125') # Must specify update or preserve. no_update_preserve = base.on_conflict(conflict_target=(Emp.empno,)) self.assertRaises(ValueError, no_update_preserve.execute) # Must specify a conflict target. no_conflict_target = base.on_conflict(update={Emp.empno: '125.1'}) self.assertRaises(ValueError, no_conflict_target.execute) @skip_unless(IS_SQLITE_24, 'requires sqlite >= 3.24') def test_do_nothing(self): query = (Emp .insert(first='foo', last='bar', empno='123') .on_conflict('nothing')) self.assertSQL(query, ( 'INSERT INTO "emp" ("first", "last", "empno") ' 'VALUES (?, ?, ?) ON CONFLICT DO NOTHING'), ['foo', 'bar', '123']) query.execute() # Conflict occurs with empno='123'. self.assertData(list(self.test_data)) class UKV(TestModel): key = TextField() value = TextField() extra = TextField(default='') class Meta: constraints = [ SQL('constraint ukv_key_value unique(key, value)'), ] class UKVRel(TestModel): key = TextField() value = TextField() extra = TextField() class Meta: indexes = ( (('key', 'value'), True), ) @requires_pglike class TestUpsertPostgresql(PGOnConflictTests, ModelTestCase): @requires_postgresql @requires_models(UKV) def test_conflict_target_constraint(self): u1 = UKV.create(key='k1', value='v1') u2 = UKV.create(key='k2', value='v2') ret = (UKV.insert(key='k1', value='v1', extra='e1') .on_conflict(conflict_target=(UKV.key, UKV.value), preserve=(UKV.extra,)) .execute()) self.assertEqual(ret, u1.id) # Changes were saved successfully. u1_db = UKV.get(UKV.key == 'k1') self.assertEqual(u1_db.key, 'k1') self.assertEqual(u1_db.value, 'v1') self.assertEqual(u1_db.extra, 'e1') self.assertEqual(UKV.select().count(), 2) ret = (UKV.insert(key='k2', value='v2', extra='e2') .on_conflict(conflict_constraint='ukv_key_value', preserve=(UKV.extra,)) .execute()) self.assertEqual(ret, u2.id) # Changes were saved successfully. 
u2_db = UKV.get(UKV.key == 'k2') self.assertEqual(u2_db.key, 'k2') self.assertEqual(u2_db.value, 'v2') self.assertEqual(u2_db.extra, 'e2') self.assertEqual(UKV.select().count(), 2) ret = (UKV.insert(key='k3', value='v3', extra='e3') .on_conflict(conflict_target=[UKV.key, UKV.value], preserve=[UKV.extra]) .execute()) self.assertTrue(ret > u2_db.id) self.assertEqual(UKV.select().count(), 3) @requires_models(UKV, UKVRel) def test_conflict_ambiguous_column(self): # k1/v1/e1, k2/v2/e0, k3/v3/e1 for i in [1, 2, 3]: UKV.create(key='k%s' % i, value='v%s' % i, extra='e%s' % (i % 2)) UKVRel.create(key='k1', value='v1', extra='x1') UKVRel.create(key='k2', value='v2', extra='x2') subq = UKV.select(UKV.key, UKV.value, UKV.extra) query = (UKVRel .insert_from(subq, [UKVRel.key, UKVRel.value, UKVRel.extra]) .on_conflict(conflict_target=[UKVRel.key, UKVRel.value], preserve=[UKVRel.extra], where=(UKVRel.key != 'k2'))) self.assertSQL(query, ( 'INSERT INTO "ukv_rel" ("key", "value", "extra") ' 'SELECT "t1"."key", "t1"."value", "t1"."extra" FROM "ukv" AS "t1" ' 'ON CONFLICT ("key", "value") DO UPDATE ' 'SET "extra" = EXCLUDED."extra" ' 'WHERE ("ukv_rel"."key" != ?) RETURNING "ukv_rel"."id"'), ['k2']) query.execute() query = (UKVRel .select(UKVRel.key, UKVRel.value, UKVRel.extra) .order_by(UKVRel.key)) self.assertEqual(list(query.tuples()), [ ('k1', 'v1', 'e1'), ('k2', 'v2', 'x2'), ('k3', 'v3', 'e1')]) class TestJoinSubquery(ModelTestCase): requires = [Person, Relationship] def test_join_subquery(self): # Set up some relationships such that there exists a relationship from # the left-hand to the right-hand name. data = ( ('charlie', None), ('huey', 'charlie'), ('mickey', 'charlie'), ('zaizee', 'charlie'), ('zaizee', 'huey')) people = {} def get_person(name): if name not in people: people[name] = Person.create(first=name, last=name, dob=datetime.date(2017, 1, 1)) return people[name] for person, related_to in data: p1 = get_person(person) if related_to is not None: p2 = get_person(related_to) Relationship.create(from_person=p1, to_person=p2) # Create the subquery. Friend = Person.alias('friend') subq = (Relationship .select(Friend.first.alias('friend_name'), Relationship.from_person) .join(Friend, on=(Relationship.to_person == Friend.id)) .alias('subq')) # Outer query does a LEFT OUTER JOIN. We join on the subquery because # it uses an INNER JOIN, saving us doing two LEFT OUTER joins in the # single query. query = (Person .select(Person.first, subq.c.friend_name) .join(subq, JOIN.LEFT_OUTER, on=(Person.id == subq.c.from_person_id)) .order_by(Person.first, subq.c.friend_name)) self.assertSQL(query, ( 'SELECT "t1"."first", "subq"."friend_name" ' 'FROM "person" AS "t1" ' 'LEFT OUTER JOIN (' 'SELECT "friend"."first" AS "friend_name", "t2"."from_person_id" ' 'FROM "relationship" AS "t2" ' 'INNER JOIN "person" AS "friend" ' 'ON ("t2"."to_person_id" = "friend"."id")) AS "subq" ' 'ON ("t1"."id" = "subq"."from_person_id") ' 'ORDER BY "t1"."first", "subq"."friend_name"'), []) db_data = [row for row in query.tuples()] self.assertEqual(db_data, list(data)) class TestSumCase(ModelTestCase): @requires_models(User) def test_sum_case(self): for username in ('charlie', 'huey', 'zaizee'): User.create(username=username) case = Case(None, [(User.username.endswith('e'), 1)], 0) e_sum = fn.SUM(case) query = (User .select(User.username, e_sum.alias('e_sum')) .group_by(User.username) .order_by(User.username)) self.assertSQL(query, ( 'SELECT "t1"."username", ' 'SUM(CASE WHEN ("t1"."username" ILIKE ?) THEN ? ELSE ? 
END) ' 'AS "e_sum" ' 'FROM "users" AS "t1" ' 'GROUP BY "t1"."username" ' 'ORDER BY "t1"."username"'), ['%e', 1, 0]) data = [(user.username, user.e_sum) for user in query] self.assertEqual(data, [ ('charlie', 1), ('huey', 0), ('zaizee', 1)]) class TUser(TestModel): username = TextField() class Transaction(TestModel): user = ForeignKeyField(TUser, backref='transactions') amount = FloatField(default=0.) class TestMaxAlias(ModelTestCase): requires = [Transaction, TUser] def test_max_alias(self): with self.database.atomic(): charlie = TUser.create(username='charlie') huey = TUser.create(username='huey') data = ( (charlie, 10.), (charlie, 20.), (charlie, 30.), (huey, 1.5), (huey, 2.5)) for user, amount in data: Transaction.create(user=user, amount=amount) with self.assertQueryCount(1): amount = fn.MAX(Transaction.amount).alias('amount') query = (Transaction .select(amount, TUser.username) .join(TUser) .group_by(TUser.username) .order_by(TUser.username)) data = [(txn.amount, txn.user.username) for txn in query] self.assertEqual(data, [ (30., 'charlie'), (2.5, 'huey')]) class CNote(TestModel): content = TextField() timestamp = TimestampField() class CFile(TestModel): filename = CharField(primary_key=True) data = TextField() timestamp = TimestampField() class TestCompoundSelectModels(ModelTestCase): requires = [CFile, CNote] def setUp(self): super(TestCompoundSelectModels, self).setUp() def generate_ts(): i = [0] def _inner(): i[0] += 1 return datetime.datetime(2018, 1, i[0]) return _inner make_ts = generate_ts() self.ts = lambda i: datetime.datetime(2018, 1, i) with self.database.atomic(): for i, content in enumerate(('note-a', 'note-b', 'note-c'), 1): CNote.create(id=i, content=content, timestamp=make_ts()) file_data = ( ('peewee.txt', 'peewee orm'), ('walrus.txt', 'walrus redis toolkit'), ('huey.txt', 'huey task queue')) for filename, data in file_data: CFile.create(filename=filename, data=data, timestamp=make_ts()) def test_mix_models_with_model_row_type(self): cast = 'CHAR' if IS_MYSQL else 'TEXT' lhs = CNote.select(CNote.id.cast(cast).alias('id_text'), CNote.content, CNote.timestamp) rhs = CFile.select(CFile.filename, CFile.data, CFile.timestamp) query = (lhs | rhs).order_by(SQL('timestamp')).limit(4) data = [(n.id_text, n.content, n.timestamp) for n in query] self.assertEqual(data, [ ('1', 'note-a', self.ts(1)), ('2', 'note-b', self.ts(2)), ('3', 'note-c', self.ts(3)), ('peewee.txt', 'peewee orm', self.ts(4))]) def test_mixed_models_tuple_row_type(self): cast = 'CHAR' if IS_MYSQL else 'TEXT' lhs = CNote.select(CNote.id.cast(cast).alias('id'), CNote.content, CNote.timestamp) rhs = CFile.select(CFile.filename, CFile.data, CFile.timestamp) query = (lhs | rhs).order_by(SQL('timestamp')).limit(5) self.assertEqual(list(query.tuples()), [ ('1', 'note-a', self.ts(1)), ('2', 'note-b', self.ts(2)), ('3', 'note-c', self.ts(3)), ('peewee.txt', 'peewee orm', self.ts(4)), ('walrus.txt', 'walrus redis toolkit', self.ts(5))]) def test_mixed_models_dict_row_type(self): notes = CNote.select(CNote.content, CNote.timestamp) files = CFile.select(CFile.filename, CFile.timestamp) query = (notes | files).order_by(SQL('timestamp').desc()).limit(4) self.assertEqual(list(query.dicts()), [ {'content': 'huey.txt', 'timestamp': self.ts(6)}, {'content': 'walrus.txt', 'timestamp': self.ts(5)}, {'content': 'peewee.txt', 'timestamp': self.ts(4)}, {'content': 'note-c', 'timestamp': self.ts(3)}]) class SequenceModel(TestModel): seq_id = IntegerField(sequence='seq_id_sequence') key = TextField() @requires_pglike class 
TestSequence(ModelTestCase): requires = [SequenceModel] def test_create_table(self): query = SequenceModel._schema._create_table() self.assertSQL(query, ( 'CREATE TABLE IF NOT EXISTS "sequence_model" (' '"id" SERIAL NOT NULL PRIMARY KEY, ' '"seq_id" INTEGER NOT NULL DEFAULT NEXTVAL(\'seq_id_sequence\'), ' '"key" TEXT NOT NULL)'), []) def test_sequence(self): for key in ('k1', 'k2', 'k3'): SequenceModel.create(key=key) s1, s2, s3 = SequenceModel.select().order_by(SequenceModel.key) self.assertEqual(s1.seq_id, 1) self.assertEqual(s2.seq_id, 2) self.assertEqual(s3.seq_id, 3) @requires_postgresql class TestUpdateFromIntegration(ModelTestCase): requires = [User] def test_update_from(self): u1, u2 = [User.create(username=username) for username in ('u1', 'u2')] data = [(u1.id, 'u1-x'), (u2.id, 'u2-x')] vl = ValuesList(data, columns=('id', 'username'), alias='tmp') (User .update({User.username: vl.c.username}) .from_(vl) .where(User.id == vl.c.id) .execute()) usernames = [u.username for u in User.select().order_by(User.username)] self.assertEqual(usernames, ['u1-x', 'u2-x']) def test_update_from_subselect(self): u1, u2 = [User.create(username=username) for username in ('u1', 'u2')] data = [(u1.id, 'u1-y'), (u2.id, 'u2-y')] vl = ValuesList(data, columns=('id', 'username'), alias='tmp') subq = vl.select(vl.c.id, vl.c.username) (User .update({User.username: subq.c.username}) .from_(subq) .where(User.id == subq.c.id) .execute()) usernames = [u.username for u in User.select().order_by(User.username)] self.assertEqual(usernames, ['u1-y', 'u2-y']) @requires_models(User, Tweet) def test_update_from_simple(self): u = User.create(username='u1') t1 = Tweet.create(user=u, content='t1') t2 = Tweet.create(user=u, content='t2') (User .update({User.username: Tweet.content}) .from_(Tweet) .where(Tweet.content == 't2') .execute()) self.assertEqual(User.get(User.id == u.id).username, 't2') @requires_postgresql class TestLateralJoin(ModelTestCase): requires = [User, Tweet] def test_lateral_join(self): with self.database.atomic(): for i in range(3): u = User.create(username='u%s' % i) for j in range(4): Tweet.create(user=u, content='u%s-t%s' % (i, j)) # GOAL: query users and their 2 most-recent tweets (by ID). TA = Tweet.alias() # The "outer loop" will be iterating over the users whose tweets we are # trying to find. user_query = (User .select(User.id, User.username) .order_by(User.id) .alias('uq')) # The inner loop will select tweets and is correlated to the outer loop # via the WHERE clause. Note that we are using a LIMIT clause. 
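# A LATERAL subquery may reference columns of tables that appear earlier in the # FROM list, so the LIMIT applies per outer row. The NodeList built below renders # approximately: FROM (...) AS "uq" LEFT JOIN LATERAL (...) AS "pq" ON true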
tweet_query = (TA .select(TA.id, TA.content) .where(TA.user == user_query.c.id) .order_by(TA.id.desc()) .limit(2) .alias('pq')) join = NodeList((user_query, SQL('LEFT JOIN LATERAL'), tweet_query, SQL('ON %s', [True]))) query = (Tweet .select(user_query.c.username, tweet_query.c.content) .from_(join) .dicts()) self.assertEqual([row for row in query], [ {'username': 'u0', 'content': 'u0-t3'}, {'username': 'u0', 'content': 'u0-t2'}, {'username': 'u1', 'content': 'u1-t3'}, {'username': 'u1', 'content': 'u1-t2'}, {'username': 'u2', 'content': 'u2-t3'}, {'username': 'u2', 'content': 'u2-t2'}]) class Task(TestModel): heading = ForeignKeyField('self', backref='tasks', null=True) project = ForeignKeyField('self', backref='projects', null=True) title = TextField() type = IntegerField() PROJECT = 1 HEADING = 2 class TestMultiSelfJoin(ModelTestCase): requires = [Task] def setUp(self): super(TestMultiSelfJoin, self).setUp() with self.database.atomic(): p_dev = Task.create(title='dev', type=Task.PROJECT) p_p = Task.create(title='peewee', project=p_dev, type=Task.PROJECT) p_h = Task.create(title='huey', project=p_dev, type=Task.PROJECT) heading_data = ( ('peewee-1', p_p, 2), ('peewee-2', p_p, 0), ('huey-1', p_h, 1), ('huey-2', p_h, 1)) for title, proj, n_subtasks in heading_data: t = Task.create(title=title, project=proj, type=Task.HEADING) for i in range(n_subtasks): Task.create(title='%s-%s' % (title, i + 1), project=proj, heading=t, type=Task.HEADING) def test_multi_self_join(self): Project = Task.alias() Heading = Task.alias() query = (Task .select(Task, Project, Heading) .join(Heading, JOIN.LEFT_OUTER, on=(Task.heading == Heading.id).alias('heading')) .switch(Task) .join(Project, JOIN.LEFT_OUTER, on=(Task.project == Project.id).alias('project')) .order_by(Task.id)) with self.assertQueryCount(1): accum = [] for task in query: h_title = task.heading.title if task.heading else None p_title = task.project.title if task.project else None accum.append((task.title, h_title, p_title)) self.assertEqual(accum, [ # title - heading - project ('dev', None, None), ('peewee', None, 'dev'), ('huey', None, 'dev'), ('peewee-1', None, 'peewee'), ('peewee-1-1', 'peewee-1', 'peewee'), ('peewee-1-2', 'peewee-1', 'peewee'), ('peewee-2', None, 'peewee'), ('huey-1', None, 'huey'), ('huey-1-1', 'huey-1', 'huey'), ('huey-2', None, 'huey'), ('huey-2-1', 'huey-2', 'huey'), ]) class Product(TestModel): name = TextField() price = IntegerField() flags = IntegerField(constraints=[SQL('DEFAULT 99')]) status = CharField(constraints=[Check("status IN ('a', 'b', 'c')")]) class Meta: constraints = [Check('price > 0')] class TestModelConstraints(ModelTestCase): requires = [Product] @skip_if(IS_MYSQL) # MySQL fails intermittently on Travis-CI (?). def test_model_constraints(self): p = Product.create(name='p1', price=1, status='a') self.assertTrue(p.flags is None) # Price was saved successfully, flags got server-side default value. p_db = Product.get(Product.id == p.id) self.assertEqual(p_db.price, 1) self.assertEqual(p_db.flags, 99) self.assertEqual(p_db.status, 'a') # Cannot update price with invalid value, must be > 0. with self.database.atomic(): p.price = -1 self.assertRaises(IntegrityError, p.save) # Nor can we create a new product with an invalid price. with self.database.atomic(): self.assertRaises(IntegrityError, Product.create, name='p2', price=0, status='a') # Cannot set status to a value other than 'a', 'b' or 'c'.
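# (This is enforced by the field-level CHECK constraint declared on "status".)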
with self.database.atomic(): p.price = 1 p.status = 'd' self.assertRaises(IntegrityError, p.save) # Cannot create a new product with invalid status. with self.database.atomic(): self.assertRaises(IntegrityError, Product.create, name='p3', price=1, status='x') class TestModelFieldReprs(BaseTestCase): def test_model_reprs(self): class User(Model): username = TextField(primary_key=True) class Tweet(Model): user = ForeignKeyField(User, backref='tweets') content = TextField() timestamp = TimestampField() class EAV(Model): entity = TextField() attribute = TextField() value = TextField() class Meta: primary_key = CompositeKey('entity', 'attribute') class NoPK(Model): key = TextField() class Meta: primary_key = False self.assertEqual(repr(User), '<Model: User>') self.assertEqual(repr(Tweet), '<Model: Tweet>') self.assertEqual(repr(EAV), '<Model: EAV>') self.assertEqual(repr(NoPK), '<Model: NoPK>') self.assertEqual(repr(User()), '<User: None>') self.assertEqual(repr(Tweet()), '<Tweet: None>') self.assertEqual(repr(EAV()), '<EAV: (None, None)>') self.assertEqual(repr(NoPK()), '<NoPK: n/a>') self.assertEqual(repr(User(username='huey')), '<User: huey>') self.assertEqual(repr(Tweet(id=1337)), '<Tweet: 1337>') self.assertEqual(repr(EAV(entity='e', attribute='a')), "<EAV: ('e', 'a')>") self.assertEqual(repr(NoPK(key='k')), '<NoPK: n/a>') self.assertEqual(repr(User.username), '<TextField: User.username>') self.assertEqual(repr(Tweet.user), '<ForeignKeyField: Tweet.user>') self.assertEqual(repr(EAV.entity), '<TextField: EAV.entity>') self.assertEqual(repr(TextField()), '<TextField: (unbound)>') def test_model_str_method(self): class User(Model): username = TextField(primary_key=True) def __str__(self): return self.username.title() u = User(username='charlie') self.assertEqual(repr(u), '<User: Charlie>') class TestGetWithSecondDatabase(ModelTestCase): database = get_in_memory_db() requires = [User] def test_get_with_second_database(self): User.create(username='huey') query = User.select().where(User.username == 'huey') self.assertEqual(query.get().username, 'huey') alt_db = get_in_memory_db() with User.bind_ctx(alt_db): User.create_table() self.assertRaises(User.DoesNotExist, query.get, alt_db) with User.bind_ctx(alt_db): User.create(username='zaizee') query = User.select().where(User.username == 'zaizee') self.assertRaises(User.DoesNotExist, query.get) self.assertEqual(query.get(alt_db).username, 'zaizee') class TestMixModelsTables(ModelTestCase): database = get_in_memory_db() requires = [User] def test_mix_models_tables(self): Tbl = User._meta.table self.assertEqual(Tbl.insert({Tbl.username: 'huey'}).execute(), 1) huey = Tbl.select(User.username).get() self.assertEqual(huey, {'username': 'huey'}) huey = User.select(Tbl.username).get() self.assertEqual(huey.username, 'huey') Tbl.update(username='huey-x').where(Tbl.username == 'huey').execute() self.assertEqual(User.select().get().username, 'huey-x') Tbl.delete().where(User.username == 'huey-x').execute() self.assertEqual(Tbl.select().count(), 0) class TestDatabaseExecuteQuery(ModelTestCase): database = get_in_memory_db() requires = [User] def test_execute_query(self): for username in ('huey', 'zaizee'): User.create(username=username) query = User.select().order_by(User.username.desc()) cursor = self.database.execute(query) self.assertEqual([row[1] for row in cursor], ['zaizee', 'huey']) class Datum(TestModel): key = TextField() value = IntegerField(null=True) class TestNullOrdering(ModelTestCase): requires = [Datum] def test_null_ordering(self): values = [('k1', 1), ('ka', None), ('k2', 2), ('kb', None)] Datum.insert_many(values, fields=[Datum.key, Datum.value]).execute() def assertOrder(ordering, expected): query = Datum.select().order_by(*ordering) self.assertEqual([d.key for d in query], expected) # Ascending order.
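# peewee renders nulls='last' as e.g. "value" ASC NULLS LAST on engines with # native support and falls back to an equivalent CASE expression elsewhere, so # these orderings behave the same across backends.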
nulls_last = (Datum.value.asc(nulls='last'), Datum.key) assertOrder(nulls_last, ['k1', 'k2', 'ka', 'kb']) nulls_first = (Datum.value.asc(nulls='first'), Datum.key) assertOrder(nulls_first, ['ka', 'kb', 'k1', 'k2']) # Descending order. nulls_last = (Datum.value.desc(nulls='last'), Datum.key) assertOrder(nulls_last, ['k2', 'k1', 'ka', 'kb']) nulls_first = (Datum.value.desc(nulls='first'), Datum.key) assertOrder(nulls_first, ['ka', 'kb', 'k2', 'k1']) # Invalid values. self.assertRaises(ValueError, Datum.value.desc, nulls='bar') self.assertRaises(ValueError, Datum.value.asc, nulls='foo') class Student(TestModel): name = TextField() class Course(TestModel): name = TextField() class Attendance(TestModel): student = ForeignKeyField(Student) course = ForeignKeyField(Course) class TestManyToManyJoining(ModelTestCase): requires = [Student, Course, Attendance] def setUp(self): super(TestManyToManyJoining, self).setUp() data = ( ('charlie', ('eng101', 'cs101', 'cs111')), ('huey', ('cats1', 'cats2', 'cats3')), ('zaizee', ('cats2', 'cats3'))) c = {} with self.database.atomic(): for name, courses in data: student = Student.create(name=name) for course in courses: if course not in c: c[course] = Course.create(name=course) Attendance.create(student=student, course=c[course]) def assertQuery(self, query): with self.assertQueryCount(1): query = query.order_by(Attendance.id) results = [(a.student.name, a.course.name) for a in query] self.assertEqual(results, [ ('charlie', 'eng101'), ('charlie', 'cs101'), ('charlie', 'cs111'), ('huey', 'cats1'), ('huey', 'cats2'), ('zaizee', 'cats2')]) def test_join_subquery(self): courses = (Course .select(Course.id, Course.name) .order_by(Course.id) .limit(5)) query = (Attendance .select(Attendance, Student, courses.c.name) .join_from(Attendance, Student) .join_from(Attendance, courses, on=(Attendance.course == courses.c.id))) self.assertQuery(query) @skip_if(IS_MYSQL) def test_join_where_subquery(self): courses = Course.select().order_by(Course.id).limit(5) query = (Attendance .select(Attendance, Student, Course) .join_from(Attendance, Student) .join_from(Attendance, Course) .where(Attendance.course.in_(courses))) self.assertQuery(query) class TestColumnNameStripping(ModelTestCase): database = get_in_memory_db() requires = [Person] def test_column_name_stripping(self): d1 = datetime.date(1990, 1, 1) d2 = datetime.date(1990, 1, 2) p1 = Person.create(first='f1', last='l1', dob=d1) p2 = Person.create(first='f2', last='l2', dob=d2) query = Person.select( fn.MIN(Person.dob), fn.MAX(Person.dob).alias('mdob')) # Get the row as a model.
row = query.get() self.assertEqual(row.dob, d1) self.assertEqual(row.mdob, d2) row = query.dicts().get() self.assertEqual(row['dob'], d1) self.assertEqual(row['mdob'], d2) class VL(TestModel): n = IntegerField() s = CharField() @skip_if(IS_SQLITE_OLD or IS_MYSQL or IS_CRDB) class TestValuesListIntegration(ModelTestCase): requires = [VL] _data = [(1, 'one'), (2, 'two'), (3, 'three')] def test_insert_into_select_from_vl(self): vl = ValuesList(self._data) cte = vl.cte('newvals', columns=['n', 's']) res = (VL .insert_from(cte.select(cte.c.n, cte.c.s), fields=[VL.n, VL.s]) .with_cte(cte) .execute()) vq = VL.select().order_by(VL.n) self.assertEqual([(v.n, v.s) for v in vq], self._data) def test_update_vl_cte(self): VL.insert_many(self._data).execute() new_values = [(1, 'One'), (3, 'Three'), (4, 'Four')] cte = ValuesList(new_values).cte('new_values', columns=('n', 's')) # We have to use a subquery to update the individual column, as SQLite # does not support UPDATE/FROM syntax. subq = (cte .select(cte.c.s) .where(VL.n == cte.c.n)) # Perform the update, assigning "s" the new value from the values # list, and restricting the overall update to rows whose "n" appears # in the values list. res = (VL .update(s=subq) .where(VL.n.in_(cte.select(cte.c.n))) .with_cte(cte) .execute()) vq = VL.select().order_by(VL.n) self.assertEqual([(v.n, v.s) for v in vq], [ (1, 'One'), (2, 'two'), (3, 'Three')]) def test_values_list(self): vl = ValuesList(self._data) query = vl.select(SQL('*')) self.assertEqual(list(query.tuples().bind(self.database)), self._data) @requires_postgresql def test_values_list_named_columns(self): vl = ValuesList(self._data).columns('idx', 'name') query = (vl .select(vl.c.idx, vl.c.name) .order_by(vl.c.idx.desc())) self.assertEqual(list(query.tuples().bind(self.database)), self._data[::-1]) def test_values_list_named_columns_in_cte(self): vl = ValuesList(self._data) cte = vl.cte('val', columns=('idx', 'name')) query = (cte .select(cte.c.idx, cte.c.name) .order_by(cte.c.idx.desc()) .with_cte(cte)) self.assertEqual(list(query.tuples().bind(self.database)), self._data[::-1]) def test_named_values_list(self): vl = ValuesList(self._data).alias('vl') query = vl.select() self.assertEqual(list(query.tuples().bind(self.database)), self._data) class C_Product(TestModel): name = CharField() price = IntegerField(default=0) class C_Archive(TestModel): name = CharField() price = IntegerField(default=0) class C_Part(TestModel): part = CharField(primary_key=True) sub_part = ForeignKeyField('self', null=True) @skip_unless(IS_POSTGRESQL) class TestDataModifyingCTEIntegration(ModelTestCase): requires = [C_Product, C_Archive, C_Part] def setUp(self): super(TestDataModifyingCTEIntegration, self).setUp() for i in range(5): C_Product.create(name='p%s' % i, price=i) mp1_c_g = C_Part.create(part='mp1-c-g') mp1_c = C_Part.create(part='mp1-c', sub_part=mp1_c_g) mp1 = C_Part.create(part='mp1', sub_part=mp1_c) mp2_c_g = C_Part.create(part='mp2-c-g') mp2_c = C_Part.create(part='mp2-c', sub_part=mp2_c_g) mp2 = C_Part.create(part='mp2', sub_part=mp2_c) def test_data_modifying_cte_delete(self): query = (C_Product.delete() .where(C_Product.price < 3) .returning(C_Product)) cte = query.cte('moved_rows') src = Select((cte,), (cte.c.id, cte.c.name, cte.c.price)) res = (C_Archive .insert_from(src, (C_Archive.id, C_Archive.name, C_Archive.price)) .with_cte(cte) .execute()) self.assertEqual(len(list(res)), 3) self.assertEqual( sorted([(p.name, p.price) for p in C_Product.select()]), [('p3', 3), ('p4', 4)]) self.assertEqual( sorted([(p.name, p.price) for
p in C_Archive.select()]), [('p0', 0), ('p1', 1), ('p2', 2)]) base = (C_Part .select(C_Part.sub_part, C_Part.part) .where(C_Part.part == 'mp1') .cte('included_parts', recursive=True, columns=('sub_part', 'part'))) PA = C_Part.alias('p') recursive = (PA .select(PA.sub_part, PA.part) .join(base, on=(PA.part == base.c.sub_part))) cte = base.union_all(recursive) sq = Select((cte,), (cte.c.part,)) res = (C_Part.delete() .where(C_Part.part.in_(sq)) .with_cte(cte) .execute()) self.assertEqual(sorted([p.part for p in C_Part.select()]), ['mp2', 'mp2-c', 'mp2-c-g']) def test_data_modifying_cte_update(self): # Populate archive table w/copy of data in product. C_Archive.insert_from( C_Product.select(), (C_Product.id, C_Product.name, C_Product.price)).execute() query = (C_Product .update(price=C_Product.price * 2) .returning(C_Product.id, C_Product.name, C_Product.price)) cte = query.cte('t') sq = cte.select_from(cte.c.id, cte.c.name, cte.c.price) self.assertEqual(sorted([(x.name, x.price) for x in sq]), [ ('p0', 0), ('p1', 2), ('p2', 4), ('p3', 6), ('p4', 8)]) # Ensure changes were persisted. self.assertEqual(sorted([(x.name, x.price) for x in C_Product]), [ ('p0', 0), ('p1', 2), ('p2', 4), ('p3', 6), ('p4', 8)]) sq = Select((cte,), (cte.c.id, cte.c.price)) res = (C_Archive .update(price=sq.c.price) .from_(sq) .where(C_Archive.id == sq.c.id) .with_cte(cte) .execute()) self.assertEqual(sorted([(x.name, x.price) for x in C_Product]), [ ('p0', 0), ('p1', 4), ('p2', 8), ('p3', 12), ('p4', 16)]) self.assertEqual(sorted([(x.name, x.price) for x in C_Archive]), [ ('p0', 0), ('p1', 4), ('p2', 8), ('p3', 12), ('p4', 16)]) def test_data_modifying_cte_insert(self): query = (C_Product .insert({'name': 'p5', 'price': 5}) .returning(C_Product.id, C_Product.name, C_Product.price)) cte = query.cte('t') sq = cte.select_from(cte.c.id, cte.c.name, cte.c.price) self.assertEqual([(p.name, p.price) for p in sq], [('p5', 5)]) query = (C_Product .insert({'name': 'p6', 'price': 6}) .returning(C_Product.id, C_Product.name, C_Product.price)) cte = query.cte('t') sq = Select((cte,), (cte.c.id, cte.c.name, cte.c.price)) res = (C_Archive .insert_from(sq, (sq.c.id, sq.c.name, sq.c.price)) .with_cte(cte) .execute()) self.assertEqual([(p.name, p.price) for p in C_Archive], [('p6', 6)]) self.assertEqual(sorted([(p.name, p.price) for p in C_Product]), [ ('p0', 0), ('p1', 1), ('p2', 2), ('p3', 3), ('p4', 4), ('p5', 5), ('p6', 6)]) class TestBindTo(ModelTestCase): requires = [User, Tweet] def test_bind_to(self): for i in (1, 2, 3): user = User.create(username='u%s' % i) Tweet.create(user=user, content='t%s' % i) # Alias to a particular field-name. name = Case(User.username, [ ('u1', 'user 1'), ('u2', 'user 2')], 'someone else') q = (Tweet .select(Tweet.content, name.alias('username').bind_to(User)) .join(User) .order_by(Tweet.content)) with self.assertQueryCount(1): self.assertEqual([(t.content, t.user.username) for t in q], [ ('t1', 'user 1'), ('t2', 'user 2'), ('t3', 'someone else')]) # Use a different alias. q = (Tweet .select(Tweet.content, name.alias('display').bind_to(User)) .join(User) .order_by(Tweet.content)) with self.assertQueryCount(1): self.assertEqual([(t.content, t.user.display) for t in q], [ ('t1', 'user 1'), ('t2', 'user 2'), ('t3', 'someone else')]) # Ensure works with model and field aliases. 
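# bind_to() attaches the computed column to the joined model instance, so the # Case value surfaces as t.user.display rather than as an attribute of the tweet # row itself: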
TA, UA = Tweet.alias(), User.alias() name = Case(UA.username, [ ('u1', 'user 1'), ('u2', 'user 2')], 'someone else') q = (TA .select(TA.content, name.alias('display').bind_to(UA)) .join(UA, on=(UA.id == TA.user)) .order_by(TA.content)) with self.assertQueryCount(1): self.assertEqual([(t.content, t.user.display) for t in q], [ ('t1', 'user 1'), ('t2', 'user 2'), ('t3', 'someone else')]) peewee-3.17.7/tests/mysql_ext.py000066400000000000000000000106651470346076600166510ustar00rootroot00000000000000import datetime from peewee import * from playhouse.mysql_ext import JSONField from playhouse.mysql_ext import Match from .base import IS_MYSQL_JSON from .base import ModelDatabaseTestCase from .base import ModelTestCase from .base import TestModel from .base import db_loader from .base import requires_mysql from .base import skip_if from .base import skip_unless try: import mariadb except ImportError: mariadb = mariadb_db = None else: mariadb_db = db_loader('mariadb') try: import mysql.connector as mysql_connector except ImportError: mysql_connector = None mysql_ext_db = db_loader('mysqlconnector') class Person(TestModel): first = CharField() last = CharField() dob = DateField(default=datetime.date(2000, 1, 1)) class Note(TestModel): person = ForeignKeyField(Person, backref='notes') content = TextField() timestamp = DateTimeField(default=datetime.datetime.now) class KJ(TestModel): key = CharField(primary_key=True, max_length=100) data = JSONField() @requires_mysql @skip_if(mysql_connector is None, 'mysql-connector not installed') class TestMySQLConnector(ModelTestCase): database = mysql_ext_db requires = [Person, Note] def test_basic_operations(self): with self.database.atomic(): charlie, huey, zaizee = [Person.create(first=f, last='leifer') for f in ('charlie', 'huey', 'zaizee')] # Use nested-transaction. with self.database.atomic(): data = ( (charlie, ('foo', 'bar', 'zai')), (huey, ('meow', 'purr', 'hiss')), (zaizee, ())) for person, notes in data: for note in notes: Note.create(person=person, content=note) with self.database.atomic() as sp: Person.create(first='x', last='y') sp.rollback() people = Person.select().order_by(Person.first) self.assertEqual([person.first for person in people], ['charlie', 'huey', 'zaizee']) with self.assertQueryCount(1): notes = (Note .select(Note, Person) .join(Person) .order_by(Note.content)) self.assertEqual([(n.person.first, n.content) for n in notes], [ ('charlie', 'bar'), ('charlie', 'foo'), ('huey', 'hiss'), ('huey', 'meow'), ('huey', 'purr'), ('charlie', 'zai')]) @requires_mysql @skip_if(mariadb is None, 'mariadb connector not installed') class TestMariaDBConnector(TestMySQLConnector): database = mariadb_db @requires_mysql @skip_unless(IS_MYSQL_JSON, 'requires MySQL 5.7+ or 8.x') class TestMySQLJSONField(ModelTestCase): requires = [KJ] def test_mysql_json_field(self): values = ( 0, 1.0, 2.3, True, False, 'string', ['foo', 'bar', 'baz'], {'k1': 'v1', 'k2': 'v2'}, {'k3': [0, 1.0, 2.3], 'k4': {'x1': 'y1', 'x2': 'y2'}}) for i, value in enumerate(values): # Verify data can be written. kj = KJ.create(key='k%s' % i, data=value) # Verify value is deserialized correctly. 
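# (KJ['k0'] is shorthand for KJ.get_by_id('k0'); the JSONField round-trips # values through json.dumps()/json.loads().)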
kj_db = KJ['k%s' % i] self.assertEqual(kj_db.data, value) kj = KJ.select().where(KJ.data.extract('$.k1') == 'v1').get() self.assertEqual(kj.key, 'k7') with self.assertRaises(IntegrityError): KJ.create(key='kx', data=None) @requires_mysql class TestMatchExpression(ModelDatabaseTestCase): requires = [Person] def test_match_expression(self): query = (Person .select() .where(Match(Person.first, 'charlie'))) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."first", "t1"."last", "t1"."dob" ' 'FROM "person" AS "t1" ' 'WHERE MATCH("t1"."first") AGAINST(?)'), ['charlie']) query = (Person .select() .where(Match((Person.first, Person.last), 'huey AND zaizee', 'IN BOOLEAN MODE'))) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."first", "t1"."last", "t1"."dob" ' 'FROM "person" AS "t1" ' 'WHERE MATCH("t1"."first", "t1"."last") ' 'AGAINST(? IN BOOLEAN MODE)'), ['huey AND zaizee']) peewee-3.17.7/tests/pool.py000066400000000000000000000414301470346076600155670ustar00rootroot00000000000000import heapq import os import threading import time from peewee import * from peewee import _savepoint from peewee import _transaction from playhouse.cockroachdb import PooledCockroachDatabase from playhouse.pool import * from playhouse.psycopg3_ext import Psycopg3Database from .base import BACKEND from .base import BaseTestCase from .base import IS_CRDB from .base import IS_MYSQL from .base import IS_POSTGRESQL from .base import IS_SQLITE from .base import ModelTestCase from .base import db_loader from .base_models import Register class FakeTransaction(_transaction): def _add_history(self, message): self.db.transaction_history.append( '%s%s' % (message, self._conn)) def __enter__(self): self._conn = self.db.connection() self._add_history('O') self.db.push_transaction(self) def __exit__(self, *args): self._add_history('X') self.db.pop_transaction() class FakeDatabase(SqliteDatabase): def __init__(self, *args, **kwargs): self.counter = self.closed_counter = kwargs.pop('counter', 0) self.transaction_history = [] super(FakeDatabase, self).__init__(*args, **kwargs) def _connect(self): self.counter += 1 return self.counter def _close(self, conn): self.closed_counter += 1 def transaction(self): return FakeTransaction(self) class FakePooledDatabase(PooledDatabase, FakeDatabase): def __init__(self, *args, **kwargs): super(FakePooledDatabase, self).__init__(*args, **kwargs) self.conn_key = lambda conn: conn class PooledTestDatabase(PooledDatabase, SqliteDatabase): pass class TestPooledDatabase(BaseTestCase): def setUp(self): super(TestPooledDatabase, self).setUp() self.db = FakePooledDatabase('testing') def test_connection_pool(self): # Closing and reopening a connection returns us the same conn. self.assertEqual(self.db.connection(), 1) self.assertEqual(self.db.connection(), 1) self.db.close() self.db.connect() self.assertEqual(self.db.connection(), 1) def test_reuse_connection(self): # Verify the connection pool correctly handles calling connect twice. self.assertEqual(self.db.connection(), 1) self.assertRaises(OperationalError, self.db.connect) self.assertFalse(self.db.connect(reuse_if_open=True)) self.assertEqual(self.db.connection(), 1) self.db.close() self.db.connect() self.assertEqual(self.db.connection(), 1) def test_concurrent_connections(self): db = FakePooledDatabase('testing') signal = threading.Event() def open_conn(): db.connect() signal.wait() db.close() # Simulate 5 concurrent connections. 
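# Connection state is tracked per-thread, so each worker checks out its own # connection; a connection is only recycled into the pool after the owning # thread closes it.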
threads = [threading.Thread(target=open_conn) for i in range(5)] for thread in threads: thread.start() # Wait for all connections to be opened. while db.counter < 5: time.sleep(.01) # Signal threads to close connections and join threads. signal.set() for t in threads: t.join() self.assertEqual(db.counter, 5) self.assertEqual( sorted([conn for _, _, conn in db._connections]), [1, 2, 3, 4, 5]) # All 5 are ready to be re-used. self.assertEqual(db._in_use, {}) def test_max_conns(self): for i in range(self.db._max_connections): self.db._state.closed = True # Hack to make it appear closed. self.db.connect() self.assertEqual(self.db.connection(), i + 1) self.db._state.closed = True self.assertRaises(ValueError, self.db.connect) def test_stale_timeout(self): # Create a test database with a very short stale timeout. db = FakePooledDatabase('testing', stale_timeout=.001) self.assertEqual(db.connection(), 1) self.assertTrue(1 in db._in_use) # Sleep long enough for the connection to be considered stale. time.sleep(.001) # When we close, since the conn is stale it won't be returned to # the pool. db.close() self.assertEqual(db._in_use, {}) self.assertEqual(db._connections, []) # A new connection will be returned. self.assertEqual(db.connection(), 2) def test_stale_on_checkout(self): # Create a test database with a very short stale timeout. db = FakePooledDatabase('testing', stale_timeout=.005) self.assertEqual(db.connection(), 1) self.assertTrue(1 in db._in_use) # When we close, the conn should not be stale so it will be returned # to the pool. db.close() assert len(db._connections) == 1, 'Test runner too slow!' # Sleep long enough for the connection to be considered stale. time.sleep(.005) self.assertEqual(db._in_use, {}) self.assertEqual(len(db._connections), 1) # A new connection will be returned, as the original one is stale. # The stale connection (1) will be removed. self.assertEqual(db.connection(), 2) def test_manual_close(self): self.assertEqual(self.db.connection(), 1) self.db.manual_close() # When we manually close a connection that's not yet stale, we add it # back to the queue (because close() calls _close()), then close it # for real, and mark it with a tombstone. The next time it's checked # out, it will simply be removed and skipped over. self.assertEqual(len(self.db._connections), 0) self.assertEqual(self.db._in_use, {}) self.assertEqual(self.db.connection(), 2) self.assertEqual(len(self.db._connections), 0) self.assertEqual(list(self.db._in_use.keys()), [2]) self.db.close() self.assertEqual(self.db.connection(), 2) def test_close_idle(self): db = FakePooledDatabase('testing', counter=3) now = time.time() heapq.heappush(db._connections, (now - 10, None, 3)) heapq.heappush(db._connections, (now - 5, None, 2)) heapq.heappush(db._connections, (now - 1, None, 1)) self.assertEqual(db.connection(), 3) self.assertTrue(3 in db._in_use) db.close_idle() self.assertEqual(len(db._connections), 0) self.assertEqual(len(db._in_use), 1) self.assertTrue(3 in db._in_use) self.assertEqual(db.connection(), 3) db.manual_close() self.assertEqual(db.connection(), 4) def test_close_stale(self): db = FakePooledDatabase('testing', counter=3) now = time.time() # Closing stale uses the last checkout time rather than the creation # time for the connection.
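# (PoolConnection is, roughly, a named tuple of (timestamp, connection, # checked_out); close_stale(age=N) closes in-use connections whose checkout # timestamp is older than N seconds.)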
db._in_use[1] = PoolConnection(now - 400, 1, now - 300) db._in_use[2] = PoolConnection(now - 200, 2, now - 200) db._in_use[3] = PoolConnection(now - 300, 3, now - 100) db._in_use[4] = PoolConnection(now, 4, now) self.assertEqual(db.close_stale(age=200), 2) self.assertEqual(len(db._in_use), 2) self.assertEqual(sorted(db._in_use), [3, 4]) def test_close_all(self): db = FakePooledDatabase('testing', counter=3) now = time.time() heapq.heappush(db._connections, (now - 10, None, 3)) heapq.heappush(db._connections, (now - 5, None, 2)) heapq.heappush(db._connections, (now - 1, None, 1)) self.assertEqual(db.connection(), 3) self.assertTrue(3 in db._in_use) db.close_all() self.assertEqual(len(db._connections), 0) self.assertEqual(len(db._in_use), 0) self.assertEqual(db.connection(), 4) def test_stale_timeout_cascade(self): now = time.time() db = FakePooledDatabase('testing', stale_timeout=10) conns = [ (now - 20, None, 1), (now - 15, None, 2), (now - 5, None, 3), (now, None, 4), ] for ts_conn in conns: heapq.heappush(db._connections, ts_conn) self.assertEqual(db.connection(), 3) self.assertEqual(len(db._in_use), 1) self.assertTrue(3 in db._in_use) self.assertEqual(db._connections, [(now, None, 4)]) def test_connect_cascade(self): now = time.time() class ClosedPooledDatabase(FakePooledDatabase): def _is_closed(self, conn): return conn in (2, 4) db = ClosedPooledDatabase('testing', stale_timeout=10) conns = [ (now - 15, None, 1), # Skipped due to being stale. (now - 5, None, 2), # Will appear closed. (now - 3, None, 3), (now, None, 4), # Will appear closed. ] db.counter = 4 # The next connection we create will have id=5. for ts_conn in conns: heapq.heappush(db._connections, ts_conn) # Conn 3 is not stale or closed, so we will get it. self.assertEqual(db.connection(), 3) self.assertEqual(len(db._in_use), 1) self.assertTrue(3 in db._in_use) pool_conn = db._in_use[3] self.assertEqual(pool_conn.timestamp, now - 3) self.assertEqual(pool_conn.connection, 3) self.assertEqual(db._connections, [(now, None, 4)]) # Since conn 4 is closed, we will open a new conn. db._state.closed = True # Pretend we're in a different thread. 
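# (Resetting the per-thread closed flag forces connect() to check out another # connection from the heap. Conn 4 appears closed, so a brand new conn 5 is # opened instead.)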
db.connect() self.assertEqual(db.connection(), 5) self.assertEqual(sorted(db._in_use.keys()), [3, 5]) self.assertEqual(db._connections, []) def test_db_context(self): self.assertEqual(self.db.connection(), 1) with self.db: self.assertEqual(self.db.connection(), 1) self.assertEqual(self.db.transaction_history, ['O1']) self.assertEqual(self.db.connection(), 1) self.assertEqual(self.db.transaction_history, ['O1', 'X1']) with self.db: self.assertEqual(self.db.connection(), 1) self.assertEqual(len(self.db._connections), 1) self.assertEqual(len(self.db._in_use), 0) def test_db_context_threads(self): signal = threading.Event() def create_context(): with self.db: signal.wait() threads = [threading.Thread(target=create_context) for i in range(5)] for thread in threads: thread.start() while len(self.db.transaction_history) < 5: time.sleep(.001) signal.set() for thread in threads: thread.join() self.assertEqual(self.db.counter, 5) self.assertEqual(len(self.db._connections), 5) self.assertEqual(len(self.db._in_use), 0) class TestLivePooledDatabase(ModelTestCase): database = PooledTestDatabase('test_pooled.db') requires = [Register] def tearDown(self): super(TestLivePooledDatabase, self).tearDown() self.database.close_idle() if os.path.exists('test_pooled.db'): os.unlink('test_pooled.db') def test_reuse_connection(self): for i in range(5): Register.create(value=i) conn_id = id(self.database.connection()) self.database.close() for i in range(5, 10): Register.create(value=i) self.assertEqual(id(self.database.connection()), conn_id) self.assertEqual( [x.value for x in Register.select().order_by(Register.id)], list(range(10))) def test_db_context(self): with self.database: Register.create(value=1) with self.database.atomic() as sp: self.assertTrue(isinstance(sp, _savepoint)) Register.create(value=2) sp.rollback() with self.database.atomic() as sp: self.assertTrue(isinstance(sp, _savepoint)) Register.create(value=3) with self.database: values = [r.value for r in Register.select().order_by(Register.id)] self.assertEqual(values, [1, 3]) def test_bad_connection(self): self.database.connection() try: self.database.execute_sql('select 1/0') except Exception as exc: pass self.database.close() self.database.connect() class TestPooledDatabaseIntegration(ModelTestCase): requires = [Register] def setUp(self): params = {} if IS_MYSQL: db_class = PooledMySQLDatabase elif IS_POSTGRESQL: if isinstance(self.database, Psycopg3Database): db_class = PooledPsycopg3Database else: db_class = PooledPostgresqlDatabase elif IS_CRDB: db_class = PooledCockroachDatabase else: db_class = PooledSqliteDatabase params['check_same_thread'] = False self.database = db_loader(BACKEND, db_class=db_class, **params) super(TestPooledDatabaseIntegration, self).setUp() def assertConnections(self, expected): available = len(self.database._connections) in_use = len(self.database._in_use) self.assertEqual(available + in_use, expected, 'expected %s, got: %s available, %s in use' % (expected, available, in_use)) def test_pooled_database_integration(self): # Connection should be open from the setup method. 
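# (ModelTestCase.setUp opens a connection while creating the required tables, # so the pool starts out with one connection checked out.)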
self.assertFalse(self.database.is_closed()) self.assertConnections(1) self.assertTrue(self.database.close()) self.assertTrue(self.database.is_closed()) self.assertConnections(1) signal = threading.Event() def connect(): self.assertTrue(self.database.is_closed()) self.assertTrue(self.database.connect()) self.assertFalse(self.database.is_closed()) signal.wait() self.assertTrue(self.database.close()) self.assertTrue(self.database.is_closed()) # Open connections in 4 separate threads. threads = [threading.Thread(target=connect) for _ in range(4)] for t in threads: t.start() while len(self.database._in_use) < 4: time.sleep(.005) # Close connections in all 4 threads. signal.set() for t in threads: t.join() # Verify that there are 4 connections available in the pool. self.assertConnections(4) self.assertEqual(len(self.database._connections), 4) # Available. self.assertEqual(len(self.database._in_use), 0) # Verify state of the main thread, just a sanity check. self.assertTrue(self.database.is_closed()) # Opening a connection will pull from the pool. self.assertTrue(self.database.connect()) self.assertFalse(self.database.connect(reuse_if_open=True)) self.assertConnections(4) self.assertEqual(len(self.database._in_use), 1) # Calling close_all() closes everything, including the calling thread's connection. self.database.close_all() self.assertConnections(0) self.assertTrue(self.database.is_closed()) def test_pool_with_models(self): self.database.close() signal = threading.Event() def create_obj(i): with self.database.connection_context(): with self.database.atomic(): Register.create(value=i) signal.wait() # Create 4 objects, one in each thread. The INSERT will be wrapped in a # transaction, and after COMMIT (but while the conn is still open), we # will wait for the signal that all objects were created. This ensures # that all our connections are open concurrently. threads = [threading.Thread(target=create_obj, args=(i,)) for i in range(4)] for t in threads: t.start() # Explicitly connect, as the connection is required to verify that all # the objects are present (and that it's safe to set the signal). self.assertTrue(self.database.connect()) while Register.select().count() != 4: time.sleep(0.005) # Signal threads that they can exit now and ensure all exited. signal.set() for t in threads: t.join() # Close connection from main thread as well. self.database.close() self.assertConnections(5) self.assertEqual(len(self.database._in_use), 0) # Cycle through the available connections, running a query on each, and # then manually closing it. for i in range(5): self.assertTrue(self.database.is_closed()) self.assertTrue(self.database.connect()) # Sanity check to verify objects are created.
query = Register.select().order_by(Register.value) self.assertEqual([r.value for r in query], [0, 1, 2, 3]) self.database.manual_close() self.assertConnections(4 - i) self.assertConnections(0) self.assertEqual(len(self.database._in_use), 0) peewee-3.17.7/tests/postgres.py000066400000000000000000001052721470346076600164710ustar00rootroot00000000000000#coding:utf-8 import datetime import functools import uuid from decimal import Decimal as Dc from types import MethodType from peewee import * from playhouse.postgres_ext import * from playhouse.reflection import Introspector from .base import BaseTestCase from .base import DatabaseTestCase from .base import ModelTestCase from .base import TestModel from .base import db_loader from .base import requires_models from .base import skip_unless from .base_models import Register from .base_models import Tweet from .base_models import User from .postgres_helpers import BaseBinaryJsonFieldTestCase from .postgres_helpers import BaseJsonFieldTestCase db = db_loader('postgres', db_class=PostgresqlExtDatabase) class HStoreModel(TestModel): name = CharField() data = HStoreField() D = HStoreModel.data class ArrayModel(TestModel): tags = ArrayField(CharField) ints = ArrayField(IntegerField, dimensions=2) class UUIDList(TestModel): key = CharField() id_list = ArrayField(BinaryUUIDField, convert_values=True, index=False) id_list_native = ArrayField(UUIDField, index=False) class ArrayTSModel(TestModel): key = CharField(max_length=100, primary_key=True) timestamps = ArrayField(TimestampField, convert_values=True) class DecimalArray(TestModel): values = ArrayField(DecimalField, field_kwargs={'decimal_places': 1}) class FTSModel(TestModel): title = CharField() data = TextField() fts_data = TSVectorField() try: class JsonModel(TestModel): data = JSONField() class JsonModelNull(TestModel): data = JSONField(null=True) class BJson(TestModel): data = BinaryJSONField() class JData(TestModel): d1 = BinaryJSONField() d2 = BinaryJSONField(index=False) except: BJson = JData = None JsonModel = JsonModelNull = None class Normal(TestModel): data = TextField() class Event(TestModel): name = CharField() duration = IntervalField() class TZModel(TestModel): dt = DateTimeTZField() class TestTZField(ModelTestCase): database = db requires = [TZModel] def test_tz_field(self): self.database.set_time_zone('us/eastern') # Our naive datetime is treated as if it were in US/Eastern. dt = datetime.datetime(2019, 1, 1, 12) tz = TZModel.create(dt=dt) self.assertTrue(tz.dt.tzinfo is None) # When we retrieve the row, psycopg2 will attach the appropriate tzinfo # data. The value is returned as an "aware" datetime in US/Eastern. tz_db = TZModel[tz.id] self.assertTrue(tz_db.dt.tzinfo is not None) self.assertEqual(tz_db.dt.timetuple()[:4], (2019, 1, 1, 12)) self.assertEqual(tz_db.dt.utctimetuple()[:4], (2019, 1, 1, 17)) class _UTC(datetime.tzinfo): def utcoffset(self, dt): return datetime.timedelta(0) def tzname(self, dt): return "UTC" def dst(self, dt): return datetime.timedelta(0) UTC = _UTC() # We can explicitly insert a row with a different timezone, however. # When we read the row back, it is returned in US/Eastern. dt2 = datetime.datetime(2019, 1, 1, 12, tzinfo=UTC) tz2 = TZModel.create(dt=dt2) tz2_db = TZModel[tz2.id] self.assertEqual(tz2_db.dt.timetuple()[:4], (2019, 1, 1, 7)) self.assertEqual(tz2_db.dt.utctimetuple()[:4], (2019, 1, 1, 12)) # Querying using naive datetime, treated as localtime (US/Eastern). 
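# (A sketch of why this works: the timestamptz column stores an absolute
# instant, and Postgres interprets a zone-less comparison value using the
# session TimeZone configured via set_time_zone() above.)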
tzq1 = TZModel.get(TZModel.dt == dt) self.assertEqual(tzq1.id, tz.id) # Querying using aware datetime, tzinfo is respected. tzq2 = TZModel.get(TZModel.dt == dt2) self.assertEqual(tzq2.id, tz2.id) # Change the connection timezone? self.database.set_time_zone('us/central') tz_db = TZModel[tz.id] self.assertEqual(tz_db.dt.timetuple()[:4], (2019, 1, 1, 11)) self.assertEqual(tz_db.dt.utctimetuple()[:4], (2019, 1, 1, 17)) tz2_db = TZModel[tz2.id] self.assertEqual(tz2_db.dt.timetuple()[:4], (2019, 1, 1, 6)) self.assertEqual(tz2_db.dt.utctimetuple()[:4], (2019, 1, 1, 12)) class TestHStoreField(ModelTestCase): database = db_loader('postgres', db_class=PostgresqlExtDatabase, register_hstore=True) requires = [HStoreModel] def setUp(self): super(TestHStoreField, self).setUp() self.t1 = HStoreModel.create(name='t1', data={'k1': 'v1', 'k2': 'v2'}) self.t2 = HStoreModel.create(name='t2', data={'k2': 'v2', 'k3': 'v3'}) def by_name(self, name): return HStoreModel.get(HStoreModel.name == name).data def test_hstore_storage(self): self.assertEqual(self.by_name('t1'), {'k1': 'v1', 'k2': 'v2'}) self.assertEqual(self.by_name('t2'), {'k2': 'v2', 'k3': 'v3'}) self.t1.data = {'k4': 'v4'} self.t1.save() self.assertEqual(self.by_name('t1'), {'k4': 'v4'}) HStoreModel.create(name='t3', data={}) self.assertEqual(self.by_name('t3'), {}) def query(self, *cols): return (HStoreModel .select(HStoreModel.name, *cols) .order_by(HStoreModel.id)) def test_hstore_selecting(self): query = self.query(D.keys().alias('keys')) self.assertEqual([(x.name, sorted(x.keys)) for x in query], [ ('t1', ['k1', 'k2']), ('t2', ['k2', 'k3'])]) query = self.query(D.values().alias('vals')) self.assertEqual([(x.name, sorted(x.vals)) for x in query], [ ('t1', ['v1', 'v2']), ('t2', ['v2', 'v3'])]) query = self.query(D.items().alias('mtx')) self.assertEqual([(x.name, sorted(x.mtx)) for x in query], [ ('t1', [['k1', 'v1'], ['k2', 'v2']]), ('t2', [['k2', 'v2'], ['k3', 'v3']])]) query = self.query(D.slice('k2', 'k3').alias('kz')) self.assertEqual([(x.name, x.kz) for x in query], [ ('t1', {'k2': 'v2'}), ('t2', {'k2': 'v2', 'k3': 'v3'})]) query = self.query(D.slice('k4').alias('kz')) self.assertEqual([(x.name, x.kz) for x in query], [ ('t1', {}), ('t2', {})]) query = self.query(D.exists('k3').alias('ke')) self.assertEqual([(x.name, x.ke) for x in query], [ ('t1', False), ('t2', True)]) query = self.query(D.defined('k3').alias('ke')) self.assertEqual([(x.name, x.ke) for x in query], [ ('t1', False), ('t2', True)]) query = self.query(D['k1'].alias('k1')) self.assertEqual([(x.name, x.k1) for x in query], [ ('t1', 'v1'), ('t2', None)]) query = self.query().where(D['k1'] == 'v1') self.assertEqual([x.name for x in query], ['t1']) def assertWhere(self, expr, names): query = HStoreModel.select().where(expr) self.assertEqual([x.name for x in query], names) def test_hstore_filtering(self): self.assertWhere(D == {'k1': 'v1', 'k2': 'v2'}, ['t1']) self.assertWhere(D == {'k2': 'v2'}, []) self.assertWhere(D.contains('k3'), ['t2']) self.assertWhere(D.contains(['k2', 'k3']), ['t2']) self.assertWhere(D.contains(['k2']), ['t1', 't2']) # test dict self.assertWhere(D.contains({'k2': 'v2', 'k3': 'v3'}), ['t2']) self.assertWhere(D.contains({'k2': 'v2'}), ['t1', 't2']) self.assertWhere(D.contains({'k2': 'v3'}), []) # test contains any. 
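# contains_any() corresponds to hstore's ?| ("any of these keys") operator;
# roughly: ... WHERE ("data" ?| ARRAY['k3', 'kx']).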
self.assertWhere(D.contains_any('k3', 'kx'), ['t2']) self.assertWhere(D.contains_any('k2', 'x', 'k3'), ['t1', 't2']) self.assertWhere(D.contains_any('x', 'kx', 'y'), []) def test_hstore_filter_functions(self): self.assertWhere(D.exists('k2') == True, ['t1', 't2']) self.assertWhere(D.exists('k3') == True, ['t2']) self.assertWhere(D.defined('k2') == True, ['t1', 't2']) self.assertWhere(D.defined('k3') == True, ['t2']) def test_hstore_update(self): rc = (HStoreModel .update(data=D.update(k4='v4')) .where(HStoreModel.name == 't1') .execute()) self.assertTrue(rc > 0) self.assertEqual(self.by_name('t1'), {'k1': 'v1', 'k2': 'v2', 'k4': 'v4'}) rc = (HStoreModel .update(data=D.update(k5='v5', k6='v6')) .where(HStoreModel.name == 't2') .execute()) self.assertTrue(rc > 0) self.assertEqual(self.by_name('t2'), {'k2': 'v2', 'k3': 'v3', 'k5': 'v5', 'k6': 'v6'}) HStoreModel.update(data=D.update(k2='vxxx')).execute() self.assertEqual([x.data for x in self.query(D)], [ {'k1': 'v1', 'k2': 'vxxx', 'k4': 'v4'}, {'k2': 'vxxx', 'k3': 'v3', 'k5': 'v5', 'k6': 'v6'}]) (HStoreModel .update(data=D.delete('k4')) .where(HStoreModel.name == 't1') .execute()) self.assertEqual(self.by_name('t1'), {'k1': 'v1', 'k2': 'vxxx'}) HStoreModel.update(data=D.delete('k5')).execute() self.assertEqual([x.data for x in self.query(D)], [ {'k1': 'v1', 'k2': 'vxxx'}, {'k2': 'vxxx', 'k3': 'v3', 'k6': 'v6'} ]) HStoreModel.update(data=D.delete('k1', 'k2')).execute() self.assertEqual([x.data for x in self.query(D)], [ {}, {'k3': 'v3', 'k6': 'v6'}]) class TestArrayField(ModelTestCase): database = db requires = [ArrayModel] def create_sample(self): return ArrayModel.create( tags=['alpha', 'beta', 'gamma', 'delta'], ints=[[1, 2], [3, 4], [5, 6]]) def test_index_expression(self): data = ( (['a', 'b', 'c'], []), (['b', 'c', 'd', 'e'], [])) am_ids = [] for tags, ints in data: am = ArrayModel.create(tags=tags, ints=ints) am_ids.append(am.id) last_tag = fn.array_upper(ArrayModel.tags, 1) query = ArrayModel.select(ArrayModel.tags[last_tag]).tuples() self.assertEqual(sorted([t for t, in query]), ['c', 'e']) q = ArrayModel.select().where(ArrayModel.tags[last_tag] < 'd') self.assertEqual([a.id for a in q], [am_ids[0]]) q = ArrayModel.select().where(ArrayModel.tags[last_tag] > 'd') self.assertEqual([a.id for a in q], [am_ids[1]]) def test_hashable_objectslice(self): ArrayModel.create(tags=[], ints=[[0, 1], [2, 3]]) ArrayModel.create(tags=[], ints=[[4, 5], [6, 7]]) n = (ArrayModel .update({ArrayModel.ints[0][0]: ArrayModel.ints[0][0] + 1}) .execute()) self.assertEqual(n, 2) am1, am2 = ArrayModel.select().order_by(ArrayModel.id) self.assertEqual(am1.ints, [[1, 1], [2, 3]]) self.assertEqual(am2.ints, [[5, 5], [6, 7]]) def test_array_get_set(self): am = self.create_sample() am_db = ArrayModel.get(ArrayModel.id == am.id) self.assertEqual(am_db.tags, ['alpha', 'beta', 'gamma', 'delta']) self.assertEqual(am_db.ints, [[1, 2], [3, 4], [5, 6]]) def test_array_equality(self): am1 = ArrayModel.create(tags=['t1'], ints=[[1, 2]]) am2 = ArrayModel.create(tags=['t2'], ints=[[3, 4]]) obj = ArrayModel.get(ArrayModel.tags == ['t1']) self.assertEqual(obj.id, am1.id) self.assertEqual(obj.tags, ['t1']) obj = ArrayModel.get(ArrayModel.ints == [[3, 4]]) self.assertEqual(obj.id, am2.id) obj = ArrayModel.get(ArrayModel.tags != ['t1']) self.assertEqual(obj.id, am2.id) def test_array_db_value(self): am = ArrayModel.create(tags=('foo', 'bar'), ints=[]) am_db = ArrayModel.get(ArrayModel.id == am.id) self.assertEqual(am_db.tags, ['foo', 'bar']) def test_array_search(self): def 
assertAM(where, *instances): query = (ArrayModel .select() .where(where) .order_by(ArrayModel.id)) self.assertEqual([x.id for x in query], [x.id for x in instances]) am = self.create_sample() am2 = ArrayModel.create(tags=['alpha', 'beta'], ints=[[1, 1]]) am3 = ArrayModel.create(tags=['delta'], ints=[[3, 4]]) am4 = ArrayModel.create(tags=['中文'], ints=[[3, 4]]) am5 = ArrayModel.create(tags=['中文', '汉语'], ints=[[3, 4]]) AM = ArrayModel T = AM.tags assertAM((Value('beta') == fn.ANY(T)), am, am2) assertAM((Value('delta') == fn.Any(T)), am, am3) assertAM(Value('omega') == fn.Any(T)) # Check the contains operator. assertAM(SQL("tags::text[] @> ARRAY['beta']"), am, am2) # Use the nicer API. assertAM(T.contains('beta'), am, am2) assertAM(T.contains('omega', 'delta')) assertAM(T.contains('汉语'), am5) assertAM(T.contains('alpha', 'delta'), am) assertAM(T.contained_by('alpha', 'beta', 'delta'), am2, am3) assertAM(T.contained_by('alpha', 'beta', 'gamma', 'delta'), am, am2, am3) # Check for any. assertAM(T.contains_any('beta'), am, am2) assertAM(T.contains_any('中文'), am4, am5) assertAM(T.contains_any('omega', 'delta'), am, am3) assertAM(T.contains_any('alpha', 'delta'), am, am2, am3) def test_array_index_slice(self): self.create_sample() AM = ArrayModel I, T = AM.ints, AM.tags row = AM.select(T[1].alias('arrtags')).dicts().get() self.assertEqual(row['arrtags'], 'beta') row = AM.select(T[2:4].alias('foo')).dicts().get() self.assertEqual(row['foo'], ['gamma', 'delta']) row = AM.select(I[1][1].alias('ints')).dicts().get() self.assertEqual(row['ints'], 4) row = AM.select(I[1:2][0].alias('ints')).dicts().get() self.assertEqual(row['ints'], [[3], [5]]) @requires_models(DecimalArray) def test_field_kwargs(self): vl1, vl2 = [Dc('3.1'), Dc('1.3')], [Dc('3.14'), Dc('1')] da1, da2 = [DecimalArray.create(values=vl) for vl in (vl1, vl2)] da1_db = DecimalArray.get(DecimalArray.id == da1.id) da2_db = DecimalArray.get(DecimalArray.id == da2.id) self.assertEqual(da1_db.values, [Dc('3.1'), Dc('1.3')]) self.assertEqual(da2_db.values, [Dc('3.1'), Dc('1.0')]) class TestArrayFieldConvertValues(ModelTestCase): database = db requires = [ArrayTSModel] def dt(self, day, hour=0, minute=0, second=0): return datetime.datetime(2018, 1, day, hour, minute, second) def test_value_conversion(self): data = { 'k1': [self.dt(1), self.dt(2), self.dt(3)], 'k2': [], 'k3': [self.dt(4, 5, 6, 7), self.dt(10, 11, 12, 13)], } for key in sorted(data): ArrayTSModel.create(key=key, timestamps=data[key]) for key in sorted(data): am = ArrayTSModel.get(ArrayTSModel.key == key) self.assertEqual(am.timestamps, data[key]) # Perform lookup using timestamp values. 
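# (With convert_values=True, each datetime in the lookup is passed through
# the sub-field's db_value() conversion, so the comparison uses the same
# integer representation that TimestampField stored in the array.)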
ts = ArrayTSModel.get(ArrayTSModel.timestamps.contains(self.dt(3))) self.assertEqual(ts.key, 'k1') ts = ArrayTSModel.get( ArrayTSModel.timestamps.contains(self.dt(4, 5, 6, 7))) self.assertEqual(ts.key, 'k3') self.assertRaises(ArrayTSModel.DoesNotExist, ArrayTSModel.get, ArrayTSModel.timestamps.contains(self.dt(4, 5, 6))) def test_get_with_array_values(self): a1 = ArrayTSModel.create(key='k1', timestamps=[self.dt(1)]) a2 = ArrayTSModel.create(key='k2', timestamps=[self.dt(2), self.dt(3)]) query = (ArrayTSModel .select() .where(ArrayTSModel.timestamps == [self.dt(1)])) a1_db = query.get() self.assertEqual(a1_db.id, a1.id) query = (ArrayTSModel .select() .where(ArrayTSModel.timestamps == [self.dt(2), self.dt(3)])) a2_db = query.get() self.assertEqual(a2_db.id, a2.id) a1_db = ArrayTSModel.get(timestamps=[self.dt(1)]) self.assertEqual(a1_db.id, a1.id) a2_db = ArrayTSModel.get(timestamps=[self.dt(2), self.dt(3)]) self.assertEqual(a2_db.id, a2.id) class TestArrayUUIDField(ModelTestCase): database = db requires = [UUIDList] def setUp(self): super(TestArrayUUIDField, self).setUp() import psycopg2.extras psycopg2.extras.register_uuid() def test_array_of_uuids(self): u1, u2, u3, u4 = [uuid.uuid4() for _ in range(4)] a = UUIDList.create(key='a', id_list=[u1, u2, u3], id_list_native=[u1, u2, u3]) b = UUIDList.create(key='b', id_list=[u2, u3, u4], id_list_native=[u2, u3, u4]) a_db = UUIDList.get(UUIDList.key == 'a') b_db = UUIDList.get(UUIDList.key == 'b') self.assertEqual(a.id_list, [u1, u2, u3]) self.assertEqual(b.id_list, [u2, u3, u4]) self.assertEqual(a.id_list_native, [u1, u2, u3]) self.assertEqual(b.id_list_native, [u2, u3, u4]) class TestTSVectorField(ModelTestCase): database = db requires = [FTSModel] messages = [ 'A faith is a necessity to a man. Woe to him who believes in nothing.', 'All who call on God in true faith, earnestly from the heart, will ' 'certainly be heard, and will receive what they have asked and desired.', 'Be faithful in small things because it is in them that your strength lies.', 'Faith consists in believing when it is beyond the power of reason to believe.', 'Faith has to do with things that are not seen and hope with things that are not at hand.', ] def setUp(self): super(TestTSVectorField, self).setUp() for idx, message in enumerate(self.messages): FTSModel.create(title=str(idx), data=message, fts_data=fn.to_tsvector(message)) def assertMessages(self, expr, expected): query = FTSModel.select().where(expr).order_by(FTSModel.id) titles = [row.title for row in query] self.assertEqual(list(map(int, titles)), expected) def test_sql(self): query = FTSModel.select().where(Match(FTSModel.data, 'foo bar')) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."title", "t1"."data", "t1"."fts_data" ' 'FROM "fts_model" AS "t1" ' 'WHERE (to_tsvector("t1"."data") @@ to_tsquery(?))'), ['foo bar']) def test_match_function(self): D = FTSModel.data self.assertMessages(Match(D, 'heart'), [1]) self.assertMessages(Match(D, 'god'), [1]) self.assertMessages(Match(D, 'faith'), [0, 1, 2, 3, 4]) self.assertMessages(Match(D, 'thing'), [2, 4]) self.assertMessages(Match(D, 'faith & things'), [2, 4]) self.assertMessages(Match(D, 'god | things'), [1, 2, 4]) self.assertMessages(Match(D, 'god & things'), []) def test_tsvector_field(self): M = FTSModel.fts_data.match self.assertMessages(M('heart'), [1]) self.assertMessages(M('god'), [1]) self.assertMessages(M('faith'), [0, 1, 2, 3, 4]) self.assertMessages(M('thing'), [2, 4]) self.assertMessages(M('faith & things'), [2, 4]) self.assertMessages(M('god | 
things'), [1, 2, 4]) self.assertMessages(M('god & things'), []) # Using the plain parser we cannot express "OR", but individual term # match works like we expect and multi-term is AND-ed together. self.assertMessages(M('god | things', plain=True), []) self.assertMessages(M('god', plain=True), [1]) self.assertMessages(M('thing', plain=True), [2, 4]) self.assertMessages(M('faith things', plain=True), [2, 4]) def pg93(): with db: return db.connection().server_version >= 90300 def pg10(): with db: return db.connection().server_version >= 100000 def pg12(): with db: return db.connection().server_version >= 120000 JSON_SUPPORT = (JsonModel is not None) and pg93() @skip_unless(JSON_SUPPORT, 'json support unavailable') class TestJsonField(BaseJsonFieldTestCase, ModelTestCase): M = JsonModel N = Normal database = db requires = [JsonModel, Normal, JsonModelNull] def test_json_null(self): tjn = JsonModelNull.create(data=None) tj = JsonModelNull.create(data={'k1': 'v1'}) results = JsonModelNull.select().order_by(JsonModelNull.id) self.assertEqual( [tj_db.data for tj_db in results], [None, {'k1': 'v1'}]) query = JsonModelNull.select().where( JsonModelNull.data.is_null(True)) self.assertEqual(query.get(), tjn) @skip_unless(JSON_SUPPORT, 'json support unavailable') class TestBinaryJsonField(BaseBinaryJsonFieldTestCase, ModelTestCase): M = BJson N = Normal database = db requires = [BJson, Normal] @skip_unless(pg10(), 'jsonb remove support requires pg >= 10') def test_remove_data(self): BJson.delete().execute() # Clear out db. BJson.create(data={ 'k1': 'v1', 'k2': 'v2', 'k3': {'x1': 'z1', 'x2': 'z2'}, 'k4': [0, 1, 2]}) def assertData(exp_list, expected_data): query = BJson.select(BJson.data.remove(*exp_list)).tuples() data = query[:][0][0] self.assertEqual(data, expected_data) D = BJson.data assertData(['k3'], {'k1': 'v1', 'k2': 'v2', 'k4': [0, 1, 2]}) assertData(['k1', 'k3'], {'k2': 'v2', 'k4': [0, 1, 2]}) assertData(['k1', 'kx', 'ky', 'k3'], {'k2': 'v2', 'k4': [0, 1, 2]}) assertData(['k4', 'k3'], {'k1': 'v1', 'k2': 'v2'}) @skip_unless(pg10(), 'jsonb remove support requires pg >= 10') def test_json_contains_in_list(self): m1 = self.M.create(data=[{'k1': 'v1', 'k2': 'v2'}, {'a1': 'b1'}]) m2 = self.M.create(data=[{'k3': 'v3'}, {'k4': 'v4'}]) m3 = self.M.create(data=[{'k5': 'v5', 'k6': 'v6'}, {'k1': 'v1'}]) query = (self.M .select() .where(self.M.data.contains([{'k1': 'v1'}])) .order_by(self.M.id)) self.assertEqual([m.id for m in query], [m1.id, m3.id]) def test_integer_index_weirdness(self): self._create_test_data() def fails(): with self.database.atomic(): expr = BJson.data.contains_any(2, 8, 12) results = list(BJson.select().where( BJson.data.contains_any(2, 8, 12))) # Complains of a missing cast/conversion for the data-type? 
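# (Presumably: jsonb's ?| operator expects a text[] of keys, and the bare
# integers 2, 8 and 12 are not adapted to text, so the server rejects the
# query and the assertion below sees a ProgrammingError.)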
self.assertRaises(ProgrammingError, fails) @skip_unless(JSON_SUPPORT, 'json support unavailable') class TestBinaryJsonFieldBulkUpdate(ModelTestCase): database = db requires = [BJson] def test_binary_json_field_bulk_update(self): b1 = BJson.create(data={'k1': 'v1'}) b2 = BJson.create(data={'k2': 'v2'}) b1.data['k1'] = 'v1-x' b2.data['k2'] = 'v2-y' BJson.bulk_update([b1, b2], fields=[BJson.data]) b1_db = BJson.get(BJson.id == b1.id) b2_db = BJson.get(BJson.id == b2.id) self.assertEqual(b1_db.data, {'k1': 'v1-x'}) self.assertEqual(b2_db.data, {'k2': 'v2-y'}) @skip_unless(JSON_SUPPORT, 'json support unavailable') class TestJsonFieldRegressions(ModelTestCase): database = db requires = [JData] def test_json_field_concat(self): jd = JData.create( d1={'k1': {'x1': 'y1'}, 'k2': 'v2', 'k3': 'v3'}, d2={'k1': {'x2': 'y2'}, 'k2': 'v2-x', 'k4': 'v4'}) query = JData.select(JData.d1.concat(JData.d2).alias('data')) obj = query.get() self.assertEqual(obj.data, { 'k1': {'x2': 'y2'}, 'k2': 'v2-x', 'k3': 'v3', 'k4': 'v4'}) def test_introspect_bjson_field(self): introspector = Introspector.from_database(self.database) models = introspector.generate_models(table_names=['j_data']) JD = models['j_data'] self.assertEqual(JD._meta.sorted_field_names, ['id', 'd1', 'd2']) self.assertTrue(isinstance(JD.d1, BinaryJSONField)) self.assertTrue(isinstance(JD.d2, BinaryJSONField)) self.assertTrue(JD.d1.index) self.assertEqual(JD.d1.index_type, 'GIN') self.assertFalse(JD.d2.index) class TestIntervalField(ModelTestCase): database = db requires = [Event] def test_interval_field(self): e1 = Event.create(name='hour', duration=datetime.timedelta(hours=1)) e2 = Event.create(name='mix', duration=datetime.timedelta( days=1, hours=2, minutes=3, seconds=4)) events = [(e.name, e.duration) for e in Event.select().order_by(Event.duration)] self.assertEqual(events, [ ('hour', datetime.timedelta(hours=1)), ('mix', datetime.timedelta(days=1, hours=2, minutes=3, seconds=4)) ]) class TestIndexedField(BaseTestCase): def test_indexed_field_ddl(self): class FakeIndexedField(IndexedFieldMixin, CharField): default_index_type = 'GiST' class IndexedModel(TestModel): array_index = ArrayField(CharField) array_noindex= ArrayField(IntegerField, index=False) fake_index = FakeIndexedField() fake_index_with_type = FakeIndexedField(index_type='MAGIC') fake_noindex = FakeIndexedField(index=False) class Meta: database = db create_sql, _ = IndexedModel._schema._create_table(False).query() self.assertEqual(create_sql, ( 'CREATE TABLE "indexed_model" (' '"id" SERIAL NOT NULL PRIMARY KEY, ' '"array_index" VARCHAR(255)[] NOT NULL, ' '"array_noindex" INTEGER[] NOT NULL, ' '"fake_index" VARCHAR(255) NOT NULL, ' '"fake_index_with_type" VARCHAR(255) NOT NULL, ' '"fake_noindex" VARCHAR(255) NOT NULL)')) indexes = [idx.query()[0] for idx in IndexedModel._schema._create_indexes(False)] self.assertEqual(indexes, [ ('CREATE INDEX "indexed_model_array_index" ON "indexed_model" ' 'USING GIN ("array_index")'), ('CREATE INDEX "indexed_model_fake_index" ON "indexed_model" ' 'USING GiST ("fake_index")'), ('CREATE INDEX "indexed_model_fake_index_with_type" ' 'ON "indexed_model" ' 'USING MAGIC ("fake_index_with_type")')]) class IDAlways(TestModel): id = IdentityField(generate_always=True) data = CharField() class IDByDefault(TestModel): id = IdentityField() data = CharField() @skip_unless(pg10(), 'identity field requires pg >= 10') class TestIdentityField(ModelTestCase): database = db requires = [IDAlways, IDByDefault] def test_identity_field_always(self): iq = 
IDAlways.insert_many([(d,) for d in ('d1', 'd2', 'd3')]) curs = iq.execute() self.assertEqual(list(curs), [(1,), (2,), (3,)]) # Cannot specify id when generate always is true. with self.assertRaises(ProgrammingError): with self.database.atomic(): IDAlways.create(id=10, data='d10') query = IDAlways.select().order_by(IDAlways.id) self.assertEqual(list(query.tuples()), [ (1, 'd1'), (2, 'd2'), (3, 'd3')]) def test_identity_field_by_default(self): iq = IDByDefault.insert_many([(d,) for d in ('d1', 'd2', 'd3')]) curs = iq.execute() self.assertEqual(list(curs), [(1,), (2,), (3,)]) # Unlike GENERATED ALWAYS, an explicit id may be specified when the value # is generated by default. IDByDefault.create(id=10, data='d10') query = IDByDefault.select().order_by(IDByDefault.id) self.assertEqual(list(query.tuples()), [ (1, 'd1'), (2, 'd2'), (3, 'd3'), (10, 'd10')]) def test_schema(self): sql, params = IDAlways._schema._create_table(False).query() self.assertEqual(sql, ( 'CREATE TABLE "id_always" ("id" INT GENERATED ALWAYS AS IDENTITY ' 'NOT NULL PRIMARY KEY, "data" VARCHAR(255) NOT NULL)')) sql, params = IDByDefault._schema._create_table(False).query() self.assertEqual(sql, ( 'CREATE TABLE "id_by_default" ("id" INT GENERATED BY DEFAULT AS ' 'IDENTITY NOT NULL PRIMARY KEY, "data" VARCHAR(255) NOT NULL)')) class TestServerSide(ModelTestCase): database = db requires = [Register] def setUp(self): super(TestServerSide, self).setUp() with db.atomic(): for i in range(100): Register.create(value=i) def test_server_side_cursor(self): query = Register.select().order_by(Register.value) with self.assertQueryCount(1): data = [row.value for row in ServerSide(query)] self.assertEqual(data, list(range(100))) ss_query = ServerSide(query.limit(10), array_size=3) self.assertEqual([row.value for row in ss_query], list(range(10))) ss_query = ServerSide(query.where(SQL('1 = 0'))) self.assertEqual(list(ss_query), []) def test_lower_level_apis(self): query = Register.select(Register.value).order_by(Register.value) ssq = ServerSideQuery(query, array_size=10) curs_wrapper = ssq._execute(self.database) curs = curs_wrapper.cursor self.assertTrue(isinstance(curs, FetchManyCursor)) self.assertEqual(curs.fetchone(), (0,)) self.assertEqual(curs.fetchone(), (1,)) curs.close() class KX(TestModel): key = CharField(unique=True) value = IntegerField() class TestAutocommitIntegration(ModelTestCase): database = db requires = [KX] def setUp(self): super(TestAutocommitIntegration, self).setUp() with self.database.atomic(): kx1 = KX.create(key='k1', value=1) def force_integrity_error(self): # Force an integrity error, then verify that the current # transaction has been aborted. self.assertRaises(IntegrityError, KX.create, key='k1', value=10) def test_autocommit_default(self): kx2 = KX.create(key='k2', value=2) # Will be committed. self.assertTrue(kx2.id > 0) self.force_integrity_error() self.assertEqual(KX.select().count(), 2) self.assertEqual([(kx.key, kx.value) for kx in KX.select().order_by(KX.key)], [('k1', 1), ('k2', 2)]) def test_autocommit_disabled(self): with self.database.manual_commit(): self.database.begin() kx2 = KX.create(key='k2', value=2) # Not committed. self.assertTrue(kx2.id > 0) # Yes, we have a primary key.
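# (The INSERT executed inside the still-open transaction, so we received a
# primary key even though nothing is committed; the rollback() below
# discards the row, although the sequence value itself does not roll back.)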
self.force_integrity_error() self.database.rollback() self.assertEqual(KX.select().count(), 1) kx1_db = KX.get(KX.key == 'k1') self.assertEqual(kx1_db.value, 1) def test_atomic_block(self): with self.database.atomic() as txn: kx2 = KX.create(key='k2', value=2) self.assertTrue(kx2.id > 0) self.force_integrity_error() txn.rollback(False) self.assertEqual(KX.select().count(), 1) kx1_db = KX.get(KX.key == 'k1') self.assertEqual(kx1_db.value, 1) def test_atomic_block_exception(self): with self.assertRaises(IntegrityError): with self.database.atomic(): KX.create(key='k2', value=2) KX.create(key='k1', value=10) self.assertEqual(KX.select().count(), 1) class TestPostgresIsolationLevel(DatabaseTestCase): database = db_loader('postgres', isolation_level=3) # SERIALIZABLE. def test_isolation_level(self): conn = self.database.connection() self.assertEqual(conn.isolation_level, 3) conn.set_isolation_level(2) self.assertEqual(conn.isolation_level, 2) self.database.close() conn = self.database.connection() self.assertEqual(conn.isolation_level, 3) @skip_unless(pg12(), 'cte materialization requires pg >= 12') class TestPostgresCTEMaterialization(ModelTestCase): database = db requires = [Register] def test_postgres_cte_materialization(self): Register.insert_many([(i,) for i in (1, 2, 3)]).execute() for materialized in (None, False, True): cte = Register.select().cte('t', materialized=materialized) query = (cte .select_from(cte.c.value) .where(cte.c.value != 2) .order_by(cte.c.value)) self.assertEqual([r.value for r in query], [1, 3]) @skip_unless(pg93(), 'lateral join requires pg >= 9.3') class TestPostgresLateralJoin(ModelTestCase): database = db test_data = ( ('a', (('a1', 1), ('a2', 2), ('a10', 10))), ('b', (('b3', 3), ('b4', 4), ('b7', 7))), ('c', ())) ts = functools.partial(datetime.datetime, 2019, 1) def create_data(self): with self.database.atomic(): for username, tweets in self.test_data: user = User.create(username=username) for c, d in tweets: Tweet.create(user=user, content=c, timestamp=self.ts(d)) @requires_models(User, Tweet) def test_lateral_top_n(self): self.create_data() subq = (Tweet .select(Tweet.content, Tweet.timestamp) .where(Tweet.user == User.id) .order_by(Tweet.timestamp.desc()) .limit(2)) query = (User .select(User, subq.c.content) .join(subq, JOIN.LEFT_LATERAL) .order_by(subq.c.timestamp.desc(nulls='last'))) results = [(u.username, u.content) for u in query] self.assertEqual(results, [ ('a', 'a10'), ('b', 'b7'), ('b', 'b4'), ('a', 'a2'), ('c', None)]) query = (Tweet .select(User.username, subq.c.content) .from_(User) .join(subq, JOIN.LEFT_LATERAL) .order_by(User.username, subq.c.timestamp)) results = [(t.username, t.content) for t in query] self.assertEqual(results, [ ('a', 'a2'), ('a', 'a10'), ('b', 'b4'), ('b', 'b7'), ('c', None)]) @requires_models(User, Tweet) def test_lateral_helper(self): self.create_data() subq = (Tweet .select(Tweet.content, Tweet.timestamp) .where(Tweet.user == User.id) .order_by(Tweet.timestamp.desc()) .limit(2) .lateral()) query = (User .select(User, subq.c.content) .join(subq, on=True) .order_by(subq.c.timestamp.desc(nulls='last'))) with self.assertQueryCount(1): results = [(u.username, u.tweet.content) for u in query] self.assertEqual(results, [ ('a', 'a10'), ('b', 'b7'), ('b', 'b4'), ('a', 'a2')]) peewee-3.17.7/tests/postgres_helpers.py000066400000000000000000000330461470346076600202120ustar00rootroot00000000000000from peewee import Cast class BaseJsonFieldTestCase(object): # Subclasses must define these, as well as specifying requires[]. 
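# A minimal sketch of a concrete subclass (model names illustrative):
#
#   class TestJsonField(BaseJsonFieldTestCase, ModelTestCase):
#       M = JsonModel   # model whose "data" field is a JSON field.
#       N = Normal      # model with a plain TextField named "data".
#       database = db
#       requires = [JsonModel, Normal]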
M = None # Json model. N = None # "Normal" model. def test_json_field(self): data = {'k1': ['a1', 'a2'], 'k2': {'k3': 'v3'}} j = self.M.create(data=data) j_db = self.M.get(j._pk_expr()) self.assertEqual(j_db.data, data) def test_joining_on_json_key(self): values = [ {'foo': 'bar', 'baze': {'nugget': 'alpha'}}, {'foo': 'bar', 'baze': {'nugget': 'beta'}}, {'herp': 'derp', 'baze': {'nugget': 'epsilon'}}, {'herp': 'derp', 'bar': {'nuggie': 'alpha'}}, ] for data in values: self.M.create(data=data) for value in ['alpha', 'beta', 'gamma', 'delta']: self.N.create(data=value) query = (self.M .select() .join(self.N, on=( self.N.data == self.M.data['baze']['nugget'])) .order_by(self.M.id)) results = [jm.data for jm in query] self.assertEqual(results, [ {'foo': 'bar', 'baze': {'nugget': 'alpha'}}, {'foo': 'bar', 'baze': {'nugget': 'beta'}}, ]) def test_json_lookup_methods(self): data = { 'gp1': { 'p1': {'c1': 'foo'}, 'p2': {'c2': 'bar'}}, 'gp2': {}} j = self.M.create(data=data) def assertLookup(lookup, expected): query = (self.M .select(lookup) .where(j._pk_expr()) .dicts()) self.assertEqual(query.get(), expected) expr = self.M.data['gp1']['p1'] assertLookup(expr.alias('p1'), {'p1': '{"c1": "foo"}'}) assertLookup(expr.as_json().alias('p2'), {'p2': {'c1': 'foo'}}) expr = self.M.data['gp1']['p1']['c1'] assertLookup(expr.alias('c1'), {'c1': 'foo'}) assertLookup(expr.as_json().alias('c2'), {'c2': 'foo'}) j.data = [ {'i1': ['foo', 'bar', 'baz']}, ['nugget', 'mickey']] j.save() expr = self.M.data[0]['i1'] assertLookup(expr.alias('i1'), {'i1': '["foo", "bar", "baz"]'}) assertLookup(expr.as_json().alias('i2'), {'i2': ['foo', 'bar', 'baz']}) expr = self.M.data[1][1] assertLookup(expr.alias('l1'), {'l1': 'mickey'}) assertLookup(expr.as_json().alias('l2'), {'l2': 'mickey'}) def test_json_cast(self): self.M.create(data={'foo': {'bar': 3}}) self.M.create(data={'foo': {'bar': 5}}) query = (self.M .select(Cast(self.M.data['foo']['bar'], 'float') * 1.5) .order_by(self.M.id) .tuples()) self.assertEqual(query[:], [(4.5,), (7.5,)]) def test_json_path(self): data = { 'foo': { 'baz': { 'bar': ['i1', 'i2', 'i3'], 'baze': ['j1', 'j2'], }}} j = self.M.create(data=data) def assertPath(path, expected): query = (self.M .select(path) .where(j._pk_expr()) .dicts()) self.assertEqual(query.get(), expected) expr = self.M.data.path('foo', 'baz', 'bar') assertPath(expr.alias('p1'), {'p1': '["i1", "i2", "i3"]'}) assertPath(expr.as_json().alias('p2'), {'p2': ['i1', 'i2', 'i3']}) expr = self.M.data.path('foo', 'baz', 'baze', 1) assertPath(expr.alias('p1'), {'p1': 'j2'}) assertPath(expr.as_json().alias('p2'), {'p2': 'j2'}) def test_json_field_sql(self): j = (self.M .select() .where(self.M.data == {'foo': 'bar'})) table = self.M._meta.table_name self.assertSQL(j, ( 'SELECT "t1"."id", "t1"."data" ' 'FROM "%s" AS "t1" WHERE ("t1"."data" = CAST(? AS %s))') % (table, self.M.data._json_datatype)) j = (self.M .select() .where(self.M.data['foo'] == 'bar')) self.assertSQL(j, ( 'SELECT "t1"."id", "t1"."data" ' 'FROM "%s" AS "t1" WHERE ("t1"."data"->>? 
= ?)') % table) def assertItems(self, where, *items): query = (self.M .select() .where(where) .order_by(self.M.id)) self.assertEqual( [item.id for item in query], [item.id for item in items]) def test_lookup(self): t1 = self.M.create(data={'k1': 'v1', 'k2': {'k3': 'v3'}}) t2 = self.M.create(data={'k1': 'x1', 'k2': {'k3': 'x3'}}) t3 = self.M.create(data={'k1': 'v1', 'j2': {'j3': 'v3'}}) self.assertItems((self.M.data['k2']['k3'] == 'v3'), t1) self.assertItems((self.M.data['k1'] == 'v1'), t1, t3) # Valid key, no matching value. self.assertItems((self.M.data['k2'] == 'v1')) # Non-existent key. self.assertItems((self.M.data['not-here'] == 'v1')) # Non-existent nested key. self.assertItems((self.M.data['not-here']['xxx'] == 'v1')) self.assertItems((self.M.data['k2']['xxx'] == 'v1')) def test_json_bulk_update_top_level_list(self): m1 = self.M.create(data=['a', 'b', 'c']) m2 = self.M.create(data=['d', 'e', 'f']) m1.data = ['g', 'h', 'i'] m2.data = ['j', 'k', 'l'] self.M.bulk_update([m1, m2], fields=[self.M.data]) m1_db = self.M.get(self.M.id == m1.id) m2_db = self.M.get(self.M.id == m2.id) self.assertEqual(m1_db.data, ['g', 'h', 'i']) self.assertEqual(m2_db.data, ['j', 'k', 'l']) # Contains additional test-cases suitable for the JSONB data-type. class BaseBinaryJsonFieldTestCase(BaseJsonFieldTestCase): def _create_test_data(self): data = [ {'k1': 'v1', 'k2': 'v2', 'k3': {'k4': ['i1', 'i2'], 'k5': {}}}, ['a1', 'a2', {'a3': 'a4'}], {'a1': 'x1', 'a2': 'x2', 'k4': ['i1', 'i2']}, list(range(10)), list(range(5, 15)), ['k4', 'k1']] self._bjson_objects = [] for json_value in data: self._bjson_objects.append(self.M.create(data=json_value)) def assertObjects(self, expr, *indexes): query = (self.M .select() .where(expr) .order_by(self.M.id)) self.assertEqual( [bjson.data for bjson in query], [self._bjson_objects[index].data for index in indexes]) def test_contained_by(self): self._create_test_data() item1 = ['a1', 'a2', {'a3': 'a4'}, 'a5'] self.assertObjects(self.M.data.contained_by(item1), 1) item2 = {'a1': 'x1', 'a2': 'x2', 'k4': ['i0', 'i1', 'i2'], 'x': 'y'} self.assertObjects(self.M.data.contained_by(item2), 2) def test_equality(self): data = {'k1': ['a1', 'a2'], 'k2': {'k3': 'v3'}} j = self.M.create(data=data) j_db = self.M.get(self.M.data == data) self.assertEqual(j.id, j_db.id) def test_subscript_contains(self): self._create_test_data() D = self.M.data # 'k3' is mapped to another dictionary {'k4': [...]}. Therefore, # 'k3' is said to contain 'k4', but *not* ['k4'] or ['k4', 'k5']. self.assertObjects(D['k3'].contains('k4'), 0) self.assertObjects(D['k3'].contains(['k4'])) self.assertObjects(D['k3'].contains(['k4', 'k5'])) # We can check for the keys this way, though. self.assertObjects(D['k3'].contains_all('k4', 'k5'), 0) self.assertObjects(D['k3'].contains_any('k4', 'kx'), 0) # However, in test object index=2, 'k4' can be said to contain # both 'i1' and ['i1']. self.assertObjects(D['k4'].contains('i1'), 2) self.assertObjects(D['k4'].contains(['i1']), 2) # Interestingly, we can also specify the list of contained values # out-of-order. self.assertObjects(D['k4'].contains(['i2', 'i1']), 2) # We can test whether an object contains another JSON object fragment. self.assertObjects(D['k3'].contains({'k4': ['i1']}), 0) self.assertObjects(D['k3'].contains({'k4': ['i1', 'i2']}), 0) # Check multiple levels of nesting / containment.
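# (Each subscript adds a -> step, so D['k3']['k4'] addresses the nested
# array directly and the containment checks below apply to it.)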
self.assertObjects(D['k3']['k4'].contains('i2'), 0) self.assertObjects(D['k3']['k4'].contains_all('i1', 'i2'), 0) self.assertObjects(D['k3']['k4'].contains_all('i0', 'i2')) self.assertObjects(D['k4'].contains_all('i1', 'i2'), 2) # Check array indexes. self.assertObjects(D[2].contains('a3'), 1) self.assertObjects(D[0].contains('a1'), 1) self.assertObjects(D[0].contains('k1')) def test_contains(self): self._create_test_data() D = self.M.data # Test for keys. 'k4' is both an object key and an array element. self.assertObjects(D.contains('k4'), 2, 5) self.assertObjects(D.contains('a1'), 1, 2) self.assertObjects(D.contains('k3'), 0) # We can test for multiple top-level keys/indexes. self.assertObjects(D.contains_all('a1', 'a2'), 1, 2) # If we test for both with .contains(), though, it is treated as # an object match. self.assertObjects(D.contains(['a1', 'a2']), 1) # Check numbers. self.assertObjects(D.contains([2, 5, 6, 7, 8]), 3) self.assertObjects(D.contains([5, 6, 7, 8, 9]), 3, 4) # We can check for partial objects. self.assertObjects(D.contains({'a1': 'x1'}), 2) self.assertObjects(D.contains({'k3': {'k4': []}}), 0) self.assertObjects(D.contains([{'a3': 'a4'}]), 1) # Check for simple keys. self.assertObjects(D.contains('a1'), 1, 2) self.assertObjects(D.contains('k3'), 0) # Contains any. self.assertObjects(D.contains_any('a1', 'k1'), 0, 1, 2, 5) self.assertObjects(D.contains_any('k4', 'xx', 'yy', '2'), 2, 5) self.assertObjects(D.contains_any('i1', 'i2', 'a3')) # Contains all. self.assertObjects(D.contains_all('k1', 'k2', 'k3'), 0) self.assertObjects(D.contains_all('k1', 'k2', 'k3', 'k4')) # Has key. self.assertObjects(D.has_key('a1'), 1, 2) self.assertObjects(D.has_key('k1'), 0, 5) self.assertObjects(D.has_key('k4'), 2, 5) self.assertObjects(D.has_key('a3')) self.assertObjects(D['k3'].has_key('k4'), 0) self.assertObjects(D['k4'].has_key('i2'), 2) def test_concat_data(self): self.M.delete().execute() self.M.create(data={'k1': {'x1': 'y1'}, 'k2': 'v2', 'k3': [0, 1]}) def assertData(exp, expected_data): query = self.M.select(self.M.data.concat(exp)).tuples() data = query[:][0][0] self.assertEqual(data, expected_data) D = self.M.data assertData({'k2': 'v2-x', 'k1': {'x2': 'y2'}, 'k4': 'v4'}, { 'k1': {'x2': 'y2'}, # NB: not merged/patched!! 'k2': 'v2-x', 'k3': [0, 1], 'k4': 'v4'}) assertData({'k1': 'v1-x', 'k3': [2, 3, 4], 'k4': {'x4': 'y4'}}, { 'k1': 'v1-x', 'k2': 'v2', 'k3': [2, 3, 4], 'k4': {'x4': 'y4'}}) # We can update sub-keys. query = self.M.select(D['k1'].concat({'x2': 'y2', 'x3': 'y3'})) self.assertEqual(query.tuples()[0][0], {'x1': 'y1', 'x2': 'y2', 'x3': 'y3'}) # Concat can be used to extend JSON arrays. 
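# (jsonb || appends when both operands are arrays; for objects, as the NB
# above shows, it replaces matching top-level keys instead of deep-merging.)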
query = self.M.select(D['k3'].concat([2, 3])) self.assertEqual(query.tuples()[0][0], [0, 1, 2, 3]) def test_update_data_inplace(self): self.M.delete().execute() b = self.M.create(data={'k1': {'x1': 'y1'}, 'k2': 'v2'}) self.M.update(data=self.M.data.concat({ 'k1': {'x2': 'y2'}, 'k3': 'v3'})).execute() b2 = self.M.get(self.M.id == b.id) self.assertEqual(b2.data, {'k1': {'x2': 'y2'}, 'k2': 'v2', 'k3': 'v3'}) def test_selecting(self): self._create_test_data() query = (self.M .select(self.M.data['k3']['k4'].as_json().alias('k3k4')) .order_by(self.M.id)) k3k4_data = [obj.k3k4 for obj in query] self.assertEqual(k3k4_data, [ ['i1', 'i2'], None, None, None, None, None]) query = (self.M .select( self.M.data[0].as_json(), self.M.data[2].as_json()) .order_by(self.M.id) .tuples()) self.assertEqual(list(query), [ (None, None), ('a1', {'a3': 'a4'}), (None, None), (0, 2), (5, 7), ('k4', None)]) def test_conflict_update(self): b1 = self.M.create(data={'k1': 'v1'}) iq = (self.M .insert(id=b1.id, data={'k1': 'v1-x'}) .on_conflict('update', conflict_target=[self.M.id], update={self.M.data: {'k1': 'v1-z'}})) b1_id_db = iq.execute() self.assertEqual(b1.id, b1_id_db) b1_db = self.M.get(self.M.id == b1.id) self.assertEqual(b1_db.data, {'k1': 'v1-z'}) iq = (self.M .insert(id=b1.id, data={'k1': 'v1-y'}) .on_conflict('update', conflict_target=[self.M.id], update={'data': {'k1': 'v1-w'}})) b1_id_db = iq.execute() self.assertEqual(b1.id, b1_id_db) b1_db = self.M.get(self.M.id == b1.id) self.assertEqual(b1_db.data, {'k1': 'v1-w'}) self.assertEqual(self.M.select().count(), 1) peewee-3.17.7/tests/prefetch_tests.py000066400000000000000000000513501470346076600176420ustar00rootroot00000000000000from peewee import * from .base import get_in_memory_db from .base import requires_models from .base import ModelTestCase from .base import TestModel class Person(TestModel): name = TextField() class Relationship(TestModel): from_person = ForeignKeyField(Person, backref='relationships') to_person = ForeignKeyField(Person, backref='related_to') class Note(TestModel): person = ForeignKeyField(Person, backref='notes') content = TextField() class NoteItem(TestModel): note = ForeignKeyField(Note, backref='items') content = TextField() class Like(TestModel): person = ForeignKeyField(Person, backref='likes') note = ForeignKeyField(Note, backref='likes') class Flag(TestModel): note = ForeignKeyField(Note, backref='flags') is_spam = BooleanField() class Category(TestModel): name = TextField() parent = ForeignKeyField('self', backref='children', null=True) class Package(TestModel): barcode = TextField(unique=True) class PackageItem(TestModel): name = TextField() package = ForeignKeyField(Package, backref='items', field=Package.barcode) class TestPrefetch(ModelTestCase): database = get_in_memory_db() requires = [Person, Note, NoteItem, Like, Flag] def create_test_data(self): data = { 'huey': ( ('meow', ('meow-1', 'meow-2', 'meow-3')), ('purr', ()), ('hiss', ('hiss-1', 'hiss-2'))), 'mickey': ( ('woof', ()), ('bark', ('bark-1', 'bark-2'))), 'zaizee': (), } for name, notes in sorted(data.items()): person = Person.create(name=name) for note, items in notes: note = Note.create(person=person, content=note) for item in items: NoteItem.create(note=note, content=item) Flag.create(note=Note.get(Note.content == 'purr'), is_spam=True) Flag.create(note=Note.get(Note.content == 'woof'), is_spam=True) Like.create(note=Note.get(Note.content == 'meow'), person=Person.get(Person.name == 'mickey')) Like.create(note=Note.get(Note.content == 'woof'),
person=Person.get(Person.name == 'huey')) def setUp(self): super(TestPrefetch, self).setUp() self.create_test_data() def accumulate_results(self, query, sort_items=False): accum = [] for person in query: notes = [] for note in person.notes: items = [] for item in note.items: items.append(item.content) if sort_items: items.sort() notes.append((note.content, items)) if sort_items: notes.sort() accum.append((person.name, notes)) return accum def test_prefetch_simple(self): for pt in PREFETCH_TYPE.values(): with self.assertQueryCount(3): people = Person.select().order_by(Person.name) query = people.prefetch(Note, NoteItem, prefetch_type=pt) accum = self.accumulate_results(query, sort_items=True) self.assertEqual(accum, [ ('huey', [ ('hiss', ['hiss-1', 'hiss-2']), ('meow', ['meow-1', 'meow-2', 'meow-3']), ('purr', [])]), ('mickey', [ ('bark', ['bark-1', 'bark-2']), ('woof', [])]), ('zaizee', []), ]) def test_prefetch_filter(self): for pt in PREFETCH_TYPE.values(): with self.assertQueryCount(3): people = Person.select().order_by(Person.name) notes = (Note .select() .where(Note.content.not_in(('hiss', 'meow', 'woof'))) .order_by(Note.content.desc())) items = NoteItem.select().where( ~NoteItem.content.endswith('-2')) query = prefetch(people, notes, items, prefetch_type=pt) self.assertEqual(self.accumulate_results(query), [ ('huey', [('purr', [])]), ('mickey', [('bark', ['bark-1'])]), ('zaizee', []), ]) def test_prefetch_reverse(self): for pt in PREFETCH_TYPE.values(): with self.assertQueryCount(2): people = Person.select().order_by(Person.name) notes = Note.select().order_by(Note.content) query = prefetch(notes, people, prefetch_type=pt) accum = [(note.content, note.person.name) for note in query] self.assertEqual(accum, [ ('bark', 'mickey'), ('hiss', 'huey'), ('meow', 'huey'), ('purr', 'huey'), ('woof', 'mickey')]) def test_prefetch_reverse_with_parent_join(self): for pt in PREFETCH_TYPE.values(): with self.assertQueryCount(2): notes = (Note .select(Note, Person) .join(Person) .order_by(Note.content)) items = NoteItem.select().order_by(NoteItem.content.desc()) query = prefetch(notes, items, prefetch_type=pt) accum = [(note.person.name, note.content, [item.content for item in note.items]) for note in query] self.assertEqual(accum, [ ('mickey', 'bark', ['bark-2', 'bark-1']), ('huey', 'hiss', ['hiss-2', 'hiss-1']), ('huey', 'meow', ['meow-3', 'meow-2', 'meow-1']), ('huey', 'purr', []), ('mickey', 'woof', []), ]) def test_prefetch_multi_depth(self): for pt in PREFETCH_TYPE.values(): people = Person.select().order_by(Person.name) notes = Note.select().order_by(Note.content) items = NoteItem.select().order_by(NoteItem.content) flags = Flag.select().order_by(Flag.id) LikePerson = Person.alias('lp') likes = (Like .select(Like, LikePerson.name) .join(LikePerson, on=(Like.person == LikePerson.id))) # Five queries: # - person (outermost query) # - notes for people # - items for notes # - flags for notes # - likes for notes (includes join to person) with self.assertQueryCount(5): query = prefetch(people, notes, items, flags, likes, prefetch_type=pt) accum = [] for person in query: notes = [] for note in person.notes: items = [item.content for item in note.items] likes = [like.person.name for like in note.likes] flags = [flag.is_spam for flag in note.flags] notes.append((note.content, items, likes, flags)) accum.append((person.name, notes)) self.assertEqual(accum, [ ('huey', [ ('hiss', ['hiss-1', 'hiss-2'], [], []), ('meow', ['meow-1', 'meow-2', 'meow-3'], ['mickey'], []), ('purr', [], [], [True])]), 
('mickey', [ ('bark', ['bark-1', 'bark-2'], [], []), ('woof', [], ['huey'], [True])]), (u'zaizee', []), ]) def test_prefetch_multi_depth_no_join(self): for pt in PREFETCH_TYPE.values(): LikePerson = Person.alias() people = Person.select().order_by(Person.name) notes = Note.select().order_by(Note.content) items = NoteItem.select().order_by(NoteItem.content) flags = Flag.select().order_by(Flag.id) with self.assertQueryCount(6): query = prefetch(people, notes, items, flags, Like, LikePerson, prefetch_type=pt) accum = [] for person in query: notes = [] for note in person.notes: items = [item.content for item in note.items] likes = [like.person.name for like in note.likes] flags = [flag.is_spam for flag in note.flags] notes.append((note.content, items, likes, flags)) accum.append((person.name, notes)) self.assertEqual(accum, [ ('huey', [ ('hiss', ['hiss-1', 'hiss-2'], [], []), ('meow', ['meow-1', 'meow-2', 'meow-3'], ['mickey'], []), ('purr', [], [], [True])]), ('mickey', [ ('bark', ['bark-1', 'bark-2'], [], []), ('woof', [], ['huey'], [True])]), (u'zaizee', []), ]) def test_prefetch_with_group_by(self): for pt in PREFETCH_TYPE.values(): people = (Person .select(Person, fn.COUNT(Note.id).alias('note_count')) .join(Note, JOIN.LEFT_OUTER) .group_by(Person) .order_by(Person.name)) notes = Note.select().order_by(Note.content) items = NoteItem.select().order_by(NoteItem.content) with self.assertQueryCount(3): query = prefetch(people, notes, items, prefetch_type=pt) self.assertEqual(self.accumulate_results(query), [ ('huey', [ ('hiss', ['hiss-1', 'hiss-2']), ('meow', ['meow-1', 'meow-2', 'meow-3']), ('purr', [])]), ('mickey', [ ('bark', ['bark-1', 'bark-2']), ('woof', [])]), ('zaizee', []), ]) huey, mickey, zaizee = query self.assertEqual(huey.note_count, 3) self.assertEqual(mickey.note_count, 2) self.assertEqual(zaizee.note_count, 0) @requires_models(Category) def test_prefetch_self_join(self): def cc(name, parent=None): return Category.create(name=name, parent=parent) root = cc('root') p1 = cc('p1', root) p2 = cc('p2', root) for p in (p1, p2): for i in range(2): cc('%s-%s' % (p.name, i + 1), p) for pt in PREFETCH_TYPE.values(): Child = Category.alias('child') with self.assertQueryCount(2): query = prefetch(Category.select().order_by(Category.id), Child, prefetch_type=pt) names_and_children = [ (cat.name, [child.name for child in cat.children]) for cat in query] self.assertEqual(names_and_children, [ ('root', ['p1', 'p2']), ('p1', ['p1-1', 'p1-2']), ('p2', ['p2-1', 'p2-2']), ('p1-1', []), ('p1-2', []), ('p2-1', []), ('p2-2', []), ]) @requires_models(Category) def test_prefetch_adjacency_list(self): def cc(name, parent=None): return Category.create(name=name, parent=parent) tree = ('root', ( ('n1', ( ('c11', ()), ('c12', ()))), ('n2', ( ('c21', ()), ('c22', ( ('g221', ()), ('g222', ()))), ('c23', ()), ('c24', ( ('g241', ()), ('g242', ()), ('g243', ()))))))) stack = [(None, tree)] while stack: parent, (name, children) = stack.pop() node = cc(name, parent) for child_tree in children: stack.insert(0, (node, child_tree)) for pt in PREFETCH_TYPE.values(): C = Category.alias('c') G = Category.alias('g') GG = Category.alias('gg') GGG = Category.alias('ggg') query = Category.select().where(Category.name == 'root') with self.assertQueryCount(5): pf = prefetch(query, C, (G, C), (GG, G), (GGG, GG), prefetch_type=pt) def gather(c): children = sorted([gather(ch) for ch in c.children]) return (c.name, tuple(children)) nodes = list(pf) self.assertEqual(len(nodes), 1) pf_tree = gather(nodes[0]) 
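# gather() reduces each node to a (name, sorted-children) tuple, which
# matches the (already sorted) `tree` literal, so deep equality holds.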
self.assertEqual(tree, pf_tree) def test_prefetch_specific_model(self): # Person -> Note # -> Like (has fks to both person and note) Like.create(note=Note.get(Note.content == 'woof'), person=Person.get(Person.name == 'zaizee')) NoteAlias = Note.alias('na') for pt in PREFETCH_TYPE.values(): with self.assertQueryCount(3): people = Person.select().order_by(Person.name) notes = Note.select().order_by(Note.content) likes = (Like .select(Like, NoteAlias.content) .join(NoteAlias, on=(Like.note == NoteAlias.id)) .order_by(NoteAlias.content)) query = prefetch(people, notes, (likes, Person), prefetch_type=pt) accum = [] for person in query: likes = [] notes = [] for note in person.notes: notes.append(note.content) for like in person.likes: likes.append(like.note.content) accum.append((person.name, notes, likes)) self.assertEqual(accum, [ ('huey', ['hiss', 'meow', 'purr'], ['woof']), ('mickey', ['bark', 'woof'], ['meow']), ('zaizee', [], ['woof']), ]) @requires_models(Relationship) def test_multiple_foreign_keys(self): self.database.pragma('foreign_keys', 0) Person.delete().execute() c, h, z = [Person.create(name=name) for name in ('charlie', 'huey', 'zaizee')] RC = lambda f, t: Relationship.create(from_person=f, to_person=t) r1 = RC(c, h) r2 = RC(c, z) r3 = RC(h, c) r4 = RC(z, c) def assertRelationships(attr, values): self.assertEqual(len(attr), len(values)) for relationship, value in zip(attr, values): self.assertEqual(relationship.__data__, value) for pt in PREFETCH_TYPE.values(): with self.assertQueryCount(2): people = Person.select().order_by(Person.name) relationships = Relationship.select().order_by(Relationship.id) query = prefetch(people, relationships, prefetch_type=pt) cp, hp, zp = list(query) assertRelationships(cp.relationships, [ {'id': r1.id, 'from_person': c.id, 'to_person': h.id}, {'id': r2.id, 'from_person': c.id, 'to_person': z.id}]) assertRelationships(cp.related_to, [ {'id': r3.id, 'from_person': h.id, 'to_person': c.id}, {'id': r4.id, 'from_person': z.id, 'to_person': c.id}]) assertRelationships(hp.relationships, [ {'id': r3.id, 'from_person': h.id, 'to_person': c.id}]) assertRelationships(hp.related_to, [ {'id': r1.id, 'from_person': c.id, 'to_person': h.id}]) assertRelationships(zp.relationships, [ {'id': r4.id, 'from_person': z.id, 'to_person': c.id}]) assertRelationships(zp.related_to, [ {'id': r2.id, 'from_person': c.id, 'to_person': z.id}]) with self.assertQueryCount(2): query = prefetch(relationships, people, prefetch_type=pt) accum = [] for row in query: accum.append((row.from_person.name, row.to_person.name)) self.assertEqual(accum, [ ('charlie', 'huey'), ('charlie', 'zaizee'), ('huey', 'charlie'), ('zaizee', 'charlie')]) m = Person.create(name='mickey') RC(h, m) def assertNames(p, ns): self.assertEqual([r.to_person.name for r in p.relationships], ns) # Use prefetch to go Person -> Relationship <- Person (PA). for pt in PREFETCH_TYPE.values(): with self.assertQueryCount(3): people = (Person .select() .where(Person.name != 'mickey') .order_by(Person.name)) relationships = Relationship.select().order_by(Relationship.id) PA = Person.alias() query = prefetch(people, relationships, PA, prefetch_type=pt) cp, hp, zp = list(query) assertNames(cp, ['huey', 'zaizee']) assertNames(hp, ['charlie', 'mickey']) assertNames(zp, ['charlie']) # Use prefetch to go Person -> Relationship+Person (PA).
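# (Joining PA directly into the relationship query folds what was a third
# prefetch query above into the second one, hence assertQueryCount(2).)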
with self.assertQueryCount(2): people = (Person .select() .where(Person.name != 'mickey') .order_by(Person.name)) rels = (Relationship .select(Relationship, PA) .join(PA, on=(Relationship.to_person == PA.id)) .order_by(Relationship.id)) query = prefetch(people, rels, prefetch_type=pt) cp, hp, zp = list(query) assertNames(cp, ['huey', 'zaizee']) assertNames(hp, ['charlie', 'mickey']) assertNames(zp, ['charlie']) def test_prefetch_through_manytomany(self): Like.create(note=Note.get(Note.content == 'meow'), person=Person.get(Person.name == 'zaizee')) Like.create(note=Note.get(Note.content == 'woof'), person=Person.get(Person.name == 'zaizee')) for pt in PREFETCH_TYPE.values(): with self.assertQueryCount(3): people = Person.select().order_by(Person.name) notes = Note.select().order_by(Note.content) likes = Like.select().order_by(Like.id) query = prefetch(people, likes, notes, prefetch_type=pt) accum = [] for person in query: liked_notes = [] for like in person.likes: liked_notes.append(like.note.content) accum.append((person.name, liked_notes)) self.assertEqual(accum, [ ('huey', ['woof']), ('mickey', ['meow']), ('zaizee', ['meow', 'woof']), ]) @requires_models(Package, PackageItem) def test_prefetch_non_pk_fk(self): data = ( ('101', ('a', 'b')), ('102', ('a', 'b')), ('103', ()), ('104', ('a', 'b', 'c', 'd', 'e')), ) for barcode, items in data: Package.create(barcode=barcode) for item in items: PackageItem.create(package=barcode, name=item) for pt in PREFETCH_TYPE.values(): packages = Package.select().order_by(Package.barcode) items = PackageItem.select().order_by(PackageItem.name) with self.assertQueryCount(2): query = prefetch(packages, items, prefetch_type=pt) for package, (barcode, items) in zip(query, data): self.assertEqual(package.barcode, barcode) self.assertEqual([item.name for item in package.items], list(items)) def test_prefetch_mark_dirty_regression(self): for pt in PREFETCH_TYPE.values(): people = Person.select().order_by(Person.name) query = people.prefetch(Note, NoteItem, prefetch_type=pt) for person in query: self.assertEqual(person.dirty_fields, []) for note in person.notes: self.assertEqual(note.dirty_fields, []) for item in note.items: self.assertEqual(item.dirty_fields, []) peewee-3.17.7/tests/psycopg3_ext.py000066400000000000000000000472211470346076600172510ustar00rootroot00000000000000#coding:utf-8 import datetime import uuid from decimal import Decimal as Dc import psycopg # Failure to do so will skip these tests. 
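# (Here "psycopg" is psycopg3. These cases mirror tests/postgres.py but run
# against Psycopg3Database from playhouse.psycopg3_ext.)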
from peewee import * from playhouse.psycopg3_ext import * from .base import DatabaseTestCase from .base import ModelTestCase from .base import TestModel from .base import db_loader from .base import requires_models from .postgres_helpers import BaseBinaryJsonFieldTestCase from .postgres_helpers import BaseJsonFieldTestCase db = db_loader('postgres', db_class=Psycopg3Database) class ArrayModel(TestModel): tags = ArrayField(CharField) ints = ArrayField(IntegerField, dimensions=2) class UUIDList(TestModel): key = CharField() id_list = ArrayField(BinaryUUIDField, convert_values=True, index=False) id_list_native = ArrayField(UUIDField, index=False) class ArrayTSModel(TestModel): key = CharField(max_length=100, primary_key=True) timestamps = ArrayField(TimestampField, convert_values=True) class DecimalArray(TestModel): values = ArrayField(DecimalField, field_kwargs={'decimal_places': 1}) class FTSModel(TestModel): title = CharField() data = TextField() fts_data = TSVectorField() class BJson(TestModel): data = BinaryJSONField() class JData(TestModel): d1 = BinaryJSONField() d2 = BinaryJSONField(index=False) class Normal(TestModel): data = TextField() class Event(TestModel): name = CharField() duration = IntervalField() class TZModel(TestModel): dt = DateTimeTZField() class TestPsycopg3TZField(ModelTestCase): database = db requires = [TZModel] def test_tz_field(self): self.database.set_time_zone('us/eastern') # Our naive datetime is treated as if it were in US/Eastern. dt = datetime.datetime(2019, 1, 1, 12) tz = TZModel.create(dt=dt) self.assertTrue(tz.dt.tzinfo is None) # When we retrieve the row, psycopg3 will attach the appropriate tzinfo # data. The value is returned as an "aware" datetime in US/Eastern. tz_db = TZModel[tz.id] self.assertTrue(tz_db.dt.tzinfo is not None) self.assertEqual(tz_db.dt.timetuple()[:4], (2019, 1, 1, 12)) self.assertEqual(tz_db.dt.utctimetuple()[:4], (2019, 1, 1, 17)) class _UTC(datetime.tzinfo): def utcoffset(self, dt): return datetime.timedelta(0) def tzname(self, dt): return "UTC" def dst(self, dt): return datetime.timedelta(0) UTC = _UTC() # We can explicitly insert a row with a different timezone, however. # When we read the row back, it is returned in US/Eastern. dt2 = datetime.datetime(2019, 1, 1, 12, tzinfo=UTC) tz2 = TZModel.create(dt=dt2) tz2_db = TZModel[tz2.id] self.assertEqual(tz2_db.dt.timetuple()[:4], (2019, 1, 1, 7)) self.assertEqual(tz2_db.dt.utctimetuple()[:4], (2019, 1, 1, 12)) # Querying using naive datetime, treated as localtime (US/Eastern). tzq1 = TZModel.get(TZModel.dt == dt) self.assertEqual(tzq1.id, tz.id) # Querying using aware datetime, tzinfo is respected. tzq2 = TZModel.get(TZModel.dt == dt2) self.assertEqual(tzq2.id, tz2.id) # Change the connection timezone?
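        # set_time_zone() is assumed to issue SET TIME ZONE on the open
        # connection, so re-reading the same rows yields the same UTC
        # instants localized to US/Central instead, e.g.:
        #
        #   SET TIME ZONE 'us/central';
        #   -- 2019-01-01 12:00 US/Eastern (17:00 UTC) reads back as 11:00.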
self.database.set_time_zone('us/central') tz_db = TZModel[tz.id] self.assertEqual(tz_db.dt.timetuple()[:4], (2019, 1, 1, 11)) self.assertEqual(tz_db.dt.utctimetuple()[:4], (2019, 1, 1, 17)) tz2_db = TZModel[tz2.id] self.assertEqual(tz2_db.dt.timetuple()[:4], (2019, 1, 1, 6)) self.assertEqual(tz2_db.dt.utctimetuple()[:4], (2019, 1, 1, 12)) class TestPsycopg3ArrayField(ModelTestCase): database = db requires = [ArrayModel] def create_sample(self): return ArrayModel.create( tags=['alpha', 'beta', 'gamma', 'delta'], ints=[[1, 2], [3, 4], [5, 6]]) def test_index_expression(self): data = ( (['a', 'b', 'c'], []), (['b', 'c', 'd', 'e'], [])) am_ids = [] for tags, ints in data: am = ArrayModel.create(tags=tags, ints=ints) am_ids.append(am.id) last_tag = fn.array_upper(ArrayModel.tags, 1) query = ArrayModel.select(ArrayModel.tags[last_tag]).tuples() self.assertEqual(sorted([t for t, in query]), ['c', 'e']) q = ArrayModel.select().where(ArrayModel.tags[last_tag] < 'd') self.assertEqual([a.id for a in q], [am_ids[0]]) q = ArrayModel.select().where(ArrayModel.tags[last_tag] > 'd') self.assertEqual([a.id for a in q], [am_ids[1]]) def test_hashable_objectslice(self): ArrayModel.create(tags=[], ints=[[0, 1], [2, 3]]) ArrayModel.create(tags=[], ints=[[4, 5], [6, 7]]) n = (ArrayModel .update({ArrayModel.ints[0][0]: ArrayModel.ints[0][0] + 1}) .execute()) self.assertEqual(n, 2) am1, am2 = ArrayModel.select().order_by(ArrayModel.id) self.assertEqual(am1.ints, [[1, 1], [2, 3]]) self.assertEqual(am2.ints, [[5, 5], [6, 7]]) def test_array_get_set(self): am = self.create_sample() am_db = ArrayModel.get(ArrayModel.id == am.id) self.assertEqual(am_db.tags, ['alpha', 'beta', 'gamma', 'delta']) self.assertEqual(am_db.ints, [[1, 2], [3, 4], [5, 6]]) def test_array_equality(self): am1 = ArrayModel.create(tags=['t1'], ints=[[1, 2]]) am2 = ArrayModel.create(tags=['t2'], ints=[[3, 4]]) obj = ArrayModel.get(ArrayModel.tags == ['t1']) self.assertEqual(obj.id, am1.id) self.assertEqual(obj.tags, ['t1']) obj = ArrayModel.get(ArrayModel.ints == [[3, 4]]) self.assertEqual(obj.id, am2.id) obj = ArrayModel.get(ArrayModel.tags != ['t1']) self.assertEqual(obj.id, am2.id) def test_array_db_value(self): am = ArrayModel.create(tags=('foo', 'bar'), ints=[]) am_db = ArrayModel.get(ArrayModel.id == am.id) self.assertEqual(am_db.tags, ['foo', 'bar']) def test_array_search(self): def assertAM(where, *instances): query = (ArrayModel .select() .where(where) .order_by(ArrayModel.id)) self.assertEqual([x.id for x in query], [x.id for x in instances]) am = self.create_sample() am2 = ArrayModel.create(tags=['alpha', 'beta'], ints=[[1, 1]]) am3 = ArrayModel.create(tags=['delta'], ints=[[3, 4]]) am4 = ArrayModel.create(tags=['中文'], ints=[[3, 4]]) am5 = ArrayModel.create(tags=['中文', '汉语'], ints=[[3, 4]]) AM = ArrayModel T = AM.tags assertAM((Value('beta') == fn.ANY(T)), am, am2) assertAM((Value('delta') == fn.Any(T)), am, am3) assertAM(Value('omega') == fn.Any(T)) # Check the contains operator. assertAM(SQL("tags::text[] @> ARRAY['beta']"), am, am2) # Use the nicer API. assertAM(T.contains('beta'), am, am2) assertAM(T.contains('omega', 'delta')) assertAM(T.contains('汉语'), am5) assertAM(T.contains('alpha', 'delta'), am) assertAM(T.contained_by('alpha', 'beta', 'delta'), am2, am3) assertAM(T.contained_by('alpha', 'beta', 'gamma', 'delta'), am, am2, am3) # Check for any. 
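        # contains_any() is the array-overlap test: match rows sharing at
        # least one element with the given values. With the postgres-style
        # ArrayField it is assumed to compile to the && operator, roughly:
        #
        #   SELECT ... WHERE ("tags"::text[] && ARRAY['omega', 'delta']);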
assertAM(T.contains_any('beta'), am, am2) assertAM(T.contains_any('中文'), am4, am5) assertAM(T.contains_any('omega', 'delta'), am, am3) assertAM(T.contains_any('alpha', 'delta'), am, am2, am3) def test_array_index_slice(self): self.create_sample() AM = ArrayModel I, T = AM.ints, AM.tags row = AM.select(T[1].alias('arrtags')).dicts().get() self.assertEqual(row['arrtags'], 'beta') row = AM.select(T[2:4].alias('foo')).dicts().get() self.assertEqual(row['foo'], ['gamma', 'delta']) row = AM.select(I[1][1].alias('ints')).dicts().get() self.assertEqual(row['ints'], 4) row = AM.select(I[1:2][0].alias('ints')).dicts().get() self.assertEqual(row['ints'], [[3], [5]]) @requires_models(DecimalArray) def test_field_kwargs(self): vl1, vl2 = [Dc('3.1'), Dc('1.3')], [Dc('3.14'), Dc('1')] da1, da2 = [DecimalArray.create(values=vl) for vl in (vl1, vl2)] da1_db = DecimalArray.get(DecimalArray.id == da1.id) da2_db = DecimalArray.get(DecimalArray.id == da2.id) self.assertEqual(da1_db.values, [Dc('3.1'), Dc('1.3')]) self.assertEqual(da2_db.values, [Dc('3.1'), Dc('1.0')]) class TestPsycopg3ArrayFieldConvertValues(ModelTestCase): database = db requires = [ArrayTSModel] def dt(self, day, hour=0, minute=0, second=0): return datetime.datetime(2018, 1, day, hour, minute, second) def test_value_conversion(self): data = { 'k1': [self.dt(1), self.dt(2), self.dt(3)], 'k2': [], 'k3': [self.dt(4, 5, 6, 7), self.dt(10, 11, 12, 13)], } for key in sorted(data): ArrayTSModel.create(key=key, timestamps=data[key]) for key in sorted(data): am = ArrayTSModel.get(ArrayTSModel.key == key) self.assertEqual(am.timestamps, data[key]) # Perform lookup using timestamp values. ts = ArrayTSModel.get(ArrayTSModel.timestamps.contains(self.dt(3))) self.assertEqual(ts.key, 'k1') ts = ArrayTSModel.get( ArrayTSModel.timestamps.contains(self.dt(4, 5, 6, 7))) self.assertEqual(ts.key, 'k3') self.assertRaises(ArrayTSModel.DoesNotExist, ArrayTSModel.get, ArrayTSModel.timestamps.contains(self.dt(4, 5, 6))) def test_get_with_array_values(self): a1 = ArrayTSModel.create(key='k1', timestamps=[self.dt(1)]) a2 = ArrayTSModel.create(key='k2', timestamps=[self.dt(2), self.dt(3)]) query = (ArrayTSModel .select() .where(ArrayTSModel.timestamps == [self.dt(1)])) a1_db = query.get() self.assertEqual(a1_db.id, a1.id) query = (ArrayTSModel .select() .where(ArrayTSModel.timestamps == [self.dt(2), self.dt(3)])) a2_db = query.get() self.assertEqual(a2_db.id, a2.id) a1_db = ArrayTSModel.get(timestamps=[self.dt(1)]) self.assertEqual(a1_db.id, a1.id) a2_db = ArrayTSModel.get(timestamps=[self.dt(2), self.dt(3)]) self.assertEqual(a2_db.id, a2.id) class TestPsycopg3ArrayUUIDField(ModelTestCase): database = db requires = [UUIDList] def test_array_of_uuids(self): u1, u2, u3, u4 = [uuid.uuid4() for _ in range(4)] a = UUIDList.create(key='a', id_list=[u1, u2, u3], id_list_native=[u1, u2, u3]) b = UUIDList.create(key='b', id_list=[u2, u3, u4], id_list_native=[u2, u3, u4]) a_db = UUIDList.get(UUIDList.key == 'a') b_db = UUIDList.get(UUIDList.key == 'b') self.assertEqual(a.id_list, [u1, u2, u3]) self.assertEqual(b.id_list, [u2, u3, u4]) self.assertEqual(a.id_list_native, [u1, u2, u3]) self.assertEqual(b.id_list_native, [u2, u3, u4]) class TestPsycopg3TSVectorField(ModelTestCase): database = db requires = [FTSModel] messages = [ 'A faith is a necessity to a man. 
Woe to him who believes in nothing.', 'All who call on God in true faith, earnestly from the heart, will ' 'certainly be heard, and will receive what they have asked and desired.', 'Be faithful in small things because it is in them that your strength lies.', 'Faith consists in believing when it is beyond the power of reason to believe.', 'Faith has to do with things that are not seen and hope with things that are not at hand.', ] def setUp(self): super(TestPsycopg3TSVectorField, self).setUp() for idx, message in enumerate(self.messages): FTSModel.create(title=str(idx), data=message, fts_data=fn.to_tsvector(message)) def assertMessages(self, expr, expected): query = FTSModel.select().where(expr).order_by(FTSModel.id) titles = [row.title for row in query] self.assertEqual(list(map(int, titles)), expected) def test_sql(self): query = FTSModel.select().where(Match(FTSModel.data, 'foo bar')) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."title", "t1"."data", "t1"."fts_data" ' 'FROM "fts_model" AS "t1" ' 'WHERE (to_tsvector("t1"."data") @@ to_tsquery(?))'), ['foo bar']) def test_match_function(self): D = FTSModel.data self.assertMessages(Match(D, 'heart'), [1]) self.assertMessages(Match(D, 'god'), [1]) self.assertMessages(Match(D, 'faith'), [0, 1, 2, 3, 4]) self.assertMessages(Match(D, 'thing'), [2, 4]) self.assertMessages(Match(D, 'faith & things'), [2, 4]) self.assertMessages(Match(D, 'god | things'), [1, 2, 4]) self.assertMessages(Match(D, 'god & things'), []) def test_tsvector_field(self): M = FTSModel.fts_data.match self.assertMessages(M('heart'), [1]) self.assertMessages(M('god'), [1]) self.assertMessages(M('faith'), [0, 1, 2, 3, 4]) self.assertMessages(M('thing'), [2, 4]) self.assertMessages(M('faith & things'), [2, 4]) self.assertMessages(M('god | things'), [1, 2, 4]) self.assertMessages(M('god & things'), []) # Using the plain parser we cannot express "OR", but individual term # match works like we expect and multi-term is AND-ed together. self.assertMessages(M('god | things', plain=True), []) self.assertMessages(M('god', plain=True), [1]) self.assertMessages(M('thing', plain=True), [2, 4]) self.assertMessages(M('faith things', plain=True), [2, 4]) class TestPsycopg3BinaryJsonField(BaseBinaryJsonFieldTestCase, ModelTestCase): M = BJson N = Normal database = db requires = [BJson, Normal] def test_remove_data(self): BJson.delete().execute() # Clear out db. 
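        # remove() strips top-level keys from a jsonb value, presumably via
        # one of jsonb's deletion operators; a rough (unverified) sketch of
        # the SQL shape:
        #
        #   SELECT "data" - ARRAY['k1', 'k3'] FROM "b_json";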
BJson.create(data={ 'k1': 'v1', 'k2': 'v2', 'k3': {'x1': 'z1', 'x2': 'z2'}, 'k4': [0, 1, 2]}) def assertData(exp_list, expected_data): query = BJson.select(BJson.data.remove(*exp_list)).tuples() data = query[:][0][0] self.assertEqual(data, expected_data) D = BJson.data assertData(['k3'], {'k1': 'v1', 'k2': 'v2', 'k4': [0, 1, 2]}) assertData(['k1', 'k3'], {'k2': 'v2', 'k4': [0, 1, 2]}) assertData(['k1', 'kx', 'ky', 'k3'], {'k2': 'v2', 'k4': [0, 1, 2]}) assertData(['k4', 'k3'], {'k1': 'v1', 'k2': 'v2'}) def test_json_contains_in_list(self): m1 = self.M.create(data=[{'k1': 'v1', 'k2': 'v2'}, {'a1': 'b1'}]) m2 = self.M.create(data=[{'k3': 'v3'}, {'k4': 'v4'}]) m3 = self.M.create(data=[{'k5': 'v5', 'k6': 'v6'}, {'k1': 'v1'}]) query = (self.M .select() .where(self.M.data.contains([{'k1': 'v1'}])) .order_by(self.M.id)) self.assertEqual([m.id for m in query], [m1.id, m3.id]) def test_integer_index_weirdness(self): self._create_test_data() def fails(): with self.database.atomic(): expr = BJson.data.contains_any(2, 8, 12) results = list(BJson.select().where( BJson.data.contains_any(2, 8, 12))) # Complains of a missing cast/conversion for the data-type? self.assertRaises(ProgrammingError, fails) class TestPsycopg3BinaryJsonFieldBulkUpdate(ModelTestCase): database = db requires = [BJson] def test_binary_json_field_bulk_update(self): b1 = BJson.create(data={'k1': 'v1'}) b2 = BJson.create(data={'k2': 'v2'}) b1.data['k1'] = 'v1-x' b2.data['k2'] = 'v2-y' BJson.bulk_update([b1, b2], fields=[BJson.data]) b1_db = BJson.get(BJson.id == b1.id) b2_db = BJson.get(BJson.id == b2.id) self.assertEqual(b1_db.data, {'k1': 'v1-x'}) self.assertEqual(b2_db.data, {'k2': 'v2-y'}) class TestPsycopg3JsonFieldRegressions(ModelTestCase): database = db requires = [JData] def test_json_field_concat(self): jd = JData.create( d1={'k1': {'x1': 'y1'}, 'k2': 'v2', 'k3': 'v3'}, d2={'k1': {'x2': 'y2'}, 'k2': 'v2-x', 'k4': 'v4'}) query = JData.select(JData.d1.concat(JData.d2).alias('data')) obj = query.get() self.assertEqual(obj.data, { 'k1': {'x2': 'y2'}, 'k2': 'v2-x', 'k3': 'v3', 'k4': 'v4'}) class TestPsycopg3IntervalField(ModelTestCase): database = db requires = [Event] def test_interval_field(self): e1 = Event.create(name='hour', duration=datetime.timedelta(hours=1)) e2 = Event.create(name='mix', duration=datetime.timedelta( days=1, hours=2, minutes=3, seconds=4)) events = [(e.name, e.duration) for e in Event.select().order_by(Event.duration)] self.assertEqual(events, [ ('hour', datetime.timedelta(hours=1)), ('mix', datetime.timedelta(days=1, hours=2, minutes=3, seconds=4)) ]) class KX(TestModel): key = CharField(unique=True) value = IntegerField() class TestPsycopg3AutocommitIntegration(ModelTestCase): database = db requires = [KX] def setUp(self): super(TestPsycopg3AutocommitIntegration, self).setUp() with self.database.atomic(): kx1 = KX.create(key='k1', value=1) def force_integrity_error(self): # Force an integrity error, then verify that the current # transaction has been aborted. self.assertRaises(IntegrityError, KX.create, key='k1', value=10) def test_autocommit_default(self): kx2 = KX.create(key='k2', value=2) # Will be committed. self.assertTrue(kx2.id > 0) self.force_integrity_error() self.assertEqual(KX.select().count(), 2) self.assertEqual([(kx.key, kx.value) for kx in KX.select().order_by(KX.key)], [('k1', 1), ('k2', 2)]) def test_autocommit_disabled(self): with self.database.manual_commit(): self.database.begin() kx2 = KX.create(key='k2', value=2) # Not committed. 
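            # Under manual_commit() the INSERT has already executed on the
            # open transaction -- hence a primary key was assigned -- but
            # nothing is durable until commit(); rollback() below discards it.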
self.assertTrue(kx2.id > 0) # Yes, we have a primary key. self.force_integrity_error() self.database.rollback() self.assertEqual(KX.select().count(), 1) kx1_db = KX.get(KX.key == 'k1') self.assertEqual(kx1_db.value, 1) def test_atomic_block(self): with self.database.atomic() as txn: kx2 = KX.create(key='k2', value=2) self.assertTrue(kx2.id > 0) self.force_integrity_error() txn.rollback(False) self.assertEqual(KX.select().count(), 1) kx1_db = KX.get(KX.key == 'k1') self.assertEqual(kx1_db.value, 1) def test_atomic_block_exception(self): with self.assertRaises(IntegrityError): with self.database.atomic(): KX.create(key='k2', value=2) KX.create(key='k1', value=10) self.assertEqual(KX.select().count(), 1) class TestPsycopg3IsolationLevel(DatabaseTestCase): database = db_loader('postgres', db_class=Psycopg3Database, isolation_level=3) # SERIALIZABLE. def test_isolation_level(self): conn = self.database.connection() self.assertEqual(conn.isolation_level, 3) conn.isolation_level = 2 self.assertEqual(conn.isolation_level, 2) self.database.close() conn = self.database.connection() self.assertEqual(conn.isolation_level, 3) peewee-3.17.7/tests/pwiz_integration.py000066400000000000000000000213521470346076600202130ustar00rootroot00000000000000import datetime import os try: from StringIO import StringIO except ImportError: from io import StringIO import textwrap import sys from peewee import * from pwiz import * from .base import ModelTestCase from .base import TestModel from .base import db_loader from .base import mock from .base import skip_if db = db_loader('sqlite') class User(TestModel): username = CharField(primary_key=True) id = IntegerField(default=0) class Note(TestModel): user = ForeignKeyField(User) text = TextField(index=True) data = IntegerField(default=0) misc = IntegerField(default=0) class Meta: indexes = ( (('user', 'text'), True), (('user', 'data', 'misc'), False), ) class Category(TestModel): name = CharField(unique=True) parent = ForeignKeyField('self', null=True) class OddColumnNames(TestModel): spaces = CharField(column_name='s p aces') symbols = CharField(column_name='w/-nug!') camelCaseName = CharField(column_name='camelCaseName') class Meta: table_name = 'oddColumnNames' class Event(TestModel): data = TextField() status = IntegerField() class capture_output(object): def __enter__(self): self._stdout = sys.stdout sys.stdout = self._buffer = StringIO() return self def __exit__(self, *args): self.data = self._buffer.getvalue() sys.stdout = self._stdout EXPECTED = """ from peewee import * database = SqliteDatabase('peewee_test.db') class UnknownField(object): def __init__(self, *_, **__): pass class BaseModel(Model): class Meta: database = database class Category(BaseModel): name = CharField(unique=True) parent = ForeignKeyField(column_name='parent_id', field='id', model='self', null=True) class Meta: table_name = 'category' class User(BaseModel): id = IntegerField() username = CharField(primary_key=True) class Meta: table_name = 'user' class Note(BaseModel): data = IntegerField() misc = IntegerField() text = TextField(index=True) user = ForeignKeyField(column_name='user_id', field='username', model=User) class Meta: table_name = 'note' indexes = ( (('user', 'data', 'misc'), False), (('user', 'text'), True), ) """.strip() EXPECTED_ORDERED = """ from peewee import * database = SqliteDatabase('peewee_test.db') class UnknownField(object): def __init__(self, *_, **__): pass class BaseModel(Model): class Meta: database = database class User(BaseModel): username = 
CharField(primary_key=True) id = IntegerField() class Meta: table_name = 'user' class Note(BaseModel): user = ForeignKeyField(column_name='user_id', field='username', model=User) text = TextField(index=True) data = IntegerField() misc = IntegerField() class Meta: table_name = 'note' indexes = ( (('user', 'data', 'misc'), False), (('user', 'text'), True), ) """.strip() class BasePwizTestCase(ModelTestCase): database = db requires = [] def setUp(self): if not self.database.is_closed(): self.database.close() if os.path.exists(self.database.database): os.unlink(self.database.database) super(BasePwizTestCase, self).setUp() self.introspector = Introspector.from_database(self.database) class TestPwiz(BasePwizTestCase): requires = [User, Note, Category] def test_print_models(self): with capture_output() as output: print_models(self.introspector) self.assertEqual(output.data.strip(), EXPECTED) def test_print_header(self): cmdline = '-i -e sqlite %s' % db.database with capture_output() as output: with mock.patch('pwiz.datetime.datetime') as mock_datetime: now = mock_datetime.now.return_value now.strftime.return_value = 'February 03, 2015 15:30PM' print_header(cmdline, self.introspector) self.assertEqual(output.data.strip(), ( '# Code generated by:\n' '# python -m pwiz %s\n' '# Date: February 03, 2015 15:30PM\n' '# Database: %s\n' '# Peewee version: %s') % (cmdline, db.database, peewee_version)) class TestPwizOrdered(BasePwizTestCase): requires = [User, Note] def test_ordered_columns(self): with capture_output() as output: print_models(self.introspector, preserve_order=True) self.assertEqual(output.data.strip(), EXPECTED_ORDERED) class TestPwizUnknownField(BasePwizTestCase): header = ('from peewee import *\n\n' 'database = SqliteDatabase(\'peewee_test.db\')\n\n') unknown = ('class UnknownField(object):\n' ' def __init__(self, *_, **__): pass\n\n') basemodel = ('class BaseModel(Model):\n class Meta:\n' ' database = database\n\n') def setUp(self): super(TestPwizUnknownField, self).setUp() self.database.execute_sql( 'CREATE TABLE "foo" ("id" INTEGER NOT NULL PRIMARY KEY, ' '"unk1", "unk2" BIZBAZ NOT NULL)') def test_unknown_field(self): with capture_output() as output: print_models(self.introspector) self.assertEqual(output.data.strip(), ( self.header + self.unknown + self.basemodel + 'class Foo(BaseModel):\n' ' unk1 = BareField(null=True)\n' ' unk2 = UnknownField() # BIZBAZ\n\n' ' class Meta:\n table_name = \'foo\'')) def test_ignore_unknown(self): with capture_output() as output: print_models(self.introspector, ignore_unknown=True) self.assertEqual(output.data.strip(), ( self.header + self.basemodel + 'class Foo(BaseModel):\n' ' unk1 = BareField(null=True)\n' ' # unk2 - BIZBAZ\n\n' ' class Meta:\n table_name = \'foo\'')) class TestPwizInvalidColumns(BasePwizTestCase): requires = [OddColumnNames] def test_invalid_columns(self): with capture_output() as output: print_models(self.introspector) result = output.data.strip() expected = textwrap.dedent(""" class OddColumnNames(BaseModel): camel_case_name = CharField(column_name='camelCaseName') s_p_aces = CharField(column_name='s p aces') w_nug_ = CharField(column_name='w/-nug!') class Meta: table_name = 'oddColumnNames'""").strip() actual = result[-len(expected):] self.assertEqual(actual, expected) def test_odd_columns_legacy(self): with capture_output() as output: print_models(self.introspector, snake_case=False) result = output.data.strip() expected = textwrap.dedent(""" class Oddcolumnnames(BaseModel): camelcasename = 
CharField(column_name='camelCaseName') s_p_aces = CharField(column_name='s p aces') w_nug_ = CharField(column_name='w/-nug!') class Meta: table_name = 'oddColumnNames'""").strip() actual = result[-len(expected):] self.assertEqual(actual, expected) class TestPwizIntrospectViews(BasePwizTestCase): requires = [Event] def setUp(self): super(TestPwizIntrospectViews, self).setUp() self.database.execute_sql('CREATE VIEW "events_public" AS ' 'SELECT data FROM event WHERE status = 1') def tearDown(self): self.database.execute_sql('DROP VIEW "events_public"') super(TestPwizIntrospectViews, self).tearDown() def test_introspect_ignore_views(self): # By default views are not included in the output. with capture_output() as output: print_models(self.introspector) self.assertFalse('events_public' in output.data.strip()) def test_introspect_views(self): # Views can be introspected, however. with capture_output() as output: print_models(self.introspector, include_views=True) result = output.data.strip() event_tbl = textwrap.dedent(""" class Event(BaseModel): data = TextField() status = IntegerField() class Meta: table_name = 'event'""").strip() self.assertTrue(event_tbl in result) event_view = textwrap.dedent(""" class EventsPublic(BaseModel): data = TextField(null=True) class Meta: table_name = 'events_public' primary_key = False""").strip() self.assertTrue(event_view in result) peewee-3.17.7/tests/queries.py000066400000000000000000000164601470346076600163000ustar00rootroot00000000000000from peewee import * from .base import BaseTestCase from .base import DatabaseTestCase from .base import TestModel from .base import get_in_memory_db User = Table('users', ['id', 'username']) Tweet = Table('tweet', ['id', 'user_id', 'content']) Register = Table('register', ['id', 'value']) class TestQueryExecution(DatabaseTestCase): database = get_in_memory_db() def setUp(self): super(TestQueryExecution, self).setUp() User.bind(self.database) Tweet.bind(self.database) Register.bind(self.database) self.execute('CREATE TABLE "users" (id INTEGER NOT NULL PRIMARY KEY, ' 'username TEXT)') self.execute('CREATE TABLE "tweet" (id INTEGER NOT NULL PRIMARY KEY, ' 'user_id INTEGER NOT NULL, content TEXT, FOREIGN KEY ' '(user_id) REFERENCES users (id))') self.execute('CREATE TABLE "register" (' 'id INTEGER NOT NULL PRIMARY KEY, ' 'value REAL)') def tearDown(self): self.execute('DROP TABLE "tweet";') self.execute('DROP TABLE "users";') self.execute('DROP TABLE "register";') super(TestQueryExecution, self).tearDown() def create_user_tweets(self, username, *tweets): user_id = User.insert({User.username: username}).execute() for tweet in tweets: Tweet.insert({ Tweet.user_id: user_id, Tweet.content: tweet}).execute() return user_id def test_selection(self): huey_id = self.create_user_tweets('huey', 'meow', 'purr') query = User.select() self.assertEqual(query[:], [{'id': huey_id, 'username': 'huey'}]) query = (Tweet .select(Tweet.content, User.username) .join(User, on=(Tweet.user_id == User.id)) .order_by(Tweet.id)) self.assertEqual(query[:], [ {'content': 'meow', 'username': 'huey'}, {'content': 'purr', 'username': 'huey'}]) def test_select_peek_first(self): huey_id = self.create_user_tweets('huey', 'meow', 'purr', 'hiss') query = Tweet.select(Tweet.content).order_by(Tweet.id) self.assertEqual(query.peek(n=2), [ {'content': 'meow'}, {'content': 'purr'}]) self.assertEqual(query.first(), {'content': 'meow'}) query = Tweet.select().where(Tweet.id == 0) self.assertIsNone(query.peek(n=2)) self.assertIsNone(query.first()) def 
test_select_get(self): huey_id = self.create_user_tweets('huey') self.assertEqual(User.select().where(User.username == 'huey').get(), { 'id': huey_id, 'username': 'huey'}) self.assertIsNone(User.select().where(User.username == 'x').get()) def test_select_count(self): huey_id = self.create_user_tweets('huey', 'meow', 'purr') mickey_id = self.create_user_tweets('mickey', 'woof', 'pant', 'whine') self.assertEqual(User.select().count(), 2) self.assertEqual(Tweet.select().count(), 5) query = Tweet.select().where(Tweet.user_id == mickey_id) self.assertEqual(query.count(), 3) query = (Tweet .select() .join(User, on=(Tweet.user_id == User.id)) .where(User.username == 'foo')) self.assertEqual(query.count(), 0) def test_select_exists(self): self.create_user_tweets('huey') self.assertTrue(User.select().where(User.username == 'huey').exists()) self.assertFalse(User.select().where(User.username == 'foo').exists()) def test_scalar(self): values = [1.0, 1.5, 2.0, 5.0, 8.0] (Register .insert([{Register.value: value} for value in values]) .execute()) query = Register.select(fn.AVG(Register.value)) self.assertEqual(query.scalar(), 3.5) query = query.where(Register.value < 5) self.assertEqual(query.scalar(), 1.5) query = (Register .select( fn.SUM(Register.value), fn.COUNT(Register.value), fn.SUM(Register.value) / fn.COUNT(Register.value))) self.assertEqual(query.scalar(as_tuple=True), (17.5, 5, 3.5)) query = query.where(Register.value >= 2) self.assertEqual(query.scalar(as_tuple=True), (15, 3, 5)) def test_scalars(self): values = [1.0, 1.5, 2.0, 5.0, 8.0] (Register .insert([{Register.value: value} for value in values]) .execute()) query = Register.select(Register.value).order_by(Register.value) self.assertEqual(list(query.scalars()), values) query = query.where(Register.value < 5) self.assertEqual(list(query.scalars()), [1.0, 1.5, 2.0]) def test_slicing_select(self): values = [1., 1., 2., 3., 5., 8.] (Register .insert([(v,) for v in values], columns=(Register.value,)) .execute()) query = (Register .select(Register.value) .order_by(Register.value) .tuples()) with self.assertQueryCount(1): self.assertEqual(query[0], (1.,)) self.assertEqual(query[:2], [(1.,), (1.,)]) self.assertEqual(query[1:4], [(1.,), (2.,), (3.,)]) self.assertEqual(query[-1], (8.,)) self.assertEqual(query[-2], (5.,)) self.assertEqual(query[-2:], [(5.,), (8.,)]) self.assertEqual(query[2:-2], [(2.,), (3.,)]) class TestQueryCloning(BaseTestCase): def test_clone_tables(self): self._do_test_clone(User, Tweet) def test_clone_models(self): class User(TestModel): username = TextField() class Meta: table_name = 'users' class Tweet(TestModel): user = ForeignKeyField(User, backref='tweets') content = TextField() self._do_test_clone(User, Tweet) def _do_test_clone(self, User, Tweet): query = Tweet.select(Tweet.id) base_sql = 'SELECT "t1"."id" FROM "tweet" AS "t1"' self.assertSQL(query, base_sql, []) qj = query.join(User, on=(Tweet.user_id == User.id)) self.assertSQL(query, base_sql, []) self.assertSQL(qj, ( 'SELECT "t1"."id" FROM "tweet" AS "t1" ' 'INNER JOIN "users" AS "t2" ON ("t1"."user_id" = "t2"."id")'), []) qw = query.where(Tweet.id > 3) self.assertSQL(query, base_sql, []) self.assertSQL(qw, base_sql + ' WHERE ("t1"."id" > ?)', [3]) qw2 = qw.where(Tweet.id < 6) self.assertSQL(query, base_sql, []) self.assertSQL(qw, base_sql + ' WHERE ("t1"."id" > ?)', [3]) self.assertSQL(qw2, base_sql + (' WHERE (("t1"."id" > ?) 
' 'AND ("t1"."id" < ?))'), [3, 6]) qo = query.order_by(Tweet.id) self.assertSQL(query, base_sql, []) self.assertSQL(qo, base_sql + ' ORDER BY "t1"."id"', []) qo2 = qo.order_by(Tweet.content, Tweet.id) self.assertSQL(query, base_sql, []) self.assertSQL(qo, base_sql + ' ORDER BY "t1"."id"', []) self.assertSQL(qo2, base_sql + ' ORDER BY "t1"."content", "t1"."id"', []) qg = query.group_by(Tweet.id) self.assertSQL(query, base_sql, []) self.assertSQL(qg, base_sql + ' GROUP BY "t1"."id"', []) peewee-3.17.7/tests/reflection.py000066400000000000000000000556071470346076600167630ustar00rootroot00000000000000import datetime import os import re import warnings from peewee import * from playhouse.reflection import * from .base import IS_CRDB from .base import IS_SQLITE_OLD from .base import ModelTestCase from .base import TestModel from .base import db from .base import requires_models from .base import requires_sqlite from .base import skip_if from .base_models import Tweet from .base_models import User class ColTypes(TestModel): f1 = BigIntegerField(index=True) f2 = BlobField() f3 = BooleanField() f4 = CharField(max_length=50) f5 = DateField() f6 = DateTimeField() f7 = DecimalField() f8 = DoubleField() f9 = FloatField() f10 = IntegerField(unique=True) f11 = AutoField() f12 = TextField() f13 = TimeField() class Meta: indexes = ( (('f10', 'f11'), True), (('f11', 'f8', 'f13'), False), ) class Nullable(TestModel): nullable_cf = CharField(null=True) nullable_if = IntegerField(null=True) class RelModel(TestModel): col_types = ForeignKeyField(ColTypes, backref='foo') col_types_nullable = ForeignKeyField(ColTypes, null=True) class FKPK(TestModel): col_types = ForeignKeyField(ColTypes, primary_key=True) class Underscores(TestModel): _id = AutoField() _name = CharField() class Category(TestModel): name = CharField(max_length=10) parent = ForeignKeyField('self', null=True) class Nugget(TestModel): category_id = ForeignKeyField(Category, column_name='category_id') category = CharField() class NoPK(TestModel): data = CharField() class Meta: primary_key = False class BaseReflectionTestCase(ModelTestCase): def setUp(self): super(BaseReflectionTestCase, self).setUp() self.introspector = Introspector.from_database(self.database) class TestReflection(BaseReflectionTestCase): requires = [ColTypes, Nullable, RelModel, FKPK, Underscores, Category, Nugget] def test_generate_models(self): models = self.introspector.generate_models() self.assertTrue(set(( 'category', 'col_types', 'fkpk', 'nugget', 'nullable', 'rel_model', 'underscores')).issubset(set(models))) def assertIsInstance(obj, klass): self.assertTrue(isinstance(obj, klass)) category = models['category'] self.assertEqual( sorted(category._meta.fields), ['id', 'name', 'parent']) assertIsInstance(category.id, AutoField) assertIsInstance(category.name, CharField) assertIsInstance(category.parent, ForeignKeyField) self.assertEqual(category.parent.rel_model, category) fkpk = models['fkpk'] self.assertEqual(sorted(fkpk._meta.fields), ['col_types']) assertIsInstance(fkpk.col_types, ForeignKeyField) self.assertEqual(fkpk.col_types.rel_model, models['col_types']) self.assertTrue(fkpk.col_types.primary_key) relmodel = models['rel_model'] self.assertEqual( sorted(relmodel._meta.fields), ['col_types', 'col_types_nullable', 'id']) assertIsInstance(relmodel.col_types, ForeignKeyField) assertIsInstance(relmodel.col_types_nullable, ForeignKeyField) self.assertFalse(relmodel.col_types.null) self.assertTrue(relmodel.col_types_nullable.null) 
self.assertEqual(relmodel.col_types.rel_model, models['col_types']) self.assertEqual(relmodel.col_types_nullable.rel_model, models['col_types']) @requires_sqlite def test_generate_models_indexes(self): models = self.introspector.generate_models() self.assertEqual(models['fkpk']._meta.indexes, []) self.assertEqual(models['rel_model']._meta.indexes, []) self.assertEqual(models['category']._meta.indexes, []) col_types = models['col_types'] indexed = set(['f1']) unique = set(['f10']) for field in col_types._meta.sorted_fields: self.assertEqual(field.index, field.name in indexed) self.assertEqual(field.unique, field.name in unique) indexes = col_types._meta.indexes self.assertEqual(sorted(indexes), [ (['f10', 'f11'], True), (['f11', 'f8', 'f13'], False), ]) def test_table_subset(self): models = self.introspector.generate_models(table_names=[ 'category', 'col_types', 'foobarbaz']) self.assertEqual(sorted(models.keys()), ['category', 'col_types']) @requires_sqlite def test_sqlite_fk_re(self): user_id_tests = [ 'FOREIGN KEY("user_id") REFERENCES "users"("id")', 'FOREIGN KEY(user_id) REFERENCES users(id)', 'FOREIGN KEY ([user_id]) REFERENCES [users] ([id])', '"user_id" NOT NULL REFERENCES "users" ("id")', 'user_id not null references users (id)', ] fk_pk_tests = [ ('"col_types_id" INTEGER NOT NULL PRIMARY KEY REFERENCES ' '"coltypes" ("f11")'), 'FOREIGN KEY ("col_types_id") REFERENCES "coltypes" ("f11")', ] regex = SqliteMetadata.re_foreign_key for test in user_id_tests: match = re.search(regex, test, re.I) self.assertEqual(match.groups(), ( 'user_id', 'users', 'id', )) for test in fk_pk_tests: match = re.search(regex, test, re.I) self.assertEqual(match.groups(), ( 'col_types_id', 'coltypes', 'f11', )) def test_make_column_name(self): # Tests for is_foreign_key=False. tests = ( ('Column', 'column'), ('Foo_id', 'foo_id'), ('foo_id', 'foo_id'), ('foo_id_id', 'foo_id_id'), ('foo', 'foo'), ('_id', '_id'), ('a123', 'a123'), ('and', 'and_'), ('Class', 'class_'), ('Class_ID', 'class_id'), ('camelCase', 'camel_case'), ('ABCdefGhi', 'ab_cdef_ghi'), ) for col_name, expected in tests: self.assertEqual( self.introspector.make_column_name(col_name), expected) # Tests for is_foreign_key=True. tests = ( ('Foo_id', 'foo'), ('foo_id', 'foo'), ('foo_id_id', 'foo_id'), ('foo', 'foo'), ('_id', '_id'), ('a123', 'a123'), ('and', 'and_'), ('Class', 'class_'), ('Class_ID', 'class_'), ('camelCase', 'camel_case'), ('ABCdefGhi', 'ab_cdef_ghi'), ) for col_name, expected in tests: self.assertEqual( self.introspector.make_column_name(col_name, True), expected) def test_make_model_name(self): tests = ( ('Table', 'Table'), ('table', 'Table'), ('table_baz', 'TableBaz'), ('foo__bar__baz2', 'FooBarBaz2'), ('foo12_3', 'Foo123'), ) for table_name, expected in tests: self.assertEqual( self.introspector.make_model_name(table_name), expected) def test_col_types(self): (columns, primary_keys, foreign_keys, model_names, indexes) = self.introspector.introspect() expected = ( ('col_types', ( ('f1', (BigIntegerField, IntegerField), False), # There do not appear to be separate constants for the blob and # text field types in MySQL's drivers. See GH#1034. 
('f2', (BlobField, TextField), False), ('f3', (BooleanField, IntegerField), False), ('f4', CharField, False), ('f5', DateField, False), ('f6', DateTimeField, False), ('f7', DecimalField, False), ('f8', (DoubleField, FloatField), False), ('f9', FloatField, False), ('f10', IntegerField, False), ('f11', AutoField, False), ('f12', TextField, False), ('f13', TimeField, False))), ('rel_model', ( ('col_types_id', ForeignKeyField, False), ('col_types_nullable_id', ForeignKeyField, True))), ('nugget', ( ('category_id', ForeignKeyField, False), ('category', CharField, False))), ('nullable', ( ('nullable_cf', CharField, True), ('nullable_if', IntegerField, True))), ('fkpk', ( ('col_types_id', ForeignKeyField, False),)), ('underscores', ( ('_id', AutoField, False), ('_name', CharField, False))), ('category', ( ('name', CharField, False), ('parent_id', ForeignKeyField, True))), ) for table_name, expected_columns in expected: introspected_columns = columns[table_name] for field_name, field_class, is_null in expected_columns: if not isinstance(field_class, (list, tuple)): field_class = (field_class,) column = introspected_columns[field_name] self.assertTrue(column.field_class in field_class, "%s in %s" % (column.field_class, field_class)) self.assertEqual(column.nullable, is_null) def test_foreign_keys(self): (columns, primary_keys, foreign_keys, model_names, indexes) = self.introspector.introspect() self.assertEqual(foreign_keys['col_types'], []) rel_model = foreign_keys['rel_model'] self.assertEqual(len(rel_model), 2) fkpk = foreign_keys['fkpk'] self.assertEqual(len(fkpk), 1) fkpk_fk = fkpk[0] self.assertEqual(fkpk_fk.table, 'fkpk') self.assertEqual(fkpk_fk.column, 'col_types_id') self.assertEqual(fkpk_fk.dest_table, 'col_types') self.assertEqual(fkpk_fk.dest_column, 'f11') category = foreign_keys['category'] self.assertEqual(len(category), 1) category_fk = category[0] self.assertEqual(category_fk.table, 'category') self.assertEqual(category_fk.column, 'parent_id') self.assertEqual(category_fk.dest_table, 'category') self.assertEqual(category_fk.dest_column, 'id') def test_table_names(self): (columns, primary_keys, foreign_keys, model_names, indexes) = self.introspector.introspect() names = ( ('col_types', 'ColTypes'), ('nullable', 'Nullable'), ('rel_model', 'RelModel'), ('fkpk', 'Fkpk')) for k, v in names: self.assertEqual(model_names[k], v) def test_column_meta(self): (columns, primary_keys, foreign_keys, model_names, indexes) = self.introspector.introspect() rel_model = columns['rel_model'] col_types_id = rel_model['col_types_id'] self.assertEqual(col_types_id.get_field_parameters(), { 'column_name': "'col_types_id'", 'model': 'ColTypes', 'field': "'f11'", }) col_types_nullable_id = rel_model['col_types_nullable_id'] self.assertEqual(col_types_nullable_id.get_field_parameters(), { 'column_name': "'col_types_nullable_id'", 'null': True, 'backref': "'col_types_col_types_nullable_set'", 'model': 'ColTypes', 'field': "'f11'", }) fkpk = columns['fkpk'] self.assertEqual(fkpk['col_types_id'].get_field_parameters(), { 'column_name': "'col_types_id'", 'model': 'ColTypes', 'primary_key': True, 'field': "'f11'"}) category = columns['category'] parent_id = category['parent_id'] self.assertEqual(parent_id.get_field_parameters(), { 'column_name': "'parent_id'", 'null': True, 'model': "'self'", 'field': "'id'", }) nugget = columns['nugget'] category_fk = nugget['category_id'] self.assertEqual(category_fk.name, 'category_id') self.assertEqual(category_fk.get_field_parameters(), { 'field': "'id'", 'model': 
'Category', 'column_name': "'category_id'", }) category = nugget['category'] self.assertEqual(category.name, 'category') def test_get_field(self): (columns, primary_keys, foreign_keys, model_names, indexes) = self.introspector.introspect() expected = ( ('col_types', ( ('f1', ('f1 = BigIntegerField(index=True)', 'f1 = IntegerField(index=True)')), ('f2', ('f2 = BlobField()', 'f2 = TextField()')), ('f4', 'f4 = CharField()'), ('f5', 'f5 = DateField()'), ('f6', 'f6 = DateTimeField()'), ('f7', 'f7 = DecimalField()'), ('f10', 'f10 = IntegerField(unique=True)'), ('f11', 'f11 = AutoField()'), ('f12', ('f12 = TextField()', 'f12 = BlobField()')), ('f13', 'f13 = TimeField()'), )), ('nullable', ( ('nullable_cf', 'nullable_cf = ' 'CharField(null=True)'), ('nullable_if', 'nullable_if = IntegerField(null=True)'), )), ('fkpk', ( ('col_types_id', 'col_types = ForeignKeyField(' "column_name='col_types_id', field='f11', model=ColTypes, " 'primary_key=True)'), )), ('nugget', ( ('category_id', 'category_id = ForeignKeyField(' "column_name='category_id', field='id', model=Category)"), ('category', 'category = CharField()'), )), ('rel_model', ( ('col_types_id', 'col_types = ForeignKeyField(' "column_name='col_types_id', field='f11', model=ColTypes)"), ('col_types_nullable_id', 'col_types_nullable = ' "ForeignKeyField(backref='col_types_col_types_nullable_set', " "column_name='col_types_nullable_id', field='f11', " 'model=ColTypes, null=True)'), )), ('underscores', ( ('_id', '_id = AutoField()'), ('_name', '_name = CharField()'), )), ('category', ( ('name', 'name = CharField()'), ('parent_id', 'parent = ForeignKeyField(' "column_name='parent_id', field='id', model='self', " 'null=True)'), )), ) for table, field_data in expected: for field_name, fields in field_data: if not isinstance(fields, tuple): fields = (fields,) actual = columns[table][field_name].get_field() self.assertTrue(actual in fields, '%s not in %s' % (actual, fields)) class TestReflectNoPK(BaseReflectionTestCase): requires = [NoPK] def test_no_pk(self): models = self.introspector.generate_models() NoPK = models['no_pk'] if IS_CRDB: # CockroachDB always includes a "rowid". self.assertEqual(NoPK._meta.sorted_field_names, ['rowid', 'data']) else: self.assertEqual(NoPK._meta.sorted_field_names, ['data']) self.assertTrue(NoPK._meta.primary_key is False) class EventLog(TestModel): data = CharField(constraints=[SQL('DEFAULT \'\'')]) timestamp = DateTimeField(constraints=[SQL('DEFAULT current_timestamp')]) flags = IntegerField(constraints=[SQL('DEFAULT 0')]) misc = TextField(constraints=[SQL('DEFAULT \'foo\'')]) class DefaultVals(TestModel): key = CharField(constraints=[SQL('DEFAULT \'foo\'')]) value = IntegerField(constraints=[SQL('DEFAULT 0')]) class Meta: primary_key = CompositeKey('key', 'value') class TestReflectDefaultValues(BaseReflectionTestCase): requires = [DefaultVals, EventLog] @requires_sqlite def test_default_values(self): models = self.introspector.generate_models() default_vals = models['default_vals'] create_table = ( 'CREATE TABLE IF NOT EXISTS "default_vals" (' '"key" VARCHAR(255) NOT NULL DEFAULT \'foo\', ' '"value" INTEGER NOT NULL DEFAULT 0, ' 'PRIMARY KEY ("key", "value"))') # Re-create table using the introspected schema. self.assertSQL(default_vals._schema._create_table(), create_table, []) default_vals.drop_table() default_vals.create_table() # Verify that the introspected schema has not changed. 
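        # In other words, introspect -> generate DDL -> drop/recreate ->
        # introspect again should be a fixed point: the second round-trip
        # must emit the identical CREATE TABLE, defaults included.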
models = self.introspector.generate_models() default_vals = models['default_vals'] self.assertSQL(default_vals._schema._create_table(), create_table, []) @requires_sqlite def test_default_values_extended(self): models = self.introspector.generate_models() eventlog = models['event_log'] create_table = ( 'CREATE TABLE IF NOT EXISTS "event_log" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"data" VARCHAR(255) NOT NULL DEFAULT \'\', ' '"timestamp" DATETIME NOT NULL DEFAULT current_timestamp, ' '"flags" INTEGER NOT NULL DEFAULT 0, ' '"misc" TEXT NOT NULL DEFAULT \'foo\')') # Re-create table using the introspected schema. self.assertSQL(eventlog._schema._create_table(), create_table, []) eventlog.drop_table() eventlog.create_table() # Verify that the introspected schema has not changed. models = self.introspector.generate_models() eventlog = models['event_log'] self.assertSQL(eventlog._schema._create_table(), create_table, []) class TestReflectionDependencies(BaseReflectionTestCase): requires = [User, Tweet] def test_generate_dependencies(self): models = self.introspector.generate_models(table_names=['tweet']) self.assertEqual(set(models), set(('users', 'tweet'))) IUser = models['users'] ITweet = models['tweet'] self.assertEqual(set(ITweet._meta.fields), set(( 'id', 'user', 'content', 'timestamp'))) self.assertEqual(set(IUser._meta.fields), set(('id', 'username'))) self.assertTrue(ITweet.user.rel_model is IUser) self.assertTrue(ITweet.user.rel_field is IUser.id) def test_ignore_backrefs(self): models = self.introspector.generate_models(table_names=['users']) self.assertEqual(set(models), set(('users',))) class Note(TestModel): content = TextField() timestamp = DateTimeField(default=datetime.datetime.now) status = IntegerField() class TestReflectViews(BaseReflectionTestCase): requires = [Note] def setUp(self): super(TestReflectViews, self).setUp() self.database.execute_sql('CREATE VIEW notes_public AS ' 'SELECT content, timestamp FROM note ' 'WHERE status = 1 ORDER BY timestamp DESC') def tearDown(self): self.database.execute_sql('DROP VIEW notes_public') super(TestReflectViews, self).tearDown() def test_views_ignored_default(self): models = self.introspector.generate_models() self.assertFalse('notes_public' in models) def test_introspect_view(self): models = self.introspector.generate_models(include_views=True) self.assertTrue('notes_public' in models) NotesPublic = models['notes_public'] self.assertEqual(sorted(NotesPublic._meta.fields), ['content', 'timestamp']) self.assertTrue(isinstance(NotesPublic.content, TextField)) self.assertTrue(isinstance(NotesPublic.timestamp, DateTimeField)) @skip_if(IS_SQLITE_OLD) @skip_if(IS_CRDB, 'crdb does not respect order by in view def') def test_introspect_view_integration(self): for i, (ct, st) in enumerate([('n1', 1), ('n2', 2), ('n3', 1)]): Note.create(content=ct, status=st, timestamp=datetime.datetime(2018, 1, 1 + i)) NP = self.introspector.generate_models( table_names=['notes_public'], include_views=True)['notes_public'] self.assertEqual([(np.content, np.timestamp) for np in NP.select()], [ ('n3', datetime.datetime(2018, 1, 3)), ('n1', datetime.datetime(2018, 1, 1))]) class TestCyclicalFK(BaseReflectionTestCase): def setUp(self): super(TestCyclicalFK, self).setUp() warnings.filterwarnings('ignore') @requires_sqlite def test_cyclical_fk(self): # NOTE: this schema was provided by a user. 
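        # The two tables form a foreign-key cycle (flow_run_state.flow_run_id
        # -> flow_run.id, and flow_run.state_id -> flow_run_state.id), so
        # neither table can be introspected strictly "before" the other;
        # generate_models() must tolerate the cycle instead of recursing.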
cursor = self.database.cursor() cursor.executescript( 'CREATE TABLE flow_run_state (id CHAR(36) NOT NULL, ' 'flow_run_id CHAR(36) NOT NULL, ' 'CONSTRAINT pk_flow_run_state PRIMARY KEY (id), ' 'CONSTRAINT fk_flow_run_state__flow_run_id__flow_run ' 'FOREIGN KEY(flow_run_id) REFERENCES flow_run (id) ' 'ON DELETE cascade); ' 'CREATE TABLE flow_run (id CHAR(36) NOT NULL, ' 'state_id CHAR(36) NOT NULL, ' 'CONSTRAINT pk_flow_run PRIMARY KEY (id), ' 'CONSTRAINT fk_flow_run__state_id__flow_run_state ' 'FOREIGN KEY(state_id) REFERENCES flow_run_state (id) ' 'ON DELETE SET NULL);') M = self.introspector.generate_models() FRS = M['flow_run_state'] FR = M['flow_run'] self.assertEqual(sorted(FR._meta.fields), ['id', 'state']) self.assertEqual(sorted(FRS._meta.fields), ['flow_run', 'id']) self.assertTrue(isinstance(FR.id, CharField)) self.assertTrue(isinstance(FR.state, ForeignKeyField)) self.assertTrue(FR.state.rel_model is FRS) self.assertTrue(isinstance(FRS.id, CharField)) self.assertTrue(isinstance(FRS.flow_run, ForeignKeyField)) self.assertTrue(FRS.flow_run.rel_model is FR) class Event(TestModel): key = TextField() timestamp = DateTimeField(index=True) metadata = TextField(default='') class TestInteractiveHelpers(ModelTestCase): requires = [Category, Event] def test_generate_models(self): M = generate_models(self.database) self.assertTrue('category' in M) self.assertTrue('event' in M) def assertFields(m, expected): actual = [(f.name, f.field_type) for f in m._meta.sorted_fields] self.assertEqual(actual, expected) assertFields(M['category'], [('id', 'AUTO'), ('name', 'VARCHAR'), ('parent', 'INT')]) assertFields(M['event'], [ ('id', 'AUTO'), ('key', 'TEXT'), ('timestamp', 'DATETIME'), ('metadata', 'TEXT')]) peewee-3.17.7/tests/regressions.py000066400000000000000000001751151470346076600171670ustar00rootroot00000000000000import datetime import json import random import sys import threading import time import uuid from peewee import * from playhouse.hybrid import * from playhouse.migrate import migrate from playhouse.migrate import SchemaMigrator from .base import BaseTestCase from .base import IS_MYSQL from .base import IS_MYSQL_ADVANCED_FEATURES from .base import IS_SQLITE from .base import IS_SQLITE_OLD from .base import ModelTestCase from .base import TestModel from .base import get_in_memory_db from .base import requires_models from .base import requires_mysql from .base import requires_postgresql from .base import skip_if from .base import skip_unless from .base import slow_test from .base_models import Sample from .base_models import Tweet from .base_models import User class ColAlias(TestModel): name = TextField(column_name='pname') class CARef(TestModel): colalias = ForeignKeyField(ColAlias, backref='carefs', column_name='ca', object_id_name='colalias_id') class TestQueryAliasToColumnName(ModelTestCase): requires = [ColAlias, CARef] def setUp(self): super(TestQueryAliasToColumnName, self).setUp() with self.database.atomic(): for name in ('huey', 'mickey'): col_alias = ColAlias.create(name=name) CARef.create(colalias=col_alias) def test_alias_to_column_name(self): # The issue here occurs when we take a field whose name differs from # its underlying column name, then alias that field to its column # name. In this case, peewee was *not* respecting the alias and using # the field name instead. query = (ColAlias .select(ColAlias.name.alias('pname')) .order_by(ColAlias.name)) self.assertEqual([c.pname for c in query], ['huey', 'mickey']) # Ensure that when using dicts the logic is preserved.
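        # With .dicts() each row comes back as a plain dict keyed by the
        # selected alias, so 'pname' must be the key here as well.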
query = query.dicts() self.assertEqual([r['pname'] for r in query], ['huey', 'mickey']) def test_alias_overlap_with_join(self): query = (CARef .select(CARef, ColAlias.name.alias('pname')) .join(ColAlias) .order_by(ColAlias.name)) with self.assertQueryCount(1): self.assertEqual([r.colalias.pname for r in query], ['huey', 'mickey']) # Note: we cannot alias the join to "ca", as this is the object-id # descriptor name. query = (CARef .select(CARef, ColAlias.name.alias('pname')) .join(ColAlias, on=(CARef.colalias == ColAlias.id).alias('ca')) .order_by(ColAlias.name)) with self.assertQueryCount(1): self.assertEqual([r.ca.pname for r in query], ['huey', 'mickey']) def test_cannot_alias_join_to_object_id_name(self): query = CARef.select(CARef, ColAlias.name.alias('pname')) expr = (CARef.colalias == ColAlias.id).alias('colalias_id') self.assertRaises(ValueError, query.join, ColAlias, on=expr) class TestOverrideModelRepr(BaseTestCase): def test_custom_reprs(self): # In 3.5.0, Peewee included a new implementation and semantics for # customizing model reprs. This introduced a regression where model # classes that defined a __repr__() method had this override ignored # silently. This test ensures that it is possible to completely # override the model repr. class Foo(Model): def __repr__(self): return 'FOO: %s' % self.id f = Foo(id=1337) self.assertEqual(repr(f), 'FOO: 1337') class DiA(TestModel): a = TextField(unique=True) class DiB(TestModel): a = ForeignKeyField(DiA) b = TextField() class DiC(TestModel): b = ForeignKeyField(DiB) c = TextField() class DiD(TestModel): c = ForeignKeyField(DiC) d = TextField() class DiBA(TestModel): a = ForeignKeyField(DiA, to_field=DiA.a) b = TextField() class TestDeleteInstanceRegression(ModelTestCase): database = get_in_memory_db() requires = [DiA, DiB, DiC, DiD, DiBA] def test_delete_instance_regression(self): with self.database.atomic(): a1, a2, a3 = [DiA.create(a=a) for a in ('a1', 'a2', 'a3')] for a in (a1, a2, a3): for j in (1, 2): b = DiB.create(a=a, b='%s-b%s' % (a.a, j)) c = DiC.create(b=b, c='%s-c' % (b.b)) d = DiD.create(c=c, d='%s-d' % (c.c)) DiBA.create(a=a, b='%s-b%s' % (a.a, j)) # (a1 (b1 (c (d))), (b2 (c (d)))), (a2 ...), (a3 ...) with self.assertQueryCount(5): a2.delete_instance(recursive=True) self.assertHistory(5, [ ('DELETE FROM "di_d" WHERE ("di_d"."c_id" IN (' 'SELECT "t1"."id" FROM "di_c" AS "t1" WHERE ("t1"."b_id" IN (' 'SELECT "t2"."id" FROM "di_b" AS "t2" WHERE ("t2"."a_id" = ?)' '))))', [2]), ('DELETE FROM "di_c" WHERE ("di_c"."b_id" IN (' 'SELECT "t1"."id" FROM "di_b" AS "t1" WHERE ("t1"."a_id" = ?)' '))', [2]), ('DELETE FROM "di_ba" WHERE ("di_ba"."a_id" = ?)', ['a2']), ('DELETE FROM "di_b" WHERE ("di_b"."a_id" = ?)', [2]), ('DELETE FROM "di_a" WHERE ("di_a"."id" = ?)', [2]) ]) # a1 & a3 exist, plus their relations. self.assertEqual(DiA.select().count(), 2) for rel in (DiB, DiBA, DiC, DiD): self.assertEqual(rel.select().count(), 4) # 2x2 with self.assertQueryCount(5): a1.delete_instance(recursive=True) # Only the objects related to a3 exist still.
self.assertEqual(DiA.select().count(), 1) self.assertEqual(DiA.get(DiA.a == 'a3').id, a3.id) self.assertEqual([d.d for d in DiD.select().order_by(DiD.d)], ['a3-b1-c-d', 'a3-b2-c-d']) self.assertEqual([c.c for c in DiC.select().order_by(DiC.c)], ['a3-b1-c', 'a3-b2-c']) self.assertEqual([b.b for b in DiB.select().order_by(DiB.b)], ['a3-b1', 'a3-b2']) self.assertEqual([ba.b for ba in DiBA.select().order_by(DiBA.b)], ['a3-b1', 'a3-b2']) class TestCountUnionRegression(ModelTestCase): @requires_mysql @requires_models(User) def test_count_union(self): with self.database.atomic(): for i in range(5): User.create(username='user-%d' % i) lhs = User.select() rhs = User.select() query = (lhs | rhs) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" ' 'UNION ' 'SELECT "t2"."id", "t2"."username" FROM "users" AS "t2"'), []) self.assertEqual(query.count(), 5) query = query.limit(3) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" ' 'UNION ' 'SELECT "t2"."id", "t2"."username" FROM "users" AS "t2" ' 'LIMIT ?'), [3]) self.assertEqual(query.count(), 3) class User2(TestModel): username = TextField() class Category2(TestModel): name = TextField() parent = ForeignKeyField('self', backref='children', null=True) user = ForeignKeyField(User2) class TestGithub1354(ModelTestCase): @requires_models(Category2, User2) def test_get_or_create_self_referential_fk2(self): huey = User2.create(username='huey') parent = Category2.create(name='parent', user=huey) child, created = Category2.get_or_create(parent=parent, name='child', user=huey) child_db = Category2.get(Category2.parent == parent) self.assertEqual(child_db.user.username, 'huey') self.assertEqual(child_db.parent.name, 'parent') self.assertEqual(child_db.name, 'child') class TestInsertFromSQL(ModelTestCase): def setUp(self): super(TestInsertFromSQL, self).setUp() self.database.execute_sql('create table if not exists user_src ' '(name TEXT);') tbl = Table('user_src').bind(self.database) tbl.insert(name='foo').execute() def tearDown(self): super(TestInsertFromSQL, self).tearDown() self.database.execute_sql('drop table if exists user_src') @requires_models(User) def test_insert_from_sql(self): query_src = SQL('SELECT name FROM user_src') User.insert_from(query=query_src, fields=[User.username]).execute() self.assertEqual([u.username for u in User.select()], ['foo']) class TestSubqueryFunctionCall(BaseTestCase): def test_subquery_function_call(self): Sample = Table('sample') SA = Sample.alias('s2') query = (Sample .select(Sample.c.data) .where(~fn.EXISTS( SA.select(SQL('1')).where(SA.c.key == 'foo')))) self.assertSQL(query, ( 'SELECT "t1"."data" FROM "sample" AS "t1" ' 'WHERE NOT EXISTS(' 'SELECT 1 FROM "sample" AS "s2" WHERE ("s2"."key" = ?))'), ['foo']) class A(TestModel): id = IntegerField(primary_key=True) class B(TestModel): id = IntegerField(primary_key=True) class C(TestModel): id = IntegerField(primary_key=True) a = ForeignKeyField(A) b = ForeignKeyField(B) class TestCrossJoin(ModelTestCase): requires = [A, B, C] def setUp(self): super(TestCrossJoin, self).setUp() A.insert_many([(1,), (2,), (3,)], fields=[A.id]).execute() B.insert_many([(1,), (2,)], fields=[B.id]).execute() C.insert_many([ (1, 1, 1), (2, 1, 2), (3, 2, 1)], fields=[C.id, C.a, C.b]).execute() def test_cross_join(self): query = (A .select(A.id.alias('aid'), B.id.alias('bid')) .join(B, JOIN.CROSS) .join(C, JOIN.LEFT_OUTER, on=( (C.a == A.id) & (C.b == B.id))) .where(C.id.is_null()) .order_by(A.id, B.id)) self.assertEqual(list(query.tuples()), 
[(2, 2), (3, 1), (3, 2)]) def _create_users_tweets(db): data = ( ('huey', ('meow', 'hiss', 'purr')), ('mickey', ('woof', 'bark')), ('zaizee', ())) with db.atomic(): for username, tweets in data: user = User.create(username=username) for tweet in tweets: Tweet.create(user=user, content=tweet) class TestSubqueryInSelect(ModelTestCase): requires = [User, Tweet] def setUp(self): super(TestSubqueryInSelect, self).setUp() _create_users_tweets(self.database) def test_subquery_in_select(self): subq = User.select().where(User.username == 'huey') query = (Tweet .select(Tweet.content, Tweet.user.in_(subq).alias('is_huey')) .order_by(Tweet.content)) self.assertEqual([(r.content, r.is_huey) for r in query], [ ('bark', False), ('hiss', True), ('meow', True), ('purr', True), ('woof', False)]) @requires_postgresql class TestReturningIntegrationRegressions(ModelTestCase): requires = [User, Tweet] def test_returning_integration_subqueries(self): _create_users_tweets(self.database) # We can use a correlated subquery in the RETURNING clause. subq = (Tweet .select(fn.COUNT(Tweet.id).alias('ct')) .where(Tweet.user == User.id)) query = (User .update(username=(User.username + '-x')) .returning(subq.alias('ct'), User.username)) result = query.execute() self.assertEqual(sorted([(r.ct, r.username) for r in result]), [ (0, 'zaizee-x'), (2, 'mickey-x'), (3, 'huey-x')]) # We can use a correlated subquery via UPDATE...FROM, and reference the # FROM table in both the update and the RETURNING clause. subq = (User .select(User.id, fn.COUNT(Tweet.id).alias('ct')) .join(Tweet, JOIN.LEFT_OUTER) .group_by(User.id)) query = (User .update(username=User.username + subq.c.ct) .from_(subq) .where(User.id == subq.c.id) .returning(subq.c.ct, User.username)) result = query.execute() self.assertEqual(sorted([(r.ct, r.username) for r in result]), [ (0, 'zaizee-x0'), (2, 'mickey-x2'), (3, 'huey-x3')]) def test_returning_integration(self): query = (User .insert_many([('huey',), ('mickey',), ('zaizee',)], fields=[User.username]) .returning(User.id, User.username) .objects()) result = query.execute() self.assertEqual([(r.id, r.username) for r in result], [ (1, 'huey'), (2, 'mickey'), (3, 'zaizee')]) query = (User .delete() .where(~User.username.startswith('h')) .returning(User.id, User.username) .objects()) result = query.execute() self.assertEqual(sorted([(r.id, r.username) for r in result]), [ (2, 'mickey'), (3, 'zaizee')]) class TestUpdateIntegrationRegressions(ModelTestCase): requires = [User, Tweet, Sample] def setUp(self): super(TestUpdateIntegrationRegressions, self).setUp() _create_users_tweets(self.database) for i in range(4): Sample.create(counter=i, value=i) @skip_if(IS_MYSQL) def test_update_examples(self): # Do a simple update. res = (User .update(username=(User.username + '-cat')) .where(User.username != 'mickey') .execute()) users = User.select().order_by(User.username) self.assertEqual([u.username for u in users.clone()], ['huey-cat', 'mickey', 'zaizee-cat']) # Do an update using a subquery.. subq = User.select(User.username).where(User.username == 'mickey') res = (User .update(username=(User.username + '-dog')) .where(User.username.in_(subq)) .execute()) self.assertEqual([u.username for u in users.clone()], ['huey-cat', 'mickey-dog', 'zaizee-cat']) # Subquery referring to a different table. 
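        # The filter table (User) differs from the UPDATE target (Tweet), so
        # the match goes through the FK column. A rough sketch of the SQL
        # shape (assuming the subquery reduces to the referenced "id"):
        #
        #   UPDATE "tweet" SET "content" = ("content" || ?)
        #   WHERE ("user_id" IN (SELECT "id" FROM "users" WHERE ...));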
subq = User.select().where(User.username == 'mickey-dog') res = (Tweet .update(content=(Tweet.content + '-x')) .where(Tweet.user.in_(subq)) .execute()) self.assertEqual( [t.content for t in Tweet.select().order_by(Tweet.id)], ['meow', 'hiss', 'purr', 'woof-x', 'bark-x']) # Subquery on the right-hand of the assignment. subq = (Tweet .select(fn.COUNT(Tweet.id).cast('text')) .where(Tweet.user == User.id)) res = User.update(username=(User.username + '-' + subq)).execute() self.assertEqual([u.username for u in users.clone()], ['huey-cat-3', 'mickey-dog-2', 'zaizee-cat-0']) def test_update_examples_2(self): SA = Sample.alias() subq = (SA .select(SA.value) .where(SA.value.in_([1.0, 3.0]))) res = (Sample .update(counter=(Sample.counter + Sample.value.cast('int'))) .where(Sample.value.in_(subq)) .execute()) query = (Sample .select(Sample.counter, Sample.value) .order_by(Sample.id) .tuples()) self.assertEqual(list(query.clone()), [(0, 0.), (2, 1.), (2, 2.), (6, 3.)]) subq = (SA .select(SA.counter - SA.value.cast('int')) .where(SA.value == Sample.value)) res = (Sample .update(counter=subq) .where(Sample.value.in_([1., 3.])) .execute()) self.assertEqual(list(query.clone()), [(0, 0.), (1, 1.), (2, 2.), (3, 3.)]) class TestSelectValueConversion(ModelTestCase): requires = [User] @skip_if(IS_SQLITE_OLD or IS_MYSQL) def test_select_value_conversion(self): u1 = User.create(username='u1') cte = User.select(User.id.cast('text')).cte('tmp', columns=('id',)) query = User.select(cte.c.id.alias('id')).with_cte(cte).from_(cte) u1_id, = [user.id for user in query] self.assertEqual(u1_id, u1.id) query2 = User.select(cte.c.id.coerce(False)).with_cte(cte).from_(cte) u1_id, = [user.id for user in query2] self.assertEqual(u1_id, str(u1.id)) class ConflictDetectedException(Exception): pass class BaseVersionedModel(TestModel): version = IntegerField(default=1, index=True) def save_optimistic(self): if not self.id: # This is a new record, so the default logic is to perform an # INSERT. Ideally your model would also have a unique # constraint that made it impossible for two INSERTs to happen # at the same time. return self.save() # Update any data that has changed and bump the version counter. field_data = dict(self.__data__) current_version = field_data.pop('version', 1) self._populate_unsaved_relations(field_data) field_data = self._prune_fields(field_data, self.dirty_fields) if not field_data: raise ValueError('No changes have been made.') ModelClass = type(self) field_data['version'] = ModelClass.version + 1 # Atomic increment. query = ModelClass.update(**field_data).where( (ModelClass.version == current_version) & (ModelClass.id == self.id)) if query.execute() == 0: # No rows were updated, indicating another process has saved # a new version. How you handle this situation is up to you, # but for simplicity I'm just raising an exception. raise ConflictDetectedException() else: # Increment local version to match what is now in the db. self.version += 1 return True class VUser(BaseVersionedModel): username = TextField() class VTweet(BaseVersionedModel): user = ForeignKeyField(VUser, null=True) content = TextField() class TestOptimisticLockingDemo(ModelTestCase): requires = [VUser, VTweet] def test_optimistic_locking(self): vu = VUser(username='u1') vu.save_optimistic() vt = VTweet(user=vu, content='t1') vt.save_optimistic() # Update the "vt" row in the db, which bumps the version counter. 
vt2 = VTweet.get(VTweet.id == vt.id) vt2.content = 't1-x' vt2.save_optimistic() # Since no data was modified, this returns a ValueError. self.assertRaises(ValueError, vt.save_optimistic) # If we do make an update and attempt to save, a conflict is detected. vt.content = 't1-y' self.assertRaises(ConflictDetectedException, vt.save_optimistic) self.assertEqual(vt.version, 1) vt_db = VTweet.get(VTweet.id == vt.id) self.assertEqual(vt_db.content, 't1-x') self.assertEqual(vt_db.version, 2) self.assertEqual(vt_db.user.username, 'u1') def test_optimistic_locking_populate_fks(self): vt = VTweet(content='t1') vt.save_optimistic() vu = VUser(username='u1') vt.user = vu vu.save_optimistic() vt.save_optimistic() vt_db = VTweet.get(VTweet.content == 't1') self.assertEqual(vt_db.version, 2) self.assertEqual(vt_db.user.username, 'u1') class TS(TestModel): key = CharField(primary_key=True) timestamp = TimestampField(utc=True) class TestZeroTimestamp(ModelTestCase): requires = [TS] def test_zero_timestamp(self): t0 = TS.create(key='t0', timestamp=0) t1 = TS.create(key='t1', timestamp=1) t0_db = TS.get(TS.key == 't0') self.assertEqual(t0_db.timestamp, datetime.datetime(1970, 1, 1)) t1_db = TS.get(TS.key == 't1') self.assertEqual(t1_db.timestamp, datetime.datetime(1970, 1, 1, 0, 0, 1)) class Player(TestModel): name = TextField() class Game(TestModel): name = TextField() player = ForeignKeyField(Player) class Score(TestModel): game = ForeignKeyField(Game) points = IntegerField() class TestJoinSubqueryAggregateViaLeftOuter(ModelTestCase): requires = [Player, Game, Score] def test_join_subquery_aggregate_left_outer(self): with self.database.atomic(): p1, p2 = [Player.create(name=name) for name in ('p1', 'p2')] games = [] for p in (p1, p2): for gnum in (1, 2): g = Game.create(name='%s-g%s' % (p.name, gnum), player=p) games.append(g) score_list = ( (10, 20, 30), (), (100, 110, 100), (50, 50)) for g, plist in zip(games, score_list): for p in plist: Score.create(game=g, points=p) subq = (Game .select(Game.player, fn.SUM(Score.points).alias('ptotal'), fn.AVG(Score.points).alias('pavg')) .join(Score, JOIN.LEFT_OUTER) .group_by(Game.player)) query = (Player .select(Player, subq.c.ptotal, subq.c.pavg) .join(subq, on=(Player.id == subq.c.player_id)) .order_by(Player.name)) with self.assertQueryCount(1): results = [(p.name, p.game.ptotal, p.game.pavg) for p in query] self.assertEqual(results, [('p1', 60, 20), ('p2', 410, 82)]) with self.assertQueryCount(1): obj_query = query.objects() results = [(p.name, p.ptotal, p.pavg) for p in obj_query] self.assertEqual(results, [('p1', 60, 20), ('p2', 410, 82)]) class Project(TestModel): name = TextField() class Task(TestModel): name = TextField() project = ForeignKeyField(Project, backref='tasks') alt = ForeignKeyField(Project, backref='alt_tasks') class TestModelGraphMultiFK(ModelTestCase): requires = [Project, Task] def test_model_graph_multi_fk(self): pa, pb, pc = [Project.create(name=name) for name in 'abc'] t1 = Task.create(name='t1', project=pa, alt=pc) t2 = Task.create(name='t2', project=pb, alt=pb) P1 = Project.alias('p1') P2 = Project.alias('p2') LO = JOIN.LEFT_OUTER # Query using join expression. q1 = (Task .select(Task, P1, P2) .join_from(Task, P1, LO, on=(Task.project == P1.id)) .join_from(Task, P2, LO, on=(Task.alt == P2.id)) .order_by(Task.name)) # Query specifying target field. q2 = (Task .select(Task, P1, P2) .join_from(Task, P1, LO, on=Task.project) .join_from(Task, P2, LO, on=Task.alt) .order_by(Task.name)) # Query specifying with missing target field. 
q3 = (Task .select(Task, P1, P2) .join_from(Task, P1, LO) .join_from(Task, P2, LO, on=Task.alt) .order_by(Task.name)) for query in (q1, q2, q3): with self.assertQueryCount(1): t1, t2 = list(query) self.assertEqual(t1.project.name, 'a') self.assertEqual(t1.alt.name, 'c') self.assertEqual(t2.project.name, 'b') self.assertEqual(t2.alt.name, 'b') class TestBlobFieldContextRegression(BaseTestCase): def test_blob_field_context_regression(self): class A(Model): f = BlobField() orig = A.f._constructor db = get_in_memory_db() with db.bind_ctx([A]): self.assertTrue(A.f._constructor is db.get_binary_type()) self.assertTrue(A.f._constructor is orig) class Product(TestModel): id = CharField() color = CharField() class Meta: primary_key = CompositeKey('id', 'color') class Sku(TestModel): upc = CharField(primary_key=True) product_id = CharField() color = CharField() class Meta: constraints = [SQL('FOREIGN KEY (product_id, color) REFERENCES ' 'product(id, color)')] @hybrid_property def product(self): if not hasattr(self, '_product'): self._product = Product.get((Product.id == self.product_id) & (Product.color == self.color)) return self._product @product.setter def product(self, obj): self._product = obj self.product_id = obj.id self.color = obj.color @product.expression def product(cls): return (Product.id == cls.product_id) & (Product.color == cls.color) class TestFKCompositePK(ModelTestCase): requires = [Product, Sku] def test_fk_composite_pk_regression(self): Product.insert_many([ (1, 'red'), (1, 'blue'), (2, 'red'), (2, 'green'), (3, 'white')]).execute() Sku.insert_many([ ('1-red', 1, 'red'), ('1-blue', 1, 'blue'), ('2-red', 2, 'red'), ('2-green', 2, 'green'), ('3-white', 3, 'white')]).execute() query = (Product .select(Product, Sku) .join(Sku, on=Sku.product) .where(Product.color == 'red') .order_by(Product.id, Product.color)) with self.assertQueryCount(1): rows = [(p.id, p.color, p.sku.upc) for p in query] self.assertEqual(rows, [ ('1', 'red', '1-red'), ('2', 'red', '2-red')]) query = (Sku .select(Sku, Product) .join(Product, on=Sku.product) .where(Product.color != 'red') .order_by(Sku.upc)) with self.assertQueryCount(1): rows = [(s.upc, s.product_id, s.color, s.product.id, s.product.color) for s in query] self.assertEqual(rows, [ ('1-blue', '1', 'blue', '1', 'blue'), ('2-green', '2', 'green', '2', 'green'), ('3-white', '3', 'white', '3', 'white')]) class RS(TestModel): name = TextField() class RD(TestModel): key = TextField() value = IntegerField() rs = ForeignKeyField(RS, backref='rds') class RKV(TestModel): key = CharField(max_length=10) value = IntegerField() extra = IntegerField() class Meta: primary_key = CompositeKey('key', 'value') class TestRegressionCountDistinct(ModelTestCase): @requires_models(RS, RD) def test_regression_count_distinct(self): rs = RS.create(name='rs') nums = [0, 1, 2, 3, 2, 1, 0] RD.insert_many([('k%s' % i, i, rs) for i in nums]).execute() query = RD.select(RD.key).distinct() self.assertEqual(query.count(), 4) # Try re-selecting using the id/key, which are all distinct. query = query.select(RD.id, RD.key) self.assertEqual(query.count(), 7) # Re-select the key/value, of which there are 4 distinct. query = query.select(RD.key, RD.value) self.assertEqual(query.count(), 4) query = rs.rds.select(RD.key).distinct() self.assertEqual(query.count(), 4) query = rs.rds.select(RD.key, RD.value).distinct() self.assertEqual(query.count(), 4) # Was returning 7! 
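# A minimal sketch of the behavior the regression above exercises, assuming
# peewee's documented Query.count() semantics: count() wraps the query in a
# subselect (roughly SELECT COUNT(1) FROM (...) AS "_wrapped"), so a DISTINCT
# on the selected columns must survive the wrapping. Illustration only, using
# the models defined above:
#
#     q = rs.rds.select(RD.key, RD.value).distinct()
#     n = q.count()  # ~ SELECT COUNT(1) FROM (SELECT DISTINCT "key", "value" ...) AS "_wrapped"
#
# The buggy behavior counted the raw backref rows (7) rather than the
# distinct key/value pairs (4).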
@requires_models(RKV) def test_regression_count_distinct_cpk(self): RKV.insert_many([('k%s' % i, i, i) for i in range(5)]).execute() self.assertEqual(RKV.select().distinct().count(), 5) class TestReselectModelRegression(ModelTestCase): requires = [User] def test_reselect_model_regression(self): u1, u2, u3 = [User.create(username='u%s' % i) for i in '123'] query = User.select(User.username).order_by(User.username.desc()) self.assertEqual(list(query.tuples()), [('u3',), ('u2',), ('u1',)]) query = query.select(User) self.assertEqual(list(query.tuples()), [ (u3.id, 'u3',), (u2.id, 'u2',), (u1.id, 'u1',)]) class TestJoinCorrelatedSubquery(ModelTestCase): requires = [User, Tweet] def test_join_correlated_subquery(self): for i in range(3): user = User.create(username='u%s' % i) for j in range(i + 1): Tweet.create(user=user, content='u%s-%s' % (i, j)) UA = User.alias() subq = (UA .select(UA.username) .where(UA.username.in_(('u0', 'u2')))) query = (Tweet .select(Tweet, User) .join(User, on=( (Tweet.user == User.id) & (User.username.in_(subq)))) .order_by(Tweet.id)) with self.assertQueryCount(1): data = [(t.content, t.user.username) for t in query] self.assertEqual(data, [ ('u0-0', 'u0'), ('u2-0', 'u2'), ('u2-1', 'u2'), ('u2-2', 'u2')]) class RU(TestModel): username = TextField() class Recipe(TestModel): name = TextField() created_by = ForeignKeyField(RU, backref='recipes') changed_by = ForeignKeyField(RU, backref='recipes_modified') class TestMultiFKJoinRegression(ModelTestCase): requires = [RU, Recipe] def test_multi_fk_join_regression(self): u1, u2 = [RU.create(username=u) for u in ('u1', 'u2')] for (n, a, m) in (('r11', u1, u1), ('r12', u1, u2), ('r21', u2, u1)): Recipe.create(name=n, created_by=a, changed_by=m) Change = RU.alias() query = (Recipe .select(Recipe, RU, Change) .join(RU, on=(RU.id == Recipe.created_by).alias('a')) .switch(Recipe) .join(Change, on=(Change.id == Recipe.changed_by).alias('b')) .order_by(Recipe.name)) with self.assertQueryCount(1): data = [(r.name, r.a.username, r.b.username) for r in query] self.assertEqual(data, [ ('r11', 'u1', 'u1'), ('r12', 'u1', 'u2'), ('r21', 'u2', 'u1')]) class TestCompoundExistsRegression(ModelTestCase): requires = [User] def test_compound_regressions_1961(self): UA = User.alias() cq = (User.select(User.id) | UA.select(UA.id)) # Calling .exists() fails with AttributeError, no attribute "columns". 
self.assertFalse(cq.exists()) self.assertEqual(cq.count(), 0) User.create(username='u1') self.assertTrue(cq.exists()) self.assertEqual(cq.count(), 1) class TestViewFieldMapping(ModelTestCase): requires = [User] def tearDown(self): try: self.execute('drop view user_testview_fm') except Exception as exc: pass super(TestViewFieldMapping, self).tearDown() def test_view_field_mapping(self): user = User.create(username='huey') self.execute('create view user_testview_fm as ' 'select id, username from users') class View(User): class Meta: table_name = 'user_testview_fm' self.assertEqual([(v.id, v.username) for v in View.select()], [(user.id, 'huey')]) class TC(TestModel): ifield = IntegerField() ffield = FloatField() cfield = TextField() tfield = TextField() class TestTypeCoercion(ModelTestCase): requires = [TC] def test_type_coercion(self): t = TC.create(ifield='10', ffield='20.5', cfield=30, tfield=40) t_db = TC.get(TC.id == t.id) self.assertEqual(t_db.ifield, 10) self.assertEqual(t_db.ffield, 20.5) self.assertEqual(t_db.cfield, '30') self.assertEqual(t_db.tfield, '40') class TestLikeColumnValue(ModelTestCase): requires = [User, Tweet] def test_like_column_value(self): # e.g., find all tweets that contain the users own username. u1, u2, u3 = [User.create(username='u%s' % i) for i in (1, 2, 3)] data = ( (u1, ('nada', 'i am u1', 'u1 is my name')), (u2, ('nothing', 'he is u1')), (u3, ('she is u2', 'hey u3 is me', 'xx'))) for user, tweets in data: Tweet.insert_many([(user, tweet) for tweet in tweets], fields=[Tweet.user, Tweet.content]).execute() expressions = ( (Tweet.content ** ('%' + User.username + '%')), Tweet.content.contains(User.username)) for expr in expressions: query = (Tweet .select(Tweet, User) .join(User) .where(expr) .order_by(Tweet.id)) self.assertEqual([(t.user.username, t.content) for t in query], [ ('u1', 'i am u1'), ('u1', 'u1 is my name'), ('u3', 'hey u3 is me')]) class TestUnionParenthesesRegression(ModelTestCase): requires = [User] def test_union_parentheses_regression(self): ua, ub, uc = [User.create(username=u) for u in 'abc'] lhs = User.select(User.id).where(User.username == 'a') rhs = User.select(User.id).where(User.username == 'c') union = lhs.union_all(rhs) self.assertEqual(sorted([u.id for u in union]), [ua.id, uc.id]) query = User.select().where(User.id.in_(union)).order_by(User.id) self.assertEqual([u.username for u in query], ['a', 'c']) class NoPK(TestModel): data = IntegerField() class Meta: primary_key = False class TestNoPKHashRegression(ModelTestCase): requires = [NoPK] def test_no_pk_hash_regression(self): npk = NoPK.create(data=1) npk_db = NoPK.get(NoPK.data == 1) # When a model does not define a primary key, we cannot test equality. self.assertTrue(npk != npk_db) # Their hash is the same, though they are not equal. 
self.assertEqual(hash(npk), hash(npk_db)) class Site(TestModel): url = TextField() class Page(TestModel): site = ForeignKeyField(Site, backref='pages') title = TextField() class PageItem(TestModel): page = ForeignKeyField(Page, backref='items') content = TextField() class TestModelFilterJoinOrdering(ModelTestCase): requires = [Site, Page, PageItem] def setUp(self): super(TestModelFilterJoinOrdering, self).setUp() with self.database.atomic(): s1, s2 = [Site.create(url=s) for s in ('s1', 's2')] p11, p12, p21 = [Page.create(site=s, title=t) for s, t in ((s1, 'p1-1'), (s1, 'p1-2'), (s2, 'p2-1'))] items = ( (p11, 's1p1i1'), (p11, 's1p1i2'), (p11, 's1p1i3'), (p12, 's1p2i1'), (p21, 's2p1i1')) PageItem.insert_many(items).execute() def test_model_filter_join_ordering(self): q = PageItem.filter(page__site__url='s1').order_by(PageItem.content) self.assertSQL(q, ( 'SELECT "t1"."id", "t1"."page_id", "t1"."content" ' 'FROM "page_item" AS "t1" ' 'INNER JOIN "page" AS "t2" ON ("t1"."page_id" = "t2"."id") ' 'INNER JOIN "site" AS "t3" ON ("t2"."site_id" = "t3"."id") ' 'WHERE ("t3"."url" = ?) ORDER BY "t1"."content"'), ['s1']) def assertQ(q): with self.assertQueryCount(1): self.assertEqual([pi.content for pi in q], ['s1p1i1', 's1p1i2', 's1p1i3', 's1p2i1']) assertQ(q) sid = Site.get(Site.url == 's1').id q = (PageItem .filter(page__site__url='s1', page__site__id=sid) .order_by(PageItem.content)) assertQ(q) q = (PageItem .filter(page__site__id=sid) .filter(page__site__url='s1') .order_by(PageItem.content)) assertQ(q) q = (PageItem .filter(page__site__id=sid) .filter(DQ(page__title='p1-1') | DQ(page__title='p1-2')) .filter(page__site__url='s1') .order_by(PageItem.content)) assertQ(q) class JsonField(TextField): def db_value(self, value): return json.dumps(value) if value is not None else None def python_value(self, value): if value is not None: return json.loads(value) class JM(TestModel): key = TextField() data = JsonField() class TestListValueConversion(ModelTestCase): requires = [JM] def test_list_value_conversion(self): jm = JM.create(key='k1', data=['i0', 'i1']) jm.key = 'k1-x' jm.save() jm_db = JM.get(JM.key == 'k1-x') self.assertEqual(jm_db.data, ['i0', 'i1']) JM.update(data=['i1', 'i2']).execute() jm_db = JM.get(JM.key == 'k1-x') self.assertEqual(jm_db.data, ['i1', 'i2']) jm2 = JM.create(key='k2', data=['i3', 'i4']) jm_db.data = ['i1', 'i2', 'i3'] jm2.data = ['i4', 'i5'] JM.bulk_update([jm_db, jm2], fields=[JM.key, JM.data]) jm = JM.get(JM.key == 'k1-x') self.assertEqual(jm.data, ['i1', 'i2', 'i3']) jm2 = JM.get(JM.key == 'k2') self.assertEqual(jm2.data, ['i4', 'i5']) class TestCountSubqueryEquals(ModelTestCase): requires = [User, Tweet] def test_count_subquery_equals(self): a, b, c = [User.create(username=u) for u in 'abc'] Tweet.insert_many([(a, 'a1'), (b, 'b1')]).execute() subq = (Tweet .select(fn.COUNT(Tweet.id)) .where(Tweet.user == User.id)) query = User.select().where(subq == 0) self.assertEqual([u.username for u in query], ['c']) class BoolModel(TestModel): key = TextField() active = BooleanField() class TestBooleanCompare(ModelTestCase): requires = [BoolModel] def test_boolean_compare(self): b1 = BoolModel.create(key='b1', active=True) b2 = BoolModel.create(key='b2', active=False) expr2key = ( ((BoolModel.active == True), 'b1'), ((BoolModel.active == False), 'b2'), ((BoolModel.active != True), 'b2'), ((BoolModel.active != False), 'b1')) for expr, key in expr2key: q = BoolModel.select().where(expr) self.assertEqual([b.key for b in q], [key]) class CPK(TestModel): name = TextField() class 
CPKFK(TestModel): key = CharField() cpk = ForeignKeyField(CPK) class Meta: primary_key = CompositeKey('key', 'cpk') class TestCompositePKwithFK(ModelTestCase): requires = [CPK, CPKFK] def test_composite_pk_with_fk(self): c1 = CPK.create(name='c1') c2 = CPK.create(name='c2') CPKFK.create(key='k1', cpk=c1) CPKFK.create(key='k2', cpk=c1) CPKFK.create(key='k3', cpk=c2) query = (CPKFK .select(CPKFK.key, CPK) .join(CPK) .order_by(CPKFK.key, CPK.name)) with self.assertQueryCount(1): self.assertEqual([(r.key, r.cpk.name) for r in query], [('k1', 'c1'), ('k2', 'c1'), ('k3', 'c2')]) class TestChainWhere(ModelTestCase): requires = [User] def test_chain_where(self): for username in 'abcd': User.create(username=username) q = (User.select() .where(User.username != 'a') .where(User.username != 'd') .order_by(User.username)) self.assertEqual([u.username for u in q], ['b', 'c']) q = (User.select() .where(User.username != 'a') .where(User.username != 'd') .where(User.username == 'b')) self.assertEqual([u.username for u in q], ['b']) class BCUser(TestModel): username = CharField(unique=True) class BCTweet(TestModel): user = ForeignKeyField(BCUser, field=BCUser.username) content = TextField() class TestBulkCreateWithFK(ModelTestCase): @requires_models(BCUser, BCTweet) def test_bulk_create_with_fk(self): u1 = BCUser.create(username='u1') u2 = BCUser.create(username='u2') with self.assertQueryCount(1): BCTweet.bulk_create([ BCTweet(user='u1', content='t%s' % i) for i in range(4)]) self.assertEqual(BCTweet.select().where(BCTweet.user == 'u1').count(), 4) self.assertEqual(BCTweet.select().where(BCTweet.user != 'u1').count(), 0) u = BCUser(username='u3') t = BCTweet(user=u, content='tx') with self.assertQueryCount(2): BCUser.bulk_create([u]) BCTweet.bulk_create([t]) with self.assertQueryCount(1): t_db = (BCTweet .select(BCTweet, BCUser) .join(BCUser) .where(BCUser.username == 'u3') .get()) self.assertEqual(t_db.content, 'tx') self.assertEqual(t_db.user.username, 'u3') @requires_postgresql @requires_models(User, Tweet) def test_bulk_create_related_objects(self): u = User(username='u1') t = Tweet(user=u, content='t1') with self.assertQueryCount(2): User.bulk_create([u]) Tweet.bulk_create([t]) with self.assertQueryCount(1): t_db = Tweet.select(Tweet, User).join(User).get() self.assertEqual(t_db.content, 't1') self.assertEqual(t_db.user.username, 'u1') class UUIDReg(TestModel): id = UUIDField(primary_key=True, default=uuid.uuid4) key = TextField() class CharPKKV(TestModel): id = CharField(primary_key=True) key = TextField() value = IntegerField(default=0) class TestBulkUpdateNonIntegerPK(ModelTestCase): @skip_if(sys.version_info[0] == 2) @requires_models(UUIDReg) def test_bulk_update_uuid_pk(self): r1 = UUIDReg.create(key='k1') r2 = UUIDReg.create(key='k2') r1.key = 'k1-x' r2.key = 'k2-x' UUIDReg.bulk_update((r1, r2), (UUIDReg.key,)) r1_db, r2_db = UUIDReg.select().order_by(UUIDReg.key) self.assertEqual(r1_db.key, 'k1-x') self.assertEqual(r2_db.key, 'k2-x') @requires_models(CharPKKV) def test_bulk_update_non_integer_pk(self): a, b, c = [CharPKKV.create(id=c, key='k%s' % c) for c in 'abc'] a.key = 'ka-x' a.value = 1 b.value = 2 c.key = 'kc-x' c.value = 3 CharPKKV.bulk_update((a, b, c), (CharPKKV.key, CharPKKV.value)) data = list(CharPKKV.select().order_by(CharPKKV.id).tuples()) self.assertEqual(data, [ ('a', 'ka-x', 1), ('b', 'kb', 2), ('c', 'kc-x', 3)]) class TestSaveClearingPK(ModelTestCase): requires = [User, Tweet] def test_save_clear_pk(self): u = User.create(username='u1') t1 = Tweet.create(content='t1', user=u) 
orig_id, t1.id = t1.id, None t1.content = 't2' t1.save() self.assertTrue(t1.id is not None) self.assertTrue(t1.id != orig_id) tweets = [t.content for t in u.tweets.order_by(Tweet.id)] self.assertEqual(tweets, ['t1', 't2']) class Bits(TestModel): b1 = BitField(default=1) b1_1 = b1.flag(1) b1_2 = b1.flag(2) b2 = BitField(default=0) b2_1 = b2.flag() b2_2 = b2.flag() class TestBitFieldName(ModelTestCase): requires = [Bits] def assertBits(self, bf, expected): b1_1, b1_2, b2_1, b2_2 = expected self.assertEqual(bf.b1_1, b1_1) self.assertEqual(bf.b1_2, b1_2) self.assertEqual(bf.b2_1, b2_1) self.assertEqual(bf.b2_2, b2_2) def test_bit_field_name(self): bf = Bits.create() self.assertBits(bf, (True, False, False, False)) bf.b1_1 = False bf.b1_2 = True bf.b2_1 = True bf.save() self.assertBits(bf, (False, True, True, False)) bf = Bits.get(Bits.id == bf.id) self.assertBits(bf, (False, True, True, False)) self.assertEqual(bf.b1, 2) self.assertEqual(bf.b2, 1) self.assertEqual(Bits.select().where(Bits.b1_2).count(), 1) self.assertEqual(Bits.select().where(Bits.b2_2).count(), 0) class FKMA(TestModel): name = TextField() class FKMB(TestModel): name = TextField() fkma = ForeignKeyField(FKMA, backref='fkmb_set', null=True) class TestFKMigrationRegression(ModelTestCase): requires = [FKMA, FKMB] def test_fk_migration(self): migrator = SchemaMigrator.from_database(self.database) kw = {'legacy': True} if IS_SQLITE else {} migrate(migrator.drop_column( FKMB._meta.table_name, FKMB.fkma.column_name, **kw)) migrate(migrator.add_column( FKMB._meta.table_name, FKMB.fkma.column_name, FKMB.fkma)) fa = FKMA.create(name='fa') FKMB.create(name='fb', fkma=fa) obj = FKMB.select().first() self.assertEqual(obj.name, 'fb') class ModelTypeField(CharField): def db_value(self, value): if value is not None: return value._meta.name def python_value(self, value): if value is not None: return {'user': User, 'tweet': Tweet}[value] class MTF(TestModel): name = TextField() mtype = ModelTypeField() class TestFieldValueRegression(ModelTestCase): requires = [MTF] def test_field_value_regression(self): u = MTF.create(name='user', mtype=User) u_db = MTF.get() self.assertEqual(u_db.name, 'user') self.assertTrue(u_db.mtype is User) class NLM(TestModel): a = IntegerField() b = IntegerField() class TestRegressionNodeListClone(ModelTestCase): requires = [NLM] def test_node_list_clone_expr(self): expr = (NLM.a + NLM.b) query = NLM.select(expr.alias('expr')).order_by(expr).distinct(expr) self.assertSQL(query, ( 'SELECT DISTINCT ON ("t1"."a" + "t1"."b") ' '("t1"."a" + "t1"."b") AS "expr" ' 'FROM "nlm" AS "t1" ' 'ORDER BY ("t1"."a" + "t1"."b")'), []) class LK(TestModel): key = TextField() class TestLikeEscape(ModelTestCase): requires = [LK] def assertNames(self, expr, expected): query = LK.select().where(expr).order_by(LK.id) self.assertEqual([lk.key for lk in query], expected) def test_like_escape(self): names = ('foo', 'foo%', 'foo%bar', 'foo_bar', 'fooxba', 'fooba') LK.insert_many([(n,) for n in names]).execute() cases = ( (LK.key.contains('bar'), ['foo%bar', 'foo_bar']), (LK.key.contains('%'), ['foo%', 'foo%bar']), (LK.key.contains('_'), ['foo_bar']), (LK.key.contains('o%b'), ['foo%bar']), (LK.key.startswith('foo%'), ['foo%', 'foo%bar']), (LK.key.startswith('foo_'), ['foo_bar']), (LK.key.startswith('bar'), []), (LK.key.endswith('ba'), ['fooxba', 'fooba']), (LK.key.endswith('_bar'), ['foo_bar']), (LK.key.endswith('fo'), []), ) for expr, expected in cases: self.assertNames(expr, expected) def test_like_escape_backslash(self): names = ('foo_bar\\baz', 
'bar\\', 'fbar\\baz', 'foo_bar') LK.insert_many([(n,) for n in names]).execute() cases = ( (LK.key.contains('\\'), ['foo_bar\\baz', 'bar\\', 'fbar\\baz']), (LK.key.contains('_bar\\'), ['foo_bar\\baz']), (LK.key.contains('bar\\'), ['foo_bar\\baz', 'bar\\', 'fbar\\baz']), ) for expr, expected in cases: self.assertNames(expr, expected) class FKF_A(TestModel): key = CharField(max_length=16, unique=True) class FKF_B(TestModel): fk_a_1 = ForeignKeyField(FKF_A, field='key') fk_a_2 = IntegerField() class TestQueryWithModelInstanceParam(ModelTestCase): requires = [FKF_A, FKF_B] def test_query_with_model_instance_param(self): a1 = FKF_A.create(key='k1') a2 = FKF_A.create(key='k2') b1 = FKF_B.create(fk_a_1=a1, fk_a_2=a1) b2 = FKF_B.create(fk_a_1=a2, fk_a_2=a2) # Ensure that UPDATE works as expected as well. b1.save() # See also keys.TestFKtoNonPKField test, which replicates much of this. args = (b1.fk_a_1, b1.fk_a_1_id, a1, a1.key) for arg in args: query = FKF_B.select().where(FKF_B.fk_a_1 == arg) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."fk_a_1_id", "t1"."fk_a_2" ' 'FROM "fkf_b" AS "t1" ' 'WHERE ("t1"."fk_a_1_id" = ?)'), ['k1']) b1_db = query.get() self.assertEqual(b1_db.id, b1.id) # When we are handed a model instance and a conversion (an IntegerField # in this case), when the attempted conversion fails we fall back to # using the given model's primary-key. args = (b1.fk_a_2, a1, a1.id) for arg in args: query = FKF_B.select().where(FKF_B.fk_a_2 == arg) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."fk_a_1_id", "t1"."fk_a_2" ' 'FROM "fkf_b" AS "t1" ' 'WHERE ("t1"."fk_a_2" = ?)'), [a1.id]) b1_db = query.get() self.assertEqual(b1_db.id, b1.id) @skip_if(IS_SQLITE_OLD or IS_MYSQL) class TestModelSelectFromSubquery(ModelTestCase): requires = [User] def test_model_select_from_subquery(self): for i in range(5): User.create(username='u%s' % i) UA = User.alias() subquery = (UA.select() .where(UA.username.in_(('u0', 'u2', 'u4')))) cte = (ValuesList([('u0',), ('u4',)], columns=['username']) .cte('user_cte', columns=['username'])) query = (User .select(subquery.c.id, subquery.c.username) .from_(subquery) .join(cte, on=(subquery.c.username == cte.c.username)) .with_cte(cte) .order_by(subquery.c.username.desc())) self.assertEqual([u.username for u in query], ['u4', 'u0']) self.assertTrue(isinstance(query[0], User)) class CharPK(TestModel): id = CharField(primary_key=True) name = CharField(unique=True) def __str__(self): return self.name class CharFK(TestModel): id = IntegerField(primary_key=True) cpk = ForeignKeyField(CharPK, field=CharPK.name) class TestModelConversionRegression(ModelTestCase): requires = [CharPK, CharFK] def test_model_conversion_regression(self): cpks = [CharPK.create(id=str(i), name='u%s' % i) for i in range(3)] query = CharPK.select().where(CharPK.id << cpks) self.assertEqual(sorted([c.id for c in query]), ['0', '1', '2']) query = CharPK.select().where(CharPK.id.in_(list(CharPK.select()))) self.assertEqual(sorted([c.id for c in query]), ['0', '1', '2']) def test_model_conversion_fk_retained(self): cpks = [CharPK.create(id=str(i), name='u%s' % i) for i in range(3)] cfks = [CharFK.create(id=i + 1, cpk='u%s' % i) for i in range(3)] c0, c1, c2 = cpks query = CharFK.select().where(CharFK.cpk << [c0, c2]) self.assertEqual(sorted([f.id for f in query]), [1, 3]) class FKN_A(TestModel): pass class FKN_B(TestModel): a = ForeignKeyField(FKN_A, null=True) class TestSetFKNull(ModelTestCase): requires = [FKN_A, FKN_B] def test_set_fk_null(self): a1 = FKN_A.create() a2 = FKN_A() b1 = FKN_B(a=a1) 
b2 = FKN_B(a=a2) self.assertTrue(b1.a is a1) self.assertTrue(b2.a is a2) b1.a = b2.a = None self.assertTrue(b1.a is None) self.assertTrue(b2.a is None) class TestWeirdAliases(ModelTestCase): requires = [User] @skip_if(IS_MYSQL) # mysql can't do anything normally. def test_weird_aliases(self): User.create(username='huey') def assertAlias(s, expected): query = User.select(s).dicts() row = query[0] self.assertEqual(list(row)[0], expected) # When we explicitly provide an alias, use that. assertAlias(User.username.alias('"username"'), '"username"') assertAlias(User.username.alias('(username)'), '(username)') assertAlias(User.username.alias('user(name)'), 'user(name)') assertAlias(User.username.alias('(username"'), '(username"') assertAlias(User.username.alias('"username)'), '"username)') assertAlias(fn.LOWER(User.username).alias('user (name)'), 'user (name)') # Here peewee cannot tell that an alias was given, so it will attempt # to clean-up the column name returned by the cursor description. assertAlias(SQL('"t1"."username" AS "user name"'), 'user name') assertAlias(SQL('"t1"."username" AS "user (name)"'), 'user (name') assertAlias(SQL('"t1"."username" AS "(username)"'), 'username') assertAlias(SQL('"t1"."username" AS "x.y.(username)"'), 'username') if IS_SQLITE: assertAlias(SQL('LOWER("t1"."username")'), 'username') class NDF(TestModel): key = CharField(primary_key=True) date = DateTimeField(null=True) class TestBulkUpdateAllNull(ModelTestCase): requires = [NDF] @skip_unless(IS_SQLITE or IS_MYSQL, 'postgres cannot do this properly') def test_bulk_update_all_null(self): n1 = NDF.create(key='n1', date=datetime.datetime(2021, 1, 1)) n2 = NDF.create(key='n2', date=datetime.datetime(2021, 1, 2)) rows = [NDF(key=key, date=None) for key in ('n1', 'n2')] NDF.bulk_update(rows, fields=['date']) query = NDF.select().order_by(NDF.key).tuples() self.assertEqual([r for r in query], [('n1', None), ('n2', None)]) class CQA(TestModel): a = TextField() b = TextField() class TestSelectFromUnion(ModelTestCase): requires = [CQA] def test_select_from_union(self): CQA.insert_many([('a%d' % i, 'b%d' % i) for i in range(10)]).execute() q1 = CQA.select(CQA.a).order_by(CQA.id).limit(3) q2 = CQA.select(CQA.b).order_by(CQA.id).limit(3) wq1 = q1.select_from(SQL('*')) wq2 = q2.select_from(SQL('*')) union = wq1 | wq2 data = [val for val, in union.tuples()] self.assertEqual(sorted(data), ['a0', 'a1', 'a2', 'b0', 'b1', 'b2']) class DF(TestModel): name = TextField() value = IntegerField() class DFC(TestModel): df = ForeignKeyField(DF) name = TextField() value = IntegerField() class DFGC(TestModel): dfc = ForeignKeyField(DFC) name = TextField() value = IntegerField() class TestDjangoFilterRegression(ModelTestCase): requires = [DF, DFC, DFGC] def test_django_filter_regression(self): a, b, c = [DF.create(name=n, value=i) for i, n in enumerate('abc')] ca1 = DFC.create(df=a, name='a1', value=11) ca2 = DFC.create(df=a, name='a2', value=12) cb1 = DFC.create(df=b, name='b1', value=21) gca1_1 = DFGC.create(dfc=ca1, name='a1-1', value=101) gca1_2 = DFGC.create(dfc=ca1, name='a1-2', value=101) gca2_1 = DFGC.create(dfc=ca2, name='a2-1', value=111) def assertNames(q, expected): self.assertEqual(sorted([n.name for n in q]), expected) assertNames(DF.filter(name='a'), ['a']) assertNames(DF.filter(name='a', id=a.id), ['a']) assertNames(DF.filter(name__in=['a', 'c']), ['a', 'c']) assertNames(DF.filter(name__in=['a', 'c'], id=a.id), ['a']) assertNames(DF.filter(dfc_set__name='a1'), ['a']) assertNames(DF.filter(dfc_set__name__in=['a1', 'b1']), 
['a', 'b']) assertNames(DF.filter(DQ(dfc_set__name='a1') | DQ(dfc_set__name='b1')), ['a', 'b']) assertNames(DF.filter(dfc_set__dfgc_set__name='a1-1'), ['a']) assertNames(DF.filter( DQ(dfc_set__dfgc_set__name='a1-1') | DQ(dfc_set__dfgc_set__name__in=['x', 'y'])), ['a']) assertNames(DFC.filter(df__name='a'), ['a1', 'a2']) assertNames(DFC.filter(df__name='a', value=11), ['a1']) assertNames(DFC.filter(DQ(df__name='a') | DQ(df__name='b')), ['a1', 'a2', 'b1']) assertNames(DFC.filter( DQ(df__name='a') | DQ(dfgc_set__name='a1-1')).distinct(), ['a1', 'a2']) assertNames(DFGC.filter(dfc__df__name='a'), ['a1-1', 'a1-2', 'a2-1']) assertNames(DFGC.filter(dfc__df__name='a', dfc__name='a2'), ['a2-1']) assertNames(DFGC.filter( DQ(dfc__df__value__lte=0) | DQ(dfc__df__name='a', dfc__name='a1') | DQ(dfc__name='a2')), ['a1-1', 'a1-2', 'a2-1']) assertNames( (DFGC.filter(DQ(dfc__df__value__lte=10) | DQ(dfc__value__lte=101)) .filter(DQ(name__ilike='a1%') | DQ(dfc__value=101))), ['a1-1', 'a1-2']) assertNames(DFGC.filter(dfc__df=a), ['a1-1', 'a1-2', 'a2-1']) assertNames(DFGC.filter(dfc__df=a.id), ['a1-1', 'a1-2', 'a2-1']) q = DFC.select().join(DF) assertNames(q.filter(df=a), ['a1', 'a2']) assertNames(q.filter(df__name='a'), ['a1', 'a2']) DFA = DF.alias() DFCA = DFC.alias() DFGCA = DFGC.alias() q = DFCA.select().join(DFA) assertNames(q.filter(df=a), ['a1', 'a2']) assertNames(q.filter(df__name='a'), ['a1', 'a2']) q = DFGC.select().join(DFC).join(DF) assertNames(q.filter(dfc__df=a), ['a1-1', 'a1-2', 'a2-1']) q = DFGCA.select().join(DFCA).join(DFA) assertNames(q.filter(dfc__df=a), ['a1-1', 'a1-2', 'a2-1']) q = DF.select().join(DFC).join(DFGC) assertNames(q.filter(dfc_set__dfgc_set__name='a1-1'), ['a']) class TestFunctionInfiniteLoop(BaseTestCase): def test_function_infinite_loop(self): self.assertRaises(TypeError, lambda: list(fn.COUNT())) class State(TestModel): name = TextField() class Transition(TestModel): src = ForeignKeyField(State, backref='sources') dest = ForeignKeyField(State, backref='dests') class TestJoinTypePrefetchMultipleFKs(ModelTestCase): requires = [State, Transition] def test_join_prefetch_multiple_fks(self): s1, s2a, s2b, s3 = [State.create(name=s) for s in ('s1', 's2a', 's2b', 's3')] t1 = Transition.create(src=s1, dest=s2a) t2 = Transition.create(src=s1, dest=s2b) t3 = Transition.create(src=s2a, dest=s3) t4 = Transition.create(src=s2b, dest=s3) query = State.select().where(State.name != 's3').order_by(State.name) transitions = (Transition .select(Transition, State) .join(State, on=Transition.dest) .order_by(Transition.id)) with self.assertQueryCount(2): p = prefetch(query, transitions, prefetch_type=PREFETCH_TYPE.JOIN) accum = [] for row in p: accum.append((row.name, row.sources, row.dests, [d.dest.name for d in row.sources], [d.src.name for d in row.dests])) self.assertEqual(accum, [ ('s1', [t1, t2], [], ['s2a', 's2b'], []), ('s2a', [t3], [t1], ['s3'], ['s1']), ('s2b', [t4], [t2], ['s3'], ['s1'])]) @slow_test() class TestThreadSafetyDecorators(ModelTestCase): requires = [User] def test_thread_safety_atomic(self): @self.database.atomic() def get_one(n): time.sleep(n) return User.select().first() def run(n): with self.database.atomic(): get_one(n) User.create(username='u') threads = [threading.Thread(target=run, args=(i,)) for i in (0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.5)] for t in threads: t.start() for t in threads: t.join() class TestQueryCountList(ModelTestCase): requires = [User] def test_iteration_single_query(self): with self.assertQueryCount(1): list(User.select()) with 
self.assertQueryCount(1): self.assertEqual(User.select().count(), 0) class TestSumCaseSubquery(ModelTestCase): requires = [Sample] def test_sum_case_subquery(self): Sample.insert_many([(i, i) for i in range(5)]).execute() subq = Sample.select().where(Sample.counter.in_([1, 3, 5])) case = Case(None, [(Sample.id.in_(subq), Sample.value)], 0) q = Sample.select(fn.SUM(case)) self.assertEqual(q.scalar(), 4.0) class I(TestModel): name = TextField() class S(TestModel): i = ForeignKeyField(I) class P(TestModel): i = ForeignKeyField(I) class PS(TestModel): p = ForeignKeyField(P) s = ForeignKeyField(S) class PP(TestModel): ps = ForeignKeyField(PS) class O(TestModel): ps = ForeignKeyField(PS) s = ForeignKeyField(S) class OX(TestModel): o = ForeignKeyField(O, null=True) class TestDeleteInstanceDFS(ModelTestCase): requires = [I, S, P, PS, PP, O, OX] def test_delete_instance_dfs(self): i1, i2 = [I.create(name=n) for n in ('i1', 'i2')] for i in (i1, i2): s = S.create(i=i) p = P.create(i=i) ps = PS.create(p=p, s=s) pp = PP.create(ps=ps) o = O.create(ps=ps, s=s) ox = OX.create(o=o) with self.assertQueryCount(9): i1.delete_instance(recursive=True) self.assertHistory(9, [ ('DELETE FROM "pp" WHERE (' '"pp"."ps_id" IN (SELECT "t1"."id" FROM "ps" AS "t1" WHERE (' '"t1"."p_id" IN (SELECT "t2"."id" FROM "p" AS "t2" WHERE (' '"t2"."i_id" = ?)))))', [i1.id]), ('UPDATE "ox" SET "o_id" = ? WHERE (' '"ox"."o_id" IN (SELECT "t1"."id" FROM "o" AS "t1" WHERE (' '"t1"."ps_id" IN (SELECT "t2"."id" FROM "ps" AS "t2" WHERE (' '"t2"."p_id" IN (SELECT "t3"."id" FROM "p" AS "t3" WHERE (' '"t3"."i_id" = ?)))))))', [None, i1.id]), ('DELETE FROM "o" WHERE (' '"o"."ps_id" IN (SELECT "t1"."id" FROM "ps" AS "t1" WHERE (' '"t1"."p_id" IN (SELECT "t2"."id" FROM "p" AS "t2" WHERE (' '"t2"."i_id" = ?)))))', [i1.id]), ('DELETE FROM "o" WHERE (' '"o"."s_id" IN (SELECT "t1"."id" FROM "s" AS "t1" WHERE (' '"t1"."i_id" = ?)))', [i1.id]), ('DELETE FROM "ps" WHERE (' '"ps"."p_id" IN (SELECT "t1"."id" FROM "p" AS "t1" WHERE (' '"t1"."i_id" = ?)))', [i1.id]), ('DELETE FROM "ps" WHERE (' '"ps"."s_id" IN (SELECT "t1"."id" FROM "s" AS "t1" WHERE (' '"t1"."i_id" = ?)))', [i1.id]), ('DELETE FROM "s" WHERE ("s"."i_id" = ?)', [i1.id]), ('DELETE FROM "p" WHERE ("p"."i_id" = ?)', [i1.id]), ('DELETE FROM "i" WHERE ("i"."id" = ?)', [i1.id]), ]) counts = {OX: 2} for m in self.requires: self.assertEqual(m.select().count(), counts.get(m, 1)) peewee-3.17.7/tests/results.py000066400000000000000000000150471470346076600163240ustar00rootroot00000000000000import datetime from peewee import * from .base import get_in_memory_db from .base import ModelTestCase from .base_models import * def lange(x, y=None): if y is None: value = range(x) else: value = range(x, y) return list(value) class TestCursorWrapper(ModelTestCase): database = get_in_memory_db() requires = [User] def test_iteration(self): for i in range(10): User.create(username=str(i)) query = User.select() cursor = query.execute() first_five = [] for i, u in enumerate(cursor): first_five.append(int(u.username)) if i == 4: break self.assertEqual(first_five, lange(5)) names = lambda i: [int(obj.username) for obj in i] self.assertEqual(names(query[5:]), lange(5, 10)) self.assertEqual(names(query[2:5]), lange(2, 5)) for i in range(2): self.assertEqual(names(cursor), lange(10)) def test_count(self): for i in range(5): User.create(username=str(i)) with self.assertQueryCount(1): query = User.select() self.assertEqual(len(query), 5) cursor = query.execute() self.assertEqual(len(cursor), 5) with 
self.assertQueryCount(1): query = query.where(User.username != '0') cursor = query.execute() self.assertEqual(len(cursor), 4) self.assertEqual(len(query), 4) def test_nested_iteration(self): for i in range(4): User.create(username=str(i)) with self.assertQueryCount(1): query = User.select().order_by(User.username) outer = [] inner = [] for o_user in query: outer.append(int(o_user.username)) for i_user in query: inner.append(int(i_user.username)) self.assertEqual(outer, lange(4)) self.assertEqual(inner, lange(4) * 4) def test_iterator_protocol(self): for i in range(3): User.create(username=str(i)) with self.assertQueryCount(1): query = User.select().order_by(User.id) cursor = query.execute() for _ in range(2): for user in cursor: pass it = iter(cursor) for obj in it: pass self.assertRaises(StopIteration, next, it) self.assertEqual([int(u.username) for u in cursor], lange(3)) self.assertEqual(query[0].username, '0') self.assertEqual(query[2].username, '2') self.assertRaises(StopIteration, next, it) def test_iterator(self): for i in range(3): User.create(username=str(i)) with self.assertQueryCount(1): cursor = User.select().order_by(User.id).execute() usernames = [int(u.username) for u in cursor.iterator()] self.assertEqual(usernames, lange(3)) self.assertTrue(cursor.populated) self.assertEqual(cursor.row_cache, []) with self.assertQueryCount(0): self.assertEqual(list(cursor), []) def test_query_iterator(self): for i in range(3): User.create(username=str(i)) with self.assertQueryCount(1): query = User.select().order_by(User.id) usernames = [int(u.username) for u in query.iterator()] self.assertEqual(usernames, lange(3)) with self.assertQueryCount(0): self.assertEqual(list(query), []) def test_row_cache(self): def assertCache(cursor, n): self.assertEqual([int(u.username) for u in cursor.row_cache], lange(n)) for i in range(10): User.create(username=str(i)) with self.assertQueryCount(1): cursor = User.select().order_by(User.id).execute() cursor.fill_cache(5) self.assertFalse(cursor.populated) assertCache(cursor, 5) cursor.fill_cache(5) assertCache(cursor, 5) cursor.fill_cache(6) assertCache(cursor, 6) self.assertFalse(cursor.populated) cursor.fill_cache(11) self.assertTrue(cursor.populated) assertCache(cursor, 10) class TestModelObjectCursorWrapper(ModelTestCase): database = get_in_memory_db() requires = [User, Tweet] def test_model_objects(self): huey = User.create(username='huey') mickey = User.create(username='mickey') for user, tweet in ((huey, 'meow'), (huey, 'purr'), (mickey, 'woof')): Tweet.create(user=user, content=tweet) query = (Tweet .select(Tweet, User.username) .join(User) .order_by(Tweet.id) .objects()) with self.assertQueryCount(1): self.assertEqual([(t.username, t.content) for t in query], [ ('huey', 'meow'), ('huey', 'purr'), ('mickey', 'woof')]) def test_dict_flattening(self): u = User.create(username='u1') for i in range(3): Tweet.create(user=u, content='t%d' % (i + 1)) query = (Tweet .select(Tweet, User) .join(User) .order_by(Tweet.id) .dicts()) with self.assertQueryCount(1): results = [(r['id'], r['content'], r['username']) for r in query] self.assertEqual(results, [ (1, 't1', 'u1'), (2, 't2', 'u1'), (3, 't3', 'u1')]) class Reg(TestModel): key = TextField() ts = DateTimeField() class TestSpecifyConverter(ModelTestCase): requires = [Reg] def test_specify_converter(self): D = lambda d: datetime.datetime(2020, 1, d) for i in range(1, 4): Reg.create(key='k%s' % i, ts=D(i)) RA = Reg.alias() subq = RA.select(RA.key, RA.ts, RA.ts.alias('aliased')) ra_a = 
subq.c.aliased.alias('aliased') q = (Reg .select(Reg.key, subq.c.ts.alias('ts'), ra_a.converter(Reg.ts.python_value)) .join(subq, on=(Reg.key == subq.c.key).alias('rsub')) .order_by(Reg.key)) results = [(r.key, r.ts, r.aliased) for r in q.objects()] self.assertEqual(results, [ ('k1', D(1), D(1)), ('k2', D(2), D(2)), ('k3', D(3), D(3))]) results2 = [(r.key, r.rsub.ts, r.rsub.aliased) for r in q] self.assertEqual(results2, [ ('k1', D(1), D(1)), ('k2', D(2), D(2)), ('k3', D(3), D(3))]) peewee-3.17.7/tests/returning.py000066400000000000000000000070351470346076600166360ustar00rootroot00000000000000import unittest from peewee import * from peewee import __sqlite_version__ from .base import db from .base import skip_unless from .base import IS_SQLITE from .base import ModelTestCase from .base import TestModel class Reg(TestModel): k = CharField() v = IntegerField() x = IntegerField() class Meta: indexes = ( (('k', 'v'), True), ) returning_support = db.returning_clause or (IS_SQLITE and __sqlite_version__ >= (3, 35, 0)) @skip_unless(returning_support, 'database does not support RETURNING') class TestReturningIntegration(ModelTestCase): requires = [Reg] def test_crud(self): iq = Reg.insert_many([('k1', 1, 0), ('k2', 2, 0)]).returning(Reg) self.assertEqual([(r.id is not None, r.k, r.v) for r in iq.execute()], [(True, 'k1', 1), (True, 'k2', 2)]) iq = (Reg .insert_many([('k1', 1, 1), ('k2', 2, 1), ('k3', 3, 0)]) .on_conflict( conflict_target=[Reg.k, Reg.v], preserve=[Reg.x], update={Reg.v: Reg.v + 1}, where=(Reg.k != 'k1')) .returning(Reg)) ic = iq.execute() self.assertEqual([(r.id is not None, r.k, r.v, r.x) for r in ic], [ (True, 'k2', 3, 1), (True, 'k3', 3, 0)]) uq = (Reg .update({Reg.v: Reg.v - 1, Reg.x: Reg.x + 1}) .where(Reg.k != 'k1') .returning(Reg)) self.assertEqual([(r.k, r.v, r.x) for r in uq.execute()], [ ('k2', 2, 2), ('k3', 2, 1)]) dq = Reg.delete().where(Reg.k != 'k1').returning(Reg) self.assertEqual([(r.k, r.v, r.x) for r in dq.execute()], [ ('k2', 2, 2), ('k3', 2, 1)]) def test_returning_expression(self): Rs = (Reg.v + Reg.x).alias('s') iq = (Reg .insert_many([('k1', 1, 10), ('k2', 2, 20)]) .returning(Reg.k, Reg.v, Rs)) self.assertEqual([(r.k, r.v, r.s) for r in iq.execute()], [ ('k1', 1, 11), ('k2', 2, 22)]) uq = (Reg .update({Reg.k: Reg.k + 'x', Reg.v: Reg.v + 1}) .returning(Reg.k, Reg.v, Rs)) self.assertEqual([(r.k, r.v, r.s) for r in uq.execute()], [ ('k1x', 2, 12), ('k2x', 3, 23)]) dq = Reg.delete().returning(Reg.k, Reg.v, Rs) self.assertEqual([(r.k, r.v, r.s) for r in dq.execute()], [ ('k1x', 2, 12), ('k2x', 3, 23)]) def test_returning_types(self): Rs = (Reg.v + Reg.x).alias('s') mapping = ( ((lambda q: q), (lambda r: (r.k, r.v, r.s))), ((lambda q: q.dicts()), (lambda r: (r['k'], r['v'], r['s']))), ((lambda q: q.tuples()), (lambda r: r)), ((lambda q: q.namedtuples()), (lambda r: (r.k, r.v, r.s)))) for qconv, r2t in mapping: iq = (Reg .insert_many([('k1', 1, 10), ('k2', 2, 20)]) .returning(Reg.k, Reg.v, Rs)) self.assertEqual([r2t(r) for r in qconv(iq).execute()], [ ('k1', 1, 11), ('k2', 2, 22)]) uq = (Reg .update({Reg.k: Reg.k + 'x', Reg.v: Reg.v + 1}) .returning(Reg.k, Reg.v, Rs)) self.assertEqual([r2t(r) for r in qconv(uq).execute()], [ ('k1x', 2, 12), ('k2x', 3, 23)]) dq = Reg.delete().returning(Reg.k, Reg.v, Rs) self.assertEqual([r2t(r) for r in qconv(dq).execute()], [ ('k1x', 2, 12), ('k2x', 3, 23)]) peewee-3.17.7/tests/schema.py000066400000000000000000001010301470346076600160520ustar00rootroot00000000000000import datetime from peewee import * from peewee import NodeList from
.base import BaseTestCase from .base import get_in_memory_db from .base import IS_CRDB from .base import IS_SQLITE from .base import ModelDatabaseTestCase from .base import ModelTestCase from .base import TestModel from .base_models import Category from .base_models import Note from .base_models import Person from .base_models import Relationship from .base_models import User class TMUnique(TestModel): data = TextField(unique=True) class TMSequence(TestModel): value = IntegerField(sequence='test_seq') class TMIndexes(TestModel): alpha = IntegerField() beta = IntegerField() gamma = IntegerField() class Meta: indexes = ( (('alpha', 'beta'), True), (('beta', 'gamma'), False)) class TMConstraints(TestModel): data = IntegerField(null=True, constraints=[Check('data < 5')]) value = TextField(collation='NOCASE') class TMNamedConstraints(TestModel): fk = ForeignKeyField('self', null=True, constraint_name='tmc_fk') k = TextField() v = IntegerField(constraints=[Check('v in (1, 2)')]) class Meta: constraints = [Check('k != \'kx\'', name='chk_k')] class CacheData(TestModel): key = TextField(unique=True) value = TextField() class Meta: schema = 'cache' class Article(TestModel): name = TextField(unique=True) timestamp = TimestampField() status = IntegerField() flags = IntegerField() Article.add_index(Article.timestamp.desc(), Article.status) idx = (Article .index(Article.name, Article.timestamp, Article.flags.bin_and(4)) .where(Article.status == 1)) Article.add_index(idx) Article.add_index(SQL('CREATE INDEX "article_foo" ON "article" ("flags" & 3)')) class TestModelDDL(ModelDatabaseTestCase): database = get_in_memory_db() requires = [Article, CacheData, Category, Note, Person, Relationship, TMUnique, TMSequence, TMIndexes, TMConstraints, TMNamedConstraints, User] def test_database_required(self): class MissingDB(Model): data = TextField() self.assertRaises(ImproperlyConfigured, MissingDB.create_table) def assertCreateTable(self, model_class, expected): sql, params = model_class._schema._create_table(False).query() self.assertEqual(params, []) indexes = [] for create_index in model_class._schema._create_indexes(False): isql, params = create_index.query() self.assertEqual(params, []) indexes.append(isql) self.assertEqual([sql] + indexes, expected) def assertIndexes(self, model_class, expected): indexes = [] for create_index in model_class._schema._create_indexes(False): indexes.append(create_index.query()) self.assertEqual(indexes, expected) def test_model_fk_schema(self): class Base(TestModel): class Meta: database = self.database class User(Base): username = TextField() class Meta: schema = 'foo' class Tweet(Base): user = ForeignKeyField(User) content = TextField() class Meta: schema = 'bar' self.assertCreateTable(User, [ ('CREATE TABLE "foo"."user" ("id" INTEGER NOT NULL PRIMARY KEY, ' '"username" TEXT NOT NULL)')]) self.assertCreateTable(Tweet, [ ('CREATE TABLE "bar"."tweet" ("id" INTEGER NOT NULL PRIMARY KEY, ' '"user_id" INTEGER NOT NULL, "content" TEXT NOT NULL, ' 'FOREIGN KEY ("user_id") REFERENCES "foo"."user" ("id"))'), ('CREATE INDEX "bar"."tweet_user_id" ON "tweet" ("user_id")')]) def test_model_indexes_with_schema(self): # Attach cache database so we can reference "cache." as the schema. 
self.database.execute_sql("attach database ':memory:' as cache;") self.assertCreateTable(CacheData, [ ('CREATE TABLE "cache"."cache_data" (' '"id" INTEGER NOT NULL PRIMARY KEY, "key" TEXT NOT NULL, ' '"value" TEXT NOT NULL)'), ('CREATE UNIQUE INDEX "cache"."cache_data_key" ON "cache_data" ' '("key")')]) # Actually create the table to verify it works correctly. CacheData.create_table() # Introspect the database and get indexes for the "cache" schema. indexes = self.database.get_indexes('cache_data', 'cache') self.assertEqual(len(indexes), 1) index_metadata = indexes[0] self.assertEqual(index_metadata.name, 'cache_data_key') # Verify the index does not exist in the main schema. self.assertEqual(len(self.database.get_indexes('cache_data')), 0) class TestDatabase(Database): index_schema_prefix = False # When "index_schema_prefix == False", the index name is not prefixed # with the schema, and the schema is referenced via the table name. with CacheData.bind_ctx(TestDatabase(None)): self.assertCreateTable(CacheData, [ ('CREATE TABLE "cache"."cache_data" (' '"id" INTEGER NOT NULL PRIMARY KEY, "key" TEXT NOT NULL, ' '"value" TEXT NOT NULL)'), ('CREATE UNIQUE INDEX "cache_data_key" ON "cache"."cache_data"' ' ("key")')]) def test_model_indexes(self): self.assertIndexes(Article, [ ('CREATE UNIQUE INDEX "article_name" ON "article" ("name")', []), ('CREATE INDEX "article_timestamp_status" ON "article" (' '"timestamp" DESC, "status")', []), ('CREATE INDEX "article_name_timestamp" ON "article" (' '"name", "timestamp", ("flags" & 4)) ' 'WHERE ("status" = 1)', []), ('CREATE INDEX "article_foo" ON "article" ("flags" & 3)', []), ]) def test_model_index_types(self): class Event(TestModel): key = TextField() timestamp = TimestampField(index=True, index_type='BRIN') class Meta: database = self.database self.assertIndexes(Event, [ ('CREATE INDEX "event_timestamp" ON "event" ' 'USING BRIN ("timestamp")', [])]) # Check that we support MySQL-style USING clause. 
idx, = Event._meta.fields_to_index() self.assertSQL(idx, ( 'CREATE INDEX IF NOT EXISTS "event_timestamp" ' 'USING BRIN ON "event" ("timestamp")'), [], index_using_precedes_table=True) def test_model_indexes_custom_tablename(self): class KV(TestModel): key = TextField() value = TextField() timestamp = TimestampField(index=True) class Meta: database = self.database indexes = ( (('key', 'value'), True), ) table_name = 'kvs' self.assertIndexes(KV, [ ('CREATE INDEX "kvs_timestamp" ON "kvs" ("timestamp")', []), ('CREATE UNIQUE INDEX "kvs_key_value" ON "kvs" ("key", "value")', [])]) def test_model_indexes_computed_columns(self): class FuncIdx(TestModel): a = IntegerField() b = IntegerField() class Meta: database = self.database i = FuncIdx.index(FuncIdx.a, FuncIdx.b, fn.SUM(FuncIdx.a + FuncIdx.b)) FuncIdx.add_index(i) self.assertIndexes(FuncIdx, [ ('CREATE INDEX "func_idx_a_b" ON "func_idx" ' '("a", "b", SUM("a" + "b"))', []), ]) def test_model_indexes_complex_columns(self): class Taxonomy(TestModel): name = CharField() name_class = CharField() class Meta: database = self.database name = NodeList((fn.LOWER(Taxonomy.name), SQL('varchar_pattern_ops'))) index = (Taxonomy .index(name, Taxonomy.name_class) .where(Taxonomy.name_class == 'scientific name')) Taxonomy.add_index(index) self.assertIndexes(Taxonomy, [ ('CREATE INDEX "taxonomy_name_class" ON "taxonomy" (' 'LOWER("name") varchar_pattern_ops, "name_class") ' 'WHERE ("name_class" = ?)', ['scientific name']), ]) def test_legacy_model_table_and_indexes(self): class Base(Model): class Meta: database = self.database class WebHTTPRequest(Base): timestamp = DateTimeField(index=True) data = TextField() self.assertTrue(WebHTTPRequest._meta.legacy_table_names) self.assertCreateTable(WebHTTPRequest, [ ('CREATE TABLE "webhttprequest" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"timestamp" DATETIME NOT NULL, "data" TEXT NOT NULL)'), ('CREATE INDEX "webhttprequest_timestamp" ON "webhttprequest" ' '("timestamp")')]) # Table name is explicit, but legacy table names == false, so we get # the new index name format. class FooBar(Base): data = IntegerField(unique=True) class Meta: legacy_table_names = False table_name = 'foobar_tbl' self.assertFalse(FooBar._meta.legacy_table_names) self.assertCreateTable(FooBar, [ ('CREATE TABLE "foobar_tbl" ("id" INTEGER NOT NULL PRIMARY KEY, ' '"data" INTEGER NOT NULL)'), ('CREATE UNIQUE INDEX "foobar_tbl_data" ON "foobar_tbl" ("data")'), ]) # Table name is explicit and legacy table names == true, so we get # the old index name format. class FooBar2(Base): data = IntegerField(unique=True) class Meta: table_name = 'foobar2_tbl' self.assertTrue(FooBar2._meta.legacy_table_names) self.assertCreateTable(FooBar2, [ ('CREATE TABLE "foobar2_tbl" ("id" INTEGER NOT NULL PRIMARY KEY, ' '"data" INTEGER NOT NULL)'), ('CREATE UNIQUE INDEX "foobar2_data" ON "foobar2_tbl" ("data")')]) def test_without_pk(self): class NoPK(TestModel): data = TextField() class Meta: database = self.database primary_key = False self.assertCreateTable(NoPK, [ ('CREATE TABLE "no_pk" ("data" TEXT NOT NULL)')]) def test_without_rowid(self): class NoRowid(TestModel): key = TextField(primary_key=True) value = TextField() class Meta: database = self.database without_rowid = True self.assertCreateTable(NoRowid, [ ('CREATE TABLE "no_rowid" (' '"key" TEXT NOT NULL PRIMARY KEY, ' '"value" TEXT NOT NULL) WITHOUT ROWID')]) # Subclasses do not inherit "without_rowid" setting. 
class SubNoRowid(NoRowid): pass self.assertCreateTable(SubNoRowid, [ ('CREATE TABLE "sub_no_rowid" (' '"key" TEXT NOT NULL PRIMARY KEY, ' '"value" TEXT NOT NULL)')]) def test_strict_tables(self): class Strict(TestModel): key = TextField(primary_key=True) value = TextField() class Meta: database = self.database strict_tables = True self.assertCreateTable(Strict, [ ('CREATE TABLE "strict" (' '"key" TEXT NOT NULL PRIMARY KEY, ' '"value" TEXT NOT NULL) STRICT')]) # Subclasses *do* inherit "strict_tables" setting. class SubStrict(Strict): pass self.assertCreateTable(SubStrict, [ ('CREATE TABLE "sub_strict" (' '"key" TEXT NOT NULL PRIMARY KEY, ' '"value" TEXT NOT NULL) STRICT')]) def test_without_rowid_strict(self): class KV(TestModel): key = TextField(primary_key=True) class Meta: database = self.database strict_tables = True without_rowid = True self.assertCreateTable(KV, [ ('CREATE TABLE "kv" ("key" TEXT NOT NULL PRIMARY KEY) ' 'STRICT, WITHOUT ROWID')]) class SKV(KV): pass self.assertCreateTable(SKV, [ ('CREATE TABLE "skv" ("key" TEXT NOT NULL PRIMARY KEY) STRICT')]) def test_table_name(self): class A(TestModel): class Meta: database = self.database table_name = 'A_tbl' class B(TestModel): a = ForeignKeyField(A, backref='bs') class Meta: database = self.database table_name = 'B_tbl' self.assertCreateTable(A, [ 'CREATE TABLE "A_tbl" ("id" INTEGER NOT NULL PRIMARY KEY)']) self.assertCreateTable(B, [ ('CREATE TABLE "B_tbl" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"a_id" INTEGER NOT NULL, ' 'FOREIGN KEY ("a_id") REFERENCES "A_tbl" ("id"))'), 'CREATE INDEX "B_tbl_a_id" ON "B_tbl" ("a_id")']) def test_temporary_table(self): sql, params = User._schema._create_table(temporary=True).query() self.assertEqual(sql, ( 'CREATE TEMPORARY TABLE IF NOT EXISTS "users" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"username" VARCHAR(255) NOT NULL)')) def test_model_temporary_table(self): class TempUser(User): class Meta: temporary = True self.reset_sql_history() TempUser.create_table() TempUser.drop_table() queries = [x.msg for x in self.history] self.assertEqual(queries, [ ('CREATE TEMPORARY TABLE IF NOT EXISTS "temp_user" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"username" VARCHAR(255) NOT NULL)', []), ('DROP TABLE IF EXISTS "temp_user"', [])]) def test_drop_table(self): sql, params = User._schema._drop_table().query() self.assertEqual(sql, 'DROP TABLE IF EXISTS "users"') sql, params = User._schema._drop_table(cascade=True).query() self.assertEqual(sql, 'DROP TABLE IF EXISTS "users" CASCADE') sql, params = User._schema._drop_table(restrict=True).query() self.assertEqual(sql, 'DROP TABLE IF EXISTS "users" RESTRICT') def test_table_constraints(self): class UKV(TestModel): key = TextField() value = TextField() status = IntegerField() class Meta: constraints = [ SQL('CONSTRAINT ukv_kv_uniq UNIQUE (key, value)'), Check('status > 0')] database = self.database table_name = 'ukv' self.assertCreateTable(UKV, [ ('CREATE TABLE "ukv" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"key" TEXT NOT NULL, ' '"value" TEXT NOT NULL, ' '"status" INTEGER NOT NULL, ' 'CONSTRAINT ukv_kv_uniq UNIQUE (key, value), ' 'CHECK (status > 0))')]) def test_table_settings(self): class KVSettings(TestModel): key = TextField(primary_key=True) value = TextField() timestamp = TimestampField() class Meta: database = self.database table_settings = ('PARTITION BY RANGE (timestamp)', 'WITHOUT ROWID') self.assertCreateTable(KVSettings, [ ('CREATE TABLE "kv_settings" (' '"key" TEXT NOT NULL PRIMARY KEY, ' '"value" TEXT NOT NULL, ' '"timestamp" INTEGER NOT 
NULL) ' 'PARTITION BY RANGE (timestamp) ' 'WITHOUT ROWID')]) def test_table_options(self): class TOpts(TestModel): key = TextField() class Meta: database = self.database options = { 'CHECKSUM': 1, 'COMPRESSION': 'lz4'} self.assertCreateTable(TOpts, [ ('CREATE TABLE "t_opts" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"key" TEXT NOT NULL, ' 'CHECKSUM=1, COMPRESSION=lz4)')]) def test_table_and_index_creation(self): self.assertCreateTable(Person, [ ('CREATE TABLE "person" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"first" VARCHAR(255) NOT NULL, ' '"last" VARCHAR(255) NOT NULL, ' '"dob" DATE NOT NULL)'), 'CREATE INDEX "person_dob" ON "person" ("dob")', ('CREATE UNIQUE INDEX "person_first_last" ON ' '"person" ("first", "last")')]) self.assertCreateTable(Note, [ ('CREATE TABLE "note" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"author_id" INTEGER NOT NULL, ' '"content" TEXT NOT NULL, ' 'FOREIGN KEY ("author_id") REFERENCES "person" ("id"))'), 'CREATE INDEX "note_author_id" ON "note" ("author_id")']) self.assertCreateTable(Category, [ ('CREATE TABLE "category" (' '"name" VARCHAR(20) NOT NULL PRIMARY KEY, ' '"parent_id" VARCHAR(20), ' 'FOREIGN KEY ("parent_id") REFERENCES "category" ("name"))'), 'CREATE INDEX "category_parent_id" ON "category" ("parent_id")']) self.assertCreateTable(Relationship, [ ('CREATE TABLE "relationship" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"from_person_id" INTEGER NOT NULL, ' '"to_person_id" INTEGER NOT NULL, ' 'FOREIGN KEY ("from_person_id") REFERENCES "person" ("id"), ' 'FOREIGN KEY ("to_person_id") REFERENCES "person" ("id"))'), ('CREATE INDEX "relationship_from_person_id" ' 'ON "relationship" ("from_person_id")'), ('CREATE INDEX "relationship_to_person_id" ' 'ON "relationship" ("to_person_id")')]) self.assertCreateTable(TMUnique, [ ('CREATE TABLE "tm_unique" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"data" TEXT NOT NULL)'), 'CREATE UNIQUE INDEX "tm_unique_data" ON "tm_unique" ("data")']) self.assertCreateTable(TMSequence, [ ('CREATE TABLE "tm_sequence" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"value" INTEGER NOT NULL DEFAULT NEXTVAL(\'test_seq\'))')]) self.assertCreateTable(TMIndexes, [ ('CREATE TABLE "tm_indexes" ("id" INTEGER NOT NULL PRIMARY KEY, ' '"alpha" INTEGER NOT NULL, "beta" INTEGER NOT NULL, ' '"gamma" INTEGER NOT NULL)'), ('CREATE UNIQUE INDEX "tm_indexes_alpha_beta" ' 'ON "tm_indexes" ("alpha", "beta")'), ('CREATE INDEX "tm_indexes_beta_gamma" ' 'ON "tm_indexes" ("beta", "gamma")')]) self.assertCreateTable(TMConstraints, [ ('CREATE TABLE "tm_constraints" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"data" INTEGER CHECK (data < 5), ' '"value" TEXT NOT NULL COLLATE NOCASE)')]) self.assertCreateTable(TMNamedConstraints, [ ('CREATE TABLE "tm_named_constraints" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"fk_id" INTEGER, ' '"k" TEXT NOT NULL, ' '"v" INTEGER NOT NULL ' 'CHECK (v in (1, 2)), ' 'CONSTRAINT "tmc_fk" FOREIGN KEY ("fk_id") ' 'REFERENCES "tm_named_constraints" ("id"), ' 'CONSTRAINT "chk_k" CHECK (k != \'kx\'))'), ('CREATE INDEX "tm_named_constraints_fk_id" ' 'ON "tm_named_constraints" ("fk_id")')]) def test_index_name_truncation(self): class LongIndex(TestModel): a123456789012345678901234567890 = CharField() b123456789012345678901234567890 = CharField() c123456789012345678901234567890 = CharField() class Meta: database = self.database fields = LongIndex._meta.sorted_fields[1:] self.assertEqual(len(fields), 3) idx = ModelIndex(LongIndex, fields) ctx = LongIndex._schema._create_index(idx) self.assertSQL(ctx, ( 'CREATE INDEX IF NOT EXISTS "' 
'long_index_a123456789012345678901234567890_b123456789012_9dd2139' '" ON "long_index" (' '"a123456789012345678901234567890", ' '"b123456789012345678901234567890", ' '"c123456789012345678901234567890")'), []) def test_fk_non_pk_ddl(self): class A(Model): cf = CharField(max_length=100, unique=True) df = DecimalField( max_digits=4, decimal_places=2, auto_round=True, unique=True) class Meta: database = self.database class CF(TestModel): a = ForeignKeyField(A, field='cf') class Meta: database = self.database class DF(TestModel): a = ForeignKeyField(A, field='df') class Meta: database = self.database sql, params = CF._schema._create_table(safe=False).query() self.assertEqual(sql, ( 'CREATE TABLE "cf" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"a_id" VARCHAR(100) NOT NULL, ' 'FOREIGN KEY ("a_id") REFERENCES "a" ("cf"))')) sql, params = DF._schema._create_table(safe=False).query() self.assertEqual(sql, ( 'CREATE TABLE "df" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"a_id" DECIMAL(4, 2) NOT NULL, ' 'FOREIGN KEY ("a_id") REFERENCES "a" ("df"))')) def test_deferred_foreign_key(self): class Language(TestModel): name = CharField() selected_snippet = DeferredForeignKey('Snippet', null=True) class Meta: database = self.database class Snippet(TestModel): code = TextField() language = ForeignKeyField(Language, backref='snippets') class Meta: database = self.database self.assertEqual(Snippet._meta.fields['language'].rel_model, Language) self.assertEqual(Language._meta.fields['selected_snippet'].rel_model, Snippet) sql, params = Snippet._schema._create_table(safe=False).query() self.assertEqual(sql, ( 'CREATE TABLE "snippet" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"code" TEXT NOT NULL, ' '"language_id" INTEGER NOT NULL, ' 'FOREIGN KEY ("language_id") REFERENCES "language" ("id"))')) sql, params = Language._schema._create_table(safe=False).query() self.assertEqual(sql, ( 'CREATE TABLE "language" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"name" VARCHAR(255) NOT NULL, ' '"selected_snippet_id" INTEGER)')) sql, params = (Language ._schema ._create_foreign_key(Language.selected_snippet) .query()) self.assertEqual(sql, ( 'ALTER TABLE "language" ADD CONSTRAINT ' '"fk_language_selected_snippet_id_refs_snippet" ' 'FOREIGN KEY ("selected_snippet_id") REFERENCES "snippet" ("id")')) class SnippetComment(TestModel): snippet_long_foreign_key_identifier = ForeignKeyField(Snippet) comment = TextField() class Meta: database = self.database sql, params = SnippetComment._schema._create_table(safe=True).query() self.assertEqual(sql, ( 'CREATE TABLE IF NOT EXISTS "snippet_comment" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"snippet_long_foreign_key_identifier_id" INTEGER NOT NULL, ' '"comment" TEXT NOT NULL, ' 'FOREIGN KEY ("snippet_long_foreign_key_identifier_id") ' 'REFERENCES "snippet" ("id"))')) sql, params = (SnippetComment._schema ._create_foreign_key( SnippetComment.snippet_long_foreign_key_identifier) .query()) self.assertEqual(sql, ( 'ALTER TABLE "snippet_comment" ADD CONSTRAINT "' 'fk_snippet_comment_snippet_long_foreign_key_identifier_i_2a8b87d"' ' FOREIGN KEY ("snippet_long_foreign_key_identifier_id") ' 'REFERENCES "snippet" ("id")')) def test_deferred_foreign_key_inheritance(self): class Base(TestModel): class Meta: database = self.database class WithTimestamp(Base): timestamp = TimestampField() class Tweet(Base): user = DeferredForeignKey('DUser') content = TextField() class TimestampTweet(Tweet, WithTimestamp): pass class DUser(Base): username = TextField() sql, params = 
Tweet._schema._create_table(safe=False).query()
        self.assertEqual(sql, (
            'CREATE TABLE "tweet" ('
            '"id" INTEGER NOT NULL PRIMARY KEY, '
            '"content" TEXT NOT NULL, '
            '"user_id" INTEGER NOT NULL)'))

        sql, params = TimestampTweet._schema._create_table(safe=False).query()
        self.assertEqual(sql, (
            'CREATE TABLE "timestamp_tweet" ('
            '"id" INTEGER NOT NULL PRIMARY KEY, '
            '"timestamp" INTEGER NOT NULL, '
            '"content" TEXT NOT NULL, '
            '"user_id" INTEGER NOT NULL)'))

    def test_identity_field(self):
        class PG10Identity(TestModel):
            id = IdentityField()
            data = TextField()
            class Meta:
                database = self.database

        self.assertCreateTable(PG10Identity, [
            ('CREATE TABLE "pg10_identity" ('
             '"id" INT GENERATED BY DEFAULT AS IDENTITY NOT NULL PRIMARY KEY, '
             '"data" TEXT NOT NULL)'),
        ])

    def test_self_fk_inheritance(self):
        class BaseCategory(TestModel):
            parent = ForeignKeyField('self', backref='children')
            class Meta:
                database = self.database

        class CatA1(BaseCategory):
            name_a1 = TextField()

        class CatA2(CatA1):
            name_a2 = TextField()

        self.assertTrue(CatA1.parent.rel_model is CatA1)
        self.assertTrue(CatA2.parent.rel_model is CatA2)

        self.assertCreateTable(CatA1, [
            ('CREATE TABLE "cat_a1" ('
             '"id" INTEGER NOT NULL PRIMARY KEY, '
             '"parent_id" INTEGER NOT NULL, '
             '"name_a1" TEXT NOT NULL, '
             'FOREIGN KEY ("parent_id") REFERENCES "cat_a1" ("id"))'),
            ('CREATE INDEX "cat_a1_parent_id" ON "cat_a1" ("parent_id")')])
        self.assertCreateTable(CatA2, [
            ('CREATE TABLE "cat_a2" ('
             '"id" INTEGER NOT NULL PRIMARY KEY, '
             '"parent_id" INTEGER NOT NULL, '
             '"name_a1" TEXT NOT NULL, '
             '"name_a2" TEXT NOT NULL, '
             'FOREIGN KEY ("parent_id") REFERENCES "cat_a2" ("id"))'),
            ('CREATE INDEX "cat_a2_parent_id" ON "cat_a2" ("parent_id")')])


class NoteX(TestModel):
    content = TextField()
    timestamp = TimestampField()
    status = IntegerField()
    flags = IntegerField()


class TestCreateAs(ModelTestCase):
    requires = [NoteX]

    test_data = (
        # id, content, timestamp, status, flags.
        (1, 'n1', datetime.datetime(2019, 1, 1), 1, 1),
        (2, 'n2', datetime.datetime(2019, 1, 2), 2, 1),
        (3, 'n3', datetime.datetime(2019, 1, 3), 9, 1),
        (4, 'nx', datetime.datetime(2019, 1, 1), 9, 0))

    def setUp(self):
        super(TestCreateAs, self).setUp()
        fields = NoteX._meta.sorted_fields
        NoteX.insert_many(self.test_data, fields=fields).execute()

    def tearDown(self):
        class Note2(TestModel):
            class Meta:
                database = self.database
        self.database.drop_tables([Note2])
        super(TestCreateAs, self).tearDown()

    def test_create_as(self):
        status = Case(NoteX.status, (
            (1, 'published'),
            (2, 'draft'),
            (9, 'deleted')))
        query = (NoteX
                 .select(NoteX.id, NoteX.content, NoteX.timestamp,
                         status.alias('status'))
                 .where(NoteX.flags == SQL('1')))
        query.create_table('note2')

        class Note2(TestModel):
            id = IntegerField()
            content = TextField()
            timestamp = TimestampField()
            status = TextField()
            class Meta:
                database = self.database

        query = Note2.select().order_by(Note2.id)
        self.assertEqual(list(query.tuples()), [
            (1, 'n1', datetime.datetime(2019, 1, 1), 'published'),
            (2, 'n2', datetime.datetime(2019, 1, 2), 'draft'),
            (3, 'n3', datetime.datetime(2019, 1, 3), 'deleted')])


class TestModelSetTableName(BaseTestCase):
    def test_set_table_name(self):
        class Foo(TestModel):
            pass
        self.assertEqual(Foo._meta.table_name, 'foo')
        self.assertEqual(Foo._meta.table.__name__, 'foo')

        # Writing the attribute directly does not update the cached Table name.
        Foo._meta.table_name = 'foo2'
        self.assertEqual(Foo._meta.table.__name__, 'foo')

        # Use the helper-method.
Foo._meta.set_table_name('foo3') self.assertEqual(Foo._meta.table.__name__, 'foo3') class TestTruncateTable(ModelTestCase): requires = [User] def test_truncate_table(self): for i in range(3): User.create(username='u%s' % i) ctx = User._schema._truncate_table() if IS_SQLITE: self.assertSQL(ctx, 'DELETE FROM "users"', []) else: sql, _ = ctx.query() self.assertTrue(sql.startswith('TRUNCATE TABLE ')) User.truncate_table() self.assertEqual(User.select().count(), 0) class TestNamedConstraintsIntegration(ModelTestCase): requires = [TMNamedConstraints] def setUp(self): super(TestNamedConstraintsIntegration, self).setUp() if IS_SQLITE: self.database.pragma('foreign_keys', 'on') def test_named_constraints_integration(self): t = TMNamedConstraints.create(k='k1', v=1) # Sanity test. fails = [ {'fk': t.id - 1, 'k': 'k2', 'v': 1}, # Invalid fk. {'fk': t.id, 'k': 'k3', 'v': 0}, # Invalid val. {'fk': t.id, 'k': 'kx', 'v': 1}] # Invalid key. for f in fails: # MySQL may use OperationalError. with self.assertRaises((IntegrityError, OperationalError)): with self.database.atomic() as tx: TMNamedConstraints.create(**f) self.assertEqual(len(TMNamedConstraints), 1) class TMKV(TestModel): key = CharField() value = IntegerField() extra = IntegerField() class TMKVNew(TestModel): key = CharField() val = IntegerField() class Meta: primary_key = False table_name = 'tmkv_new' class TestCreateTableAsSQL(ModelDatabaseTestCase): database = get_in_memory_db() requires = [TMKV] def test_create_table_as_sql(self): query = (TMKV .select(TMKV.key, TMKV.value.alias('val')) .where(TMKV.extra < 4)) ctx = TMKV._schema._create_table_as('tmkv_new', query) self.assertSQL(ctx, ( 'CREATE TABLE IF NOT EXISTS "tmkv_new" AS ' 'SELECT "t1"."key", "t1"."value" AS "val" FROM "tmkv" AS "t1" ' 'WHERE ("t1"."extra" < ?)'), [4]) ctx = TMKV._schema._create_table_as(('alt', 'tmkv_new'), query) self.assertSQL(ctx, ( 'CREATE TABLE IF NOT EXISTS "alt"."tmkv_new" AS ' 'SELECT "t1"."key", "t1"."value" AS "val" FROM "tmkv" AS "t1" ' 'WHERE ("t1"."extra" < ?)'), [4]) class TestCreateTableAs(ModelTestCase): requires = [TMKV] def tearDown(self): try: TMKVNew.drop_table(safe=True) except: pass super(TestCreateTableAs, self).tearDown() def test_create_table_as(self): TMKV.insert_many([('k%02d' % i, i, i) for i in range(10)]).execute() query = (TMKV .select(TMKV.key, TMKV.value.alias('val')) .where(TMKV.extra < 4)) query.create_table('tmkv_new', safe=True) expected = ['key', 'val'] if IS_CRDB: expected.append('rowid') # CRDB adds this. 
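For reference, the CREATE TABLE ... AS pattern that test_create_table_as exercises can be reproduced in isolation roughly as follows. This is a minimal sketch, not part of the test suite; the in-memory SqliteDatabase and the Event model are hypothetical stand-ins:

from peewee import IntegerField, Model, SqliteDatabase, TextField

db = SqliteDatabase(':memory:')

class Event(Model):
    name = TextField()
    clicks = IntegerField()

    class Meta:
        database = db

db.create_tables([Event])
Event.insert_many([('a', 1), ('b', 5)],
                  fields=[Event.name, Event.clicks]).execute()

# Materialize the filtered projection as a new table, i.e. roughly:
# CREATE TABLE IF NOT EXISTS "event_big" AS SELECT ... WHERE ("clicks" > 1)
query = Event.select(Event.name, Event.clicks).where(Event.clicks > 1)
query.create_table('event_big', safe=True)

print(db.execute_sql('SELECT name, clicks FROM event_big').fetchall())
# Expected, under the assumptions above: [('b', 5)]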
self.assertEqual( [col.name for col in self.database.get_columns('tmkv_new')], expected) query = TMKVNew.select().order_by(TMKVNew.key) self.assertEqual([(r.key, r.val) for r in query], [('k00', 0), ('k01', 1), ('k02', 2), ('k03', 3)]) peewee-3.17.7/tests/shortcuts.py000066400000000000000000000721131470346076600166560ustar00rootroot00000000000000import operator from peewee import * from playhouse.shortcuts import * from .base import BaseTestCase from .base import DatabaseTestCase from .base import ModelTestCase from .base import TestModel from .base import db_loader from .base import get_in_memory_db from .base import requires_models from .base import requires_mysql from .base_models import Category class User(TestModel): username = TextField() @property def name_hash(self): return sum(map(ord, self.username)) % 10 class Tweet(TestModel): user = ForeignKeyField(User, backref='tweets') content = TextField() class Tag(TestModel): tag = TextField() class TweetTag(TestModel): tweet = ForeignKeyField(Tweet) tag = ForeignKeyField(Tag) class Meta: primary_key = CompositeKey('tweet', 'tag') class Owner(TestModel): name = TextField() class Label(TestModel): label = TextField() class Gallery(TestModel): name = TextField() labels = ManyToManyField(Label, backref='galleries') owner = ForeignKeyField(Owner, backref='galleries') GalleryLabel = Gallery.labels.through_model class Student(TestModel): name = TextField() StudentCourseProxy = DeferredThroughModel() class Course(TestModel): name = TextField() students = ManyToManyField(Student, through_model=StudentCourseProxy, backref='courses') class StudentCourse(TestModel): student = ForeignKeyField(Student) course = ForeignKeyField(Course) StudentCourseProxy.set_model(StudentCourse) class Host(TestModel): name = TextField() class Service(TestModel): host = ForeignKeyField(Host, backref='services') name = TextField() class Device(TestModel): host = ForeignKeyField(Host, backref='+') name = TextField() class Basket(TestModel): id = IntegerField(primary_key=True) class Item(TestModel): id = IntegerField(primary_key=True) basket = ForeignKeyField(Basket) class TestModelToDict(ModelTestCase): database = get_in_memory_db() requires = [User, Tweet, Tag, TweetTag] def setUp(self): super(TestModelToDict, self).setUp() self.user = User.create(username='peewee') def test_simple(self): with self.assertQueryCount(0): self.assertEqual(model_to_dict(self.user), { 'id': self.user.id, 'username': 'peewee'}) def test_simple_recurse(self): tweet = Tweet.create(user=self.user, content='t1') with self.assertQueryCount(0): self.assertEqual(model_to_dict(tweet), { 'id': tweet.id, 'content': tweet.content, 'user': { 'id': self.user.id, 'username': 'peewee'}}) with self.assertQueryCount(0): self.assertEqual(model_to_dict(tweet, recurse=False), { 'id': tweet.id, 'content': tweet.content, 'user': self.user.id}) def test_simple_backref(self): with self.assertQueryCount(1): self.assertEqual(model_to_dict(self.user, backrefs=True), { 'id': self.user.id, 'tweets': [], 'username': 'peewee'}) tweet = Tweet.create(user=self.user, content='t0') # Two queries, one for tweets, one for tweet-tags. 
with self.assertQueryCount(2): self.assertEqual(model_to_dict(self.user, backrefs=True), { 'id': self.user.id, 'username': 'peewee', 'tweets': [{'id': tweet.id, 'content': 't0', 'tweettag_set': []}]}) def test_recurse_and_backrefs(self): tweet = Tweet.create(user=self.user, content='t0') with self.assertQueryCount(1): self.assertEqual(model_to_dict(tweet, backrefs=True), { 'id': tweet.id, 'content': 't0', 'tweettag_set': [], 'user': {'id': self.user.id, 'username': 'peewee'}}) @requires_models(Category) def test_recursive_fk(self): root = Category.create(name='root') child = Category.create(name='child', parent=root) grandchild = Category.create(name='grandchild', parent=child) with self.assertQueryCount(0): for recurse in (True, False): self.assertEqual(model_to_dict(root, recurse=recurse), { 'name': 'root', 'parent': None}) with self.assertQueryCount(1): self.assertEqual(model_to_dict(root, backrefs=True), { 'name': 'root', 'parent': None, 'children': [{'name': 'child'}]}) with self.assertQueryCount(1): self.assertEqual(model_to_dict(root, backrefs=True), { 'name': 'root', 'parent': None, 'children': [{'name': 'child'}]}) with self.assertQueryCount(1): self.assertEqual(model_to_dict(child, backrefs=True), { 'name': 'child', 'parent': {'name': 'root'}, 'children': [{'name': 'grandchild'}]}) with self.assertQueryCount(0): self.assertEqual(model_to_dict(child, backrefs=False), { 'name': 'child', 'parent': {'name': 'root'}}) def test_manytomany(self): tweet = Tweet.create(user=self.user, content='t0') tag1 = Tag.create(tag='t1') tag2 = Tag.create(tag='t2') Tag.create(tag='tx') TweetTag.create(tweet=tweet, tag=tag1) TweetTag.create(tweet=tweet, tag=tag2) with self.assertQueryCount(4): self.assertEqual(model_to_dict(self.user, backrefs=True), { 'id': self.user.id, 'username': 'peewee', 'tweets': [{ 'id': tweet.id, 'content': 't0', 'tweettag_set': [ {'tag': {'id': tag1.id, 'tag': 't1'}}, {'tag': {'id': tag2.id, 'tag': 't2'}}]}]}) @requires_models(Label, Gallery, GalleryLabel, Owner) def test_manytomany_field(self): data = ( ('charlie', 'family', ('nuggie', 'bearbe')), ('charlie', 'pets', ('huey', 'zaizee', 'beanie')), ('peewee', 'misc', ('nuggie', 'huey'))) for owner_name, gallery, labels in data: owner, _ = Owner.get_or_create(name=owner_name) gallery = Gallery.create(name=gallery, owner=owner) label_objects = [Label.get_or_create(label=l)[0] for l in labels] gallery.labels.add(label_objects) query = (Gallery .select(Gallery, Owner) .join(Owner) .switch(Gallery) .join(GalleryLabel) .join(Label) .where(Label.label == 'nuggie') .order_by(Gallery.id)) rows = [model_to_dict(gallery, backrefs=True, manytomany=True) for gallery in query] self.assertEqual(rows, [ { 'id': 1, 'name': 'family', 'owner': {'id': 1, 'name': 'charlie'}, 'labels': [{'id': 1, 'label': 'nuggie'}, {'id': 2, 'label': 'bearbe'}], }, { 'id': 3, 'name': 'misc', 'owner': {'id': 2, 'name': 'peewee'}, 'labels': [{'id': 1, 'label': 'nuggie'}, {'id': 3, 'label': 'huey'}], }]) @requires_models(Student, Course, StudentCourse) def test_manytomany_deferred(self): data = ( ('s1', ('ca', 'cb', 'cc')), ('s2', ('cb', 'cd')), ('s3', ())) c = {} for student, courses in data: s = Student.create(name=student) for course in courses: if course not in c: c[course] = Course.create(name=course) StudentCourse.create(student=s, course=c[course]) query = Student.select().order_by(Student.name) data = [] for user in query: user_dict = model_to_dict(user, manytomany=True) user_dict['courses'].sort(key=operator.itemgetter('id')) data.append(user_dict) 
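As an aside, the serialization behavior these assertions pin down can be summarized with this module's own User and Tweet models. A minimal sketch, not part of the suite; the literal id values assume a freshly created database:

user = User.create(username='huey')
Tweet.create(user=user, content='meow')

# The plain call serializes only the row's own columns.
model_to_dict(user)
# -> {'id': 1, 'username': 'huey'}

# backrefs=True also walks reverse relations; each backref costs one query.
model_to_dict(user, backrefs=True)
# -> {'id': 1, 'username': 'huey',
#     'tweets': [{'id': 1, 'content': 'meow', 'tweettag_set': []}]}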
self.assertEqual(data, [ {'id': 1, 'name': 's1', 'courses': [ {'id': 1, 'name': 'ca'}, {'id': 2, 'name': 'cb'}, {'id': 3, 'name': 'cc'}]}, {'id': 2, 'name': 's2', 'courses': [ {'id': 2, 'name': 'cb'}, {'id': 4, 'name': 'cd'}]}, {'id': 3, 'name': 's3', 'courses': []}]) query = Course.select().order_by(Course.name) data = [] for course in query: course_dict = model_to_dict(course, manytomany=True) course_dict['students'].sort(key=operator.itemgetter('id')) data.append(course_dict) self.assertEqual(data, [ {'id': 1, 'name': 'ca', 'students': [ {'id': 1, 'name': 's1'}]}, {'id': 2, 'name': 'cb', 'students': [ {'id': 1, 'name': 's1'}, {'id': 2, 'name': 's2'}]}, {'id': 3, 'name': 'cc', 'students': [ {'id': 1, 'name': 's1'}]}, {'id': 4, 'name': 'cd', 'students': [ {'id': 2, 'name': 's2'}]}]) def test_recurse_max_depth(self): t0, t1, t2 = [Tweet.create(user=self.user, content='t%s' % i) for i in range(3)] tag0, tag1 = [Tag.create(tag=t) for t in ['tag0', 'tag1']] TweetTag.create(tweet=t0, tag=tag0) TweetTag.create(tweet=t0, tag=tag1) TweetTag.create(tweet=t1, tag=tag1) data = model_to_dict(self.user, recurse=True, backrefs=True) self.assertEqual(data, { 'id': self.user.id, 'username': 'peewee', 'tweets': [ {'id': t0.id, 'content': 't0', 'tweettag_set': [ {'tag': {'tag': 'tag0', 'id': tag0.id}}, {'tag': {'tag': 'tag1', 'id': tag1.id}}, ]}, {'id': t1.id, 'content': 't1', 'tweettag_set': [ {'tag': {'tag': 'tag1', 'id': tag1.id}}, ]}, {'id': t2.id, 'content': 't2', 'tweettag_set': []}, ]}) data = model_to_dict(self.user, recurse=True, backrefs=True, max_depth=2) self.assertEqual(data, { 'id': self.user.id, 'username': 'peewee', 'tweets': [ {'id': t0.id, 'content': 't0', 'tweettag_set': [ {'tag': tag0.id}, {'tag': tag1.id}, ]}, {'id': t1.id, 'content': 't1', 'tweettag_set': [ {'tag': tag1.id}, ]}, {'id': t2.id, 'content': 't2', 'tweettag_set': []}, ]}) data = model_to_dict(self.user, recurse=True, backrefs=True, max_depth=1) self.assertEqual(data, { 'id': self.user.id, 'username': 'peewee', 'tweets': [ {'id': t0.id, 'content': 't0'}, {'id': t1.id, 'content': 't1'}, {'id': t2.id, 'content': 't2'}]}) self.assertEqual(model_to_dict(self.user, recurse=True, backrefs=True, max_depth=0), {'id': self.user.id, 'username': 'peewee'}) def test_only(self): username_dict = {'username': 'peewee'} self.assertEqual(model_to_dict(self.user, only=[User.username]), username_dict) self.assertEqual( model_to_dict(self.user, backrefs=True, only=[User.username]), username_dict) tweet = Tweet.create(user=self.user, content='t0') tweet_dict = {'content': 't0', 'user': {'username': 'peewee'}} field_list = [Tweet.content, Tweet.user, User.username] self.assertEqual(model_to_dict(tweet, only=field_list), tweet_dict) self.assertEqual(model_to_dict(tweet, backrefs=True, only=field_list), tweet_dict) tweet_dict['user'] = self.user.id self.assertEqual(model_to_dict(tweet, backrefs=True, recurse=False, only=field_list), tweet_dict) def test_exclude(self): self.assertEqual(model_to_dict(self.user, exclude=[User.id]), {'username': 'peewee'}) # Exclude the foreign key using FK field and backref. 
self.assertEqual(model_to_dict(self.user, backrefs=True, exclude=[User.id, Tweet.user]), {'username': 'peewee'}) self.assertEqual(model_to_dict(self.user, backrefs=True, exclude=[User.id, User.tweets]), {'username': 'peewee'}) tweet = Tweet.create(user=self.user, content='t0') fields = [Tweet.tweettag_set, Tweet.id, Tweet.user] self.assertEqual(model_to_dict(tweet, backrefs=True, exclude=fields), {'content': 't0'}) fields[-1] = User.id self.assertEqual(model_to_dict(tweet, backrefs=True, exclude=fields), {'content': 't0', 'user': {'username': 'peewee'}}) def test_extra_attrs(self): with self.assertQueryCount(0): extra = ['name_hash'] self.assertEqual(model_to_dict(self.user, extra_attrs=extra), { 'id': self.user.id, 'username': 'peewee', 'name_hash': 5}) with self.assertQueryCount(0): self.assertRaises(AttributeError, model_to_dict, self.user, extra_attrs=['xx']) def test_fields_from_query(self): User.delete().execute() for i in range(3): user = User.create(username='u%d' % i) for x in range(i + 1): Tweet.create(user=user, content='%s-%s' % (user.username, x)) query = (User .select(User.username, fn.COUNT(Tweet.id).alias('ct')) .join(Tweet, JOIN.LEFT_OUTER) .group_by(User.username) .order_by(User.id)) with self.assertQueryCount(1): u0, u1, u2 = list(query) self.assertEqual(model_to_dict(u0, fields_from_query=query), { 'username': 'u0', 'ct': 1}) self.assertEqual(model_to_dict(u2, fields_from_query=query), { 'username': 'u2', 'ct': 3}) query = (Tweet .select(Tweet, User, SQL('1337').alias('magic')) .join(User) .order_by(Tweet.id) .limit(1)) with self.assertQueryCount(1): tweet, = query self.assertEqual(model_to_dict(tweet, fields_from_query=query), { 'id': tweet.id, 'content': 'u0-0', 'magic': 1337, 'user': {'id': tweet.user_id, 'username': 'u0'}}) self.assertEqual(model_to_dict(tweet, fields_from_query=query, exclude=[User.id, Tweet.id]), {'magic': 1337, 'content': 'u0-0', 'user': {'username': 'u0'}}) def test_fields_from_query_alias(self): q = User.select(User.username.alias('name')) res = q[0] self.assertEqual(model_to_dict(res, fields_from_query=q), {'name': 'peewee'}) UA = User.alias() q = UA.select(UA.username.alias('name')) res = q[0] self.assertEqual(model_to_dict(res, fields_from_query=q), {'name': 'peewee'}) def test_only_backref(self): for i in range(3): Tweet.create(user=self.user, content=str(i)) data = model_to_dict(self.user, backrefs=True, only=[ User.username, User.tweets, Tweet.content]) if 'tweets' in data: data['tweets'].sort(key=lambda t: t['content']) self.assertEqual(data, { 'username': 'peewee', 'tweets': [ {'content': '0'}, {'content': '1'}, {'content': '2'}]}) @requires_models(Host, Service, Device) def test_model_to_dict_disabled_backref(self): host = Host.create(name='pi') Device.create(host=host, name='raspberry pi') Service.create(host=host, name='ssh') Service.create(host=host, name='vpn') data = model_to_dict(host, recurse=True, backrefs=True) services = sorted(data.pop('services'), key=operator.itemgetter('id')) self.assertEqual(data, {'id': 1, 'name': 'pi'}) self.assertEqual(services, [ {'id': 1, 'name': 'ssh'}, {'id': 2, 'name': 'vpn'}]) @requires_models(Basket, Item) def test_empty_vs_null_fk(self): b = Basket.create(id=0) i = Item.create(id=0, basket=b) data = model_to_dict(i) self.assertEqual(data, {'id': 0, 'basket': {'id': 0}}) data = model_to_dict(i, recurse=False) self.assertEqual(data, {'id': 0, 'basket': 0}) class TestDictToModel(ModelTestCase): database = get_in_memory_db() requires = [User, Tweet, Tag, TweetTag] def setUp(self): 
super(TestDictToModel, self).setUp() self.user = User.create(username='peewee') def test_simple(self): data = {'username': 'peewee', 'id': self.user.id} inst = dict_to_model(User, data) self.assertTrue(isinstance(inst, User)) self.assertEqual(inst.username, 'peewee') self.assertEqual(inst.id, self.user.id) def test_update_model_from_dict(self): data = {'content': 'tweet', 'user': {'username': 'zaizee'}} with self.assertQueryCount(0): user = User(id=3, username='orig') tweet = Tweet(id=4, content='orig', user=user) obj = update_model_from_dict(tweet, data) self.assertEqual(obj.id, 4) self.assertEqual(obj.content, 'tweet') self.assertEqual(obj.user.id, 3) self.assertEqual(obj.user.username, 'zaizee') def test_related(self): data = { 'id': 2, 'content': 'tweet-1', 'user': {'id': self.user.id, 'username': 'peewee'}} with self.assertQueryCount(0): inst = dict_to_model(Tweet, data) self.assertTrue(isinstance(inst, Tweet)) self.assertEqual(inst.id, 2) self.assertEqual(inst.content, 'tweet-1') self.assertTrue(isinstance(inst.user, User)) self.assertEqual(inst.user.id, self.user.id) self.assertEqual(inst.user.username, 'peewee') data['user'] = self.user.id with self.assertQueryCount(0): inst = dict_to_model(Tweet, data) with self.assertQueryCount(1): self.assertEqual(inst.user, self.user) def test_backrefs(self): data = { 'id': self.user.id, 'username': 'peewee', 'tweets': [ {'id': 1, 'content': 't1'}, {'id': 2, 'content': 't2'}, ]} with self.assertQueryCount(0): inst = dict_to_model(User, data) self.assertEqual(inst.id, self.user.id) self.assertEqual(inst.username, 'peewee') self.assertTrue(isinstance(inst.tweets, list)) t1, t2 = inst.tweets self.assertEqual(t1.id, 1) self.assertEqual(t1.content, 't1') self.assertEqual(t1.user, self.user) self.assertEqual(t2.id, 2) self.assertEqual(t2.content, 't2') self.assertEqual(t2.user, self.user) def test_unknown_attributes(self): data = { 'id': self.user.id, 'username': 'peewee', 'xx': 'does not exist'} self.assertRaises(AttributeError, dict_to_model, User, data) inst = dict_to_model(User, data, ignore_unknown=True) self.assertEqual(inst.xx, 'does not exist') def test_ignore_id_attribute(self): class Register(Model): key = CharField(primary_key=True) data = {'id': 100, 'key': 'k1'} self.assertRaises(AttributeError, dict_to_model, Register, data) inst = dict_to_model(Register, data, ignore_unknown=True) self.assertEqual(inst.__data__, {'key': 'k1'}) class Base(Model): class Meta: primary_key = False class Register2(Model): key = CharField(primary_key=True) self.assertRaises(AttributeError, dict_to_model, Register2, data) inst = dict_to_model(Register2, data, ignore_unknown=True) self.assertEqual(inst.__data__, {'key': 'k1'}) class ReconnectMySQLDatabase(ReconnectMixin, MySQLDatabase): def cursor(self, named_cursor=None): cursor = super(ReconnectMySQLDatabase, self).cursor(named_cursor) # The first (0th) query fails, as do all queries after the 2nd (1st). if self._query_counter != 1: def _fake_execute(self, *args): raise OperationalError('2006') cursor.execute = _fake_execute self._query_counter += 1 return cursor def close(self): self._close_counter += 1 return super(ReconnectMySQLDatabase, self).close() def _reset_mock(self): self._close_counter = 0 self._query_counter = 0 @requires_mysql class TestReconnectMixin(DatabaseTestCase): database = db_loader('mysql', db_class=ReconnectMySQLDatabase) def test_reconnect_mixin_execute_sql(self): # Verify initial state. 
self.database._reset_mock()
        self.assertEqual(self.database._close_counter, 0)

        sql = 'select 1 + 1'
        curs = self.database.execute_sql(sql)
        self.assertEqual(curs.fetchone(), (2,))
        self.assertEqual(self.database._close_counter, 1)

        # Due to how we configured our mock, our queries are now failing and we
        # can verify a reconnect is occurring *AND* the exception is propagated.
        self.assertRaises(OperationalError, self.database.execute_sql, sql)
        self.assertEqual(self.database._close_counter, 2)

        # We reset the mock counters. The first query we execute will fail. The
        # second query will succeed (which happens automatically, thanks to the
        # retry logic).
        self.database._reset_mock()
        curs = self.database.execute_sql(sql)
        self.assertEqual(curs.fetchone(), (2,))
        self.assertEqual(self.database._close_counter, 1)

    def test_reconnect_mixin_begin(self):
        # Verify initial state.
        self.database._reset_mock()
        self.assertEqual(self.database._close_counter, 0)

        with self.database.atomic():
            self.assertTrue(self.database.in_transaction())
            self.assertEqual(self.database._close_counter, 1)

            # Prepare mock for commit call
            self.database._query_counter = 1

        # Due to how we configured our mock, our queries are now failing and we
        # can verify a reconnect is occurring *AND* the exception is propagated.
        self.assertRaises(OperationalError, self.database.atomic().__enter__)
        self.assertEqual(self.database._close_counter, 2)
        self.assertFalse(self.database.in_transaction())

        # We reset the mock counters. The first query we execute will fail. The
        # second query will succeed (which happens automatically, thanks to the
        # retry logic).
        self.database._reset_mock()
        with self.database.atomic():
            self.assertTrue(self.database.in_transaction())
            self.assertEqual(self.database._close_counter, 1)

            # Do not reconnect when nesting transactions
            self.assertRaises(OperationalError,
                              self.database.atomic().__enter__)
            self.assertEqual(self.database._close_counter, 1)

            # Prepare mock for commit call
            self.database._query_counter = 1
        self.assertFalse(self.database.in_transaction())


class MMA(TestModel):
    key = TextField()
    value = IntegerField()

class MMB(TestModel):
    key = TextField()

class MMC(TestModel):
    key = TextField()
    value = IntegerField()
    misc = TextField(null=True)


class TestResolveMultiModelQuery(ModelTestCase):
    requires = [MMA, MMB, MMC]

    def test_resolve_multimodel_query(self):
        MMA.insert_many([('k0', 0), ('k1', 1)]).execute()
        MMB.insert_many([('k10',), ('k11',)]).execute()
        MMC.insert_many([('k20', 20, 'a'), ('k21', 21, 'b')]).execute()

        mma = MMA.select(MMA.key, MMA.value)
        mmb = MMB.select(MMB.key, Value(99).alias('value'))
        mmc = MMC.select(MMC.key, MMC.value)
        query = (mma | mmb | mmc).order_by(SQL('1'))
        data = [obj for obj in resolve_multimodel_query(query)]

        expected = [
            MMA(key='k0', value=0),
            MMA(key='k1', value=1),
            MMB(key='k10', value=99),
            MMB(key='k11', value=99),
            MMC(key='k20', value=20),
            MMC(key='k21', value=21)]
        self.assertEqual(len(data), len(expected))
        for row, exp_row in zip(data, expected):
            self.assertEqual(row.__class__, exp_row.__class__)
            self.assertEqual(row.key, exp_row.key)
            self.assertEqual(row.value, exp_row.value)


ts_database = get_in_memory_db()

class TSBase(Model):
    class Meta:
        database = ts_database
        model_metadata_class = ThreadSafeDatabaseMetadata

class TSReg(TSBase):
    key = TextField()


class TestThreadSafeDatabaseMetadata(BaseTestCase):
    def setUp(self):
        super(TestThreadSafeDatabaseMetadata, self).setUp()
        ts_database.create_tables([TSReg])

    def test_threadsafe_database_metadata(self):
        self.assertTrue(isinstance(TSReg._meta,

ThreadSafeDatabaseMetadata)) self.assertEqual(TSReg._meta.database, ts_database) t1 = TSReg.create(key='k1') t1_db = TSReg.get(TSReg.key == 'k1') self.assertEqual(t1.id, t1_db.id) def test_swap_database(self): d1 = get_in_memory_db() d2 = get_in_memory_db() class M(TSBase): pass def swap_db(): self.assertEqual(M._meta.database, ts_database) d1.bind([M]) self.assertEqual(M._meta.database, d1) with d2.bind_ctx([M]): self.assertEqual(M._meta.database, d2) self.assertEqual(M._meta.database, d1) self.assertEqual(M._meta.database, ts_database) # From a separate thread, swap the database and verify it works # correctly. t = threading.Thread(target=swap_db) t.start() ; t.join() # In the main thread the original database has not been altered. self.assertEqual(M._meta.database, ts_database) def test_preserve_original_db(self): outputs = [] d1 = get_in_memory_db() d2 = get_in_memory_db() class M(TSBase): class Meta: database = d1 def swap_db(): self.assertTrue(M._meta.database is d1) with d2.bind_ctx([M]): self.assertTrue(M._meta.database is d2) self.assertTrue(M._meta.database is d1) d2.bind([M]) # Now bind to d2 and leave it bound. self.assertTrue(M._meta.database is d2) # From a separate thread, swap the database and verify it works # correctly. threads = [threading.Thread(target=swap_db) for _ in range(20)] for t in threads: t.start() for t in threads: t.join() # In the main thread the original database has not been altered. self.assertTrue(M._meta.database is d1) class TIW(TestModel): key = CharField() value = IntegerField(default=0) extra = IntegerField(default=lambda: 1) class TestInsertWhere(ModelTestCase): requires = [User, Tweet, TIW] def test_insert_where(self): ua, ub = [User.create(username=n) for n in 'ab'] def _insert_where(user, content): cond = (Tweet.select() .where(Tweet.user == user, Tweet.content == content)) where = ~fn.EXISTS(cond) iq = insert_where(Tweet, { Tweet.user: user, Tweet.content: content}, where=where) return 1 if iq.execute() else 0 self.assertEqual(_insert_where(ua, 't1'), 1) self.assertEqual(_insert_where(ua, 't2'), 1) self.assertEqual(_insert_where(ua, 't1'), 0) self.assertEqual(_insert_where(ua, 't2'), 0) self.assertEqual(_insert_where(ub, 't1'), 1) self.assertEqual(_insert_where(ub, 't2'), 1) def test_insert_where_defaults(self): TIW.create(key='k1', value=1, extra=2) def _insert_where(key): where = ~fn.EXISTS(TIW.select().where(TIW.key == key)) iq = insert_where(TIW, {TIW.key: key}, where) return 1 if iq.execute() else 0 self.assertEqual(_insert_where('k2'), 1) self.assertEqual(_insert_where('k1'), 0) self.assertEqual(_insert_where('k2'), 0) tiw = TIW.get(TIW.key == 'k2') self.assertEqual(tiw.value, 0) self.assertEqual(tiw.extra, 1) peewee-3.17.7/tests/signals.py000066400000000000000000000140401470346076600162530ustar00rootroot00000000000000from peewee import * from playhouse import signals from .base import get_in_memory_db from .base import ModelTestCase class BaseSignalModel(signals.Model): pass class A(BaseSignalModel): a = TextField(default='') class B(BaseSignalModel): b = TextField(default='') class SubB(B): pass class TestSignals(ModelTestCase): database = get_in_memory_db() requires = [A, B, SubB] def tearDown(self): super(TestSignals, self).tearDown() signals.pre_save._flush() signals.post_save._flush() signals.pre_delete._flush() signals.post_delete._flush() signals.pre_init._flush() def test_pre_save(self): state = [] @signals.pre_save() def pre_save(sender, instance, created): state.append((sender, instance, instance._pk, created)) a = A() 
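For orientation, the receiver contract exercised throughout these signal tests can be sketched with this module's A model. A minimal illustration, not part of the suite:

from playhouse import signals

@signals.post_save(sender=A)
def on_save(sender, instance, created):
    # Receives the model class, the saved instance, and whether the save
    # performed an INSERT (created=True) or an UPDATE (created=False).
    pass

A.create(a='x')  # Fires on_save(A, <instance>, True).

# Disconnecting must specify the same sender used at registration time.
signals.post_save.disconnect(on_save, sender=A)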
self.assertEqual(a.save(), 1) self.assertEqual(state, [(A, a, None, True)]) self.assertEqual(a.save(), 1) self.assertTrue(a.id is not None) self.assertEqual(len(state), 2) self.assertEqual(state[-1], (A, a, a.id, False)) def test_post_save(self): state = [] @signals.post_save() def post_save(sender, instance, created): state.append((sender, instance, instance._pk, created)) a = A() a.save() self.assertTrue(a.id is not None) self.assertEqual(state, [(A, a, a.id, True)]) a.save() self.assertEqual(len(state), 2) self.assertEqual(state[-1], (A, a, a.id, False)) def test_pre_delete(self): state = [] @signals.pre_delete() def pre_delete(sender, instance): state.append((sender, instance, A.select().count())) a = A.create() self.assertEqual(a.delete_instance(), 1) self.assertEqual(state, [(A, a, 1)]) def test_post_delete(self): state = [] @signals.post_delete() def post_delete(sender, instance): state.append((sender, instance, A.select().count())) a = A.create() a.delete_instance() self.assertEqual(state, [(A, a, 0)]) def test_pre_init(self): state = [] A.create(a='a') @signals.pre_init() def pre_init(sender, instance): state.append((sender, instance.a)) A.get() self.assertEqual(state, [(A, 'a')]) def test_sender(self): state = [] @signals.post_save(sender=A) def post_save(sender, instance, created): state.append(instance) m = A.create() self.assertEqual(state, [m]) m2 = B.create() self.assertEqual(state, [m]) def test_connect_disconnect(self): state = [] @signals.post_save(sender=A) def post_save(sender, instance, created): state.append(instance) a = A.create() self.assertEqual(state, [a]) # Signal was registered with a specific sender, so this fails. self.assertRaises(ValueError, signals.post_save.disconnect, post_save) # Disconnect signal, specifying sender. signals.post_save.disconnect(post_save, sender=A) # Signal handler has been unregistered. a2 = A.create() self.assertEqual(state, [a]) # Re-connect without specifying sender. signals.post_save.connect(post_save) a3 = A.create() self.assertEqual(state, [a, a3]) # Signal was not registered with a sender, so this fails. self.assertRaises(ValueError, signals.post_save.disconnect, post_save, sender=A) signals.post_save.disconnect(post_save) def test_function_reuse(self): state = [] @signals.post_save(sender=A) def post_save(sender, instance, created): state.append(instance) # Connect function for sender=B as well. signals.post_save(sender=B)(post_save) a = A.create() b = B.create() self.assertEqual(state, [a, b]) def test_subclass_instance_receive_signals(self): state = [] @signals.post_save(sender=B) def post_save(sender, instance, created): state.append(instance) b = SubB.create() assert b in state def test_disconnect_issue_2687(self): state = [] # Same sender. @signals.post_save(sender=A) def sig1(sender, instance, created): state.append((1, instance.a)) @signals.post_save(sender=A) def sig2(sender, instance, created): state.append((2, instance.a)) A.create(a='a1') self.assertEqual(state, [(1, 'a1'), (2, 'a1')]) signals.post_save.disconnect(name='sig1', sender=A) A.create(a='a2') self.assertEqual(state, [(1, 'a1'), (2, 'a1'), (2, 'a2')]) signals.post_save.disconnect(name='sig2', sender=A) A.create(a='a3') self.assertEqual(state, [(1, 'a1'), (2, 'a1'), (2, 'a2')]) signals.post_save(name='s1')(sig1) signals.post_save(name='s2', sender=A)(sig2) state = state[:0] # Clear state, 2.7 compat. 
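The named-handler variant that test_disconnect_issue_2687 covers can likewise be sketched briefly (illustrative only; the 'audit' name is an arbitrary placeholder):

@signals.post_save(name='audit', sender=A)
def audit_handler(sender, instance, created):
    pass

# A named registration can later be disconnected by name instead of by
# passing the receiver function itself.
signals.post_save.disconnect(name='audit', sender=A)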
A.create(a='a4') self.assertEqual(state, [(1, 'a4'), (2, 'a4')]) signals.post_save.disconnect(name='s1') A.create(a='a5') self.assertEqual(state, [(1, 'a4'), (2, 'a4'), (2, 'a5')]) signals.post_save.disconnect(name='s2', sender=A) A.create(a='a6') self.assertEqual(state, [(1, 'a4'), (2, 'a4'), (2, 'a5')]) class NoPK(BaseSignalModel): val = IntegerField(index=True) class Meta: primary_key = False class TestSaveNoPrimaryKey(ModelTestCase): database = get_in_memory_db() requires = [NoPK] def test_save_no_pk(self): accum = [0] @signals.pre_save(sender=NoPK) @signals.post_save(sender=NoPK) def save_hook(sender, instance, created): accum[0] += 1 obj = NoPK.create(val=1) self.assertEqual(obj.val, 1) obj_db = NoPK.get(NoPK.val == 1) self.assertEqual(obj_db.val, 1) self.assertEqual(accum[0], 2) peewee-3.17.7/tests/sql.py000066400000000000000000002714451470346076600154300ustar00rootroot00000000000000import datetime import re from peewee import * from peewee import Expression from peewee import Function from peewee import query_to_string from .base import BaseTestCase from .base import TestModel from .base import db from .base import requires_mysql from .base import requires_sqlite from .base import __sql__ User = Table('users') Tweet = Table('tweets') Person = Table('person', ['id', 'name', 'dob'], primary_key='id') Note = Table('note', ['id', 'person_id', 'content']) class TestSelectQuery(BaseTestCase): def test_select(self): query = (User .select(User.c.id, User.c.username) .where(User.c.username == 'foo')) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username" ' 'FROM "users" AS "t1" ' 'WHERE ("t1"."username" = ?)'), ['foo']) query = (User .select(User.c['id'], User.c['username']) .where(User.c['username'] == 'test')) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username" ' 'FROM "users" AS "t1" ' 'WHERE ("t1"."username" = ?)'), ['test']) def test_select_extend(self): query = User.select(User.c.id, User.c.username) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1"'), []) query = query.select(User.c.username, User.c.is_admin) self.assertSQL(query, ( 'SELECT "t1"."username", "t1"."is_admin" FROM "users" AS "t1"'), []) query = query.select_extend(User.c.is_active, User.c.id) self.assertSQL(query, ( 'SELECT "t1"."username", "t1"."is_admin", "t1"."is_active", ' '"t1"."id" FROM "users" AS "t1"'), []) def test_selected_columns(self): query = (User .select(User.c.id, User.c.username, fn.COUNT(Tweet.c.id)) .join(Tweet, JOIN.LEFT_OUTER, on=(User.c.id == Tweet.c.user_id))) # NOTE: because of operator overloads for equality we have to test by # asserting the attributes of the selected cols. 
c_id, c_username, c_ct = query.selected_columns self.assertEqual(c_id.name, 'id') self.assertTrue(c_id.source is User) self.assertEqual(c_username.name, 'username') self.assertTrue(c_username.source is User) self.assertTrue(isinstance(c_ct, Function)) self.assertEqual(c_ct.name, 'COUNT') c_tid, = c_ct.arguments self.assertEqual(c_tid.name, 'id') self.assertTrue(c_tid.source is Tweet) query.selected_columns = (User.c.username,) c_username, = query.selected_columns self.assertEqual(c_username.name, 'username') self.assertTrue(c_username.source is User) def test_select_explicit_columns(self): query = (Person .select() .where(Person.dob < datetime.date(1980, 1, 1))) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."name", "t1"."dob" ' 'FROM "person" AS "t1" ' 'WHERE ("t1"."dob" < ?)'), [datetime.date(1980, 1, 1)]) def test_select_in_list_of_values(self): names_vals = [ ['charlie', 'huey'], ('charlie', 'huey'), set(('charlie', 'huey')), frozenset(('charlie', 'huey'))] for names in names_vals: query = (Person .select() .where(Person.name.in_(names))) sql, params = Context().sql(query).query() self.assertEqual(sql, ( 'SELECT "t1"."id", "t1"."name", "t1"."dob" ' 'FROM "person" AS "t1" ' 'WHERE ("t1"."name" IN (?, ?))')) self.assertEqual(sorted(params), ['charlie', 'huey']) query = (Person .select() .where(Person.id.in_(range(1, 10, 2)))) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."name", "t1"."dob" ' 'FROM "person" AS "t1" ' 'WHERE ("t1"."id" IN (?, ?, ?, ?, ?))'), [1, 3, 5, 7, 9]) def test_select_subselect_function(self): # For functions whose only argument is a subquery, we do not need to # include additional parentheses -- in fact, some databases will report # a syntax error if we do. exists = fn.EXISTS(Tweet .select(Tweet.c.id) .where(Tweet.c.user_id == User.c.id)) query = User.select(User.c.username, exists.alias('has_tweet')) self.assertSQL(query, ( 'SELECT "t1"."username", EXISTS(' 'SELECT "t2"."id" FROM "tweets" AS "t2" ' 'WHERE ("t2"."user_id" = "t1"."id")) AS "has_tweet" ' 'FROM "users" AS "t1"'), []) # If the function has more than one argument, we need to wrap the # subquery in parentheses. Stat = Table('stat', ['id', 'val']) SA = Stat.alias('sa') subq = SA.select(fn.SUM(SA.val).alias('val_sum')) query = Stat.select(fn.COALESCE(subq, 0)) self.assertSQL(query, ( 'SELECT COALESCE((' 'SELECT SUM("sa"."val") AS "val_sum" FROM "stat" AS "sa"' '), ?) FROM "stat" AS "t1"'), [0]) def test_subquery_in_select_sql(self): subq = User.select(User.c.id).where(User.c.username == 'huey') query = Tweet.select(Tweet.c.content, Tweet.c.user_id.in_(subq).alias('is_huey')) self.assertSQL(query, ( 'SELECT "t1"."content", ("t1"."user_id" IN (' 'SELECT "t2"."id" FROM "users" AS "t2" WHERE ("t2"."username" = ?)' ')) AS "is_huey" FROM "tweets" AS "t1"'), ['huey']) # If we explicitly specify an alias, it will be included. 
subq = subq.alias('sq') query = Tweet.select(Tweet.c.content, Tweet.c.user_id.in_(subq).alias('is_huey')) self.assertSQL(query, ( 'SELECT "t1"."content", ("t1"."user_id" IN (' 'SELECT "t2"."id" FROM "users" AS "t2" WHERE ("t2"."username" = ?)' ') AS "sq") AS "is_huey" FROM "tweets" AS "t1"'), ['huey']) def test_subquery_in_select_expression_sql(self): Point = Table('point', ('x', 'y')) PA = Point.alias('pa') subq = PA.select(fn.SUM(PA.y).alias('sa')).where(PA.x == Point.x) query = (Point .select(Point.x, Point.y, subq.alias('sy')) .order_by(Point.x, Point.y)) self.assertSQL(query, ( 'SELECT "t1"."x", "t1"."y", (' 'SELECT SUM("pa"."y") AS "sa" FROM "point" AS "pa" ' 'WHERE ("pa"."x" = "t1"."x")) AS "sy" ' 'FROM "point" AS "t1" ' 'ORDER BY "t1"."x", "t1"."y"'), []) def test_star(self): query = User.select(User.__star__) self.assertSQL(query, ('SELECT "t1".* FROM "users" AS "t1"'), []) query = (Tweet .select(Tweet.__star__, User.__star__) .join(User, on=(Tweet.c.user_id == User.c.id))) self.assertSQL(query, ( 'SELECT "t1".*, "t2".* ' 'FROM "tweets" AS "t1" ' 'INNER JOIN "users" AS "t2" ON ("t1"."user_id" = "t2"."id")'), []) query = (Tweet .select(Tweet.__star__, User.c.id) .join(User, on=(Tweet.c.user_id == User.c.id))) self.assertSQL(query, ( 'SELECT "t1".*, "t2"."id" ' 'FROM "tweets" AS "t1" ' 'INNER JOIN "users" AS "t2" ON ("t1"."user_id" = "t2"."id")'), []) def test_from_clause(self): query = (Note .select(Note.content, Person.name) .from_(Note, Person) .where(Note.person_id == Person.id) .order_by(Note.id)) self.assertSQL(query, ( 'SELECT "t1"."content", "t2"."name" ' 'FROM "note" AS "t1", "person" AS "t2" ' 'WHERE ("t1"."person_id" = "t2"."id") ' 'ORDER BY "t1"."id"'), []) def test_from_query(self): inner = Person.select(Person.name) query = (Person .select(Person.name) .from_(inner.alias('i1'))) self.assertSQL(query, ( 'SELECT "t1"."name" ' 'FROM (SELECT "t1"."name" FROM "person" AS "t1") AS "i1"'), []) PA = Person.alias('pa') inner = PA.select(PA.name).alias('i1') query = (Person .select(inner.c.name) .from_(inner) .order_by(inner.c.name)) self.assertSQL(query, ( 'SELECT "i1"."name" ' 'FROM (SELECT "pa"."name" FROM "person" AS "pa") AS "i1" ' 'ORDER BY "i1"."name"'), []) def test_join_explicit_columns(self): query = (Note .select(Note.content) .join(Person, on=(Note.person_id == Person.id)) .where(Person.name == 'charlie') .order_by(Note.id.desc())) self.assertSQL(query, ( 'SELECT "t1"."content" ' 'FROM "note" AS "t1" ' 'INNER JOIN "person" AS "t2" ON ("t1"."person_id" = "t2"."id") ' 'WHERE ("t2"."name" = ?) ' 'ORDER BY "t1"."id" DESC'), ['charlie']) def test_multi_join(self): Like = Table('likes') LikeUser = User.alias('lu') query = (Like .select(Tweet.c.content, User.c.username, LikeUser.c.username) .join(Tweet, on=(Like.c.tweet_id == Tweet.c.id)) .join(User, on=(Tweet.c.user_id == User.c.id)) .join(LikeUser, on=(Like.c.user_id == LikeUser.c.id)) .where(LikeUser.c.username == 'charlie') .order_by(Tweet.c.timestamp)) self.assertSQL(query, ( 'SELECT "t1"."content", "t2"."username", "lu"."username" ' 'FROM "likes" AS "t3" ' 'INNER JOIN "tweets" AS "t1" ON ("t3"."tweet_id" = "t1"."id") ' 'INNER JOIN "users" AS "t2" ON ("t1"."user_id" = "t2"."id") ' 'INNER JOIN "users" AS "lu" ON ("t3"."user_id" = "lu"."id") ' 'WHERE ("lu"."username" = ?) 
' 'ORDER BY "t1"."timestamp"'), ['charlie']) def test_correlated_subquery(self): Employee = Table('employee', ['id', 'name', 'salary', 'dept']) EA = Employee.alias('e2') query = (Employee .select(Employee.id, Employee.name) .where(Employee.salary > (EA .select(fn.AVG(EA.salary)) .where(EA.dept == Employee.dept)))) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."name" ' 'FROM "employee" AS "t1" ' 'WHERE ("t1"."salary" > (' 'SELECT AVG("e2"."salary") ' 'FROM "employee" AS "e2" ' 'WHERE ("e2"."dept" = "t1"."dept")))'), []) def test_multiple_where(self): """Ensure multiple calls to WHERE are AND-ed together.""" query = (Person .select(Person.name) .where(Person.dob < datetime.date(1980, 1, 1)) .where(Person.dob > datetime.date(1950, 1, 1))) self.assertSQL(query, ( 'SELECT "t1"."name" ' 'FROM "person" AS "t1" ' 'WHERE (("t1"."dob" < ?) AND ("t1"."dob" > ?))'), [datetime.date(1980, 1, 1), datetime.date(1950, 1, 1)]) def test_orwhere(self): query = (Person .select(Person.name) .orwhere(Person.dob > datetime.date(1980, 1, 1)) .orwhere(Person.dob < datetime.date(1950, 1, 1))) self.assertSQL(query, ( 'SELECT "t1"."name" ' 'FROM "person" AS "t1" ' 'WHERE (("t1"."dob" > ?) OR ("t1"."dob" < ?))'), [datetime.date(1980, 1, 1), datetime.date(1950, 1, 1)]) def test_limit(self): base = User.select(User.c.id) self.assertSQL(base.limit(None), ( 'SELECT "t1"."id" FROM "users" AS "t1"'), []) self.assertSQL(base.limit(10), ( 'SELECT "t1"."id" FROM "users" AS "t1" LIMIT ?'), [10]) self.assertSQL(base.limit(10).offset(3), ( 'SELECT "t1"."id" FROM "users" AS "t1" ' 'LIMIT ? OFFSET ?'), [10, 3]) self.assertSQL(base.limit(0), ( 'SELECT "t1"."id" FROM "users" AS "t1" LIMIT ?'), [0]) self.assertSQL(base.offset(3), ( 'SELECT "t1"."id" FROM "users" AS "t1" OFFSET ?'), [3], limit_max=None) # Some databases do not support offset without corresponding LIMIT: self.assertSQL(base.offset(3), ( 'SELECT "t1"."id" FROM "users" AS "t1" LIMIT ? OFFSET ?'), [-1, 3], limit_max=-1) self.assertSQL(base.limit(0).offset(3), ( 'SELECT "t1"."id" FROM "users" AS "t1" LIMIT ? OFFSET ?'), [0, 3], limit_max=-1) def test_simple_join(self): query = (User .select( User.c.id, User.c.username, fn.COUNT(Tweet.c.id).alias('ct')) .join(Tweet, on=(Tweet.c.user_id == User.c.id)) .group_by(User.c.id, User.c.username)) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username", COUNT("t2"."id") AS "ct" ' 'FROM "users" AS "t1" ' 'INNER JOIN "tweets" AS "t2" ON ("t2"."user_id" = "t1"."id") ' 'GROUP BY "t1"."id", "t1"."username"'), []) def test_subquery(self): inner = (Tweet .select(fn.COUNT(Tweet.c.id).alias('ct')) .where(Tweet.c.user == User.c.id)) query = (User .select(User.c.username, inner.alias('iq')) .order_by(User.c.username)) self.assertSQL(query, ( 'SELECT "t1"."username", ' '(SELECT COUNT("t2"."id") AS "ct" ' 'FROM "tweets" AS "t2" ' 'WHERE ("t2"."user" = "t1"."id")) AS "iq" ' 'FROM "users" AS "t1" ORDER BY "t1"."username"'), []) def test_subquery_in_expr(self): Team = Table('team') Challenge = Table('challenge') subq = Team.select(fn.COUNT(Team.c.id) + 1) query = (Challenge .select((Challenge.c.points / subq).alias('score')) .order_by(SQL('score'))) self.assertSQL(query, ( 'SELECT ("t1"."points" / (' 'SELECT (COUNT("t2"."id") + ?) 
FROM "team" AS "t2")) AS "score" ' 'FROM "challenge" AS "t1" ORDER BY score'), [1]) def test_user_defined_alias(self): UA = User.alias('alt') query = (User .select(User.c.id, User.c.username, UA.c.nuggz) .join(UA, on=(User.c.id == UA.c.id)) .order_by(UA.c.nuggz)) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."username", "alt"."nuggz" ' 'FROM "users" AS "t1" ' 'INNER JOIN "users" AS "alt" ON ("t1"."id" = "alt"."id") ' 'ORDER BY "alt"."nuggz"'), []) def test_simple_cte(self): cte = User.select(User.c.id).cte('user_ids') query = (User .select(User.c.username) .where(User.c.id.in_(cte)) .with_cte(cte)) self.assertSQL(query, ( 'WITH "user_ids" AS (SELECT "t1"."id" FROM "users" AS "t1") ' 'SELECT "t2"."username" FROM "users" AS "t2" ' 'WHERE ("t2"."id" IN "user_ids")'), []) def test_two_ctes(self): c1 = User.select(User.c.id).cte('user_ids') c2 = User.select(User.c.username).cte('user_names') query = (User .select(c1.c.id, c2.c.username) .where((c1.c.id == User.c.id) & (c2.c.username == User.c.username)) .with_cte(c1, c2)) self.assertSQL(query, ( 'WITH "user_ids" AS (SELECT "t1"."id" FROM "users" AS "t1"), ' '"user_names" AS (SELECT "t1"."username" FROM "users" AS "t1") ' 'SELECT "user_ids"."id", "user_names"."username" ' 'FROM "users" AS "t2" ' 'WHERE (("user_ids"."id" = "t2"."id") AND ' '("user_names"."username" = "t2"."username"))'), []) def test_select_from_cte(self): # Use the "select_from()" helper on the CTE object. cte = User.select(User.c.username).cte('user_cte') query = cte.select_from(cte.c.username).order_by(cte.c.username) self.assertSQL(query, ( 'WITH "user_cte" AS (SELECT "t1"."username" FROM "users" AS "t1") ' 'SELECT "user_cte"."username" FROM "user_cte" ' 'ORDER BY "user_cte"."username"'), []) # Test selecting from multiple CTEs, which is done manually. c1 = User.select(User.c.username).where(User.c.is_admin == 1).cte('c1') c2 = User.select(User.c.username).where(User.c.is_staff == 1).cte('c2') query = (Select((c1, c2), (c1.c.username, c2.c.username)) .with_cte(c1, c2)) self.assertSQL(query, ( 'WITH "c1" AS (' 'SELECT "t1"."username" FROM "users" AS "t1" ' 'WHERE ("t1"."is_admin" = ?)), ' '"c2" AS (' 'SELECT "t1"."username" FROM "users" AS "t1" ' 'WHERE ("t1"."is_staff" = ?)) ' 'SELECT "c1"."username", "c2"."username" FROM "c1", "c2"'), [1, 1]) def test_materialize_cte(self): cases = ( (True, 'MATERIALIZED '), (False, 'NOT MATERIALIZED '), (None, '')) for materialized, clause in cases: cte = (User .select(User.c.id) .cte('user_ids', materialized=materialized)) query = cte.select_from(cte.c.id).where(cte.c.id < 10) self.assertSQL(query, ( 'WITH "user_ids" AS %s(' 'SELECT "t1"."id" FROM "users" AS "t1") ' 'SELECT "user_ids"."id" FROM "user_ids" ' 'WHERE ("user_ids"."id" < ?)') % clause, [10]) def test_fibonacci_cte(self): q1 = Select(columns=( Value(1).alias('n'), Value(0).alias('fib_n'), Value(1).alias('next_fib_n'))).cte('fibonacci', recursive=True) n = (q1.c.n + 1).alias('n') rterm = Select(columns=( n, q1.c.next_fib_n, q1.c.fib_n + q1.c.next_fib_n)).from_(q1).where(n < 10) cases = ( (q1.union_all, 'UNION ALL'), (q1.union, 'UNION')) for method, clause in cases: cte = method(rterm) query = cte.select_from(cte.c.n, cte.c.fib_n) self.assertSQL(query, ( 'WITH RECURSIVE "fibonacci" AS (' 'SELECT ? AS "n", ? AS "fib_n", ? AS "next_fib_n" ' '%s ' 'SELECT ("fibonacci"."n" + ?) 
AS "n", "fibonacci"."next_fib_n", ' '("fibonacci"."fib_n" + "fibonacci"."next_fib_n") ' 'FROM "fibonacci" ' 'WHERE ("n" < ?)) ' 'SELECT "fibonacci"."n", "fibonacci"."fib_n" ' 'FROM "fibonacci"' % clause), [1, 0, 1, 1, 10]) def test_cte_with_count(self): cte = User.select(User.c.id).cte('user_ids') query = (User .select(User.c.username) .join(cte, on=(User.c.id == cte.c.id)) .with_cte(cte)) count = Select([query], [fn.COUNT(SQL('1'))]) self.assertSQL(count, ( 'SELECT COUNT(1) FROM (' 'WITH "user_ids" AS (SELECT "t1"."id" FROM "users" AS "t1") ' 'SELECT "t2"."username" FROM "users" AS "t2" ' 'INNER JOIN "user_ids" ON ("t2"."id" = "user_ids"."id")) ' 'AS "t3"'), []) def test_cte_subquery_in_expression(self): Order = Table('order', ('id', 'description')) Item = Table('item', ('id', 'order_id', 'description')) cte = Order.select(fn.MAX(Order.id).alias('max_id')).cte('max_order') qexpr = (Order .select(Order.id) .join(cte, on=(Order.id == cte.c.max_id)) .with_cte(cte)) query = (Item .select(Item.id, Item.order_id, Item.description) .where(Item.order_id.in_(qexpr))) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."order_id", "t1"."description" ' 'FROM "item" AS "t1" ' 'WHERE ("t1"."order_id" IN (' 'WITH "max_order" AS (' 'SELECT MAX("t2"."id") AS "max_id" FROM "order" AS "t2") ' 'SELECT "t3"."id" ' 'FROM "order" AS "t3" ' 'INNER JOIN "max_order" ' 'ON ("t3"."id" = "max_order"."max_id")))'), []) def test_multi_update_cte(self): data = [(i, 'u%sx' % i) for i in range(1, 3)] vl = ValuesList(data) cte = vl.select().cte('uv', columns=('id', 'username')) subq = cte.select(cte.c.username).where(cte.c.id == User.c.id) query = (User .update(username=subq) .where(User.c.id.in_(cte.select(cte.c.id))) .with_cte(cte)) self.assertSQL(query, ( 'WITH "uv" ("id", "username") AS (' 'SELECT * FROM (VALUES (?, ?), (?, ?)) AS "t1") ' 'UPDATE "users" SET "username" = (' 'SELECT "uv"."username" FROM "uv" ' 'WHERE ("uv"."id" = "users"."id")) ' 'WHERE ("users"."id" IN (SELECT "uv"."id" FROM "uv"))'), [1, 'u1x', 2, 'u2x']) def test_data_modifying_cte_delete(self): Product = Table('products', ('id', 'name', 'timestamp')) Archive = Table('archive', ('id', 'name', 'timestamp')) query = (Product.delete() .where(Product.timestamp < datetime.date(2022, 1, 1)) .returning(Product.id, Product.name, Product.timestamp)) cte = query.cte('moved_rows') src = Select((cte,), (cte.c.id, cte.c.name, cte.c.timestamp)) iq = (Archive .insert(src, (Archive.id, Archive.name, Archive.timestamp)) .with_cte(cte)) self.assertSQL(iq, ( 'WITH "moved_rows" AS (' 'DELETE FROM "products" WHERE ("products"."timestamp" < ?) ' 'RETURNING "products"."id", "products"."name", ' '"products"."timestamp") ' 'INSERT INTO "archive" ("id", "name", "timestamp") ' 'SELECT "moved_rows"."id", "moved_rows"."name", ' '"moved_rows"."timestamp" FROM "moved_rows"'), [datetime.date(2022, 1, 1)]) Part = Table('parts', ('id', 'part', 'sub_part')) base = (Part .select(Part.sub_part, Part.part) .where(Part.part == 'p') .cte('included_parts', recursive=True, columns=('sub_part', 'part'))) PA = Part.alias('p') recursive = (PA .select(PA.sub_part, PA.part) .join(base, on=(PA.part == base.c.sub_part))) cte = base.union_all(recursive) sq = Select((cte,), (cte.c.part,)) query = (Part.delete() .where(Part.part.in_(sq)) .with_cte(cte)) self.assertSQL(query, ( 'WITH RECURSIVE "included_parts" ("sub_part", "part") AS (' 'SELECT "t1"."sub_part", "t1"."part" FROM "parts" AS "t1" ' 'WHERE ("t1"."part" = ?) 
' 'UNION ALL ' 'SELECT "p"."sub_part", "p"."part" ' 'FROM "parts" AS "p" ' 'INNER JOIN "included_parts" ' 'ON ("p"."part" = "included_parts"."sub_part")) ' 'DELETE FROM "parts" ' 'WHERE ("parts"."part" IN (' 'SELECT "included_parts"."part" FROM "included_parts"))'), ['p']) def test_data_modifying_cte_update(self): Product = Table('products', ('id', 'name', 'price')) Archive = Table('archive', ('id', 'name', 'price')) query = (Product .update(price=Product.price * 1.05) .returning(Product.id, Product.name, Product.price)) cte = query.cte('t') sq = cte.select_from(cte.c.id, cte.c.name, cte.c.price) self.assertSQL(sq, ( 'WITH "t" AS (' 'UPDATE "products" SET "price" = ("products"."price" * ?) ' 'RETURNING "products"."id", "products"."name", "products"."price")' ' SELECT "t"."id", "t"."name", "t"."price" FROM "t"'), [1.05]) sq = Select((cte,), (cte.c.id, cte.c.price)) uq = (Archive .update(price=sq.c.price) .from_(sq) .where(Archive.id == sq.c.id) .with_cte(cte)) self.assertSQL(uq, ( 'WITH "t" AS (' 'UPDATE "products" SET "price" = ("products"."price" * ?) ' 'RETURNING "products"."id", "products"."name", "products"."price")' ' UPDATE "archive" SET "price" = "t1"."price"' ' FROM (SELECT "t"."id", "t"."price" FROM "t") AS "t1"' ' WHERE ("archive"."id" = "t1"."id")'), [1.05]) def test_data_modifying_cte_insert(self): Product = Table('products', ('id', 'name', 'price')) Archive = Table('archive', ('id', 'name', 'price')) query = (Product .insert({'name': 'p1', 'price': 10}) .returning(Product.id, Product.name, Product.price)) cte = query.cte('t') sq = cte.select_from(cte.c.id, cte.c.name, cte.c.price) self.assertSQL(sq, ( 'WITH "t" AS (' 'INSERT INTO "products" ("name", "price") VALUES (?, ?) ' 'RETURNING "products"."id", "products"."name", "products"."price")' ' SELECT "t"."id", "t"."name", "t"."price" FROM "t"'), ['p1', 10]) sq = Select((cte,), (cte.c.id, cte.c.name, cte.c.price)) iq = (Archive .insert(sq, (sq.c.id, sq.c.name, sq.c.price)) .with_cte(cte)) self.assertSQL(iq, ( 'WITH "t" AS (' 'INSERT INTO "products" ("name", "price") VALUES (?, ?) ' 'RETURNING "products"."id", "products"."name", "products"."price")' ' INSERT INTO "archive" ("id", "name", "price")' ' SELECT "t"."id", "t"."name", "t"."price" FROM "t"'), ['p1', 10]) def test_complex_select(self): Order = Table('orders', columns=( 'region', 'amount', 'product', 'quantity')) regional_sales = (Order .select( Order.region, fn.SUM(Order.amount).alias('total_sales')) .group_by(Order.region) .cte('regional_sales')) top_regions = (regional_sales .select(regional_sales.c.region) .where(regional_sales.c.total_sales > ( regional_sales.select( fn.SUM(regional_sales.c.total_sales) / 10))) .cte('top_regions')) query = (Order .select( Order.region, Order.product, fn.SUM(Order.quantity).alias('product_units'), fn.SUM(Order.amount).alias('product_sales')) .where( Order.region << top_regions.select(top_regions.c.region)) .group_by(Order.region, Order.product) .with_cte(regional_sales, top_regions)) self.assertSQL(query, ( 'WITH "regional_sales" AS (' 'SELECT "t1"."region", SUM("t1"."amount") AS "total_sales" ' 'FROM "orders" AS "t1" ' 'GROUP BY "t1"."region"' '), ' '"top_regions" AS (' 'SELECT "regional_sales"."region" ' 'FROM "regional_sales" ' 'WHERE ("regional_sales"."total_sales" > ' '(SELECT (SUM("regional_sales"."total_sales") / ?) 
' 'FROM "regional_sales"))' ') ' 'SELECT "t2"."region", "t2"."product", ' 'SUM("t2"."quantity") AS "product_units", ' 'SUM("t2"."amount") AS "product_sales" ' 'FROM "orders" AS "t2" ' 'WHERE (' '"t2"."region" IN (' 'SELECT "top_regions"."region" ' 'FROM "top_regions")' ') GROUP BY "t2"."region", "t2"."product"'), [10]) def test_compound_select(self): lhs = User.select(User.c.id).where(User.c.username == 'charlie') rhs = User.select(User.c.username).where(User.c.admin == True) q2 = (lhs | rhs) UA = User.alias('U2') q3 = q2 | UA.select(UA.c.id).where(UA.c.superuser == False) self.assertSQL(q3, ( 'SELECT "t1"."id" ' 'FROM "users" AS "t1" ' 'WHERE ("t1"."username" = ?) ' 'UNION ' 'SELECT "t2"."username" ' 'FROM "users" AS "t2" ' 'WHERE ("t2"."admin" = ?) ' 'UNION ' 'SELECT "U2"."id" ' 'FROM "users" AS "U2" ' 'WHERE ("U2"."superuser" = ?)'), ['charlie', True, False]) def test_compound_operations(self): admin = (User .select(User.c.username, Value('admin').alias('role')) .where(User.c.is_admin == True)) editors = (User .select(User.c.username, Value('editor').alias('role')) .where(User.c.is_editor == True)) union = admin.union(editors) self.assertSQL(union, ( 'SELECT "t1"."username", ? AS "role" ' 'FROM "users" AS "t1" ' 'WHERE ("t1"."is_admin" = ?) ' 'UNION ' 'SELECT "t2"."username", ? AS "role" ' 'FROM "users" AS "t2" ' 'WHERE ("t2"."is_editor" = ?)'), ['admin', 1, 'editor', 1]) xcept = editors.except_(admin) self.assertSQL(xcept, ( 'SELECT "t1"."username", ? AS "role" ' 'FROM "users" AS "t1" ' 'WHERE ("t1"."is_editor" = ?) ' 'EXCEPT ' 'SELECT "t2"."username", ? AS "role" ' 'FROM "users" AS "t2" ' 'WHERE ("t2"."is_admin" = ?)'), ['editor', 1, 'admin', 1]) def test_compound_parentheses_handling(self): admin = (User .select(User.c.username, Value('admin').alias('role')) .where(User.c.is_admin == True) .order_by(User.c.id.desc()) .limit(3)) editors = (User .select(User.c.username, Value('editor').alias('role')) .where(User.c.is_editor == True) .order_by(User.c.id.desc()) .limit(5)) self.assertSQL((admin | editors), ( '(SELECT "t1"."username", ? AS "role" FROM "users" AS "t1" ' 'WHERE ("t1"."is_admin" = ?) ORDER BY "t1"."id" DESC LIMIT ?) ' 'UNION ' '(SELECT "t2"."username", ? AS "role" FROM "users" AS "t2" ' 'WHERE ("t2"."is_editor" = ?) ORDER BY "t2"."id" DESC LIMIT ?)'), ['admin', 1, 3, 'editor', 1, 5], compound_select_parentheses=True) Reg = Table('register', ('value',)) lhs = Reg.select().where(Reg.value < 2) rhs = Reg.select().where(Reg.value > 7) compound = lhs | rhs for csq_setting in (1, 2): self.assertSQL(compound, ( '(SELECT "t1"."value" FROM "register" AS "t1" ' 'WHERE ("t1"."value" < ?)) ' 'UNION ' '(SELECT "t2"."value" FROM "register" AS "t2" ' 'WHERE ("t2"."value" > ?))'), [2, 7], compound_select_parentheses=csq_setting) rhs2 = Reg.select().where(Reg.value == 5) c2 = compound | rhs2 # CSQ = always, we get nested parentheses. self.assertSQL(c2, ( '((SELECT "t1"."value" FROM "register" AS "t1" ' 'WHERE ("t1"."value" < ?)) ' 'UNION ' '(SELECT "t2"."value" FROM "register" AS "t2" ' 'WHERE ("t2"."value" > ?))) ' 'UNION ' '(SELECT "t2"."value" FROM "register" AS "t2" ' 'WHERE ("t2"."value" = ?))'), [2, 7, 5], compound_select_parentheses=1) # Always. # CSQ = unnested, no nesting but all individual queries have parens. 
self.assertSQL(c2, ( '(SELECT "t1"."value" FROM "register" AS "t1" ' 'WHERE ("t1"."value" < ?)) ' 'UNION ' '(SELECT "t2"."value" FROM "register" AS "t2" ' 'WHERE ("t2"."value" > ?)) ' 'UNION ' '(SELECT "t2"."value" FROM "register" AS "t2" ' 'WHERE ("t2"."value" = ?))'), [2, 7, 5], compound_select_parentheses=2) # Un-nested. def test_compound_select_order_limit(self): A = Table('a', ('col_a',)) B = Table('b', ('col_b',)) C = Table('c', ('col_c',)) q1 = A.select(A.col_a.alias('foo')) q2 = B.select(B.col_b.alias('foo')) q3 = C.select(C.col_c.alias('foo')) qc = (q1 | q2 | q3) qc = qc.order_by(qc.c.foo.desc()).limit(3) self.assertSQL(qc, ( 'SELECT "t1"."col_a" AS "foo" FROM "a" AS "t1" UNION ' 'SELECT "t2"."col_b" AS "foo" FROM "b" AS "t2" UNION ' 'SELECT "t3"."col_c" AS "foo" FROM "c" AS "t3" ' 'ORDER BY "foo" DESC LIMIT ?'), [3]) self.assertSQL(qc, ( '((SELECT "t1"."col_a" AS "foo" FROM "a" AS "t1") UNION ' '(SELECT "t2"."col_b" AS "foo" FROM "b" AS "t2")) UNION ' '(SELECT "t3"."col_c" AS "foo" FROM "c" AS "t3") ' 'ORDER BY "foo" DESC LIMIT ?'), [3], compound_select_parentheses=1) def test_compound_select_as_subquery(self): A = Table('a', ('col_a',)) B = Table('b', ('col_b',)) q1 = A.select(A.col_a.alias('foo')) q2 = B.select(B.col_b.alias('foo')) union = q1 | q2 # Create an outer query and do grouping. outer = (union .select_from(union.c.foo, fn.COUNT(union.c.foo).alias('ct')) .group_by(union.c.foo)) self.assertSQL(outer, ( 'SELECT "t1"."foo", COUNT("t1"."foo") AS "ct" FROM (' 'SELECT "t2"."col_a" AS "foo" FROM "a" AS "t2" UNION ' 'SELECT "t3"."col_b" AS "foo" FROM "b" AS "t3") AS "t1" ' 'GROUP BY "t1"."foo"'), []) def test_join_on_query(self): inner = User.select(User.c.id).alias('j1') query = (Tweet .select(Tweet.c.content) .join(inner, on=(Tweet.c.user_id == inner.c.id))) self.assertSQL(query, ( 'SELECT "t1"."content" FROM "tweets" AS "t1" ' 'INNER JOIN (SELECT "t2"."id" FROM "users" AS "t2") AS "j1" ' 'ON ("t1"."user_id" = "j1"."id")'), []) def test_join_on_misc(self): cond = fn.Magic(Person.id, Note.id).alias('magic') query = Person.select(Person.id).join(Note, on=cond) self.assertSQL(query, ( 'SELECT "t1"."id" FROM "person" AS "t1" ' 'INNER JOIN "note" AS "t2" ' 'ON Magic("t1"."id", "t2"."id") AS "magic"'), []) def test_all_clauses(self): count = fn.COUNT(Tweet.c.id).alias('ct') query = (User .select(User.c.username, count) .join(Tweet, JOIN.LEFT_OUTER, on=(User.c.id == Tweet.c.user_id)) .where(User.c.is_admin == 1) .group_by(User.c.username) .having(count > 10) .order_by(count.desc())) self.assertSQL(query, ( 'SELECT "t1"."username", COUNT("t2"."id") AS "ct" ' 'FROM "users" AS "t1" ' 'LEFT OUTER JOIN "tweets" AS "t2" ' 'ON ("t1"."id" = "t2"."user_id") ' 'WHERE ("t1"."is_admin" = ?) ' 'GROUP BY "t1"."username" ' 'HAVING ("ct" > ?) ' 'ORDER BY "ct" DESC'), [1, 10]) def test_order_by_collate(self): query = (User .select(User.c.username) .order_by(User.c.username.asc(collation='binary'))) self.assertSQL(query, ( 'SELECT "t1"."username" FROM "users" AS "t1" ' 'ORDER BY "t1"."username" ASC COLLATE binary'), []) def test_order_by_nulls(self): query = (User .select(User.c.username) .order_by(User.c.ts.desc(nulls='LAST'))) self.assertSQL(query, ( 'SELECT "t1"."username" FROM "users" AS "t1" ' 'ORDER BY "t1"."ts" DESC NULLS LAST'), [], nulls_ordering=True) self.assertSQL(query, ( 'SELECT "t1"."username" FROM "users" AS "t1" ' 'ORDER BY CASE WHEN ("t1"."ts" IS NULL) THEN ? ELSE ? 
END, ' '"t1"."ts" DESC'), [1, 0], nulls_ordering=False) query = (User .select(User.c.username) .order_by(User.c.ts.desc(nulls='first'))) self.assertSQL(query, ( 'SELECT "t1"."username" FROM "users" AS "t1" ' 'ORDER BY "t1"."ts" DESC NULLS first'), [], nulls_ordering=True) self.assertSQL(query, ( 'SELECT "t1"."username" FROM "users" AS "t1" ' 'ORDER BY CASE WHEN ("t1"."ts" IS NULL) THEN ? ELSE ? END, ' '"t1"."ts" DESC'), [0, 1], nulls_ordering=False) def test_in_value_representation(self): query = (User .select(User.c.id) .where(User.c.username.in_(['foo', 'bar', 'baz']))) self.assertSQL(query, ( 'SELECT "t1"."id" FROM "users" AS "t1" ' 'WHERE ("t1"."username" IN (?, ?, ?))'), ['foo', 'bar', 'baz']) def test_tuple_comparison(self): name_dob = Tuple(Person.name, Person.dob) query = (Person .select(Person.id) .where(name_dob == ('foo', '2017-01-01'))) expected = ('SELECT "t1"."id" FROM "person" AS "t1" ' 'WHERE (("t1"."name", "t1"."dob") = (?, ?))') self.assertSQL(query, expected, ['foo', '2017-01-01']) # Also works specifying rhs values as Tuple(). query = (Person .select(Person.id) .where(name_dob == Tuple('foo', '2017-01-01'))) self.assertSQL(query, expected, ['foo', '2017-01-01']) def test_tuple_comparison_subquery(self): PA = Person.alias('pa') subquery = (PA .select(PA.name, PA.id) .where(PA.name != 'huey')) query = (Person .select(Person.name) .where(Tuple(Person.name, Person.id).in_(subquery))) self.assertSQL(query, ( 'SELECT "t1"."name" FROM "person" AS "t1" ' 'WHERE (("t1"."name", "t1"."id") IN (' 'SELECT "pa"."name", "pa"."id" FROM "person" AS "pa" ' 'WHERE ("pa"."name" != ?)))'), ['huey']) def test_empty_in(self): query = User.select(User.c.id).where(User.c.username.in_([])) self.assertSQL(query, ( 'SELECT "t1"."id" FROM "users" AS "t1" ' 'WHERE (0 = 1)'), []) query = User.select(User.c.id).where(User.c.username.not_in([])) self.assertSQL(query, ( 'SELECT "t1"."id" FROM "users" AS "t1" ' 'WHERE (1 = 1)'), []) def test_add_custom_op(self): def mod(lhs, rhs): return Expression(lhs, '%', rhs) Stat = Table('stats') query = (Stat .select(fn.COUNT(Stat.c.id)) .where(mod(Stat.c.index, 10) == 0)) self.assertSQL(query, ( 'SELECT COUNT("t1"."id") FROM "stats" AS "t1" ' 'WHERE (("t1"."index" % ?) = ?)'), [10, 0]) def test_where_convert_to_is_null(self): Note = Table('notes', ('id', 'content', 'user_id')) query = Note.select().where(Note.user_id == None) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."content", "t1"."user_id" ' 'FROM "notes" AS "t1" WHERE ("t1"."user_id" IS NULL)'), []) def test_like_escape(self): T = Table('tbl', ('key',)) def assertLike(expr, expected): query = T.select().where(expr) sql, params = __sql__(T.select().where(expr)) match_obj = re.search(r'\("t1"."key" (ILIKE[^\)]+)\)', sql) if match_obj is None: raise AssertionError('LIKE expression not found in query.') like, = match_obj.groups() self.assertEqual((like, params), expected) cases = ( (T.key.contains('base'), ('ILIKE ?', ['%base%'])), (T.key.contains('x_y'), ("ILIKE ? ESCAPE ?", ['%x\\_y%', '\\'])), (T.key.contains('__y'), ("ILIKE ? ESCAPE ?", ['%\\_\\_y%', '\\'])), (T.key.contains('%'), ("ILIKE ? ESCAPE ?", ['%\\%%', '\\'])), (T.key.contains('_%'), ("ILIKE ? ESCAPE ?", ['%\\_\\%%', '\\'])), (T.key.startswith('base'), ("ILIKE ?", ['base%'])), (T.key.startswith('x_y'), ("ILIKE ? ESCAPE ?", ['x\\_y%', '\\'])), (T.key.startswith('x%'), ("ILIKE ? ESCAPE ?", ['x\\%%', '\\'])), (T.key.startswith('_%'), ("ILIKE ? 
ESCAPE ?", ['\\_\\%%', '\\'])), (T.key.endswith('base'), ("ILIKE ?", ['%base'])), (T.key.endswith('x_y'), ("ILIKE ? ESCAPE ?", ['%x\\_y', '\\'])), (T.key.endswith('x%'), ("ILIKE ? ESCAPE ?", ['%x\\%', '\\'])), (T.key.endswith('_%'), ("ILIKE ? ESCAPE ?", ['%\\_\\%', '\\'])), ) for expr, expected in cases: assertLike(expr, expected) def test_like_expr(self): query = User.select(User.c.id).where(User.c.username.like('%foo%')) self.assertSQL(query, ( 'SELECT "t1"."id" FROM "users" AS "t1" ' 'WHERE ("t1"."username" LIKE ?)'), ['%foo%']) query = User.select(User.c.id).where(User.c.username.ilike('%foo%')) self.assertSQL(query, ( 'SELECT "t1"."id" FROM "users" AS "t1" ' 'WHERE ("t1"."username" ILIKE ?)'), ['%foo%']) def test_field_ops(self): query = User.select(User.c.id).where(User.c.username.regexp('[a-z]+')) self.assertSQL(query, ( 'SELECT "t1"."id" FROM "users" AS "t1" ' 'WHERE ("t1"."username" REGEXP ?)'), ['[a-z]+']) query = User.select(User.c.id).where(User.c.username.contains('abc')) self.assertSQL(query, ( 'SELECT "t1"."id" FROM "users" AS "t1" ' 'WHERE ("t1"."username" ILIKE ?)'), ['%abc%']) class TestInsertQuery(BaseTestCase): def test_insert_simple(self): query = User.insert({ User.c.username: 'charlie', User.c.superuser: False, User.c.admin: True}) self.assertSQL(query, ( 'INSERT INTO "users" ("admin", "superuser", "username") ' 'VALUES (?, ?, ?)'), [True, False, 'charlie']) @requires_sqlite def test_replace_sqlite(self): query = User.replace({ User.c.username: 'charlie', User.c.superuser: False}) self.assertSQL(query, ( 'INSERT OR REPLACE INTO "users" ("superuser", "username") ' 'VALUES (?, ?)'), [False, 'charlie']) @requires_mysql def test_replace_mysql(self): query = User.replace({ User.c.username: 'charlie', User.c.superuser: False}) self.assertSQL(query, ( 'REPLACE INTO "users" ("superuser", "username") ' 'VALUES (?, ?)'), [False, 'charlie']) def test_insert_list(self): data = [ {Person.name: 'charlie'}, {Person.name: 'huey'}, {Person.name: 'zaizee'}] query = Person.insert(data) self.assertSQL(query, ( 'INSERT INTO "person" ("name") VALUES (?), (?), (?)'), ['charlie', 'huey', 'zaizee']) def test_insert_list_with_columns(self): data = [(i,) for i in ('charlie', 'huey', 'zaizee')] query = Person.insert(data, columns=[Person.name]) self.assertSQL(query, ( 'INSERT INTO "person" ("name") VALUES (?), (?), (?)'), ['charlie', 'huey', 'zaizee']) # Use column name instead of column instance. query = Person.insert(data, columns=['name']) self.assertSQL(query, ( 'INSERT INTO "person" ("name") VALUES (?), (?), (?)'), ['charlie', 'huey', 'zaizee']) def test_insert_list_infer_columns(self): data = [('p1', '1980-01-01'), ('p2', '1980-02-02')] self.assertSQL(Person.insert(data), ( 'INSERT INTO "person" ("name", "dob") VALUES (?, ?), (?, ?)'), ['p1', '1980-01-01', 'p2', '1980-02-02']) # Cannot infer any columns for User. data = [('u1',), ('u2',)] self.assertRaises(ValueError, User.insert(data).sql) # Note declares columns, but no primary key. So we would have to # include it for this to work. 
data = [(1, 'p1-n'), (2, 'p2-n')] self.assertRaises(ValueError, Note.insert(data).sql) data = [(1, 1, 'p1-n'), (2, 2, 'p2-n')] self.assertSQL(Note.insert(data), ( 'INSERT INTO "note" ("id", "person_id", "content") ' 'VALUES (?, ?, ?), (?, ?, ?)'), [1, 1, 'p1-n', 2, 2, 'p2-n']) def test_insert_query(self): source = User.select(User.c.username).where(User.c.admin == False) query = Person.insert(source, columns=[Person.name]) self.assertSQL(query, ( 'INSERT INTO "person" ("name") ' 'SELECT "t1"."username" FROM "users" AS "t1" ' 'WHERE ("t1"."admin" = ?)'), [False]) def test_insert_query_cte(self): cte = User.select(User.c.username).cte('foo') source = cte.select(cte.c.username) query = Person.insert(source, columns=[Person.name]).with_cte(cte) self.assertSQL(query, ( 'WITH "foo" AS (SELECT "t1"."username" FROM "users" AS "t1") ' 'INSERT INTO "person" ("name") ' 'SELECT "foo"."username" FROM "foo"'), []) def test_insert_single_value_query(self): query = Person.select(Person.id).where(Person.name == 'huey') insert = Note.insert({ Note.person_id: query, Note.content: 'hello'}) self.assertSQL(insert, ( 'INSERT INTO "note" ("content", "person_id") VALUES (?, ' '(SELECT "t1"."id" FROM "person" AS "t1" ' 'WHERE ("t1"."name" = ?)))'), ['hello', 'huey']) def test_insert_returning(self): query = (Person .insert({ Person.name: 'zaizee', Person.dob: datetime.date(2000, 1, 2)}) .returning(Person.id, Person.name, Person.dob)) self.assertSQL(query, ( 'INSERT INTO "person" ("dob", "name") ' 'VALUES (?, ?) ' 'RETURNING "person"."id", "person"."name", "person"."dob"'), [datetime.date(2000, 1, 2), 'zaizee']) query = query.returning(Person.id, Person.name.alias('new_name')) self.assertSQL(query, ( 'INSERT INTO "person" ("dob", "name") ' 'VALUES (?, ?) ' 'RETURNING "person"."id", "person"."name" AS "new_name"'), [datetime.date(2000, 1, 2), 'zaizee']) def test_empty(self): class Empty(TestModel): pass query = Empty.insert() if isinstance(db, MySQLDatabase): sql = 'INSERT INTO "empty" () VALUES ()' elif isinstance(db, PostgresqlDatabase): sql = 'INSERT INTO "empty" DEFAULT VALUES RETURNING "empty"."id"' else: sql = 'INSERT INTO "empty" DEFAULT VALUES' self.assertSQL(query, sql, []) class TestUpdateQuery(BaseTestCase): def test_update_query(self): query = (User .update({ User.c.username: 'nuggie', User.c.admin: False, User.c.counter: User.c.counter + 1}) .where(User.c.username == 'nugz')) self.assertSQL(query, ( 'UPDATE "users" SET ' '"admin" = ?, ' '"counter" = ("users"."counter" + ?), ' '"username" = ? ' 'WHERE ("users"."username" = ?)'), [False, 1, 'nuggie', 'nugz']) def test_update_subquery(self): count = fn.COUNT(Tweet.c.id).alias('ct') subquery = (User .select(User.c.id, count) .join(Tweet, on=(Tweet.c.user_id == User.c.id)) .group_by(User.c.id) .having(count > 100)) query = (User .update({ User.c.muted: True, User.c.counter: 0}) .where(User.c.id << subquery)) self.assertSQL(query, ( 'UPDATE "users" SET ' '"counter" = ?, ' '"muted" = ? 
' 'WHERE ("users"."id" IN (' 'SELECT "users"."id", COUNT("t1"."id") AS "ct" ' 'FROM "users" AS "users" ' 'INNER JOIN "tweets" AS "t1" ' 'ON ("t1"."user_id" = "users"."id") ' 'GROUP BY "users"."id" ' 'HAVING ("ct" > ?)))'), [0, True, 100]) def test_update_value_subquery(self): subquery = (Tweet .select(fn.MAX(Tweet.c.id)) .where(Tweet.c.user_id == User.c.id)) query = (User .update({User.c.last_tweet_id: subquery}) .where(User.c.last_tweet_id.is_null(True))) self.assertSQL(query, ( 'UPDATE "users" SET ' '"last_tweet_id" = (SELECT MAX("t1"."id") FROM "tweets" AS "t1" ' 'WHERE ("t1"."user_id" = "users"."id")) ' 'WHERE ("users"."last_tweet_id" IS NULL)'), []) def test_update_from(self): data = [(1, 'u1-x'), (2, 'u2-x')] vl = ValuesList(data, columns=('id', 'username'), alias='tmp') query = (User .update(username=vl.c.username) .from_(vl) .where(User.c.id == vl.c.id)) self.assertSQL(query, ( 'UPDATE "users" SET "username" = "tmp"."username" ' 'FROM (VALUES (?, ?), (?, ?)) AS "tmp"("id", "username") ' 'WHERE ("users"."id" = "tmp"."id")'), [1, 'u1-x', 2, 'u2-x']) subq = vl.select(vl.c.id, vl.c.username) query = (User .update({User.c.username: subq.c.username}) .from_(subq) .where(User.c.id == subq.c.id)) self.assertSQL(query, ( 'UPDATE "users" SET "username" = "t1"."username" FROM (' 'SELECT "tmp"."id", "tmp"."username" ' 'FROM (VALUES (?, ?), (?, ?)) AS "tmp"("id", "username")) AS "t1" ' 'WHERE ("users"."id" = "t1"."id")'), [1, 'u1-x', 2, 'u2-x']) def test_update_returning(self): query = (User .update({User.c.is_admin: True}) .where(User.c.username == 'charlie') .returning(User.c.id)) self.assertSQL(query, ( 'UPDATE "users" SET "is_admin" = ? WHERE ("users"."username" = ?) ' 'RETURNING "users"."id"'), [True, 'charlie']) query = query.returning(User.c.is_admin.alias('new_is_admin')) self.assertSQL(query, ( 'UPDATE "users" SET "is_admin" = ? WHERE ("users"."username" = ?) ' 'RETURNING "users"."is_admin" AS "new_is_admin"'), [True, 'charlie']) class TestDeleteQuery(BaseTestCase): def test_delete_query(self): query = (User .delete() .where(User.c.username != 'charlie') .limit(3)) self.assertSQL(query, ( 'DELETE FROM "users" WHERE ("users"."username" != ?) LIMIT ?'), ['charlie', 3]) def test_delete_subquery(self): count = fn.COUNT(Tweet.c.id).alias('ct') subquery = (User .select(User.c.id, count) .join(Tweet, on=(Tweet.c.user_id == User.c.id)) .group_by(User.c.id) .having(count > 100)) query = (User .delete() .where(User.c.id << subquery)) self.assertSQL(query, ( 'DELETE FROM "users" ' 'WHERE ("users"."id" IN (' 'SELECT "users"."id", COUNT("t1"."id") AS "ct" ' 'FROM "users" AS "users" ' 'INNER JOIN "tweets" AS "t1" ON ("t1"."user_id" = "users"."id") ' 'GROUP BY "users"."id" ' 'HAVING ("ct" > ?)))'), [100]) def test_delete_cte(self): cte = (User .select(User.c.id) .where(User.c.admin == True) .cte('u')) query = (User .delete() .where(User.c.id << cte.select(cte.c.id)) .with_cte(cte)) self.assertSQL(query, ( 'WITH "u" AS ' '(SELECT "t1"."id" FROM "users" AS "t1" WHERE ("t1"."admin" = ?)) ' 'DELETE FROM "users" ' 'WHERE ("users"."id" IN (SELECT "u"."id" FROM "u"))'), [True]) def test_delete_returning(self): query = (User .delete() .where(User.c.id > 2) .returning(User.c.username)) self.assertSQL(query, ( 'DELETE FROM "users" ' 'WHERE ("users"."id" > ?) ' 'RETURNING "users"."username"'), [2]) query = query.returning(User.c.id, User.c.username, SQL('1')) self.assertSQL(query, ( 'DELETE FROM "users" ' 'WHERE ("users"."id" > ?) 
' 'RETURNING "users"."id", "users"."username", 1'), [2]) query = query.returning(User.c.id.alias('old_id')) self.assertSQL(query, ( 'DELETE FROM "users" ' 'WHERE ("users"."id" > ?) ' 'RETURNING "users"."id" AS "old_id"'), [2]) Register = Table('register', ('id', 'value', 'category')) class TestWindowFunctions(BaseTestCase): def test_partition_unordered(self): partition = [Register.category] query = (Register .select( Register.category, Register.value, fn.AVG(Register.value).over(partition_by=partition)) .order_by(Register.id)) self.assertSQL(query, ( 'SELECT "t1"."category", "t1"."value", AVG("t1"."value") ' 'OVER (PARTITION BY "t1"."category") ' 'FROM "register" AS "t1" ORDER BY "t1"."id"'), []) def test_ordered_unpartitioned(self): query = (Register .select( Register.value, fn.RANK().over(order_by=[Register.value]))) self.assertSQL(query, ( 'SELECT "t1"."value", RANK() OVER (ORDER BY "t1"."value") ' 'FROM "register" AS "t1"'), []) def test_ordered_partitioned(self): query = Register.select( Register.value, fn.SUM(Register.value).over( order_by=Register.id, partition_by=Register.category).alias('rsum')) self.assertSQL(query, ( 'SELECT "t1"."value", SUM("t1"."value") ' 'OVER (PARTITION BY "t1"."category" ORDER BY "t1"."id") AS "rsum" ' 'FROM "register" AS "t1"'), []) def test_empty_over(self): query = (Register .select(Register.value, fn.LAG(Register.value, 1).over()) .order_by(Register.value)) self.assertSQL(query, ( 'SELECT "t1"."value", LAG("t1"."value", ?) OVER () ' 'FROM "register" AS "t1" ' 'ORDER BY "t1"."value"'), [1]) def test_frame(self): query = (Register .select( Register.value, fn.AVG(Register.value).over( partition_by=[Register.category], start=Window.preceding(), end=Window.following(2)))) self.assertSQL(query, ( 'SELECT "t1"."value", AVG("t1"."value") ' 'OVER (PARTITION BY "t1"."category" ' 'ROWS BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING) ' 'FROM "register" AS "t1"'), []) query = (Register .select(Register.value, fn.AVG(Register.value).over( partition_by=[Register.category], order_by=[Register.value], start=Window.CURRENT_ROW, end=Window.following()))) self.assertSQL(query, ( 'SELECT "t1"."value", AVG("t1"."value") ' 'OVER (PARTITION BY "t1"."category" ' 'ORDER BY "t1"."value" ' 'ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) ' 'FROM "register" AS "t1"'), []) def test_frame_types(self): def assertFrame(over_kwargs, expected): query = Register.select( Register.value, fn.SUM(Register.value).over(**over_kwargs)) sql, params = __sql__(query) match_obj = re.search(r'OVER \((.*?)\) FROM', sql) self.assertTrue(match_obj is not None) self.assertEqual(match_obj.groups()[0], expected) self.assertEqual(params, []) # No parameters -- empty OVER(). assertFrame({}, ('')) # Explicitly specify RANGE / ROWS frame-types. assertFrame({'frame_type': Window.RANGE}, 'RANGE UNBOUNDED PRECEDING') assertFrame({'frame_type': Window.ROWS}, 'ROWS UNBOUNDED PRECEDING') # Start and end boundaries. assertFrame({'start': Window.preceding(), 'end': Window.following()}, 'ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING') assertFrame({ 'start': Window.preceding(), 'end': Window.following(), 'frame_type': Window.RANGE, }, 'RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING') assertFrame({ 'start': Window.preceding(), 'end': Window.following(), 'frame_type': Window.ROWS, }, 'ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING') # Start boundary. 
assertFrame({'start': Window.preceding()}, 'ROWS UNBOUNDED PRECEDING') assertFrame({'start': Window.preceding(), 'frame_type': Window.RANGE}, 'RANGE UNBOUNDED PRECEDING') assertFrame({'start': Window.preceding(), 'frame_type': Window.ROWS}, 'ROWS UNBOUNDED PRECEDING') # Ordered or partitioned. assertFrame({'order_by': Register.value}, 'ORDER BY "t1"."value"') assertFrame({'frame_type': Window.RANGE, 'order_by': Register.value}, 'ORDER BY "t1"."value" RANGE UNBOUNDED PRECEDING') assertFrame({'frame_type': Window.ROWS, 'order_by': Register.value}, 'ORDER BY "t1"."value" ROWS UNBOUNDED PRECEDING') assertFrame({'partition_by': Register.category}, 'PARTITION BY "t1"."category"') assertFrame({ 'frame_type': Window.RANGE, 'partition_by': Register.category, }, 'PARTITION BY "t1"."category" RANGE UNBOUNDED PRECEDING') assertFrame({ 'frame_type': Window.ROWS, 'partition_by': Register.category, }, 'PARTITION BY "t1"."category" ROWS UNBOUNDED PRECEDING') # Ordering and boundaries. assertFrame({'order_by': Register.value, 'start': Window.CURRENT_ROW, 'end': Window.following()}, ('ORDER BY "t1"."value" ' 'ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING')) assertFrame({'order_by': Register.value, 'start': Window.CURRENT_ROW, 'end': Window.following(), 'frame_type': Window.RANGE}, ('ORDER BY "t1"."value" ' 'RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING')) assertFrame({'order_by': Register.value, 'start': Window.CURRENT_ROW, 'end': Window.following(), 'frame_type': Window.ROWS}, ('ORDER BY "t1"."value" ' 'ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING')) def test_running_total(self): EventLog = Table('evtlog', ('id', 'timestamp', 'data')) w = fn.SUM(EventLog.timestamp).over(order_by=[EventLog.timestamp]) query = (EventLog .select(EventLog.timestamp, EventLog.data, w.alias('elapsed')) .order_by(EventLog.timestamp)) self.assertSQL(query, ( 'SELECT "t1"."timestamp", "t1"."data", ' 'SUM("t1"."timestamp") OVER (ORDER BY "t1"."timestamp") ' 'AS "elapsed" ' 'FROM "evtlog" AS "t1" ORDER BY "t1"."timestamp"'), []) w = fn.SUM(EventLog.timestamp).over( order_by=[EventLog.timestamp], partition_by=[EventLog.data]) query = (EventLog .select(EventLog.timestamp, EventLog.data, w.alias('elapsed')) .order_by(EventLog.timestamp)) self.assertSQL(query, ( 'SELECT "t1"."timestamp", "t1"."data", ' 'SUM("t1"."timestamp") OVER ' '(PARTITION BY "t1"."data" ORDER BY "t1"."timestamp") AS "elapsed"' ' FROM "evtlog" AS "t1" ORDER BY "t1"."timestamp"'), []) def test_named_window(self): window = Window(partition_by=[Register.category]) query = (Register .select( Register.category, Register.value, fn.AVG(Register.value).over(window)) .window(window)) self.assertSQL(query, ( 'SELECT "t1"."category", "t1"."value", AVG("t1"."value") ' 'OVER w ' 'FROM "register" AS "t1" ' 'WINDOW w AS (PARTITION BY "t1"."category")'), []) window = Window( partition_by=[Register.category], order_by=[Register.value.desc()]) query = (Register .select( Register.value, fn.RANK().over(window)) .window(window)) self.assertSQL(query, ( 'SELECT "t1"."value", RANK() OVER w ' 'FROM "register" AS "t1" ' 'WINDOW w AS (' 'PARTITION BY "t1"."category" ' 'ORDER BY "t1"."value" DESC)'), []) def test_multiple_windows(self): w1 = Window(partition_by=[Register.category]).alias('w1') w2 = Window(order_by=[Register.value]).alias('w2') query = (Register .select( Register.value, fn.AVG(Register.value).over(w1), fn.RANK().over(w2)) .window(w1, w2)) self.assertSQL(query, ( 'SELECT "t1"."value", AVG("t1"."value") OVER w1, RANK() OVER w2 ' 'FROM "register" AS "t1" ' 'WINDOW w1 AS 
(PARTITION BY "t1"."category"), ' 'w2 AS (ORDER BY "t1"."value")'), []) def test_alias_window(self): w = Window(order_by=Register.value).alias('wx') query = Register.select(Register.value, fn.RANK().over(w)).window(w) # We can re-alias the window and it's updated alias is reflected # correctly in the final query. w.alias('wz') self.assertSQL(query, ( 'SELECT "t1"."value", RANK() OVER wz ' 'FROM "register" AS "t1" ' 'WINDOW wz AS (ORDER BY "t1"."value")'), []) def test_reuse_window(self): EventLog = Table('evt', ('id', 'timestamp', 'key')) window = Window(partition_by=[EventLog.key], order_by=[EventLog.timestamp]) query = (EventLog .select(EventLog.timestamp, EventLog.key, fn.NTILE(4).over(window).alias('quartile'), fn.NTILE(5).over(window).alias('quintile'), fn.NTILE(100).over(window).alias('percentile')) .order_by(EventLog.timestamp) .window(window)) self.assertSQL(query, ( 'SELECT "t1"."timestamp", "t1"."key", ' 'NTILE(?) OVER w AS "quartile", ' 'NTILE(?) OVER w AS "quintile", ' 'NTILE(?) OVER w AS "percentile" ' 'FROM "evt" AS "t1" ' 'WINDOW w AS (' 'PARTITION BY "t1"."key" ORDER BY "t1"."timestamp") ' 'ORDER BY "t1"."timestamp"'), [4, 5, 100]) def test_filter_clause(self): condsum = fn.SUM(Register.value).filter(Register.value > 1).over( order_by=[Register.id], partition_by=[Register.category], start=Window.preceding(1)) query = (Register .select(Register.category, Register.value, condsum) .order_by(Register.category)) self.assertSQL(query, ( 'SELECT "t1"."category", "t1"."value", SUM("t1"."value") FILTER (' 'WHERE ("t1"."value" > ?)) OVER (PARTITION BY "t1"."category" ' 'ORDER BY "t1"."id" ROWS 1 PRECEDING) ' 'FROM "register" AS "t1" ' 'ORDER BY "t1"."category"'), [1]) def test_window_in_orderby(self): Register = Table('register', ['id', 'value']) w = Window(partition_by=[Register.value], order_by=[Register.id]) query = (Register .select() .window(w) .order_by(fn.FIRST_VALUE(Register.id).over(w))) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."value" FROM "register" AS "t1" ' 'WINDOW w AS (PARTITION BY "t1"."value" ORDER BY "t1"."id") ' 'ORDER BY FIRST_VALUE("t1"."id") OVER w'), []) fv = fn.FIRST_VALUE(Register.id).over( partition_by=[Register.value], order_by=[Register.id]) query = Register.select().order_by(fv) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."value" FROM "register" AS "t1" ' 'ORDER BY FIRST_VALUE("t1"."id") ' 'OVER (PARTITION BY "t1"."value" ORDER BY "t1"."id")'), []) def test_window_extends(self): Tbl = Table('tbl', ('b', 'c')) w1 = Window(partition_by=[Tbl.b], alias='win1') w2 = Window(extends=w1, order_by=[Tbl.c], alias='win2') query = Tbl.select(fn.GROUP_CONCAT(Tbl.c).over(w2)).window(w1, w2) self.assertSQL(query, ( 'SELECT GROUP_CONCAT("t1"."c") OVER win2 FROM "tbl" AS "t1" ' 'WINDOW win1 AS (PARTITION BY "t1"."b"), ' 'win2 AS (win1 ORDER BY "t1"."c")'), []) w1 = Window(partition_by=[Tbl.b], alias='w1') w2 = Window(extends=w1).alias('w2') w3 = Window(extends=w2).alias('w3') w4 = Window(extends=w3, order_by=[Tbl.c]).alias('w4') query = (Tbl .select(fn.GROUP_CONCAT(Tbl.c).over(w4)) .window(w1, w2, w3, w4)) self.assertSQL(query, ( 'SELECT GROUP_CONCAT("t1"."c") OVER w4 FROM "tbl" AS "t1" ' 'WINDOW w1 AS (PARTITION BY "t1"."b"), w2 AS (w1), w3 AS (w2), ' 'w4 AS (w3 ORDER BY "t1"."c")'), []) def test_window_ranged(self): Tbl = Table('tbl', ('a', 'b')) query = (Tbl .select(Tbl.a, fn.SUM(Tbl.b).over( order_by=[Tbl.a.desc()], frame_type=Window.RANGE, start=Window.preceding(1), end=Window.following(2))) .order_by(Tbl.a.asc())) self.assertSQL(query, ( 'SELECT "t1"."a", 
SUM("t1"."b") OVER (' 'ORDER BY "t1"."a" DESC RANGE BETWEEN 1 PRECEDING AND 2 FOLLOWING)' ' FROM "tbl" AS "t1" ORDER BY "t1"."a" ASC'), []) query = (Tbl .select(Tbl.a, fn.SUM(Tbl.b).over( order_by=[Tbl.a], frame_type=Window.GROUPS, start=Window.preceding(3), end=Window.preceding(1)))) self.assertSQL(query, ( 'SELECT "t1"."a", SUM("t1"."b") OVER (' 'ORDER BY "t1"."a" GROUPS BETWEEN 3 PRECEDING AND 1 PRECEDING) ' 'FROM "tbl" AS "t1"'), []) query = (Tbl .select(Tbl.a, fn.SUM(Tbl.b).over( order_by=[Tbl.a], frame_type=Window.GROUPS, start=Window.following(1), end=Window.following(5)))) self.assertSQL(query, ( 'SELECT "t1"."a", SUM("t1"."b") OVER (' 'ORDER BY "t1"."a" GROUPS BETWEEN 1 FOLLOWING AND 5 FOLLOWING) ' 'FROM "tbl" AS "t1"'), []) def test_window_frametypes(self): Tbl = Table('tbl', ('b', 'c')) fts = (('as_range', Window.RANGE, 'RANGE'), ('as_rows', Window.ROWS, 'ROWS'), ('as_groups', Window.GROUPS, 'GROUPS')) for method, arg, sql in fts: w = getattr(Window(order_by=[Tbl.b + 1]), method)() self.assertSQL(Tbl.select(fn.SUM(Tbl.c).over(w)).window(w), ( 'SELECT SUM("t1"."c") OVER w FROM "tbl" AS "t1" ' 'WINDOW w AS (ORDER BY ("t1"."b" + ?) ' '%s UNBOUNDED PRECEDING)') % sql, [1]) query = Tbl.select(fn.SUM(Tbl.c) .over(order_by=[Tbl.b + 1], frame_type=arg)) self.assertSQL(query, ( 'SELECT SUM("t1"."c") OVER (ORDER BY ("t1"."b" + ?) ' '%s UNBOUNDED PRECEDING) FROM "tbl" AS "t1"') % sql, [1]) def test_window_frame_exclusion(self): Tbl = Table('tbl', ('b', 'c')) fts = ((Window.CURRENT_ROW, 'CURRENT ROW'), (Window.TIES, 'TIES'), (Window.NO_OTHERS, 'NO OTHERS'), (Window.GROUP, 'GROUP')) for arg, sql in fts: query = Tbl.select(fn.MAX(Tbl.b).over( order_by=[Tbl.c], start=Window.preceding(4), end=Window.following(), frame_type=Window.ROWS, exclude=arg)) self.assertSQL(query, ( 'SELECT MAX("t1"."b") OVER (ORDER BY "t1"."c" ' 'ROWS BETWEEN 4 PRECEDING AND UNBOUNDED FOLLOWING ' 'EXCLUDE %s) FROM "tbl" AS "t1"') % sql, []) def test_filter_window(self): # Example derived from sqlite window test 5.1.3.2. Tbl = Table('tbl', ('a', 'c')) win = Window(partition_by=fn.COALESCE(Tbl.a, ''), frame_type=Window.RANGE, start=Window.CURRENT_ROW, end=Window.following(), exclude=Window.NO_OTHERS) query = (Tbl .select(fn.SUM(Tbl.c).filter(Tbl.c < 5).over(win), fn.RANK().over(win), fn.DENSE_RANK().over(win)) .window(win)) self.assertSQL(query, ( 'SELECT SUM("t1"."c") FILTER (WHERE ("t1"."c" < ?)) OVER w, ' 'RANK() OVER w, DENSE_RANK() OVER w ' 'FROM "tbl" AS "t1" ' 'WINDOW w AS (PARTITION BY COALESCE("t1"."a", ?) 
' 'RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING ' 'EXCLUDE NO OTHERS)'), [5, '']) class TestValuesList(BaseTestCase): _data = [(1, 'one'), (2, 'two'), (3, 'three')] def test_values_list(self): vl = ValuesList(self._data) query = vl.select(SQL('*')) self.assertSQL(query, ( 'SELECT * FROM (VALUES (?, ?), (?, ?), (?, ?)) AS "t1"'), [1, 'one', 2, 'two', 3, 'three']) def test_values_list_named_columns(self): vl = ValuesList(self._data).columns('idx', 'name') query = (vl .select(vl.c.idx, vl.c.name) .order_by(vl.c.idx)) self.assertSQL(query, ( 'SELECT "t1"."idx", "t1"."name" ' 'FROM (VALUES (?, ?), (?, ?), (?, ?)) AS "t1"("idx", "name") ' 'ORDER BY "t1"."idx"'), [1, 'one', 2, 'two', 3, 'three']) def test_named_values_list(self): vl = ValuesList(self._data, ['idx', 'name']).alias('vl') query = (vl .select(vl.c.idx, vl.c.name) .order_by(vl.c.idx)) self.assertSQL(query, ( 'SELECT "vl"."idx", "vl"."name" ' 'FROM (VALUES (?, ?), (?, ?), (?, ?)) AS "vl"("idx", "name") ' 'ORDER BY "vl"."idx"'), [1, 'one', 2, 'two', 3, 'three']) def test_docs_examples(self): data = [(1, 'first'), (2, 'second')] vl = ValuesList(data, columns=('idx', 'name')) query = (vl .select(vl.c.idx, vl.c.name) .order_by(vl.c.idx)) self.assertSQL(query, ( 'SELECT "t1"."idx", "t1"."name" ' 'FROM (VALUES (?, ?), (?, ?)) AS "t1"("idx", "name") ' 'ORDER BY "t1"."idx"'), [1, 'first', 2, 'second']) vl = ValuesList([(1, 'first'), (2, 'second')]) vl = vl.columns('idx', 'name').alias('v') query = vl.select(vl.c.idx, vl.c.name) self.assertSQL(query, ( 'SELECT "v"."idx", "v"."name" ' 'FROM (VALUES (?, ?), (?, ?)) AS "v"("idx", "name")'), [1, 'first', 2, 'second']) def test_join_on_valueslist(self): vl = ValuesList([('huey',), ('zaizee',)], columns=['username']) query = (User .select(vl.c.username) .join(vl, on=(User.c.username == vl.c.username)) .order_by(vl.c.username.desc())) self.assertSQL(query, ( 'SELECT "t1"."username" FROM "users" AS "t2" ' 'INNER JOIN (VALUES (?), (?)) AS "t1"("username") ' 'ON ("t2"."username" = "t1"."username") ' 'ORDER BY "t1"."username" DESC'), ['huey', 'zaizee']) class TestCaseFunction(BaseTestCase): def test_case_function(self): NameNum = Table('nn', ('name', 'number')) query = (NameNum .select(NameNum.name, Case(NameNum.number, ( (1, 'one'), (2, 'two')), '?').alias('num_str'))) self.assertSQL(query, ( 'SELECT "t1"."name", CASE "t1"."number" ' 'WHEN ? THEN ? ' 'WHEN ? THEN ? ' 'ELSE ? END AS "num_str" ' 'FROM "nn" AS "t1"'), [1, 'one', 2, 'two', '?']) query = (NameNum .select(NameNum.name, Case(None, ( (NameNum.number == 1, 'one'), (NameNum.number == 2, 'two')), '?'))) self.assertSQL(query, ( 'SELECT "t1"."name", CASE ' 'WHEN ("t1"."number" = ?) THEN ? ' 'WHEN ("t1"."number" = ?) THEN ? ' 'ELSE ? END ' 'FROM "nn" AS "t1"'), [1, 'one', 2, 'two', '?']) def test_case_subquery(self): Name = Table('n', ('id', 'name',)) case = Case(None, [(Name.id.in_(Name.select(Name.id)), 1)], 0) q = Name.select(fn.SUM(case)) self.assertSQL(q, ( 'SELECT SUM(' 'CASE WHEN ("t1"."id" IN (SELECT "t1"."id" FROM "n" AS "t1")) ' 'THEN ? ELSE ? END) FROM "n" AS "t1"'), [1, 0]) case = Case(None, [ (Name.id < 5, Name.select(fn.SUM(Name.id))), (Name.id > 5, Name.select(fn.COUNT(Name.name)).distinct())], Name.select(fn.MAX(Name.id))) q = Name.select(Name.name, case.alias('magic')) self.assertSQL(q, ( 'SELECT "t1"."name", CASE ' 'WHEN ("t1"."id" < ?) ' 'THEN (SELECT SUM("t1"."id") FROM "n" AS "t1") ' 'WHEN ("t1"."id" > ?) 
' 'THEN (SELECT DISTINCT COUNT("t1"."name") FROM "n" AS "t1") ' 'ELSE (SELECT MAX("t1"."id") FROM "n" AS "t1") END AS "magic" ' 'FROM "n" AS "t1"'), [5, 5]) class TestSelectFeatures(BaseTestCase): def test_reselect(self): query = Person.select(Person.name) self.assertSQL(query, 'SELECT "t1"."name" FROM "person" AS "t1"', []) query = query.columns(Person.id, Person.name, Person.dob) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."name", "t1"."dob" ' 'FROM "person" AS "t1"'), []) def test_distinct_on(self): query = (Note .select(Person.name, Note.content) .join(Person, on=(Note.person_id == Person.id)) .order_by(Person.name, Note.content) .distinct(Person.name)) self.assertSQL(query, ( 'SELECT DISTINCT ON ("t1"."name") ' '"t1"."name", "t2"."content" ' 'FROM "note" AS "t2" ' 'INNER JOIN "person" AS "t1" ON ("t2"."person_id" = "t1"."id") ' 'ORDER BY "t1"."name", "t2"."content"'), []) query = (Person .select(Person.name) .distinct(Person.name)) self.assertSQL(query, ( 'SELECT DISTINCT ON ("t1"."name") "t1"."name" ' 'FROM "person" AS "t1"'), []) def test_distinct(self): query = Person.select(Person.name).distinct() self.assertSQL(query, 'SELECT DISTINCT "t1"."name" FROM "person" AS "t1"', []) def test_distinct_count(self): query = Person.select(fn.COUNT(Person.name.distinct())) self.assertSQL(query, ( 'SELECT COUNT(DISTINCT "t1"."name") FROM "person" AS "t1"'), []) def test_filtered_count(self): filtered_count = (fn.COUNT(Person.name) .filter(Person.dob < datetime.date(2000, 1, 1))) query = Person.select(fn.COUNT(Person.name), filtered_count) self.assertSQL(query, ( 'SELECT COUNT("t1"."name"), COUNT("t1"."name") ' 'FILTER (WHERE ("t1"."dob" < ?)) ' 'FROM "person" AS "t1"'), [datetime.date(2000, 1, 1)]) def test_ordered_aggregate(self): agg = fn.array_agg(Person.name).order_by(Person.id.desc()) self.assertSQL(Person.select(agg.alias('names')), ( 'SELECT array_agg("t1"."name" ORDER BY "t1"."id" DESC) AS "names" ' 'FROM "person" AS "t1"'), []) agg = fn.string_agg(Person.name, ',').order_by(Person.dob, Person.id) self.assertSQL(Person.select(agg), ( 'SELECT string_agg("t1"."name", ? ORDER BY "t1"."dob", "t1"."id")' ' FROM "person" AS "t1"'), [',']) agg = (fn.string_agg(Person.name.concat('-x'), ',') .order_by(Person.name.desc(), Person.dob.asc())) self.assertSQL(Person.select(agg), ( 'SELECT string_agg(("t1"."name" || ?), ? ORDER BY "t1"."name" DESC' ', "t1"."dob" ASC) ' 'FROM "person" AS "t1"'), ['-x', ',']) agg = agg.order_by() self.assertSQL(Person.select(agg), ( 'SELECT string_agg(("t1"."name" || ?), ?) ' 'FROM "person" AS "t1"'), ['-x', ',']) def test_for_update(self): query = (Person .select() .where(Person.name == 'charlie') .for_update()) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."name", "t1"."dob" ' 'FROM "person" AS "t1" ' 'WHERE ("t1"."name" = ?) ' 'FOR UPDATE'), ['charlie'], for_update=True) query = query.for_update('FOR SHARE NOWAIT') self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."name", "t1"."dob" ' 'FROM "person" AS "t1" ' 'WHERE ("t1"."name" = ?) ' 'FOR SHARE NOWAIT'), ['charlie'], for_update=True) def test_for_update_nested(self): PA = Person.alias('pa') subq = PA.select(PA.id).where(PA.name == 'charlie').for_update() query = (Person .delete() .where(Person.id.in_(subq))) self.assertSQL(query, ( 'DELETE FROM "person" WHERE ("person"."id" IN (' 'SELECT "pa"."id" FROM "person" AS "pa" ' 'WHERE ("pa"."name" = ?) 
FOR UPDATE))'), ['charlie'], for_update=True) def test_for_update_options(self): query = (Person .select(Person.id) .where(Person.name == 'huey') .for_update(of=Person, nowait=True)) self.assertSQL(query, ( 'SELECT "t1"."id" FROM "person" AS "t1" WHERE ("t1"."name" = ?) ' 'FOR UPDATE OF "t1" NOWAIT'), ['huey'], for_update=True) # Check default behavior. query = query.for_update() self.assertSQL(query, ( 'SELECT "t1"."id" FROM "person" AS "t1" WHERE ("t1"."name" = ?) ' 'FOR UPDATE'), ['huey'], for_update=True) # Clear flag. query = query.for_update(None) self.assertSQL(query, ( 'SELECT "t1"."id" FROM "person" AS "t1" WHERE ("t1"."name" = ?)'), ['huey']) # Old-style is still supported. query = query.for_update('FOR UPDATE NOWAIT') self.assertSQL(query, ( 'SELECT "t1"."id" FROM "person" AS "t1" WHERE ("t1"."name" = ?) ' 'FOR UPDATE NOWAIT'), ['huey'], for_update=True) # Mix of old and new is OK. query = query.for_update('FOR SHARE NOWAIT', of=Person) self.assertSQL(query, ( 'SELECT "t1"."id" FROM "person" AS "t1" WHERE ("t1"."name" = ?) ' 'FOR SHARE OF "t1" NOWAIT'), ['huey'], for_update=True) def test_parentheses(self): query = (Person .select(fn.MAX( fn.IFNULL(1, 10) * 151, fn.IFNULL(None, 10)))) self.assertSQL(query, ( 'SELECT MAX((IFNULL(?, ?) * ?), IFNULL(?, ?)) ' 'FROM "person" AS "t1"'), [1, 10, 151, None, 10]) query = (Person .select(Person.name) .where(fn.EXISTS( User.select(User.c.id).where( User.c.username == Person.name)))) self.assertSQL(query, ( 'SELECT "t1"."name" FROM "person" AS "t1" ' 'WHERE EXISTS(' 'SELECT "t2"."id" FROM "users" AS "t2" ' 'WHERE ("t2"."username" = "t1"."name"))'), []) class TestExpressionSQL(BaseTestCase): def test_parentheses_functions(self): expr = (User.c.income + 100) expr2 = expr * expr query = User.select(fn.sum(expr), fn.avg(expr2)) self.assertSQL(query, ( 'SELECT sum("t1"."income" + ?), ' 'avg(("t1"."income" + ?) * ("t1"."income" + ?)) ' 'FROM "users" AS "t1"'), [100, 100, 100]) #Person = Table('person', ['id', 'name', 'dob']) class TestOnConflictSqlite(BaseTestCase): database = SqliteDatabase(None) def test_replace(self): query = Person.insert(name='huey').on_conflict('replace') self.assertSQL(query, ( 'INSERT OR REPLACE INTO "person" ("name") VALUES (?)'), ['huey']) def test_ignore(self): query = Person.insert(name='huey').on_conflict('ignore') self.assertSQL(query, ( 'INSERT OR IGNORE INTO "person" ("name") VALUES (?)'), ['huey']) def test_update_not_supported(self): query = Person.insert(name='huey').on_conflict( preserve=(Person.dob,), update={Person.name: Person.name.concat(' (updated)')}) with self.assertRaisesCtx(ValueError): self.database.get_sql_context().parse(query) class TestOnConflictMySQL(BaseTestCase): database = MySQLDatabase(None) def setUp(self): super(TestOnConflictMySQL, self).setUp() self.database.server_version = None def test_replace(self): query = Person.insert(name='huey').on_conflict('replace') self.assertSQL(query, ( 'REPLACE INTO "person" ("name") VALUES (?)'), ['huey']) def test_ignore(self): query = Person.insert(name='huey').on_conflict('ignore') self.assertSQL(query, ( 'INSERT IGNORE INTO "person" ("name") VALUES (?)'), ['huey']) def test_update(self): dob = datetime.date(2010, 1, 1) query = (Person .insert(name='huey', dob=dob) .on_conflict( preserve=(Person.dob,), update={Person.name: Person.name.concat('-x')})) self.assertSQL(query, ( 'INSERT INTO "person" ("dob", "name") VALUES (?, ?) 
' 'ON DUPLICATE KEY ' 'UPDATE "dob" = VALUES("dob"), "name" = ("name" || ?)'), [dob, 'huey', '-x']) query = (Person .insert(name='huey', dob=dob) .on_conflict(preserve='dob')) self.assertSQL(query, ( 'INSERT INTO "person" ("dob", "name") VALUES (?, ?) ' 'ON DUPLICATE KEY ' 'UPDATE "dob" = VALUES("dob")'), [dob, 'huey']) def test_update_use_value_mariadb(self): # Verify that we use "VALUE" (not "VALUES") for MariaDB 10.3.3. dob = datetime.date(2010, 1, 1) query = (Person .insert(name='huey', dob=dob) .on_conflict(preserve=(Person.dob,))) self.database.server_version = (10, 3, 3) self.assertSQL(query, ( 'INSERT INTO "person" ("dob", "name") VALUES (?, ?) ' 'ON DUPLICATE KEY ' 'UPDATE "dob" = VALUE("dob")'), [dob, 'huey']) self.database.server_version = (10, 3, 2) self.assertSQL(query, ( 'INSERT INTO "person" ("dob", "name") VALUES (?, ?) ' 'ON DUPLICATE KEY ' 'UPDATE "dob" = VALUES("dob")'), [dob, 'huey']) def test_where_not_supported(self): query = Person.insert(name='huey').on_conflict( preserve=(Person.dob,), where=(Person.name == 'huey')) with self.assertRaisesCtx(ValueError): self.database.get_sql_context().parse(query) class TestOnConflictPostgresql(BaseTestCase): database = PostgresqlDatabase(None) def test_ignore(self): query = Person.insert(name='huey').on_conflict('ignore') self.assertSQL(query, ( 'INSERT INTO "person" ("name") VALUES (?) ' 'ON CONFLICT DO NOTHING'), ['huey']) def test_conflict_target_required(self): query = Person.insert(name='huey').on_conflict(preserve=(Person.dob,)) with self.assertRaisesCtx(ValueError): self.database.get_sql_context().parse(query) def test_conflict_resolution_required(self): query = Person.insert(name='huey').on_conflict(conflict_target='name') with self.assertRaisesCtx(ValueError): self.database.get_sql_context().parse(query) def test_conflict_update_excluded(self): KV = Table('kv', ('key', 'value', 'extra'), _database=self.database) query = (KV.insert(key='k1', value='v1', extra=1) .on_conflict(conflict_target=(KV.key, KV.value), update={KV.extra: EXCLUDED.extra + 2}, where=(EXCLUDED.extra < KV.extra))) self.assertSQL(query, ( 'INSERT INTO "kv" ("extra", "key", "value") VALUES (?, ?, ?) ' 'ON CONFLICT ("key", "value") DO UPDATE ' 'SET "extra" = (EXCLUDED."extra" + ?) ' 'WHERE (EXCLUDED."extra" < "kv"."extra")'), [1, 'k1', 'v1', 2]) def test_conflict_target_or_constraint(self): KV = Table('kv', ('key', 'value', 'extra'), _database=self.database) query = (KV.insert(key='k1', value='v1', extra='e1') .on_conflict(conflict_target=[KV.key, KV.value], preserve=[KV.extra])) self.assertSQL(query, ( 'INSERT INTO "kv" ("extra", "key", "value") VALUES (?, ?, ?) ' 'ON CONFLICT ("key", "value") DO UPDATE ' 'SET "extra" = EXCLUDED."extra"'), ['e1', 'k1', 'v1']) query = (KV.insert(key='k1', value='v1', extra='e1') .on_conflict(conflict_constraint='kv_key_value', preserve=[KV.extra])) self.assertSQL(query, ( 'INSERT INTO "kv" ("extra", "key", "value") VALUES (?, ?, ?) ' 'ON CONFLICT ON CONSTRAINT "kv_key_value" DO UPDATE ' 'SET "extra" = EXCLUDED."extra"'), ['e1', 'k1', 'v1']) query = KV.insert(key='k1', value='v1', extra='e1') self.assertRaises(ValueError, query.on_conflict, conflict_target=[KV.key, KV.value], conflict_constraint='kv_key_value') def test_update(self): dob = datetime.date(2010, 1, 1) query = (Person .insert(name='huey', dob=dob) .on_conflict( conflict_target=(Person.name,), preserve=(Person.dob,), update={Person.name: Person.name.concat('-x')})) self.assertSQL(query, ( 'INSERT INTO "person" ("dob", "name") VALUES (?, ?) 
' 'ON CONFLICT ("name") DO ' 'UPDATE SET "dob" = EXCLUDED."dob", ' '"name" = ("person"."name" || ?)'), [dob, 'huey', '-x']) query = (Person .insert(name='huey', dob=dob) .on_conflict( conflict_target='name', preserve='dob')) self.assertSQL(query, ( 'INSERT INTO "person" ("dob", "name") VALUES (?, ?) ' 'ON CONFLICT ("name") DO ' 'UPDATE SET "dob" = EXCLUDED."dob"'), [dob, 'huey']) query = (Person .insert(name='huey') .on_conflict( conflict_target=Person.name, preserve=Person.dob, update={Person.name: Person.name.concat('-x')}, where=(Person.name != 'zaizee'))) self.assertSQL(query, ( 'INSERT INTO "person" ("name") VALUES (?) ' 'ON CONFLICT ("name") DO ' 'UPDATE SET "dob" = EXCLUDED."dob", ' '"name" = ("person"."name" || ?) ' 'WHERE ("person"."name" != ?)'), ['huey', '-x', 'zaizee']) def test_conflict_target_partial_index(self): KVE = Table('kve', ('key', 'value', 'extra')) data = [('k1', 1, 2), ('k2', 2, 3)] columns = [KVE.key, KVE.value, KVE.extra] query = (KVE .insert(data, columns) .on_conflict( conflict_target=(KVE.key, KVE.value), conflict_where=(KVE.extra > 1), preserve=(KVE.extra,), where=(KVE.key != 'kx'))) self.assertSQL(query, ( 'INSERT INTO "kve" ("key", "value", "extra") ' 'VALUES (?, ?, ?), (?, ?, ?) ' 'ON CONFLICT ("key", "value") WHERE ("extra" > ?) ' 'DO UPDATE SET "extra" = EXCLUDED."extra" ' 'WHERE ("kve"."key" != ?)'), ['k1', 1, 2, 'k2', 2, 3, 1, 'kx']) #Person = Table('person', ['id', 'name', 'dob']) #Note = Table('note', ['id', 'person_id', 'content']) class TestIndex(BaseTestCase): def test_simple_index(self): pidx = Index('person_name', Person, (Person.name,), unique=True) self.assertSQL(pidx, ( 'CREATE UNIQUE INDEX "person_name" ON "person" ("name")'), []) pidx = pidx.where(Person.dob > datetime.date(1950, 1, 1)) self.assertSQL(pidx, ( 'CREATE UNIQUE INDEX "person_name" ON "person" ' '("name") WHERE ("dob" > ?)'), [datetime.date(1950, 1, 1)]) def test_advanced_index(self): Article = Table('article') aidx = Index('foo_idx', Article, ( Article.c.status, Article.c.timestamp.desc(), fn.SUBSTR(Article.c.title, 1, 1)), safe=True) self.assertSQL(aidx, ( 'CREATE INDEX IF NOT EXISTS "foo_idx" ON "article" ' '("status", "timestamp" DESC, SUBSTR("title", ?, ?))'), [1, 1]) aidx = aidx.where(Article.c.flags.bin_and(4) == 4) self.assertSQL(aidx, ( 'CREATE INDEX IF NOT EXISTS "foo_idx" ON "article" ' '("status", "timestamp" DESC, SUBSTR("title", ?, ?)) ' 'WHERE (("flags" & ?) = ?)'), [1, 1, 4, 4]) # Check behavior when value-literals are enabled. 
        self.assertSQL(aidx, (
            'CREATE INDEX IF NOT EXISTS "foo_idx" ON "article" '
            '("status", "timestamp" DESC, SUBSTR("title", 1, 1)) '
            'WHERE (("flags" & 4) = 4)'), [], value_literals=True)

    def test_str_cols(self):
        uidx = Index('users_info', User, ('username DESC', 'id'))
        self.assertSQL(uidx, (
            'CREATE INDEX "users_info" ON "users" (username DESC, id)'), [])


class TestSqlToString(BaseTestCase):
    def _test_sql_to_string(self, _param):
        class FakeDB(SqliteDatabase):
            param = _param

        db = FakeDB(None)
        T = Table('tbl', ('id', 'val')).bind(db)
        query = (T.select()
                 .where((T.val == 'foo') |
                        (T.val == b'bar') |
                        (T.val == True) |
                        (T.val == False) |
                        (T.val == 2) |
                        (T.val == -3.14) |
                        (T.val == datetime.datetime(2018, 1, 1)) |
                        (T.val == datetime.date(2018, 1, 2)) |
                        T.val.is_null() |
                        T.val.is_null(False) |
                        T.val.in_(['aa', 'bb', 'cc'])))
        self.assertEqual(query_to_string(query), (
            'SELECT "t1"."id", "t1"."val" FROM "tbl" AS "t1" WHERE ((((((((((('
            '"t1"."val" = \'foo\') OR '
            '("t1"."val" = \'bar\')) OR '
            '("t1"."val" = 1)) OR '
            '("t1"."val" = 0)) OR '
            '("t1"."val" = 2)) OR '
            '("t1"."val" = -3.14)) OR '
            '("t1"."val" = \'2018-01-01 00:00:00\')) OR '
            '("t1"."val" = \'2018-01-02\')) OR '
            '("t1"."val" IS NULL)) OR '
            '("t1"."val" IS NOT NULL)) OR '
            '("t1"."val" IN (\'aa\', \'bb\', \'cc\')))'))

    def test_sql_to_string_qmark(self):
        self._test_sql_to_string('?')

    def test_sql_to_string_default(self):
        self._test_sql_to_string('%s')
peewee-3.17.7/tests/sqlcipher_ext.py
import datetime
import os
from hashlib import sha1

from peewee import DatabaseError
from playhouse.sqlcipher_ext import *
from playhouse.sqlite_ext import *

from .base import ModelTestCase
from .base import TestModel


PASSPHRASE = 'testing sqlcipher'
PRAGMAS = {
    'kdf_iter': 10,  # Much faster for testing. Totally unsafe.
    'cipher_log_level': 'none',
}
db = SqlCipherDatabase('peewee_test.dbc', passphrase=PASSPHRASE,
                       pragmas=PRAGMAS)
ext_db = SqlCipherExtDatabase('peewee_test.dbx', passphrase=PASSPHRASE,
                              pragmas=PRAGMAS)


@ext_db.func('shazam')
def shazam(s):
    return sha1((s or '').encode('utf-8')).hexdigest()[:5]


class Thing(TestModel):
    name = CharField()


class FTSNote(FTSModel, TestModel):
    content = TextField()


class Note(TestModel):
    content = TextField()
    timestamp = DateTimeField(default=datetime.datetime.now)


class CleanUpModelTestCase(ModelTestCase):
    def tearDown(self):
        super(CleanUpModelTestCase, self).tearDown()
        if os.path.exists(self.database.database):
            os.unlink(self.database.database)


class SqlCipherTestCase(CleanUpModelTestCase):
    database = db
    requires = [Thing]

    def test_good_and_bad_passphrases(self):
        things = ('t1', 't2', 't3')
        for thing in things:
            Thing.create(name=thing)

        # Try to open db with wrong passphrase
        bad_db = SqlCipherDatabase(db.database, passphrase='wrong passphrase')
        self.assertRaises(DatabaseError, bad_db.get_tables)

        # Assert that we can still access the data with the good passphrase.
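        # (Illustrative aside, not part of the original test: the passphrase
        # is applied per-connection, so a handle opened with the correct key
        # is unaffected by the failed attempt above, e.g.:
        #
        #     good_db = SqlCipherDatabase(db.database, passphrase=PASSPHRASE)
        #     good_db.get_tables()  # No DatabaseError raised.
        # )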
        query = Thing.select().order_by(Thing.name)
        self.assertEqual([t.name for t in query], ['t1', 't2', 't3'])

    def test_rekey(self):
        things = ('t1', 't2', 't3')
        for thing in things:
            Thing.create(name=thing)

        self.database.rekey('a new passphrase')

        db2 = SqlCipherDatabase(db.database, passphrase='a new passphrase',
                                pragmas=PRAGMAS)
        cursor = db2.execute_sql('select name from thing order by name;')
        self.assertEqual([name for name, in cursor], ['t1', 't2', 't3'])

        query = Thing.select().order_by(Thing.name)
        self.assertEqual([t.name for t in query], ['t1', 't2', 't3'])

        self.database.close()
        self.database.connect()

        query = Thing.select().order_by(Thing.name)
        self.assertEqual([t.name for t in query], ['t1', 't2', 't3'])

        # Re-set to the original passphrase.
        self.database.rekey(PASSPHRASE)

    def test_empty_passphrase(self):
        db = SqlCipherDatabase(':memory:')

        class CM(TestModel):
            data = TextField()

            class Meta:
                database = db

        db.connect()
        db.create_tables([CM])

        cm = CM.create(data='foo')
        cm_db = CM.get(CM.data == 'foo')
        self.assertEqual(cm_db.id, cm.id)
        self.assertEqual(cm_db.data, 'foo')


config_db = SqlCipherDatabase('peewee_test.dbc', pragmas={
    'kdf_iter': 1234,
    'cipher_page_size': 8192}, passphrase=PASSPHRASE)


class TestSqlCipherConfiguration(CleanUpModelTestCase):
    database = config_db

    def test_configuration_via_pragma(self):
        # Write some data so the database file is created.
        self.database.execute_sql('create table foo (data TEXT)')
        self.database.close()

        self.database.connect()
        self.assertEqual(int(self.database.pragma('kdf_iter')), 1234)
        self.assertEqual(int(self.database.pragma('cipher_page_size')), 8192)
        self.assertTrue('foo' in self.database.get_tables())


class SqlCipherExtTestCase(CleanUpModelTestCase):
    database = ext_db
    requires = [Note]

    def setUp(self):
        super(SqlCipherExtTestCase, self).setUp()
        FTSNote._meta.database = ext_db
        FTSNote.drop_table(True)
        FTSNote.create_table(tokenize='porter', content=Note.content)

    def tearDown(self):
        FTSNote.drop_table(True)
        super(SqlCipherExtTestCase, self).tearDown()

    def test_fts(self):
        strings = [
            'python and peewee for working with databases',
            'relational databases are the best',
            'sqlite is the best relational database',
            'sqlcipher is a cool database extension']
        for s in strings:
            Note.create(content=s)
        FTSNote.rebuild()

        query = (FTSNote
                 .select(FTSNote, FTSNote.rank().alias('score'))
                 .where(FTSNote.match('relational databases'))
                 .order_by(SQL('score').desc()))
        notes = [note.content for note in query]
        self.assertEqual(notes, [
            'relational databases are the best',
            'sqlite is the best relational database'])

        alt_conn = SqliteDatabase(ext_db.database)
        self.assertRaises(
            DatabaseError,
            alt_conn.execute_sql,
            'SELECT * FROM "%s"' % (FTSNote._meta.table_name))

    def test_func(self):
        Note.create(content='hello')
        Note.create(content='baz')
        Note.create(content='nug')

        query = (Note
                 .select(Note.content, fn.shazam(Note.content).alias('shz'))
                 .order_by(Note.id)
                 .dicts())
        results = list(query)
        self.assertEqual(results, [
            {'content': 'hello', 'shz': 'aaf4c'},
            {'content': 'baz', 'shz': 'bbe96'},
            {'content': 'nug', 'shz': '52616'},
        ])
peewee-3.17.7/tests/sqlite.py
from decimal import Decimal as D
import datetime
import os
import sys

from peewee import *
from peewee import sqlite3
from playhouse.sqlite_ext import *
from playhouse._sqlite_ext import TableFunction

from .base import BaseTestCase
from .base import IS_SQLITE_37
from .base import IS_SQLITE_9
from .base import ModelTestCase
from .base import 
TestModel from .base import db_loader from .base import get_in_memory_db from .base import requires_models from .base import skip_if from .base import skip_unless from .base_models import Person from .base_models import Tweet from .base_models import User from .sqlite_helpers import compile_option from .sqlite_helpers import json_installed from .sqlite_helpers import json_patch_installed from .sqlite_helpers import json_text_installed from .sqlite_helpers import jsonb_installed database = SqliteExtDatabase(':memory:', c_extensions=False, timeout=100) CLOSURE_EXTENSION = os.environ.get('PEEWEE_CLOSURE_EXTENSION') if not CLOSURE_EXTENSION and os.path.exists('closure.so'): CLOSURE_EXTENSION = './closure.so' LSM_EXTENSION = os.environ.get('LSM_EXTENSION') if not LSM_EXTENSION and os.path.exists('lsm.so'): LSM_EXTENSION = './lsm.so' try: from playhouse._sqlite_ext import peewee_rank CYTHON_EXTENSION = True except ImportError: CYTHON_EXTENSION = False class WeightedAverage(object): def __init__(self): self.total = 0. self.count = 0. def step(self, value, weight=None): weight = weight or 1. self.total += weight self.count += (weight * value) def finalize(self): if self.total != 0.: return self.count / self.total return 0. def _cmp(l, r): if l < r: return -1 return 1 if r < l else 0 def collate_reverse(s1, s2): return -_cmp(s1, s2) @database.collation() def collate_case_insensitive(s1, s2): return _cmp(s1.lower(), s2.lower()) def title_case(s): return s.title() @database.func() def rstrip(s, n): return s.rstrip(n) database.register_aggregate(WeightedAverage, 'weighted_avg', 1) database.register_aggregate(WeightedAverage, 'weighted_avg2', 2) database.register_collation(collate_reverse) database.register_function(title_case) class Post(TestModel): message = TextField() class ContentPost(FTSModel, Post): class Meta: options = { 'content': Post, 'tokenize': 'porter'} class ContentPostMessage(FTSModel, TestModel): message = TextField() class Meta: options = {'tokenize': 'porter', 'content': Post.message} class Document(FTSModel, TestModel): message = TextField() class Meta: options = {'tokenize': 'porter'} class MultiColumn(FTSModel, TestModel): c1 = SearchField() c2 = SearchField() c3 = SearchField() c4 = IntegerField() class Meta: options = {'tokenize': 'porter'} class RowIDModel(TestModel): rowid = RowIDField() data = IntegerField() class KeyData(TestModel): key = TextField() data = JSONField() class JBData(TestModel): key = TextField() data = JSONBField() class Values(TestModel): klass = IntegerField() value = FloatField() weight = FloatField() class FTS5Test(FTS5Model): title = SearchField() data = SearchField() misc = SearchField(unindexed=True) class Meta: legacy_table_names = False class FTS5Document(FTS5Model): message = SearchField() class Meta: options = {'tokenize': 'porter'} class Series(TableFunction): columns = ['value'] params = ['start', 'stop', 'step'] name = 'series' def initialize(self, start=0, stop=None, step=1): self.start = start self.stop = stop or float('inf') self.step = step self.curr = self.start def iterate(self, idx): if self.curr > self.stop: raise StopIteration ret = self.curr self.curr += self.step return (ret,) class RegexSearch(TableFunction): columns = ['match'] params = ['regex', 'search_string'] name = 'regex_search' def initialize(self, regex=None, search_string=None): if regex and search_string: self._iter = re.finditer(regex, search_string) else: self._iter = None def iterate(self, idx): # We do not need `idx`, so just ignore it. 
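# For context: SQLite drives a TableFunction by calling initialize()
# once with the declared params, then calling iterate(idx) repeatedly
# until it raises StopIteration; each returned tuple must line up with
# the declared columns. A rough usage sketch, mirroring the tests below:
#
#     conn = sqlite3.connect(':memory:')
#     RegexSearch.register(conn)
#     conn.execute("SELECT match FROM regex_search('[0-9]+', 'a1 b22')")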
if self._iter is None: raise StopIteration else: return (next(self._iter).group(0),) class Split(TableFunction): params = ['data'] columns = ['part'] name = 'str_split' def initialize(self, data=None): self._parts = data.split() self._idx = 0 def iterate(self, idx): if self._idx < len(self._parts): result = (self._parts[self._idx],) self._idx += 1 return result raise StopIteration @skip_unless(IS_SQLITE_9, 'requires sqlite >= 3.9') class TestTableFunction(BaseTestCase): def setUp(self): super(TestTableFunction, self).setUp() self.conn = sqlite3.connect(':memory:') def tearDown(self): super(TestTableFunction, self).tearDown() self.conn.close() def execute(self, sql, params=None): return self.conn.execute(sql, params or ()) def test_split(self): Split.register(self.conn) curs = self.execute('select part from str_split(?) order by part ' 'limit 3', ('well hello huey and zaizee',)) self.assertEqual([row for row, in curs.fetchall()], ['and', 'hello', 'huey']) def test_split_tbl(self): Split.register(self.conn) self.execute('create table post (content TEXT);') self.execute('insert into post (content) values (?), (?), (?)', ('huey secret post', 'mickey message', 'zaizee diary')) curs = self.execute('SELECT * FROM post, str_split(post.content)') results = curs.fetchall() self.assertEqual(results, [ ('huey secret post', 'huey'), ('huey secret post', 'secret'), ('huey secret post', 'post'), ('mickey message', 'mickey'), ('mickey message', 'message'), ('zaizee diary', 'zaizee'), ('zaizee diary', 'diary'), ]) def test_series(self): Series.register(self.conn) def assertSeries(params, values, extra_sql=''): param_sql = ', '.join('?' * len(params)) sql = 'SELECT * FROM series(%s)' % param_sql if extra_sql: sql = ' '.join((sql, extra_sql)) curs = self.execute(sql, params) self.assertEqual([row for row, in curs.fetchall()], values) assertSeries((0, 10, 2), [0, 2, 4, 6, 8, 10]) assertSeries((5, None, 20), [5, 25, 45, 65, 85], 'LIMIT 5') assertSeries((4, 0, -1), [4, 3, 2], 'LIMIT 3') assertSeries((3, 5, 3), [3]) assertSeries((3, 3, 1), [3]) def test_series_tbl(self): Series.register(self.conn) self.execute('CREATE TABLE nums (id INTEGER PRIMARY KEY)') self.execute('INSERT INTO nums DEFAULT VALUES;') self.execute('INSERT INTO nums DEFAULT VALUES;') curs = self.execute('SELECT * FROM nums, series(nums.id, nums.id + 2)') results = curs.fetchall() self.assertEqual(results, [ (1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (2, 4)]) curs = self.execute('SELECT * FROM nums, series(nums.id) LIMIT 3') results = curs.fetchall() self.assertEqual(results, [(1, 1), (1, 2), (1, 3)]) def test_regex(self): RegexSearch.register(self.conn) def assertResults(regex, search_string, values): sql = 'SELECT * FROM regex_search(?, ?)' curs = self.execute(sql, (regex, search_string)) self.assertEqual([row for row, in curs.fetchall()], values) assertResults( '[0-9]+', 'foo 123 45 bar 678 nuggie 9.0', ['123', '45', '678', '9', '0']) assertResults( r'[\w]+@[\w]+\.[\w]{2,3}', ('Dear charlie@example.com, this is nug@baz.com. I am writing on ' 'behalf of zaizee@foo.io. He dislikes your blog.'), ['charlie@example.com', 'nug@baz.com', 'zaizee@foo.io']) assertResults( '[a-z]+', '123.pDDFeewXee', ['p', 'eew', 'ee']) assertResults( '[0-9]+', 'hello', []) def test_regex_tbl(self): messages = ( 'hello foo@example.fap, this is nuggie@example.fap. 
How are you?', 'baz@example.com wishes to let charlie@crappyblog.com know that ' 'huey@example.com hates his blog', 'testing no emails.', '') RegexSearch.register(self.conn) self.execute('create table posts (id integer primary key, msg)') self.execute('insert into posts (msg) values (?), (?), (?), (?)', messages) cur = self.execute('select posts.id, regex_search.rowid, regex_search.match ' 'FROM posts, regex_search(?, posts.msg)', (r'[\w]+@[\w]+\.\w{2,3}',)) results = cur.fetchall() self.assertEqual(results, [ (1, 1, 'foo@example.fap'), (1, 2, 'nuggie@example.fap'), (2, 3, 'baz@example.com'), (2, 4, 'charlie@crappyblog.com'), (2, 5, 'huey@example.com'), ]) def test_error_instantiate(self): class BrokenInstantiate(Series): name = 'broken_instantiate' print_tracebacks = False def __init__(self, *args, **kwargs): super(BrokenInstantiate, self).__init__(*args, **kwargs) raise ValueError('broken instantiate') BrokenInstantiate.register(self.conn) self.assertRaises(sqlite3.OperationalError, self.execute, 'SELECT * FROM broken_instantiate(1, 10)') def test_error_init(self): class BrokenInit(Series): name = 'broken_init' print_tracebacks = False def initialize(self, start=0, stop=None, step=1): raise ValueError('broken init') BrokenInit.register(self.conn) self.assertRaises(sqlite3.OperationalError, self.execute, 'SELECT * FROM broken_init(1, 10)') self.assertRaises(sqlite3.OperationalError, self.execute, 'SELECT * FROM broken_init(0, 1)') def test_error_iterate(self): class BrokenIterate(Series): name = 'broken_iterate' print_tracebacks = False def iterate(self, idx): raise ValueError('broken iterate') BrokenIterate.register(self.conn) self.assertRaises(sqlite3.OperationalError, self.execute, 'SELECT * FROM broken_iterate(1, 10)') self.assertRaises(sqlite3.OperationalError, self.execute, 'SELECT * FROM broken_iterate(0, 1)') def test_error_iterate_delayed(self): # Only raises an exception if the value 7 comes up. class SomewhatBroken(Series): name = 'somewhat_broken' print_tracebacks = False def iterate(self, idx): ret = super(SomewhatBroken, self).iterate(idx) if ret == (7,): raise ValueError('somewhat broken') else: return ret SomewhatBroken.register(self.conn) curs = self.execute('SELECT * FROM somewhat_broken(0, 3)') self.assertEqual(curs.fetchall(), [(0,), (1,), (2,), (3,)]) curs = self.execute('SELECT * FROM somewhat_broken(5, 8)') self.assertEqual(curs.fetchone(), (5,)) self.assertRaises(sqlite3.OperationalError, curs.fetchall) curs = self.execute('SELECT * FROM somewhat_broken(0, 2)') self.assertEqual(curs.fetchall(), [(0,), (1,), (2,)]) @skip_unless(json_installed(), 'requires sqlite json1') class TestJSONField(ModelTestCase): database = database requires = [KeyData] def test_schema(self): self.assertSQL(KeyData._schema._create_table(), ( 'CREATE TABLE IF NOT EXISTS "key_data" (' '"id" INTEGER NOT NULL PRIMARY KEY, ' '"key" TEXT NOT NULL, ' '"data" JSON NOT NULL)'), []) def test_create_read_update(self): test_values = ( 'simple string', '', 1337, 0.0, True, False, ['foo', 'bar', ['baz', 'nug']], {'k1': 'v1', 'k2': {'x1': 'y1', 'x2': 'y2'}}, {'a': 1, 'b': 0.0, 'c': True, 'd': False, 'e': None, 'f': [0, 1], 'g': {'h': 'ijkl'}}, ) # Create a row using the given test value. Verify we can read the value # back from the database, and also that we can query for the row using # the value in the WHERE clause. for i, value in enumerate(test_values): # We can create and re-read values. 
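# JSONField stores each value by serializing it with json.dumps() on
# write and deserializing with json.loads() on read, so every entry in
# test_values should round-trip unchanged.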
KeyData.create(key='k%s' % i, data=value) kd_db = KeyData.get(KeyData.key == 'k%s' % i) self.assertEqual(kd_db.data, value) # We can read the data back using the value in the WHERE clause. kd_db = KeyData.get(KeyData.data == value) self.assertEqual(kd_db.key, 'k%s' % i) # Verify we can use values in UPDATE query. kd = KeyData.create(key='kx', data='') for value in test_values: nrows = (KeyData .update(data=value) .where(KeyData.key == 'kx') .execute()) self.assertEqual(nrows, 1) kd_db = KeyData.get(KeyData.key == 'kx') self.assertEqual(kd_db.data, value) def test_json_unicode(self): with self.database.atomic(): KeyData.delete().execute() # Two Chinese characters. unicode_str = b'\xe4\xb8\xad\xe6\x96\x87'.decode('utf8') data = {'foo': unicode_str} kd = KeyData.create(key='k1', data=data) kd_db = KeyData.get(KeyData.key == 'k1') self.assertEqual(kd_db.data, {'foo': unicode_str}) def test_json_to_json(self): kd1 = KeyData.create(key='k1', data={'k1': 'v1', 'k2': 'v2'}) subq = (KeyData .select(KeyData.data) .where(KeyData.key == 'k1')) # Assign value using a subquery. KeyData.create(key='k2', data=subq) kd2_db = KeyData.get(KeyData.key == 'k2') self.assertEqual(kd2_db.data, {'k1': 'v1', 'k2': 'v2'}) def test_json_bulk_update_top_level_list(self): kd1 = KeyData.create(key='k1', data=['a', 'b', 'c']) kd2 = KeyData.create(key='k2', data=['d', 'e', 'f']) kd1.data = ['g', 'h', 'i'] kd2.data = ['j', 'k', 'l'] KeyData.bulk_update([kd1, kd2], fields=[KeyData.data]) kd1_db = KeyData.get(KeyData.key == 'k1') kd2_db = KeyData.get(KeyData.key == 'k2') self.assertEqual(kd1_db.data, ['g', 'h', 'i']) self.assertEqual(kd2_db.data, ['j', 'k', 'l']) def test_json_bulk_update_top_level_dict(self): kd1 = KeyData.create(key='k1', data={'x': 'y1'}) kd2 = KeyData.create(key='k2', data={'x': 'y2'}) kd1.data = {'x': 'z1'} kd2.data = {'X': 'Z2'} KeyData.bulk_update([kd1, kd2], fields=[KeyData.data]) kd1_db = KeyData.get(KeyData.key == 'k1') kd2_db = KeyData.get(KeyData.key == 'k2') self.assertEqual(kd1_db.data, {'x': 'z1'}) self.assertEqual(kd2_db.data, {'X': 'Z2'}) def test_json_multi_ops(self): data = ( ('k1', [0, 1]), ('k2', [1, 2]), ('k3', {'x3': 'y3'}), ('k4', {'x4': 'y4'})) res = KeyData.insert_many(data).execute() if database.returning_clause: self.assertEqual([r for r, in res], [1, 2, 3, 4]) else: self.assertEqual(res, 4) vals = [[1, 2], [2, 3], {'x3': 'y3'}, {'x5': 'y5'}] pw_vals = [Value(v, unpack=False) for v in vals] query = KeyData.select().where(KeyData.data.in_(pw_vals)) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."key", "t1"."data" ' 'FROM "key_data" AS "t1" ' 'WHERE ("t1"."data" IN (json(?), json(?), json(?), json(?)))'), ['[1, 2]', '[2, 3]', '{"x3": "y3"}', '{"x5": "y5"}']) self.assertEqual(query.count(), 2) self.assertEqual(sorted([k.key for k in query]), ['k2', 'k3']) query = KeyData.select().where(KeyData.data == [1, 2]) self.assertEqual(query.count(), 1) self.assertEqual(query.get().key, 'k2') query = KeyData.select().where(KeyData.data == {'x3': 'y3'}) self.assertEqual(query.count(), 1) self.assertEqual(query.get().key, 'k3') @skip_unless(json_installed(), 'requires sqlite json1') class TestJSONFieldFunctions(ModelTestCase): database = database requires = [KeyData] test_data = [ ('a', {'k1': 'v1', 'x1': {'y1': 'z1'}}), ('b', {'k2': 'v2', 'x2': {'y2': 'z2'}}), ('c', {'k1': 'v1', 'k2': 'v2'}), ('d', {'x1': {'y1': 'z1', 'y2': 'z2'}}), ('e', {'l1': [0, 1, 2], 'l2': [1, [3, 3], 7]}), ] M = KeyData def setUp(self): super(TestJSONFieldFunctions, self).setUp() KeyData = self.M with 
self.database.atomic(): for key, data in self.test_data: KeyData.create(key=key, data=data) self.Q = KeyData.select().order_by(KeyData.key) def assertRows(self, where, expected): self.assertEqual([kd.key for kd in self.Q.where(where)], expected) def assertData(self, key, expected): KeyData = self.M self.assertEqual(KeyData.get(KeyData.key == key).data, expected) def test_json_group_functions(self): KeyData = self.M with self.database.atomic(): KeyData.delete().execute() for i in range(10): # e.g., {v: 0, v0: {items: []}}, {v: 2, v2: {items: [0, 1]}} KeyData.create(key='k%s' % i, data={'v': i, 'v%s' % i: { 'items': list(range(i))}}) jga_key = fn.json_group_array(KeyData.key) query = (KeyData .select(jga_key) .where(KeyData.data['v'] < 4) .order_by(KeyData.key)) self.assertEqual(json.loads(query.scalar()), ['k0', 'k1', 'k2', 'k3']) # Can specify json.loads as the converter for the function. query = (KeyData .select(jga_key.python_value(json.loads)) .where(KeyData.data['v'] > 6) .order_by(KeyData.key)) self.assertEqual(query.scalar(), ['k7', 'k8', 'k9']) # Aggregating a list of ints? jga_id = fn.json_group_array(KeyData.id) query = (KeyData .select(jga_id) .where(KeyData.data['v'] < 4) .order_by(KeyData.id)) self.assertEqual(json.loads(query.scalar()), [1, 2, 3, 4]) query = (KeyData .select(jga_id.python_value(json.loads)) .where(KeyData.data['v'] > 6) .order_by(KeyData.id)) self.assertEqual(query.scalar(), [8, 9, 10]) # Using json_group_object. jgo_key = fn.json_group_object(KeyData.key, KeyData.data['v']) res = (KeyData .select(jgo_key) .where(KeyData.data['v'] < 4) .scalar()) self.assertEqual(json.loads(res), {'k0': 0, 'k1': 1, 'k2': 2, 'k3': 3}) query = (KeyData .select(jgo_key.python_value(json.loads)) .where(KeyData.data['v'] < 4)) self.assertEqual(query.scalar(), {'k0': 0, 'k1': 1, 'k2': 2, 'k3': 3}) def test_extract(self): KeyData = self.M self.assertRows((KeyData.data['k1'] == 'v1'), ['a', 'c']) self.assertRows((KeyData.data['k2'] == 'v2'), ['b', 'c']) self.assertRows((KeyData.data['x1']['y1'] == 'z1'), ['a', 'd']) self.assertRows((KeyData.data['l1'][1] == 1), ['e']) self.assertRows((KeyData.data['l2'][1][1] == 3), ['e']) @skip_unless(json_text_installed()) def test_extract_text_json(self): KeyData = self.M D = KeyData.data self.assertRows((D.extract('$.k1') == 'v1'), ['a', 'c']) self.assertRows((D.extract_text('$.k1') == 'v1'), ['a', 'c']) self.assertRows((D.extract_json('$.k1') == '"v1"'), ['a', 'c']) self.assertRows((D.extract_text('k2') == 'v2'), ['b', 'c']) self.assertRows((D.extract_json('k2') == '"v2"'), ['b', 'c']) self.assertRows((D.extract_text('$.x1.y1') == 'z1'), ['a', 'd']) self.assertRows((D.extract_json('$.x1.y1') == '"z1"'), ['a', 'd']) self.assertRows((D.extract_text('$.l1[1]') == 1), ['e']) self.assertRows((D.extract_text('$.l2[1][1]') == 3), ['e']) self.assertRows((D.extract_json('x1') == '{"y1":"z1"}'), ['a']) def test_extract_multiple(self): KeyData = self.M query = KeyData.select( KeyData.key, KeyData.data.extract('$.k1', '$.k2').alias('keys')) self.assertEqual(sorted((k.key, k.keys) for k in query), [ ('a', ['v1', None]), ('b', [None, 'v2']), ('c', ['v1', 'v2']), ('d', [None, None]), ('e', [None, None])]) def test_insert(self): KeyData = self.M # Existing values are not overwritten. 
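# KeyData.data['k1'].insert(...) compiles to SQLite's
# json_insert("data", '$.k1', ?), which only creates the key when it is
# absent -- rows that already have "k1" ('a' and 'c') keep their
# original values below.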
query = KeyData.update(data=KeyData.data['k1'].insert('v1-x')) self.assertEqual(query.execute(), 5) self.assertData('a', {'k1': 'v1', 'x1': {'y1': 'z1'}}) self.assertData('b', {'k1': 'v1-x', 'k2': 'v2', 'x2': {'y2': 'z2'}}) self.assertData('c', {'k1': 'v1', 'k2': 'v2'}) self.assertData('d', {'k1': 'v1-x', 'x1': {'y1': 'z1', 'y2': 'z2'}}) self.assertData('e', {'k1': 'v1-x', 'l1': [0, 1, 2], 'l2': [1, [3, 3], 7]}) def test_insert_json(self): KeyData = self.M set_json = KeyData.data['k1'].insert([0]) query = KeyData.update(data=set_json) self.assertEqual(query.execute(), 5) self.assertData('a', {'k1': 'v1', 'x1': {'y1': 'z1'}}) self.assertData('b', {'k1': [0], 'k2': 'v2', 'x2': {'y2': 'z2'}}) self.assertData('c', {'k1': 'v1', 'k2': 'v2'}) self.assertData('d', {'k1': [0], 'x1': {'y1': 'z1', 'y2': 'z2'}}) self.assertData('e', {'k1': [0], 'l1': [0, 1, 2], 'l2': [1, [3, 3], 7]}) def test_replace(self): KeyData = self.M # Only existing values are overwritten. query = KeyData.update(data=KeyData.data['k1'].replace('v1-x')) self.assertEqual(query.execute(), 5) self.assertData('a', {'k1': 'v1-x', 'x1': {'y1': 'z1'}}) self.assertData('b', {'k2': 'v2', 'x2': {'y2': 'z2'}}) self.assertData('c', {'k1': 'v1-x', 'k2': 'v2'}) self.assertData('d', {'x1': {'y1': 'z1', 'y2': 'z2'}}) self.assertData('e', {'l1': [0, 1, 2], 'l2': [1, [3, 3], 7]}) def test_replace_json(self): KeyData = self.M set_json = KeyData.data['k1'].replace([0]) query = KeyData.update(data=set_json) self.assertEqual(query.execute(), 5) self.assertData('a', {'k1': [0], 'x1': {'y1': 'z1'}}) self.assertData('b', {'k2': 'v2', 'x2': {'y2': 'z2'}}) self.assertData('c', {'k1': [0], 'k2': 'v2'}) self.assertData('d', {'x1': {'y1': 'z1', 'y2': 'z2'}}) self.assertData('e', {'l1': [0, 1, 2], 'l2': [1, [3, 3], 7]}) def test_set(self): KeyData = self.M query = (KeyData .update({KeyData.data: KeyData.data['k1'].set('v1-x')}) .where(KeyData.data['k1'] == 'v1')) self.assertEqual(query.execute(), 2) self.assertRows((KeyData.data['k1'] == 'v1-x'), ['a', 'c']) self.assertData('a', {'k1': 'v1-x', 'x1': {'y1': 'z1'}}) def test_set_json(self): KeyData = self.M set_json = KeyData.data['x1'].set({'y1': 'z1-x', 'y3': 'z3'}) query = (KeyData .update({KeyData.data: set_json}) .where(KeyData.data['x1']['y1'] == 'z1')) self.assertEqual(query.execute(), 2) self.assertRows((KeyData.data['x1']['y1'] == 'z1-x'), ['a', 'd']) self.assertData('a', {'k1': 'v1', 'x1': {'y1': 'z1-x', 'y3': 'z3'}}) self.assertData('d', {'x1': {'y1': 'z1-x', 'y3': 'z3'}}) def test_append(self): KeyData = self.M for value in ('ix', [], ['c1'], ['c1', 'c2'], {}, {'k1': 'v1'}, {'k1': 'v1', 'k2': 'v2'}, None, 1): KeyData.delete().execute() KeyData.create(key='a0', data=[]) KeyData.create(key='a1', data=['i1']) KeyData.create(key='a2', data=['i1', 'i2']) KeyData.create(key='n0', data={'arr': []}) KeyData.create(key='n1', data={'arr': ['i1']}) KeyData.create(key='n2', data={'arr': ['i1', 'i2']}) query = (KeyData .update(data=KeyData.data.append(value)) .where(KeyData.key.startswith('a'))) self.assertEqual(query.execute(), 3) query = (KeyData .select(KeyData.key, fn.json(KeyData.data)) .where(KeyData.key.startswith('a'))) self.assertEqual(sorted((row.key, row.data) for row in query), [('a0', [value]), ('a1', ['i1', value]), ('a2', ['i1', 'i2', value])]) query = (KeyData .update(data=KeyData.data['arr'].append(value)) .where(KeyData.key.startswith('n'))) self.assertEqual(query.execute(), 3) query = (KeyData .select(KeyData.key, fn.json(KeyData.data)) .where(KeyData.key.startswith('n'))) 
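# The nested '$.arr' append above should have pushed the value onto the
# end of each array, whatever its JSON type, leaving the other keys
# untouched -- verified below.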
self.assertEqual(sorted((row.key, row.data) for row in query), [('n0', {'arr': [value]}), ('n1', {'arr': ['i1', value]}), ('n2', {'arr': ['i1', 'i2', value]})]) @skip_unless(json_patch_installed()) def test_update(self): KeyData = self.M merged = KeyData.data.update({'x1': {'y1': 'z1-x', 'y3': 'z3'}}) query = (KeyData .update({KeyData.data: merged}) .where(KeyData.data['x1']['y1'] == 'z1')) self.assertEqual(query.execute(), 2) self.assertRows((KeyData.data['x1']['y1'] == 'z1-x'), ['a', 'd']) self.assertData('a', {'k1': 'v1', 'x1': {'y1': 'z1-x', 'y3': 'z3'}}) self.assertData('d', {'x1': {'y1': 'z1-x', 'y2': 'z2', 'y3': 'z3'}}) @skip_unless(json_patch_installed()) def test_update_with_removal(self): KeyData = self.M m = KeyData.data.update({'k1': None, 'x1': {'y1': None, 'y3': 'z3'}}) query = KeyData.update(data=m).where(KeyData.data['x1']['y1'] == 'z1') self.assertEqual(query.execute(), 2) self.assertRows((KeyData.data['x1']['y3'] == 'z3'), ['a', 'd']) self.assertData('a', {'x1': {'y3': 'z3'}}) self.assertData('d', {'x1': {'y2': 'z2', 'y3': 'z3'}}) @skip_unless(json_patch_installed()) def test_update_nested(self): KeyData = self.M merged = KeyData.data['x1'].update({'y1': 'z1-x', 'y3': 'z3'}) query = (KeyData .update(data=merged) .where(KeyData.data['x1']['y1'] == 'z1')) self.assertEqual(query.execute(), 2) self.assertRows((KeyData.data['x1']['y1'] == 'z1-x'), ['a', 'd']) self.assertData('a', {'k1': 'v1', 'x1': {'y1': 'z1-x', 'y3': 'z3'}}) self.assertData('d', {'x1': {'y1': 'z1-x', 'y2': 'z2', 'y3': 'z3'}}) @skip_unless(json_patch_installed()) def test_updated_nested_with_removal(self): KeyData = self.M merged = KeyData.data['x1'].update({'o1': 'p1', 'y1': None}) nrows = (KeyData .update(data=merged) .where(KeyData.data['x1']['y1'] == 'z1') .execute()) self.assertRows((KeyData.data['x1']['o1'] == 'p1'), ['a', 'd']) self.assertData('a', {'k1': 'v1', 'x1': {'o1': 'p1'}}) self.assertData('d', {'x1': {'o1': 'p1', 'y2': 'z2'}}) def test_remove(self): KeyData = self.M query = (KeyData .update(data=KeyData.data['k1'].remove()) .where(KeyData.data['k1'] == 'v1')) self.assertEqual(query.execute(), 2) self.assertData('a', {'x1': {'y1': 'z1'}}) self.assertData('c', {'k2': 'v2'}) nrows = (KeyData .update(data=KeyData.data['l2'][1][1].remove()) .where(KeyData.key == 'e') .execute()) self.assertData('e', {'l1': [0, 1, 2], 'l2': [1, [3], 7]}) def test_simple_update(self): KeyData = self.M nrows = (KeyData .update(data={'foo': 'bar'}) .where(KeyData.key.in_(['a', 'b'])) .execute()) self.assertData('a', {'foo': 'bar'}) self.assertData('b', {'foo': 'bar'}) def test_children(self): KeyData = self.M children = KeyData.data.children().alias('children') query = (KeyData .select(KeyData.key, children.c.fullkey.alias('fullkey')) .from_(KeyData, children) .where(~children.c.fullkey.contains('k')) .order_by(KeyData.id, SQL('fullkey'))) accum = [(row.key, row.fullkey) for row in query] self.assertEqual(accum, [ ('a', '$.x1'), ('b', '$.x2'), ('d', '$.x1'), ('e', '$.l1'), ('e', '$.l2')]) def test_tree(self): KeyData = self.M tree = KeyData.data.tree().alias('tree') query = (KeyData .select(tree.c.fullkey.alias('fullkey')) .from_(KeyData, tree) .where(KeyData.key == 'd') .order_by(SQL('1')) .tuples()) self.assertEqual([fullkey for fullkey, in query], [ '$', '$.x1', '$.x1.y1', '$.x1.y2']) @skip_unless(jsonb_installed(), 'requires sqlite jsonb support') class TestJSONBFieldFunctions(TestJSONFieldFunctions): requires = [JBData] M = JBData def assertData(self, key, expected): q = 
JBData.select(fn.json(JBData.data)).where(JBData.key == key) self.assertEqual(q.get().data, expected) def test_extract_multiple(self): # We need to override this, otherwise we end up with jsonb returned. expr = fn.json(JBData.data.extract('$.k1', '$.k2')) query = JBData.select( JBData.key, expr.python_value(json.loads).alias('keys')) self.assertEqual(sorted((k.key, k.keys) for k in query), [ ('a', ['v1', None]), ('b', [None, 'v2']), ('c', ['v1', 'v2']), ('d', [None, None]), ('e', [None, None])]) class TestSqliteExtensions(BaseTestCase): def test_virtual_model(self): class Test(VirtualModel): class Meta: database = database extension_module = 'ext1337' legacy_table_names = False options = {'huey': 'cat', 'mickey': 'dog'} primary_key = False class SubTest(Test): pass self.assertSQL(Test._schema._create_table(), ( 'CREATE VIRTUAL TABLE IF NOT EXISTS "test" ' 'USING ext1337 ' '(huey=cat, mickey=dog)'), []) self.assertSQL(SubTest._schema._create_table(), ( 'CREATE VIRTUAL TABLE IF NOT EXISTS "sub_test" ' 'USING ext1337 ' '(huey=cat, mickey=dog)'), []) self.assertSQL( Test._schema._create_table(huey='kitten', zaizee='cat'), ('CREATE VIRTUAL TABLE IF NOT EXISTS "test" ' 'USING ext1337 (huey=kitten, mickey=dog, zaizee=cat)'), []) def test_autoincrement_field(self): class AutoIncrement(TestModel): id = AutoIncrementField() data = TextField() class Meta: database = database self.assertSQL(AutoIncrement._schema._create_table(), ( 'CREATE TABLE IF NOT EXISTS "auto_increment" ' '("id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, ' '"data" TEXT NOT NULL)'), []) class BaseFTSTestCase(object): messages = ( ('A faith is a necessity to a man. Woe to him who believes in ' 'nothing.'), ('All who call on God in true faith, earnestly from the heart, will ' 'certainly be heard, and will receive what they have asked and ' 'desired.'), ('Be faithful in small things because it is in them that your ' 'strength lies.'), ('Faith consists in believing when it is beyond the power of reason ' 'to believe.'), ('Faith has to do with things that are not seen and hope with things ' 'that are not at hand.')) values = ( ('aaaaa bbbbb ccccc ddddd', 'aaaaa ccccc', 'zzzzz zzzzz', 1), ('bbbbb ccccc ddddd eeeee', 'bbbbb', 'zzzzz', 2), ('ccccc ccccc ddddd fffff', 'ccccc', 'yyyyy', 3), ('ddddd', 'ccccc', 'xxxxx', 4)) def assertMessages(self, query, indexes): self.assertEqual([obj.message for obj in query], [self.messages[idx] for idx in indexes]) class TestFullTextSearch(BaseFTSTestCase, ModelTestCase): database = database requires = [ Post, ContentPost, ContentPostMessage, Document, MultiColumn] @requires_models(Document) def test_fts_insert_or_replace(self): # We can use replace to create a new row. n = Document.replace(docid=100, message='m100').execute() self.assertEqual(n, 100) self.assertEqual(Document.select().count(), 1) # We can use replace to update an existing row. n = Document.replace(docid=100, message='x100').execute() self.assertEqual(n, 100) self.assertEqual(Document.select().count(), 1) # Adds a new row. 
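# The value returned by execute() for an insert-or-replace is the rowid
# of the affected row (the cursor's lastrowid), which is why it tracks
# the docid we supply rather than a count of modified rows.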
n = Document.replace(docid=101, message='x101').execute() self.assertEqual(n, 101) self.assertEqual(Document.select().count(), 2) query = Document.select().order_by(Document.message) self.assertEqual(list(query.tuples()), [(100, 'x100'), (101, 'x101')]) @requires_models(Document) def test_fts_manual(self): messages = [Document.create(message=message) for message in self.messages] query = (Document .select() .where(Document.match('believe')) .order_by(Document.docid)) self.assertMessages(query, [0, 3]) query = Document.search('believe') self.assertMessages(query, [3, 0]) # Test peewee's "rank" algorithm, as presented in the SQLite FTS3 docs. query = Document.search('things', with_score=True) self.assertEqual([(row.message, row.score) for row in query], [ (self.messages[4], -2. / 3), (self.messages[2], -1. / 3)]) # Test peewee's bm25 ranking algorithm. query = Document.search_bm25('things', with_score=True) self.assertEqual([(d.message, round(d.score, 2)) for d in query], [ (self.messages[4], -0.45), (self.messages[2], -0.36)]) # Another test of bm25 ranking. query = Document.search_bm25('believe', with_score=True) self.assertEqual([(d.message, round(d.score, 2)) for d in query], [ (self.messages[3], -0.49), (self.messages[0], -0.35)]) query = Document.search_bm25('god faith', with_score=True) self.assertEqual([(d.message, round(d.score, 2)) for d in query], [ (self.messages[1], -0.92)]) query = Document.search_bm25('"it is"', with_score=True) self.assertEqual([(d.message, round(d.score, 2)) for d in query], [ (self.messages[2], -0.36), (self.messages[3], -0.36)]) def test_fts_delete_row(self): posts = [Post.create(message=msg) for msg in self.messages] ContentPost.rebuild() query = (ContentPost .select(ContentPost, ContentPost.rank().alias('score')) .where(ContentPost.match('believe')) .order_by(ContentPost.docid)) self.assertMessages(query, [0, 3]) query = (ContentPost .select(ContentPost.docid) .order_by(ContentPost.docid)) for content_post in query: self.assertEqual(content_post.delete_instance(), 1) for post in posts: self.assertEqual( (ContentPost .delete() .where(ContentPost.message == post.message) .execute()), 1) # None of the deletes were processed since the table is managed. self.assertEqual(ContentPost.select().count(), 5) documents = [Document.create(message=message) for message in self.messages] self.assertEqual(Document.select().count(), 5) for document in documents: self.assertEqual( (Document .delete() .where(Document.message == document.message) .execute()), 1) self.assertEqual(Document.select().count(), 0) def _create_multi_column(self): for c1, c2, c3, c4 in self.values: MultiColumn.create(c1=c1, c2=c2, c3=c3, c4=c4) @requires_models(MultiColumn) def test_fts_multi_column(self): def assertResults(term, expected): results = [(x.c4, round(x.score, 2)) for x in MultiColumn.search(term, with_score=True)] self.assertEqual(results, expected) self._create_multi_column() assertResults('bbbbb', [ (2, -1.5), # 1/2 + 1/1 (1, -0.5)]) # 1/2 # `ccccc` appears four times in `c1`, three times in `c2`. assertResults('ccccc', [ (3, -.83), # 2/4 + 1/3 (1, -.58), # 1/4 + 1/3 (4, -.33), # 1/3 (2, -.25), # 1/4 ]) # `zzzzz` appears three times in c3. 
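# With the simple rank algorithm, each matched column contributes
# (hits in this row) / (total hits in that column), so row 1 scores 2/3
# and row 2 scores 1/3, negated so that ascending order puts the best
# match first.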
assertResults('zzzzz', [(1, -.67), (2, -.33)]) self.assertEqual( [x.score for x in MultiColumn.search('ddddd', with_score=True)], [-.25, -.25, -.25, -.25]) @requires_models(MultiColumn) def test_bm25(self): def assertResults(term, expected): query = MultiColumn.search_bm25(term, [1.0, 0, 0, 0], True) self.assertEqual( [(mc.c4, round(mc.score, 2)) for mc in query], expected) self._create_multi_column() MultiColumn.create(c1='aaaaa fffff', c4=5) assertResults('aaaaa', [(5, -0.39), (1, -0.3)]) assertResults('fffff', [(5, -0.39), (3, -0.3)]) assertResults('eeeee', [(2, -0.97)]) # No column specified, use the first text field. query = MultiColumn.search_bm25('fffff', [1.0, 0, 0, 0], True) self.assertEqual([(mc.c4, round(mc.score, 2)) for mc in query], [ (5, -0.39), (3, -0.3)]) # Use helpers. query = (MultiColumn .select( MultiColumn.c4, MultiColumn.bm25(1.0).alias('score')) .where(MultiColumn.match('aaaaa')) .order_by(SQL('score'))) self.assertEqual([(mc.c4, round(mc.score, 2)) for mc in query], [ (5, -0.39), (1, -0.3)]) def assertAllColumns(term, expected): query = MultiColumn.search_bm25(term, with_score=True) self.assertEqual( [(mc.c4, round(mc.score, 2)) for mc in query], expected) assertAllColumns('aaaaa ddddd', [(1, -1.08)]) assertAllColumns('zzzzz ddddd', [(1, -0.36), (2, -0.34)]) assertAllColumns('ccccc bbbbb ddddd', [(2, -1.39), (1, -0.3)]) @requires_models(Document) def test_bm25_alt_corpus(self): for message in self.messages: Document.create(message=message) query = Document.search_bm25('things', with_score=True) self.assertEqual([(d.message, round(d.score, 2)) for d in query], [ (self.messages[4], -0.45), (self.messages[2], -0.36)]) query = Document.search_bm25('believe', with_score=True) self.assertEqual([(d.message, round(d.score, 2)) for d in query], [ (self.messages[3], -0.49), (self.messages[0], -0.35)]) # Indeterminate order since all are 0.0. All phrases contain the word # faith, so there is no meaningful score. query = Document.search_bm25('faith', with_score=True) self.assertEqual([round(d.score, 2) for d in query], [-0.] * 5) def _test_fts_auto(self, ModelClass): posts = [] for message in self.messages: posts.append(Post.create(message=message)) # Nothing matches, index is not built. 
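# ContentPost and ContentPostMessage are external-content FTS tables
# (their options include content=...), so the FTS index starts out
# empty and must be populated explicitly via rebuild() before MATCH
# returns any rows.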
pq = ModelClass.select().where(ModelClass.match('faith')) self.assertEqual(list(pq), []) ModelClass.rebuild() ModelClass.optimize() # it will stem faithful -> faith b/c we use the porter tokenizer pq = (ModelClass .select() .where(ModelClass.match('faith')) .order_by(ModelClass.docid)) self.assertMessages(pq, range(len(self.messages))) pq = (ModelClass .select() .where(ModelClass.match('believe')) .order_by(ModelClass.docid)) self.assertMessages(pq, [0, 3]) pq = (ModelClass .select() .where(ModelClass.match('thin*')) .order_by(ModelClass.docid)) self.assertMessages(pq, [2, 4]) pq = (ModelClass .select() .where(ModelClass.match('"it is"')) .order_by(ModelClass.docid)) self.assertMessages(pq, [2, 3]) pq = ModelClass.search('things', with_score=True) self.assertEqual([(x.message, x.score) for x in pq], [ (self.messages[4], -2.0 / 3), (self.messages[2], -1.0 / 3), ]) pq = (ModelClass .select(ModelClass.rank()) .where(ModelClass.match('faithful')) .tuples()) self.assertEqual([x[0] for x in pq], [-.2] * 5) pq = (ModelClass .search('faithful', with_score=True) .dicts()) self.assertEqual([x['score'] for x in pq], [-.2] * 5) def test_fts_auto_model(self): self._test_fts_auto(ContentPost) def test_fts_auto_field(self): self._test_fts_auto(ContentPostMessage) def test_weighting(self): self._create_multi_column() def assertResults(method, term, weights, expected): results = [ (x.c4, round(x.score, 2)) for x in method(term, weights=weights, with_score=True)] self.assertEqual(results, expected) assertResults(MultiColumn.search, 'bbbbb', None, [ (2, -1.5), # 1/2 + 1/1 (1, -0.5), # 1/2 ]) assertResults(MultiColumn.search, 'bbbbb', [1., 5., 0.], [ (2, -5.5), # 1/2 + (5 * 1/1) (1, -0.5), # 1/2 + (5 * 0) ]) assertResults(MultiColumn.search, 'bbbbb', [1., .5, 0.], [ (2, -1.), # 1/2 + (.5 * 1/1) (1, -0.5), # 1/2 + (.5 * 0) ]) assertResults(MultiColumn.search, 'bbbbb', [1., -1., 0.], [ (1, -0.5), # 1/2 + (-1 * 0) (2, 0.5), # 1/2 + (-1 * 1/1) ]) # BM25 assertResults(MultiColumn.search_bm25, 'bbbbb', None, [ (2, -0.85), (1, -0.)]) assertResults(MultiColumn.search_bm25, 'bbbbb', [1., 5., 0.], [ (2, -4.24), (1, -0.)]) assertResults(MultiColumn.search_bm25, 'bbbbb', [1., .5, 0.], [ (2, -0.42), (1, -0.)]) assertResults(MultiColumn.search_bm25, 'bbbbb', [1., -1., 0.], [ (1, -0.), (2, 0.85)]) def test_fts_match_single_column(self): data = ( ('m1c1 aaaa', 'm1c2 bbbb', 'm1c3 cccc'), ('m2c1 dddd', 'm2c2 eeee', 'm2c3 ffff'), ('m3c1 cccc', 'm3c2 bbbb', 'm3c3 aaaa'), ) for c1, c2, c3 in data: MultiColumn.create(c1=c1, c2=c2, c3=c3, c4=0) def assertSearch(field, value, expected): query = (MultiColumn .select() .where(field.match(value)) .order_by(MultiColumn.c1)) self.assertEqual([mc.c1[:2] for mc in query], expected) assertSearch(MultiColumn.c1, 'aaaa', ['m1']) assertSearch(MultiColumn.c1, 'bbbb', []) assertSearch(MultiColumn.c1, 'cccc', ['m3']) assertSearch(MultiColumn.c2, 'bbbb', ['m1', 'm3']) assertSearch(MultiColumn.c2, 'eeee', ['m2']) assertSearch(MultiColumn.c3, 'cccc', ['m1']) assertSearch(MultiColumn.c3, 'aaaa', ['m3']) def test_fts_score_single_column(self): data = ( ('m1c1 aaaa', 'm1c2 bbbb', 'm1c3 cccc'), ('m2c1 dddd', 'm2c2 eeee', 'm2c3 ffff'), ('m3c1 cccc', 'm3c2 bbbb aaaa', 'm3c3 aaaa aaaa'), ) for c1, c2, c3 in data: MultiColumn.create(c1=c1, c2=c2, c3=c3, c4=0) def assertQueryScore(field, search_term, expected, *weights): rank = MultiColumn.bm25(*weights) query = (MultiColumn .select(MultiColumn, rank.alias('score')) .where(field.match(search_term)) .order_by(rank)) results = [(r.c1[:2], round(r.score, 2)) 
for r in query] self.assertEqual(results, expected) assertQueryScore(MultiColumn.c1, 'aaaa', [('m1', -0.51)]) assertQueryScore(MultiColumn.c1, 'dddd', [('m2', -0.51)]) assertQueryScore(MultiColumn.c2, 'bbbb', [('m1', -0.), ('m3', -0.)]) assertQueryScore(MultiColumn.c2, 'eeee', [('m2', -0.51)]) assertQueryScore(MultiColumn.c3, 'aaaa', [('m3', -0.62)]) assertQueryScore(MultiColumn.c1, 'aaaa', [('m1', -1.02)], 2., 0., 0.) assertQueryScore(MultiColumn.c2, 'bbbb', [('m1', -0.), ('m3', -0.)], 0., 1.0, 0.) assertQueryScore(MultiColumn.c2, 'eeee', [('m2', -1.02)], 0., 2., 0.) assertQueryScore(MultiColumn.c3, 'aaaa', [('m3', -0.31)], 0., 1., 0.5) @skip_unless(compile_option('enable_fts4')) @requires_models(MultiColumn) def test_match_column_queries(self): data = ( ('alpha one', 'apple aspires to ace artsy beta launch'), ('beta two', 'beta boasts better broadcast over apple'), ('gamma three', 'gold gray green gamma ray delta data'), ('delta four', 'delta data indicates downturn for apple beta'), ) MC = MultiColumn for i, (title, message) in enumerate(data): MC.create(c1=title, c2=message, c3='', c4=i) def assertQ(expr, idxscore): q = (MC .select(MC, MC.bm25().alias('score')) .where(expr) .order_by(SQL('score'), MC.c4)) self.assertEqual([(r.c4, round(r.score, 2)) for r in q], idxscore) # Single whitespace does not affect the mapping of col->term. We can # also store the column value in quotes if single-quotes are used. assertQ(MC.match('beta'), [(1, -0.85), (0, -0.), (3, -0.)]) assertQ(MC.match('c1:beta'), [(1, -0.85)]) assertQ(MC.match('c1: beta'), [(1, -0.85)]) assertQ(MC.match('c1: ^bet*'), [(1, -0.85)]) assertQ(MC.match('c1: \'beta\''), [(1, -0.85)]) assertQ(MC.match('"beta"'), [(1, -0.85), (0, -0.), (3, -0.)]) # Alternatively, just specify the column explicitly. assertQ(MC.c1.match('beta'), [(1, -0.85)]) assertQ(MC.c1.match(' beta '), [(1, -0.85)]) assertQ(MC.c1.match('"beta"'), [(1, -0.85)]) assertQ(MC.c1.match('"^bet*"'), [(1, -0.85)]) # apple beta delta gamma # 0 | alpha | X X # 1 | beta | X X # 2 | gamma | X X # 3 | delta | X X X # assertQ(MC.match('delta NOT gamma'), [(3, -0.85)]) assertQ(MC.match('delta NOT c2:gamma'), [(3, -0.85)]) assertQ(MC.match('"delta"'), [(3, -0.85), (2, -0.)]) assertQ(MC.match('c1:delta OR c2:delta'), [(3, -0.85), (2, -0.)]) assertQ(MC.match('"^delta"'), [(3, -1.69)]) assertQ(MC.match('(delta AND c2:apple) OR c1:alpha'), [(3, -0.85), (0, -0.85)]) assertQ(MC.match('(c2:delta AND c2:apple) OR c1:alpha'), [(0, -0.85), (3, -0.)]) assertQ(MC.match('c2:delta c2:apple OR c1:alpha'), [(0, -0.85), (3, -0.)]) assertQ(MC.match('(c2:delta AND c2:apple) OR beta'), [(1, -0.85), (3, -0.), (0, -0.)]) assertQ(MC.match('c2:delta AND (c2:apple OR c1:alpha)'), [(3, -0.)]) # c2 apple (0,1,3) OR (...irrelevant...). assertQ(MC.match('c2:apple OR c1:alpha NOT delta'), [(0, -0.85), (1, -0.), (3, -0.)]) assertQ(MC.match('c2:apple OR (c1:alpha NOT c2:delta)'), [(0, -0.85), (1, -0.), (3, -0.)]) # c2 apple OR c1 alpha (0, 1, 3) AND NOT delta (2, 3) -> (0, 1). 
assertQ(MC.match('(c2:apple OR c1:alpha) NOT delta'), [(0, -0.85), (1, -0.)]) @skip_unless(CYTHON_EXTENSION, 'requires sqlite c extension') class TestFullTextSearchCython(TestFullTextSearch): database = SqliteExtDatabase(':memory:', c_extensions=CYTHON_EXTENSION) def test_c_extensions(self): self.assertTrue(self.database._c_extensions) self.assertTrue(Post._meta.database._c_extensions) def test_bm25f(self): def assertResults(term, expected): query = MultiColumn.search_bm25f(term, [1.0, 0, 0, 0], True) self.assertEqual( [(mc.c4, round(mc.score, 2)) for mc in query], expected) self._create_multi_column() MultiColumn.create(c1='aaaaa fffff', c4=5) assertResults('aaaaa', [(5, -0.76), (1, -0.62)]) assertResults('fffff', [(5, -0.76), (3, -0.65)]) assertResults('eeeee', [(2, -2.13)]) # No column specified, use the first text field. query = MultiColumn.search_bm25f('aaaaa OR fffff', [1., 3., 0, 0], 1) self.assertEqual([(mc.c4, round(mc.score, 2)) for mc in query], [ (1, -14.18), (5, -12.01), (3, -11.48)]) def test_lucene(self): for message in self.messages: Document.create(message=message) def assertResults(term, expected, sort_cleaned=False): query = Document.search_lucene(term, with_score=True) cleaned = [ (round(doc.score, 3), ' '.join(doc.message.split()[:2])) for doc in query] if sort_cleaned: cleaned = sorted(cleaned) self.assertEqual(cleaned, expected) assertResults('things', [ (-0.166, 'Faith has'), (-0.137, 'Be faithful')]) assertResults('faith', [ (0.036, 'All who'), (0.042, 'Faith has'), (0.047, 'A faith'), (0.049, 'Be faithful'), (0.049, 'Faith consists')], sort_cleaned=True) @skip_unless(FTS5Model.fts5_installed(), 'requires fts5') class TestFTS5(BaseFTSTestCase, ModelTestCase): database = database requires = [FTS5Test] test_corpus = ( ('foo aa bb', 'aa bb cc ' * 10, 1), ('bar bb cc', 'bb cc dd ' * 9, 2), ('baze cc dd', 'cc dd ee ' * 8, 3), ('nug aa dd', 'bb cc ' * 7, 4)) def setUp(self): super(TestFTS5, self).setUp() for title, data, misc in self.test_corpus: FTS5Test.create(title=title, data=data, misc=misc) def test_create_table(self): query = FTS5Test._schema._create_table() self.assertSQL(query, ( 'CREATE VIRTUAL TABLE IF NOT EXISTS "fts5_test" USING fts5 ' '("title", "data", "misc" UNINDEXED)'), []) def test_custom_fts5_command(self): merge_sql = FTS5Test._fts_cmd_sql('merge', rank=4) self.assertSQL(merge_sql, ( 'INSERT INTO "fts5_test" ("fts5_test", "rank") VALUES (?, ?)'), ['merge', 4]) FTS5Test.merge(4) # Runs without error. 
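# FTS5 exposes its maintenance commands as INSERTs into the virtual
# table itself (the SQL asserted above); the integrity-check and
# optimize calls below go through the same mechanism.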
FTS5Test.insert_many([{'title': 'k%08d' % i, 'data': 'v%08d' % i} for i in range(100)]).execute() FTS5Test.integrity_check(rank=0) FTS5Test.optimize() def test_create_table_options(self): class Test1(FTS5Model): f1 = SearchField() f2 = SearchField(unindexed=True) f3 = SearchField() class Meta: database = self.database options = { 'prefix': (2, 3), 'tokenize': 'porter unicode61', 'content': Post, 'content_rowid': Post.id} query = Test1._schema._create_table() self.assertSQL(query, ( 'CREATE VIRTUAL TABLE IF NOT EXISTS "test1" USING fts5 (' '"f1", "f2" UNINDEXED, "f3", ' 'content="post", content_rowid="id", ' 'prefix=\'2,3\', tokenize="porter unicode61")'), []) def assertResults(self, query, expected, scores=False, alias='score'): if scores: results = [(obj.title, round(getattr(obj, alias), 7)) for obj in query] else: results = [obj.title for obj in query] self.assertEqual(results, expected) def test_search(self): query = FTS5Test.search('bb') self.assertSQL(query, ( 'SELECT "t1"."rowid", "t1"."title", "t1"."data", "t1"."misc" ' 'FROM "fts5_test" AS "t1" ' 'WHERE ("fts5_test" MATCH ?) ORDER BY rank'), ['bb']) self.assertResults(query, ['nug aa dd', 'foo aa bb', 'bar bb cc']) self.assertResults(FTS5Test.search('baze OR dd'), ['baze cc dd', 'bar bb cc', 'nug aa dd']) @requires_models(FTS5Document) def test_fts_manual(self): messages = [FTS5Document.create(message=message) for message in self.messages] query = (FTS5Document .select() .where(FTS5Document.match('believe')) .order_by(FTS5Document.rowid)) self.assertMessages(query, [0, 3]) query = FTS5Document.search('believe') self.assertMessages(query, [3, 0]) # Test SQLite's built-in ranking algorithm (bm25). The results should # be comparable to our user-defined implementation. query = FTS5Document.search('things', with_score=True) self.assertEqual([(d.message, round(d.score, 2)) for d in query], [ (self.messages[4], -0.45), (self.messages[2], -0.37)]) # Another test of bm25 ranking. query = FTS5Document.search_bm25('believe', with_score=True) self.assertEqual([(d.message, round(d.score, 2)) for d in query], [ (self.messages[3], -0.49), (self.messages[0], -0.36)]) query = FTS5Document.search_bm25('god faith', with_score=True) self.assertEqual([(d.message, round(d.score, 2)) for d in query], [ (self.messages[1], -0.93)]) query = FTS5Document.search_bm25('"it is"', with_score=True) self.assertEqual([(d.message, round(d.score, 2)) for d in query], [ (self.messages[2], -0.37), (self.messages[3], -0.37)]) def test_match_column_queries(self): data = ( ('alpha one', 'apple aspires to ace artsy beta launch'), ('beta two', 'beta boasts better broadcast over apple'), ('gamma three', 'gold gray green gamma ray delta data'), ('delta four', 'delta data indicates downturn for apple beta'), ) FT = FTS5Test for i, (title, message) in enumerate(data): FT.create(title=title, data=message, misc=str(i)) def assertQ(expr, idxscore): q = (FT .select(FT, FT.bm25().alias('score')) .where(expr) .order_by(SQL('score'), FT.misc.cast('int'))) self.assertEqual([(int(r.misc), round(r.score, 2)) for r in q], idxscore) # Single whitespace does not affect the mapping of col->term. We can # also store the column value in quotes if single-quotes are used. assertQ(FT.match('beta'), [(1, -0.74), (0, -0.57), (3, -0.57)]) assertQ(FT.match('title: beta'), [(1, -2.08)]) assertQ(FT.match('title: ^bet*'), [(1, -2.08)]) assertQ(FT.match('title: "beta"'), [(1, -2.08)]) assertQ(FT.match('"beta"'), [(1, -0.74), (0, -0.57), (3, -0.57)]) # Alternatively, just specify the column explicitly. 
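# A match against a single SearchField compiles to ("title" MATCH ?),
# which FTS5 treats as a column filter, so only row 1 ('beta two') can
# match here.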
assertQ(FT.title.match('beta'), [(1, -2.08)]) assertQ(FT.title.match(' beta '), [(1, -2.08)]) assertQ(FT.title.match('"beta"'), [(1, -2.08)]) assertQ(FT.title.match('^bet*'), [(1, -2.08)]) assertQ(FT.title.match('"^bet*"'), []) # No wildcards in quotes! # apple beta delta gamma # 0 | alpha | X X # 1 | beta | X X # 2 | gamma | X X # 3 | delta | X X X # assertQ(FT.match('delta NOT gamma'), [(3, -1.53)]) assertQ(FT.match('delta NOT data:gamma'), [(3, -1.53)]) assertQ(FT.match('"delta"'), [(3, -1.53), (2, -1.2)]) assertQ(FT.match('title:delta OR data:delta'), [(3, -3.21), (2, -1.2)]) assertQ(FT.match('"^delta"'), [(3, -1.53), (2, -1.2)]) # Different. assertQ(FT.match('^delta'), [(3, -2.57)]) # Different from FTS4. assertQ(FT.match('(delta AND data:apple) OR title:alpha'), [(3, -2.09), (0, -2.02)]) assertQ(FT.match('(data:delta AND data:apple) OR title:alpha'), [(0, -2.02), (3, -1.76)]) assertQ(FT.match('data:delta data:apple OR title:alpha'), [(0, -2.02), (3, -1.76)]) assertQ(FT.match('(data:delta AND data:apple) OR beta'), [(3, -2.33), (1, -0.74), (0, -0.57)]) assertQ(FT.match('data:delta AND (data:apple OR title:alpha)'), [(3, -1.76)]) # data apple (0,1,3) OR (...irrelevant...). assertQ(FT.match('data:apple OR title:alpha NOT delta'), [(0, -2.58), (1, -0.58), (3, -0.57)]) assertQ(FT.match('data:apple OR (title:alpha NOT data:delta)'), [(0, -2.58), (1, -0.58), (3, -0.57)]) # data apple OR title alpha (0, 1, 3) AND NOT delta (2, 3) -> (0, 1). assertQ(FT.match('(data:apple OR title:alpha) NOT delta'), [(0, -2.58), (1, -0.58)]) def test_highlight_function(self): query = (FTS5Test .search('dd') .select(FTS5Test.title.highlight('[', ']').alias('hi'))) accum = [row.hi for row in query] self.assertEqual(accum, ['baze cc [dd]', 'bar bb cc', 'nug aa [dd]']) query = (FTS5Test .search('bb') .select(FTS5Test.data.highlight('[', ']').alias('hi'))) accum = [row.hi[:7] for row in query] self.assertEqual(accum, ['[bb] cc', 'aa [bb]', '[bb] cc']) def test_snippet_function(self): snip = FTS5Test.data.snippet('[', ']', max_tokens=5).alias('snip') query = FTS5Test.search('dd').select(snip) accum = [row.snip for row in query] self.assertEqual(accum, [ 'cc [dd] ee cc [dd]...', 'bb cc [dd] bb cc...', 'bb cc bb cc bb...']) @skip_unless(CYTHON_EXTENSION, 'requires sqlite c extension') class TestMurmurHash(ModelTestCase): database = SqliteExtDatabase(':memory:', c_extensions=CYTHON_EXTENSION, hash_functions=True) def assertHash(self, s, e, fn_name='murmurhash'): func = getattr(fn, fn_name) query = Select(columns=[func(s)]) cursor = self.database.execute(query) self.assertEqual(cursor.fetchone()[0], e) @skip_if(sys.byteorder == 'big', 'fails on big endian') def test_murmur_hash(self): self.assertHash('testkey', 2871421366) self.assertHash('murmur', 3883399899) self.assertHash('', 0) self.assertHash('this is a test of a longer string', 2569735385) self.assertHash(None, None) @skip_if(sys.version_info[0] == 3, 'requires python 2') def test_checksums(self): self.assertHash('testkey', -225678656, 'crc32') self.assertHash('murmur', 1507884895, 'crc32') self.assertHash('', 0, 'crc32') self.assertHash('testkey', 203686666, 'adler32') self.assertHash('murmur', 155714217, 'adler32') self.assertHash('', 1, 'adler32') class TestUserDefinedCallbacks(ModelTestCase): database = database requires = [Post, Values] def test_custom_agg(self): data = ( (1, 3.4, 1.0), (1, 6.4, 2.3), (1, 4.3, 0.9), (2, 3.4, 1.4), (3, 2.7, 1.1), (3, 2.5, 1.1), ) for klass, value, wt in data: Values.create(klass=klass, value=value, weight=wt) vq = (Values
.select( Values.klass, fn.weighted_avg(Values.value).alias('wtavg'), fn.avg(Values.value).alias('avg')) .group_by(Values.klass)) q_data = [(v.klass, v.wtavg, v.avg) for v in vq] self.assertEqual(q_data, [ (1, 4.7, 4.7), (2, 3.4, 3.4), (3, 2.6, 2.6)]) vq = (Values .select( Values.klass, fn.weighted_avg2(Values.value, Values.weight).alias('wtavg'), fn.avg(Values.value).alias('avg')) .group_by(Values.klass)) q_data = [(v.klass, str(v.wtavg)[:4], v.avg) for v in vq] self.assertEqual(q_data, [ (1, '5.23', 4.7), (2, '3.4', 3.4), (3, '2.6', 2.6)]) def test_custom_collation(self): for i in [1, 4, 3, 5, 2]: Post.create(message='p%d' % i) pq = Post.select().order_by(NodeList((Post.message, SQL('collate collate_reverse')))) self.assertEqual([p.message for p in pq], ['p5', 'p4', 'p3', 'p2', 'p1']) def test_collation_decorator(self): posts = [Post.create(message=m) for m in ['aaa', 'Aab', 'ccc', 'Bba', 'BbB']] pq = Post.select().order_by(collate_case_insensitive.collation(Post.message)) self.assertEqual([p.message for p in pq], [ 'aaa', 'Aab', 'Bba', 'BbB', 'ccc']) def test_custom_function(self): p1 = Post.create(message='this is a test') p2 = Post.create(message='another TEST') sq = Post.select().where(fn.title_case(Post.message) == 'This Is A Test') self.assertEqual(list(sq), [p1]) sq = Post.select(fn.title_case(Post.message)).tuples() self.assertEqual([x[0] for x in sq], [ 'This Is A Test', 'Another Test', ]) def test_function_decorator(self): [Post.create(message=m) for m in ['testing', 'chatting ', ' foo']] pq = Post.select(fn.rstrip(Post.message, 'ing')).order_by(Post.id) self.assertEqual([x[0] for x in pq.tuples()], [ 'test', 'chatting ', ' foo']) pq = Post.select(fn.rstrip(Post.message, ' ')).order_by(Post.id) self.assertEqual([x[0] for x in pq.tuples()], [ 'testing', 'chatting', ' foo']) def test_use_across_connections(self): db = get_in_memory_db() @db.func() def rev(s): return s[::-1] db.connect(); db.close(); db.connect() curs = db.execute_sql('select rev(?)', ('hello',)) self.assertEqual(curs.fetchone(), ('olleh',)) class TestRowIDField(ModelTestCase): database = database requires = [RowIDModel] def test_model_meta(self): self.assertEqual(RowIDModel._meta.sorted_field_names, ['rowid', 'data']) self.assertEqual(RowIDModel._meta.primary_key.name, 'rowid') self.assertTrue(RowIDModel._meta.auto_increment) def test_rowid_field(self): r1 = RowIDModel.create(data=10) self.assertEqual(r1.rowid, 1) self.assertEqual(r1.data, 10) r2 = RowIDModel.create(data=20) self.assertEqual(r2.rowid, 2) self.assertEqual(r2.data, 20) query = RowIDModel.select().where(RowIDModel.rowid == 2) self.assertSQL(query, ( 'SELECT "t1"."rowid", "t1"."data" ' 'FROM "row_id_model" AS "t1" ' 'WHERE ("t1"."rowid" = ?)'), [2]) r_db = query.get() self.assertEqual(r_db.rowid, 2) self.assertEqual(r_db.data, 20) r_db2 = query.columns(RowIDModel.rowid, RowIDModel.data).get() self.assertEqual(r_db2.rowid, 2) self.assertEqual(r_db2.data, 20) def test_insert_with_rowid(self): RowIDModel.insert({RowIDModel.rowid: 5, RowIDModel.data: 1}).execute() self.assertEqual(5, RowIDModel.select(RowIDModel.rowid).first().rowid) def test_insert_many_with_rowid_without_field_validation(self): RowIDModel.insert_many([{RowIDModel.rowid: 5, RowIDModel.data: 1}]).execute() self.assertEqual(5, RowIDModel.select(RowIDModel.rowid).first().rowid) def test_insert_many_with_rowid_with_field_validation(self): RowIDModel.insert_many([{RowIDModel.rowid: 5, RowIDModel.data: 1}], fields=[RowIDModel.rowid, RowIDModel.data]).execute() self.assertEqual(5, RowIDModel.select(RowIDModel.rowid).first().rowid) class
TestTransitiveClosure(BaseTestCase): def test_model_factory(self): class Category(TestModel): name = CharField() parent = ForeignKeyField('self', null=True) Closure = ClosureTable(Category) self.assertEqual(Closure._meta.extension_module, 'transitive_closure') self.assertEqual(Closure._meta.columns, {}) self.assertEqual(Closure._meta.fields, {}) self.assertFalse(Closure._meta.primary_key) self.assertEqual(Closure._meta.options, { 'idcolumn': 'id', 'parentcolumn': 'parent_id', 'tablename': 'category', }) class Alt(TestModel): pk = AutoField() ref = ForeignKeyField('self', null=True) Closure = ClosureTable(Alt) self.assertEqual(Closure._meta.columns, {}) self.assertEqual(Closure._meta.fields, {}) self.assertFalse(Closure._meta.primary_key) self.assertEqual(Closure._meta.options, { 'idcolumn': 'pk', 'parentcolumn': 'ref_id', 'tablename': 'alt', }) class NoForeignKey(TestModel): pass self.assertRaises(ValueError, ClosureTable, NoForeignKey) class BaseExtModel(TestModel): class Meta: database = database @skip_unless(CLOSURE_EXTENSION, 'requires closure table extension') class TestTransitiveClosureManyToMany(BaseTestCase): def setUp(self): super(TestTransitiveClosureManyToMany, self).setUp() database.load_extension(CLOSURE_EXTENSION.rstrip('.so')) database.close() def tearDown(self): super(TestTransitiveClosureManyToMany, self).tearDown() database.unload_extension(CLOSURE_EXTENSION.rstrip('.so')) database.close() def test_manytomany(self): class Person(BaseExtModel): name = CharField() class Relationship(BaseExtModel): person = ForeignKeyField(Person) relation = ForeignKeyField(Person, backref='related_to') PersonClosure = ClosureTable( Person, referencing_class=Relationship, foreign_key=Relationship.relation, referencing_key=Relationship.person) database.drop_tables([Person, Relationship, PersonClosure], safe=True) database.create_tables([Person, Relationship, PersonClosure]) c = Person.create(name='charlie') m = Person.create(name='mickey') h = Person.create(name='huey') z = Person.create(name='zaizee') Relationship.create(person=c, relation=h) Relationship.create(person=c, relation=m) Relationship.create(person=h, relation=z) Relationship.create(person=h, relation=m) def assertPeople(query, expected): self.assertEqual(sorted([p.name for p in query]), expected) PC = PersonClosure assertPeople(PC.descendants(c), []) assertPeople(PC.ancestors(c), ['huey', 'mickey', 'zaizee']) assertPeople(PC.siblings(c), ['huey']) assertPeople(PC.descendants(h), ['charlie']) assertPeople(PC.ancestors(h), ['mickey', 'zaizee']) assertPeople(PC.siblings(h), ['charlie']) assertPeople(PC.descendants(z), ['charlie', 'huey']) assertPeople(PC.ancestors(z), []) assertPeople(PC.siblings(z), []) @skip_unless(CLOSURE_EXTENSION and os.path.exists(CLOSURE_EXTENSION), 'requires closure extension') class TestTransitiveClosureIntegration(BaseTestCase): tree = { 'books': [ {'fiction': [ {'scifi': [ {'hard scifi': []}, {'dystopian': []}]}, {'westerns': []}, {'classics': []}, ]}, {'non-fiction': [ {'biographies': []}, {'essays': []}, ]}, ] } def setUp(self): super(TestTransitiveClosureIntegration, self).setUp() database.load_extension(CLOSURE_EXTENSION.rstrip('.so')) database.close() def tearDown(self): super(TestTransitiveClosureIntegration, self).tearDown() database.unload_extension(CLOSURE_EXTENSION.rstrip('.so')) database.close() def initialize_models(self): class Category(BaseExtModel): name = CharField() parent = ForeignKeyField('self', null=True) @classmethod def g(cls, name): return cls.get(cls.name == name) Closure = 
ClosureTable(Category) database.drop_tables([Category, Closure], safe=True) database.create_tables([Category, Closure]) def build_tree(nodes, parent=None): for name, subnodes in nodes.items(): category = Category.create(name=name, parent=parent) if subnodes: for subnode in subnodes: build_tree(subnode, category) build_tree(self.tree) return Category, Closure def assertNodes(self, query, *expected): self.assertEqual( set([category.name for category in query]), set(expected)) def test_build_tree(self): Category, Closure = self.initialize_models() self.assertEqual(Category.select().count(), 10) def test_descendants(self): Category, Closure = self.initialize_models() books = Category.g('books') self.assertNodes( Closure.descendants(books), 'fiction', 'scifi', 'hard scifi', 'dystopian', 'westerns', 'classics', 'non-fiction', 'biographies', 'essays') self.assertNodes(Closure.descendants(books, 0), 'books') self.assertNodes( Closure.descendants(books, 1), 'fiction', 'non-fiction') self.assertNodes( Closure.descendants(books, 2), 'scifi', 'westerns', 'classics', 'biographies', 'essays') self.assertNodes( Closure.descendants(books, 3), 'hard scifi', 'dystopian') fiction = Category.g('fiction') self.assertNodes( Closure.descendants(fiction), 'scifi', 'hard scifi', 'dystopian', 'westerns', 'classics') self.assertNodes( Closure.descendants(fiction, 1), 'scifi', 'westerns', 'classics') self.assertNodes( Closure.descendants(fiction, 2), 'hard scifi', 'dystopian') self.assertNodes( Closure.descendants(Category.g('scifi')), 'hard scifi', 'dystopian') self.assertNodes( Closure.descendants(Category.g('scifi'), include_node=True), 'scifi', 'hard scifi', 'dystopian') self.assertNodes(Closure.descendants(Category.g('hard scifi'), 1)) def test_ancestors(self): Category, Closure = self.initialize_models() hard_scifi = Category.g('hard scifi') self.assertNodes( Closure.ancestors(hard_scifi), 'scifi', 'fiction', 'books') self.assertNodes( Closure.ancestors(hard_scifi, include_node=True), 'hard scifi', 'scifi', 'fiction', 'books') self.assertNodes(Closure.ancestors(hard_scifi, 2), 'fiction') self.assertNodes(Closure.ancestors(hard_scifi, 3), 'books') non_fiction = Category.g('non-fiction') self.assertNodes(Closure.ancestors(non_fiction), 'books') self.assertNodes(Closure.ancestors(non_fiction, include_node=True), 'non-fiction', 'books') self.assertNodes(Closure.ancestors(non_fiction, 1), 'books') books = Category.g('books') self.assertNodes(Closure.ancestors(books, include_node=True), 'books') self.assertNodes(Closure.ancestors(books)) self.assertNodes(Closure.ancestors(books, 1)) def test_siblings(self): Category, Closure = self.initialize_models() self.assertNodes( Closure.siblings(Category.g('hard scifi')), 'dystopian') self.assertNodes( Closure.siblings(Category.g('hard scifi'), include_node=True), 'hard scifi', 'dystopian') self.assertNodes( Closure.siblings(Category.g('classics')), 'scifi', 'westerns') self.assertNodes( Closure.siblings(Category.g('classics'), include_node=True), 'scifi', 'westerns', 'classics') self.assertNodes( Closure.siblings(Category.g('fiction')), 'non-fiction') def test_tree_changes(self): Category, Closure = self.initialize_models() books = Category.g('books') fiction = Category.g('fiction') dystopian = Category.g('dystopian') essays = Category.g('essays') new_root = Category.create(name='products') Category.create(name='magazines', parent=new_root) books.parent = new_root books.save() dystopian.delete_instance() essays.parent = books essays.save() Category.create(name='rants', 
parent=essays) Category.create(name='poetry', parent=books) query = (Category .select(Category.name, Closure.depth) .join(Closure, on=(Category.id == Closure.id)) .where(Closure.root == new_root) .order_by(Closure.depth, Category.name) .tuples()) self.assertEqual(list(query), [ ('products', 0), ('books', 1), ('magazines', 1), ('essays', 2), ('fiction', 2), ('non-fiction', 2), ('poetry', 2), ('biographies', 3), ('classics', 3), ('rants', 3), ('scifi', 3), ('westerns', 3), ('hard scifi', 4), ]) def test_id_not_overwritten(self): class Node(BaseExtModel): parent = ForeignKeyField('self', null=True) name = CharField() NodeClosure = ClosureTable(Node) database.create_tables([Node, NodeClosure], safe=True) root = Node.create(name='root') c1 = Node.create(name='c1', parent=root) c2 = Node.create(name='c2', parent=root) query = NodeClosure.descendants(root) self.assertEqual(sorted([(n.id, n.name) for n in query]), [(c1.id, 'c1'), (c2.id, 'c2')]) database.drop_tables([Node, NodeClosure]) class KV(LSMTable): key = TextField(primary_key=True) val_b = BlobField() val_i = IntegerField() val_f = FloatField() val_t = TextField() class Meta: database = database filename = 'test_lsm.ldb' class KVS(LSMTable): key = TextField(primary_key=True) value = TextField() class Meta: database = database filename = 'test_lsm.ldb' class KVI(LSMTable): key = IntegerField(primary_key=True) value = TextField() class Meta: database = database filename = 'test_lsm.ldb' @skip_unless(LSM_EXTENSION and os.path.exists(LSM_EXTENSION), 'requires lsm1 sqlite extension') class TestLSM1Extension(BaseTestCase): def setUp(self): super(TestLSM1Extension, self).setUp() if os.path.exists(KV._meta.filename): os.unlink(KV._meta.filename) database.connect() database.load_extension(LSM_EXTENSION.rstrip('.so')) def tearDown(self): super(TestLSM1Extension, self).tearDown() database.unload_extension(LSM_EXTENSION.rstrip('.so')) database.close() if os.path.exists(KV._meta.filename): os.unlink(KV._meta.filename) def test_lsm_extension(self): self.assertSQL(KV._schema._create_table(), ( 'CREATE VIRTUAL TABLE IF NOT EXISTS "kv" USING lsm1 ' '("test_lsm.ldb", "key", TEXT, "val_b", "val_i", ' '"val_f", "val_t")'), []) self.assertSQL(KVS._schema._create_table(), ( 'CREATE VIRTUAL TABLE IF NOT EXISTS "kvs" USING lsm1 ' '("test_lsm.ldb", "key", TEXT, "value")'), []) self.assertSQL(KVI._schema._create_table(), ( 'CREATE VIRTUAL TABLE IF NOT EXISTS "kvi" USING lsm1 ' '("test_lsm.ldb", "key", UINT, "value")'), []) def test_lsm_crud_operations(self): database.create_tables([KV]) with database.transaction(): KV.create(key='k0', val_b=None, val_i=0, val_f=0.1, val_t='v0') v0 = KV['k0'] self.assertEqual(v0.key, 'k0') self.assertEqual(v0.val_b, None) self.assertEqual(v0.val_i, 0) self.assertEqual(v0.val_f, 0.1) self.assertEqual(v0.val_t, 'v0') self.assertRaises(KV.DoesNotExist, lambda: KV['k1']) # Test that updates work as expected. KV['k0'] = (None, 1338, 3.14, 'v2-e') v0_db = KV['k0'] self.assertEqual(v0_db.val_i, 1338) self.assertEqual(v0_db.val_f, 3.14) self.assertEqual(v0_db.val_t, 'v2-e') self.assertEqual(len([item for item in KV.select()]), 1) del KV['k0'] self.assertEqual(len([item for item in KV.select()]), 0) def test_insert_replace(self): database.create_tables([KVS]) KVS.insert({'key': 'k0', 'value': 'v0'}).execute() self.assertEqual(KVS['k0'], 'v0') KVS.replace({'key': 'k0', 'value': 'v0-e'}).execute() self.assertEqual(KVS['k0'], 'v0-e') # Implicit. 
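        # (LSMTable also exposes a dict-like API, exercised here and in the
        # tests below: item assignment upserts by key, indexing performs a
        # point lookup, "del" removes a key, and slicing runs a range query
        # over the primary key.)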
KVS['k0'] = 'v0-x' self.assertEqual(KVS['k0'], 'v0-x') def test_index_performance(self): database.create_tables([KVS]) data = [{'key': 'k%s' % i, 'value': 'v%s' % i} for i in range(20)] KVS.insert_many(data).execute() self.assertEqual(KVS.select().count(), 20) self.assertEqual(KVS['k0'], 'v0') self.assertEqual(KVS['k19'], 'v19') keys = [row.key for row in KVS['k4.1':'k8.9']] self.assertEqual(keys, ['k5', 'k6', 'k7', 'k8']) keys = sorted([row.key for row in KVS[:'k13']]) self.assertEqual(keys, ['k0', 'k1', 'k10', 'k11', 'k12', 'k13']) keys = [row.key for row in KVS['k5':]] self.assertEqual(keys, ['k5', 'k6', 'k7', 'k8', 'k9']) data = [tuple(row) for row in KVS[KVS.key > 'k5']] self.assertEqual(data, [ ('k6', 'v6'), ('k7', 'v7'), ('k8', 'v8'), ('k9', 'v9')]) del KVS[KVS.key.between('k10', 'k18')] self.assertEqual(sorted([row.key for row in KVS[:'k2']]), ['k0', 'k1', 'k19', 'k2']) del KVS['k3.1':'k8.1'] self.assertEqual([row.key for row in KVS[:]], ['k0', 'k1', 'k19', 'k2', 'k3', 'k9']) del KVS['k1'] self.assertRaises(KVS.DoesNotExist, lambda: KVS['k1']) def test_index_uint(self): database.create_tables([KVI]) data = [{'key': i, 'value': 'v%s' % i} for i in range(100)] with database.transaction(): KVI.insert_many(data).execute() keys = [row.key for row in KVI[27:33]] self.assertEqual(keys, [27, 28, 29, 30, 31, 32, 33]) keys = sorted([row.key for row in KVI[KVI.key < 4]]) self.assertEqual(keys, [0, 1, 2, 3]) keys = [row.key for row in KVI[KVI.key > 95]] self.assertEqual(keys, [96, 97, 98, 99]) @skip_unless(json_installed(), 'requires json1 sqlite extension') class TestJsonContains(ModelTestCase): database = SqliteExtDatabase(':memory:', json_contains=True) requires = [KeyData] test_data = ( ('a', {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}), ('b', {'k2': 'v2', 'k3': 'v3', 'k4': 'v4'}), ('c', {'k3': 'v3', 'x1': {'y1': 'z1', 'y2': 'z2'}}), ('d', {'k4': 'v4', 'x1': {'y2': 'z2', 'y3': [0, 1, 2]}}), ('e', ['foo', 'bar', [0, 1, 2]]), ) def setUp(self): super(TestJsonContains, self).setUp() with self.database.atomic(): for key, data in self.test_data: KeyData.create(key=key, data=data) def assertContains(self, obj, expected): contains = fn.json_contains(KeyData.data, json.dumps(obj)) query = (KeyData .select(KeyData.key) .where(contains) .order_by(KeyData.key) .namedtuples()) self.assertEqual([m.key for m in query], expected) def test_json_contains(self): # Simple checks for key. self.assertContains('k1', ['a']) self.assertContains('k2', ['a', 'b']) self.assertContains('k3', ['a', 'b', 'c']) self.assertContains('kx', []) self.assertContains('y1', []) # Partial dictionary. self.assertContains({'k1': 'v1'}, ['a']) self.assertContains({'k2': 'v2'}, ['a', 'b']) self.assertContains({'k3': 'v3'}, ['a', 'b', 'c']) self.assertContains({'k2': 'v2', 'k3': 'v3'}, ['a', 'b']) self.assertContains({'k2': 'vx'}, []) self.assertContains({'k2': 'v2', 'k3': 'vx'}, []) self.assertContains({'y1': 'z1'}, []) # List, interpreted as list of keys. self.assertContains(['k1', 'k2'], ['a']) self.assertContains(['k4'], ['b', 'd']) self.assertContains(['kx'], []) self.assertContains(['y1'], []) # List, interpreted as ordered list of items. self.assertContains(['foo'], ['e']) self.assertContains(['foo', 'bar'], ['e']) self.assertContains(['bar', 'foo'], []) # Nested dictionaries. 
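        # (Containment recurses into nested objects: a candidate dict
        # matches only if each of its keys is present and each value is
        # itself contained in the stored value, as the assertions below
        # demonstrate.)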
self.assertContains({'x1': 'y1'}, ['c']) self.assertContains({'x1': ['y1']}, ['c']) self.assertContains({'x1': {'y1': 'z1'}}, ['c']) self.assertContains({'x1': {'y2': 'z2'}}, ['c', 'd']) self.assertContains({'x1': {'y2': 'z2'}, 'k4': 'v4'}, ['d']) self.assertContains({'x1': {'yx': 'z1'}}, []) self.assertContains({'x1': {'y1': 'z1', 'y3': 'z3'}}, []) self.assertContains({'x1': {'y2': 'zx'}}, []) self.assertContains({'x1': {'k4': 'v4'}}, []) # Mixing dictionaries and lists. self.assertContains({'x1': {'y2': 'z2', 'y3': [0]}}, ['d']) self.assertContains({'x1': {'y2': 'z2', 'y3': [0, 1, 2]}}, ['d']) self.assertContains({'x1': {'y2': 'z2', 'y3': [0, 1, 2, 4]}}, []) self.assertContains({'x1': {'y2': 'z2', 'y3': [0, 2]}}, []) class CalendarMonth(TestModel): name = TextField() value = IntegerField() class CalendarDay(TestModel): month = ForeignKeyField(CalendarMonth, backref='days') value = IntegerField() class TestIntWhereChain(ModelTestCase): database = database requires = [CalendarMonth, CalendarDay] def test_int_where_chain(self): with self.database.atomic(): jan = CalendarMonth.create(name='january', value=1) feb = CalendarMonth.create(name='february', value=2) CalendarDay.insert_many([{'month': jan, 'value': i + 1} for i in range(31)]).execute() CalendarDay.insert_many([{'month': feb, 'value': i + 1} for i in range(28)]).execute() def assertValues(query, expected): self.assertEqual(sorted([d.value for d in query]), list(expected)) q = CalendarDay.select().join(CalendarMonth) jq = q.where(CalendarMonth.name == 'january') jq1 = jq.where(CalendarDay.value >= 25) assertValues(jq1, range(25, 32)) jq2 = jq1.where(CalendarDay.value < 30) assertValues(jq2, range(25, 30)) fq = q.where(CalendarMonth.name == 'february') fq1 = fq.where(CalendarDay.value >= 25) assertValues(fq1, range(25, 29)) fq2 = fq1.where(CalendarDay.value < 30) assertValues(fq2, range(25, 29)) class Datum(TestModel): a = BareField() b = BareField(collation='BINARY') c = BareField(collation='RTRIM') d = BareField(collation='NOCASE') class TestCollatedFieldDefinitions(ModelTestCase): database = get_in_memory_db() requires = [Datum] def test_collated_fields(self): rows = ( (1, 'abc', 'abc', 'abc ', 'abc'), (2, 'abc', 'abc', 'abc', 'ABC'), (3, 'abc', 'abc', 'abc ', 'Abc'), (4, 'abc', 'abc ', 'ABC', 'abc')) for pk, a, b, c, d in rows: Datum.create(id=pk, a=a, b=b, c=c, d=d) def assertC(query, expected): self.assertEqual([r.id for r in query], expected) base = Datum.select().order_by(Datum.id) # Text comparison a=b is performed using binary collating sequence. assertC(base.where(Datum.a == Datum.b), [1, 2, 3]) # Text comparison a=b is performed using the RTRIM collating sequence. assertC(base.where(Datum.a == Datum.b.collate('RTRIM')), [1, 2, 3, 4]) # Text comparison d=a is performed using the NOCASE collating sequence. assertC(base.where(Datum.d == Datum.a), [1, 2, 3, 4]) # Text comparison a=d is performed using the BINARY collating sequence. assertC(base.where(Datum.a == Datum.d), [1, 4]) # Text comparison 'abc'=c is performed using RTRIM collating sequence. assertC(base.where('abc' == Datum.c), [1, 2, 3]) # Text comparison c='abc' is performed using RTRIM collating sequence. assertC(base.where(Datum.c == 'abc'), [1, 2, 3]) # Grouping is performed using the NOCASE collating sequence (Values # 'abc', 'ABC', and 'Abc' are placed in the same group). query = Datum.select(fn.COUNT(Datum.id)).group_by(Datum.d) self.assertEqual(query.scalar(), 4) # Grouping is performed using the BINARY collating sequence. 
        # 'abc' and 'ABC' and 'Abc' form different groups.
        query = Datum.select(fn.COUNT(Datum.id)).group_by(Datum.d.concat(''))
        self.assertEqual([r[0] for r in query.tuples()], [1, 1, 2])

        # Sorting of column c is performed using the RTRIM collating sequence.
        assertC(base.order_by(Datum.c, Datum.id), [4, 1, 2, 3])

        # Sorting of (c||'') is performed using the BINARY collating sequence.
        assertC(base.order_by(Datum.c.concat(''), Datum.id), [4, 2, 3, 1])

        # Sorting of column c is performed using the NOCASE collating sequence.
        assertC(base.order_by(Datum.c.collate('NOCASE'), Datum.id),
                [2, 4, 3, 1])


class TestReadOnly(ModelTestCase):
    database = db_loader('sqlite3')

    @skip_if(sys.version_info < (3, 4, 0), 'requires python >= 3.4.0')
    @requires_models(User)
    def test_read_only(self):
        User.create(username='foo')
        db_filename = self.database.database
        db = SqliteDatabase('file:%s?mode=ro' % db_filename, uri=True)

        cursor = db.execute_sql('select username from users')
        self.assertEqual(cursor.fetchone(), ('foo',))
        self.assertRaises(OperationalError, db.execute_sql,
                          'insert into users (username) values (?)',
                          ('huey',))

        # We cannot create a database if in read-only mode.
        db = SqliteDatabase('file:xx_not_exists.db?mode=ro', uri=True)
        self.assertRaises(OperationalError, db.connect)


class TDecModel(TestModel):
    value = TDecimalField(max_digits=24, decimal_places=16, auto_round=True)


class TestTDecimalField(ModelTestCase):
    database = get_in_memory_db()
    requires = [TDecModel]

    def test_tdecimal_field(self):
        value = D('12345678.0123456789012345')
        value_ov = D('12345678.012345678901234567890123456789')

        td1 = TDecModel.create(value=value)
        td2 = TDecModel.create(value=value_ov)

        td1_db = TDecModel.get(TDecModel.id == td1.id)
        self.assertEqual(td1_db.value, value)

        td2_db = TDecModel.get(TDecModel.id == td2.id)
        self.assertEqual(td2_db.value, D('12345678.0123456789012346'))


class KVR(TestModel):
    key = TextField(primary_key=True)
    value = IntegerField()


@skip_unless(database.server_version >= (3, 35, 0),
             'sqlite returning clause required')
class TestSqliteReturning(ModelTestCase):
    database = database
    requires = [Person, User, KVR]

    def test_sqlite_returning(self):
        iq = (User
              .insert_many([{'username': 'u%s' % i} for i in range(3)])
              .returning(User.id))
        self.assertEqual([r.id for r in iq.execute()], [1, 2, 3])

        res = (User
               .insert_many([{'username': 'u%s' % i} for i in (4, 5)])
               .returning(User)
               .execute())
        self.assertEqual([(r.id, r.username) for r in res],
                         [(4, 'u4'), (5, 'u5')])

        # Simple insert returns the ID.
res = User.insert(username='u6').execute() self.assertEqual(res, 6) iq = (User .insert_many([{'username': 'u%s' % i} for i in (7, 8, 9)]) .returning(User) .namedtuples()) curs = iq.execute() self.assertEqual([u.id for u in curs], [7, 8, 9]) def test_sqlite_on_conflict_returning(self): p = Person.create(first='f1', last='l1', dob='1990-01-01') self.assertEqual(p.id, 1) iq = Person.insert_many([ {'first': 'f%s' % i, 'last': 'l%s' %i, 'dob': '1990-01-%02d' % i} for i in range(1, 3)]) iq = iq.on_conflict(conflict_target=[Person.first, Person.last], update={'dob': '2000-01-01'}) p1, p2 = iq.returning(Person).execute() self.assertEqual((p1.first, p1.last), ('f1', 'l1')) self.assertEqual(p1.dob, datetime.date(2000, 1, 1)) self.assertEqual((p2.first, p2.last), ('f2', 'l2')) self.assertEqual(p2.dob, datetime.date(1990, 1, 2)) p3 = Person.insert(first='f3', last='l3', dob='1990-01-03').execute() self.assertEqual(p3, 3) def test_text_pk(self): res = KVR.create(key='k1', value=1) self.assertEqual((res.key, res.value), ('k1', 1)) res = KVR.insert(key='k2', value=2).execute() self.assertEqual(res, 2) #self.assertEqual(res, 'k2') # insert_many() returns the primary-key as usual. iq = (KVR .insert_many([{'key': 'k%s' % i, 'value': i} for i in (3, 4)]) .returning(KVR.key)) self.assertEqual([r.key for r in iq.execute()], ['k3', 'k4']) iq = KVR.insert_many([{'key': 'k%s' % i, 'value': i} for i in (4, 5)]) iq = iq.on_conflict(conflict_target=[KVR.key], update={KVR.value: KVR.value + 10}) res = iq.returning(KVR).execute() self.assertEqual([(r.key, r.value) for r in res], [('k4', 14), ('k5', 5)]) res = (KVR .update(value=KVR.value + 10) .where(KVR.key.in_(['k1', 'k3', 'kx'])) .returning(KVR) .execute()) self.assertEqual([(r.key, r.value) for r in res], [('k1', 11), ('k3', 13)]) res = (KVR.delete() .where(KVR.key.not_in(['k2', 'k3', 'k4'])) .returning(KVR) .execute()) self.assertEqual([(r.key, r.value) for r in res], [('k1', 11), ('k5', 5)]) @skip_unless(database.server_version >= (3, 35, 0), 'sqlite returning clause required') class TestSqliteReturningConfig(ModelTestCase): database = SqliteExtDatabase(':memory:', returning_clause=True) requires = [KVR, User] def test_pk_set_properly(self): user = User.create(username='u1') self.assertEqual(user.id, 1) kvr = KVR.create(key='k1', value=1) self.assertEqual(kvr.key, 'k1') def test_insert_behavior(self): iq = User.insert({'username': 'u1'}) self.assertEqual(iq.execute(), 1) iq = User.insert_many([{'username': 'u2'}, {'username': 'u3'}]) self.assertEqual(list(iq.execute()), [(2,), (3,)]) # NOTE: sqlite3_changes() does not return the inserted rowcount until # the statement has been consumed. The fact that it returned 2 is a # side-effect of the statement cache and our having consumed the query # in the previous test assertion. So this test is invalid. #iq = User.insert_many([('u4',), ('u5',)]).as_rowcount() #self.assertEqual(iq.execute(), 2) iq = KVR.insert({'key': 'k1', 'value': 1}) self.assertEqual(iq.execute(), 'k1') iq = KVR.insert_many([('k2', 2), ('k3', 3)]) self.assertEqual(list(iq.execute()), [('k2',), ('k3',)]) # See note above. 
#iq = KVR.insert_many([('k4', 4), ('k5', 5)]).as_rowcount() #self.assertEqual(iq.execute(), 2) def test_insert_on_conflict(self): KVR.create(key='k1', value=1) iq = (KVR.insert({'key': 'k1', 'value': 100}) .on_conflict(conflict_target=[KVR.key], update={KVR.value: KVR.value + 10})) self.assertEqual(iq.execute(), 'k1') self.assertEqual(KVR.get(KVR.key == 'k1').value, 11) KVR.create(key='k2', value=2) iq = (KVR.insert_many([ {'key': 'k1', 'value': 100}, {'key': 'k2', 'value': 200}, {'key': 'k3', 'value': 300}]) .on_conflict(conflict_target=[KVR.key], update={KVR.value: KVR.value + 10})) self.assertEqual(list(iq.execute()), [('k1',), ('k2',), ('k3',)]) self.assertEqual(sorted(KVR.select().tuples()), [('k1', 21), ('k2', 12), ('k3', 300)]) def test_update_delete_rowcounts(self): users = [User.create(username=u) for u in 'abc'] kvrs = [KVR.create(key='k%s' % i, value=i) for i in (1, 2, 3)] uq = User.update(username='c2').where(User.username == 'c') self.assertEqual(uq.execute(), 1) uq = User.update(username=User.username.concat('x')) self.assertEqual(uq.execute(), 3) dq = User.delete().where(User.username.in_(['bx', 'c2x'])) self.assertEqual(dq.execute(), 2) uq = KVR.update(value=KVR.value + 10).where(KVR.key == 'k3') self.assertEqual(uq.execute(), 1) uq = KVR.update(value=KVR.value + 100) self.assertEqual(uq.execute(), 3) dq = KVR.delete().where(KVR.value.in_([102, 113])) self.assertEqual(dq.execute(), 2) def test_update_delete_explicit_returning(self): users = [User.create(username=u) for u in 'abc'] uq = (User.update(username='c2') .where(User.username == 'c') .returning(User.id, User.username)) for _ in range(2): self.assertEqual([u.username for u in uq.execute()], ['c2']) self.assertEqual(list(uq.clone().execute()), []) uq = (User.update(username=User.username.concat('x')) .where(~User.username.endswith('x')) # For idempotency. .returning(User.id, User.username) .tuples()) for _ in range(2): self.assertEqual(sorted(uq.execute()), [(1, 'ax'), (2, 'bx'), (3, 'c2x')]) self.assertEqual(list(uq.clone().execute()), []) dq = User.delete().where(User.username == 'c2x').returning(User) for _ in range(2): # The result is cached to support multiple iterations. self.assertEqual([u.username for u in dq.execute()], ['c2x']) self.assertEqual(list(dq.clone().execute()), []) dq = User.delete().returning(User).tuples() for _ in range(2): # The result is cached to support multiple iterations. self.assertEqual(sorted(dq.execute()), [(1, 'ax'), (2, 'bx')]) self.assertEqual(list(dq.clone().execute()), []) def test_bulk_create_update(self): users = [User(username='u%s' % i) for i in range(5)] with self.assertQueryCount(1): User.bulk_create(users) self.assertEqual(User.select().count(), 5) self.assertEqual(sorted(User.select().tuples()), [ (1, 'u0'), (2, 'u1'), (3, 'u2'), (4, 'u3'), (5, 'u4')]) users[0].username = 'u0x' users[2].username = 'u2x' users[4].username = 'u4x' with self.assertQueryCount(1): n = User.bulk_update(users, ['username']) self.assertEqual(n, 5) self.assertEqual(sorted(User.select().tuples()), [ (1, 'u0x'), (2, 'u1'), (3, 'u2x'), (4, 'u3'), (5, 'u4x')]) @requires_models(User, Tweet) def test_fk_set_correctly(self): # Ensure FK can be set lazily. 
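        # (With returning_clause=True, save() populates the primary key from
        # the RETURNING result, so the tweet's user_id is resolved correctly
        # even though the foreign key was assigned before the user had an id.)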
user = User(username='u1') tweet = Tweet(user=user, content='t1') user.save() tweet.save() @skip_unless(database.server_version >= (3, 20, 0), 'sqlite deterministic requires >= 3.20') @skip_unless(sys.version_info >= (3, 8, 0), 'sqlite deterministic requires Python >= 3.8') class TestDeterministicFunction(ModelTestCase): database = get_in_memory_db() def test_deterministic(self): db = self.database @db.func(deterministic=True) def pylower(s): if s is not None: return s.lower() class Reg(db.Model): key = TextField() class Meta: indexes = [ SQL('create unique index "reg_pylower_key" ' 'on "reg" (pylower("key"))')] db.create_tables([Reg]) Reg.create(key='k1') with self.assertRaises(IntegrityError): with db.atomic(): Reg.create(key='K1') peewee-3.17.7/tests/sqlite_changelog.py000066400000000000000000000137131470346076600201310ustar00rootroot00000000000000import datetime from peewee import * from playhouse.sqlite_changelog import ChangeLog from playhouse.sqlite_ext import JSONField from playhouse.sqlite_ext import SqliteExtDatabase from .base import ModelTestCase from .base import TestModel from .base import requires_models from .base import skip_unless from .sqlite_helpers import json_installed database = SqliteExtDatabase(':memory:', pragmas={'foreign_keys': 1}) class Person(TestModel): name = TextField() dob = DateField() class Note(TestModel): person = ForeignKeyField(Person, on_delete='CASCADE') content = TextField() timestamp = TimestampField() status = IntegerField(default=0) class CT1(TestModel): f1 = TextField() f2 = IntegerField(null=True) f3 = FloatField() fi = IntegerField() class CT2(TestModel): data = JSONField() # Diff of json? changelog = ChangeLog(database) CL = changelog.model @skip_unless(json_installed(), 'requires sqlite json1') class TestChangeLog(ModelTestCase): database = database requires = [Person, Note] def setUp(self): super(TestChangeLog, self).setUp() changelog.install(Person) changelog.install(Note, skip_fields=['timestamp']) self.last_index = 0 def assertChanges(self, changes, last_index=None): last_index = last_index or self.last_index query = (CL .select(CL.action, CL.table, CL.changes) .order_by(CL.id) .offset(last_index)) accum = list(query.tuples()) self.last_index += len(accum) self.assertEqual(accum, changes) def test_changelog(self): huey = Person.create(name='huey', dob=datetime.date(2010, 5, 1)) zaizee = Person.create(name='zaizee', dob=datetime.date(2013, 1, 1)) self.assertChanges([ ('INSERT', 'person', {'name': [None, 'huey'], 'dob': [None, '2010-05-01']}), ('INSERT', 'person', {'name': [None, 'zaizee'], 'dob': [None, '2013-01-01']})]) zaizee.dob = datetime.date(2013, 2, 2) zaizee.save() self.assertChanges([ ('UPDATE', 'person', {'dob': ['2013-01-01', '2013-02-02']})]) zaizee.name = 'zaizee-x' zaizee.dob = datetime.date(2013, 3, 3) zaizee.save() huey.save() # No changes. 
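        # (A save() that modifies no columns still fires the UPDATE trigger,
        # so the changelog records an UPDATE row with an empty change-set,
        # as asserted below.)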
self.assertChanges([ ('UPDATE', 'person', {'name': ['zaizee', 'zaizee-x'], 'dob': ['2013-02-02', '2013-03-03']}), ('UPDATE', 'person', {})]) zaizee.delete_instance() self.assertChanges([ ('DELETE', 'person', {'name': ['zaizee-x', None], 'dob': ['2013-03-03', None]})]) nh1 = Note.create(person=huey, content='huey1', status=1) nh2 = Note.create(person=huey, content='huey2', status=2) self.assertChanges([ ('INSERT', 'note', {'person_id': [None, huey.id], 'content': [None, 'huey1'], 'status': [None, 1]}), ('INSERT', 'note', {'person_id': [None, huey.id], 'content': [None, 'huey2'], 'status': [None, 2]})]) nh1.content = 'huey1-x' nh1.status = 0 nh1.save() mickey = Person.create(name='mickey', dob=datetime.date(2009, 8, 1)) nh2.person = mickey nh2.save() self.assertChanges([ ('UPDATE', 'note', {'content': ['huey1', 'huey1-x'], 'status': [1, 0]}), ('INSERT', 'person', {'name': [None, 'mickey'], 'dob': [None, '2009-08-01']}), ('UPDATE', 'note', {'person_id': [huey.id, mickey.id]})]) mickey.delete_instance() self.assertChanges([ ('DELETE', 'note', {'person_id': [mickey.id, None], 'content': ['huey2', None], 'status': [2, None]}), ('DELETE', 'person', {'name': ['mickey', None], 'dob': ['2009-08-01', None]})]) @requires_models(CT1) def test_changelog_details(self): changelog.install(CT1, skip_fields=['fi'], insert=False, delete=False) c1 = CT1.create(f1='v1', f2=1, f3=1.5, fi=0) self.assertChanges([]) CT1.update(f1='v1-x', f2=2, f3=2.5, fi=1).execute() self.assertChanges([ ('UPDATE', 'ct1', { 'f1': ['v1', 'v1-x'], 'f2': [1, 2], 'f3': [1.5, 2.5]})]) c1.f2 = None c1.save() # Overwrites previously-changed fields. self.assertChanges([('UPDATE', 'ct1', { 'f1': ['v1-x', 'v1'], 'f2': [2, None], 'f3': [2.5, 1.5]})]) c1.delete_instance() self.assertChanges([]) @requires_models(CT2) def test_changelog_jsonfield(self): changelog.install(CT2) ca = CT2.create(data={'k1': 'v1'}) cb = CT2.create(data=['i0', 'i1', 'i2']) cc = CT2.create(data='hello') self.assertChanges([ ('INSERT', 'ct2', {'data': [None, {'k1': 'v1'}]}), ('INSERT', 'ct2', {'data': [None, ['i0', 'i1', 'i2']]}), ('INSERT', 'ct2', {'data': [None, 'hello']})]) ca.data['k1'] = 'v1-x' cb.data.append('i3') cc.data = 'world' ca.save() cb.save() cc.save() self.assertChanges([ ('UPDATE', 'ct2', {'data': [{'k1': 'v1'}, {'k1': 'v1-x'}]}), ('UPDATE', 'ct2', {'data': [['i0', 'i1', 'i2'], ['i0', 'i1', 'i2', 'i3']]}), ('UPDATE', 'ct2', {'data': ['hello', 'world']})]) cc.data = 13.37 cc.save() self.assertChanges([('UPDATE', 'ct2', {'data': ['world', 13.37]})]) ca.delete_instance() self.assertChanges([ ('DELETE', 'ct2', {'data': [{'k1': 'v1-x'}, None]})]) peewee-3.17.7/tests/sqlite_helpers.py000066400000000000000000000015771470346076600176510ustar00rootroot00000000000000from peewee import sqlite3 def json_installed(): if sqlite3.sqlite_version_info < (3, 9, 0): return False tmp_db = sqlite3.connect(':memory:') try: tmp_db.execute('select json(?)', (1337,)) except: return False finally: tmp_db.close() return True def json_patch_installed(): return sqlite3.sqlite_version_info >= (3, 18, 0) def json_text_installed(): return sqlite3.sqlite_version_info >= (3, 38, 0) def jsonb_installed(): return sqlite3.sqlite_version_info >= (3, 45, 0) def compile_option(p): if not hasattr(compile_option, '_pragma_cache'): conn = sqlite3.connect(':memory:') curs = conn.execute('pragma compile_options') opts = [opt.lower().split('=')[0].strip() for opt, in curs.fetchall()] compile_option._pragma_cache = set(opts) return p in compile_option._pragma_cache 
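# A minimal usage sketch (illustrative only; the test-case and option names
# here are hypothetical): these helpers are intended as guards for version-
# and build-dependent SQLite features, e.g.:
#
#   from .sqlite_helpers import compile_option, json_installed
#
#   @skip_unless(json_installed(), 'requires sqlite json1 extension')
#   class MyJsonTests(ModelTestCase): ...
#
#   if compile_option('enable_fts5'):  # Option names are lower-cased.
#       ...  # Exercise FTS5-specific behavior.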
peewee-3.17.7/tests/sqlite_udf.py000066400000000000000000000376601470346076600167670ustar00rootroot00000000000000import datetime import json import random from peewee import * from peewee import sqlite3 from playhouse.sqlite_ext import SqliteExtDatabase from playhouse.sqlite_udf import register_all from .base import IS_SQLITE_9 from .base import ModelTestCase from .base import TestModel from .base import db_loader from .base import skip_unless try: from playhouse import _sqlite_ext as cython_ext except ImportError: cython_ext = None try: from playhouse import _sqlite_udf as cython_udf except ImportError: cython_udf = None def requires_cython(method): return skip_unless(cython_udf is not None, 'requires sqlite udf c extension')(method) database = db_loader('sqlite') register_all(database) class User(TestModel): username = TextField() class APIResponse(TestModel): url = TextField(default='') data = TextField(default='') timestamp = DateTimeField(default=datetime.datetime.now) class Generic(TestModel): value = IntegerField(default=0) x = Field(null=True) MODELS = [User, APIResponse, Generic] class FixedOffset(datetime.tzinfo): def __init__(self, offset, name, dstoffset=42): if isinstance(offset, int): offset = datetime.timedelta(minutes=offset) if isinstance(dstoffset, int): dstoffset = datetime.timedelta(minutes=dstoffset) self.__offset = offset self.__name = name self.__dstoffset = dstoffset def utcoffset(self, dt): return self.__offset def tzname(self, dt): return self.__name def dst(self, dt): return self.__dstoffset class BaseTestUDF(ModelTestCase): database = database def sql1(self, sql, *params): cursor = self.database.execute_sql(sql, params) return cursor.fetchone()[0] class TestAggregates(BaseTestUDF): requires = [Generic] def _store_values(self, *values): with self.database.atomic(): for value in values: Generic.create(x=value) def mts(self, seconds): return (datetime.datetime(2015, 1, 1) + datetime.timedelta(seconds=seconds)) def test_min_avg_tdiff(self): self.assertEqual(self.sql1('select mintdiff(x) from generic;'), None) self.assertEqual(self.sql1('select avgtdiff(x) from generic;'), None) self._store_values(self.mts(10)) self.assertEqual(self.sql1('select mintdiff(x) from generic;'), None) self.assertEqual(self.sql1('select avgtdiff(x) from generic;'), 0) self._store_values(self.mts(15)) self.assertEqual(self.sql1('select mintdiff(x) from generic;'), 5) self.assertEqual(self.sql1('select avgtdiff(x) from generic;'), 5) self._store_values( self.mts(22), self.mts(52), self.mts(18), self.mts(41), self.mts(2), self.mts(33)) self.assertEqual(self.sql1('select mintdiff(x) from generic;'), 3) self.assertEqual( round(self.sql1('select avgtdiff(x) from generic;'), 1), 7.1) self._store_values(self.mts(22)) self.assertEqual(self.sql1('select mintdiff(x) from generic;'), 0) def test_duration(self): self.assertEqual(self.sql1('select duration(x) from generic;'), None) self._store_values(self.mts(10)) self.assertEqual(self.sql1('select duration(x) from generic;'), 0) self._store_values(self.mts(15)) self.assertEqual(self.sql1('select duration(x) from generic;'), 5) self._store_values( self.mts(22), self.mts(11), self.mts(52), self.mts(18), self.mts(41), self.mts(2), self.mts(33)) self.assertEqual(self.sql1('select duration(x) from generic;'), 50) @requires_cython def test_median(self): self.assertEqual(self.sql1('select median(x) from generic;'), None) self._store_values(1) self.assertEqual(self.sql1('select median(x) from generic;'), 1) self._store_values(3, 6, 6, 6, 7, 7, 7, 7, 12, 12, 
17) self.assertEqual(self.sql1('select median(x) from generic;'), 7) Generic.delete().execute() self._store_values(9, 2, 2, 3, 3, 1) self.assertEqual(self.sql1('select median(x) from generic;'), 3) Generic.delete().execute() self._store_values(4, 4, 1, 8, 2, 2, 5, 8, 1) self.assertEqual(self.sql1('select median(x) from generic;'), 4) def test_mode(self): self.assertEqual(self.sql1('select mode(x) from generic;'), None) self._store_values(1) self.assertEqual(self.sql1('select mode(x) from generic;'), 1) self._store_values(4, 5, 6, 1, 3, 4, 1, 4, 9, 3, 4) self.assertEqual(self.sql1('select mode(x) from generic;'), 4) def test_ranges(self): self.assertEqual(self.sql1('select minrange(x) from generic'), None) self.assertEqual(self.sql1('select avgrange(x) from generic'), None) self.assertEqual(self.sql1('select range(x) from generic'), None) self._store_values(1) self.assertEqual(self.sql1('select minrange(x) from generic'), 0) self.assertEqual(self.sql1('select avgrange(x) from generic'), 0) self.assertEqual(self.sql1('select range(x) from generic'), 0) self._store_values(4, 8, 13, 19) self.assertEqual(self.sql1('select minrange(x) from generic'), 3) self.assertEqual(self.sql1('select avgrange(x) from generic'), 4.5) self.assertEqual(self.sql1('select range(x) from generic'), 18) Generic.delete().execute() self._store_values(19, 4, 5, 20, 5, 8) self.assertEqual(self.sql1('select range(x) from generic'), 16) class TestScalarFunctions(BaseTestUDF): requires = MODELS def test_if_then_else(self): for i in range(4): User.create(username='u%d' % (i + 1)) with self.assertQueryCount(1): query = (User .select( User.username, fn.if_then_else( User.username << ['u1', 'u2'], 'one or two', 'other').alias('name_type')) .order_by(User.id)) self.assertEqual([row.name_type for row in query], [ 'one or two', 'one or two', 'other', 'other']) def test_strip_tz(self): dt = datetime.datetime(2015, 1, 1, 12, 0) # 13 hours, 37 minutes. dt_tz = dt.replace(tzinfo=FixedOffset(13 * 60 + 37, 'US/LFK')) api_dt = APIResponse.create(timestamp=dt) api_dt_tz = APIResponse.create(timestamp=dt_tz) # Re-fetch from the database. api_dt_db = APIResponse.get(APIResponse.id == api_dt.id) api_dt_tz_db = APIResponse.get(APIResponse.id == api_dt_tz.id) # Assert the timezone is present, first of all, and that they were # stored in the database. 
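        # (strip_tz drops the tzinfo without converting the value, so both
        # rows come back as the same naive wall-clock time below.)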
        self.assertEqual(api_dt_db.timestamp, dt)

        query = (APIResponse
                 .select(
                     APIResponse.id,
                     fn.strip_tz(APIResponse.timestamp).alias('ts'))
                 .order_by(APIResponse.id))
        ts, ts_tz = query[:]
        self.assertEqual(ts.ts, dt)
        self.assertEqual(ts_tz.ts, dt)

    def test_human_delta(self):
        values = [0, 1, 30, 300, 3600, 7530, 300000]
        for value in values:
            Generic.create(value=value)

        delta = fn.human_delta(Generic.value).coerce(False)
        query = (Generic
                 .select(Generic.value, delta.alias('delta'))
                 .order_by(Generic.value))
        results = query.tuples()[:]
        self.assertEqual(results, [
            (0, '0 seconds'),
            (1, '1 second'),
            (30, '30 seconds'),
            (300, '5 minutes'),
            (3600, '1 hour'),
            (7530, '2 hours, 5 minutes, 30 seconds'),
            (300000, '3 days, 11 hours, 20 minutes'),
        ])

    def test_file_ext(self):
        data = (
            ('test.py', '.py'),
            ('test.x.py', '.py'),
            ('test', ''),
            ('test.', '.'),
            ('/foo.bar/test/nug.py', '.py'),
            ('/foo.bar/test/nug', ''),
        )
        for filename, ext in data:
            res = self.sql1('SELECT file_ext(?)', filename)
            self.assertEqual(res, ext)

    def test_gz(self):
        random.seed(1)
        A = ord('A')
        z = ord('z')

        with self.database.atomic():
            def randstr(l):
                return ''.join([
                    chr(random.randint(A, z)) for _ in range(l)])

            data = (
                'a',
                'a' * 1024,
                randstr(1024),
                randstr(4096),
                randstr(1024 * 64))
            for s in data:
                compressed = self.sql1('select gzip(?)', s)
                decompressed = self.sql1('select gunzip(?)', compressed)
                self.assertEqual(decompressed.decode('utf-8'), s)

    def test_hostname(self):
        r = json.dumps({'success': True})
        data = (
            ('https://charlesleifer.com/api/', r),
            ('https://a.charlesleifer.com/api/foo', r),
            ('www.nugget.com', r),
            ('nugz.com', r),
            ('http://a.b.c.peewee/foo', r),
            ('https://charlesleifer.com/xx', r),
            ('https://charlesleifer.com/xx', r),
        )
        with self.database.atomic():
            for url, response in data:
                APIResponse.create(url=url, data=response)

        with self.assertQueryCount(1):
            query = (APIResponse
                     .select(
                         fn.hostname(APIResponse.url).alias('host'),
                         fn.COUNT(APIResponse.id).alias('count'))
                     .group_by(fn.hostname(APIResponse.url))
                     .order_by(
                         fn.COUNT(APIResponse.id).desc(),
                         fn.hostname(APIResponse.url)))
            results = query.tuples()[:]

        self.assertEqual(results, [
            ('charlesleifer.com', 3),
            ('', 2),
            ('a.b.c.peewee', 1),
            ('a.charlesleifer.com', 1)])

    @skip_unless(IS_SQLITE_9, 'requires sqlite >= 3.9')
    def test_toggle(self):
        self.assertEqual(self.sql1('select toggle(?)', 'foo'), 1)
        self.assertEqual(self.sql1('select toggle(?)', 'bar'), 1)
        self.assertEqual(self.sql1('select toggle(?)', 'foo'), 0)
        self.assertEqual(self.sql1('select toggle(?)', 'foo'), 1)
        self.assertEqual(self.sql1('select toggle(?)', 'bar'), 0)

        self.assertEqual(self.sql1('select clear_toggles()'), None)
        self.assertEqual(self.sql1('select toggle(?)', 'foo'), 1)

    def test_setting(self):
        self.assertEqual(self.sql1('select setting(?, ?)', 'k1', 'v1'), 'v1')
        self.assertEqual(self.sql1('select setting(?, ?)', 'k2', 'v2'), 'v2')
        self.assertEqual(self.sql1('select setting(?)', 'k1'), 'v1')

        self.assertEqual(self.sql1('select setting(?, ?)', 'k2', 'v2-x'),
                         'v2-x')
        self.assertEqual(self.sql1('select setting(?)', 'k2'), 'v2-x')
        self.assertEqual(self.sql1('select setting(?)', 'kx'), None)

        self.assertEqual(self.sql1('select clear_settings()'), None)
        self.assertEqual(self.sql1('select setting(?)', 'k1'), None)

    def test_random_range(self):
        vals = ((1, 10), (1, 100), (0, 2), (1, 5, 2))
        results = []
        for params in vals:
            random.seed(1)
            results.append(random.randrange(*params))

        for params, expected in zip(vals, results):
            random.seed(1)
            if len(params) == 3:
                pstr = '?, ?, ?'
            else:
                pstr = '?, ?'
self.assertEqual( self.sql1('select randomrange(%s)' % pstr, *params), expected) def test_sqrt(self): self.assertEqual(self.sql1('select sqrt(?)', 4), 2) self.assertEqual(round(self.sql1('select sqrt(?)', 2), 2), 1.41) def test_tonumber(self): data = ( ('123', 123), ('1.23', 1.23), ('1e4', 10000), ('-10', -10), ('x', None), ('13d', None), ) for inp, outp in data: self.assertEqual(self.sql1('select tonumber(?)', inp), outp) @requires_cython def test_leven(self): self.assertEqual( self.sql1('select levenshtein_dist(?, ?)', 'abc', 'ba'), 2) self.assertEqual( self.sql1('select levenshtein_dist(?, ?)', 'abcde', 'eba'), 4) self.assertEqual( self.sql1('select levenshtein_dist(?, ?)', 'abcde', 'abcde'), 0) @requires_cython def test_str_dist(self): self.assertEqual( self.sql1('select str_dist(?, ?)', 'abc', 'ba'), 3) self.assertEqual( self.sql1('select str_dist(?, ?)', 'abcde', 'eba'), 6) self.assertEqual( self.sql1('select str_dist(?, ?)', 'abcde', 'abcde'), 0) def test_substr_count(self): self.assertEqual( self.sql1('select substr_count(?, ?)', 'foo bar baz', 'a'), 2) self.assertEqual( self.sql1('select substr_count(?, ?)', 'foo bor baz', 'o'), 3) self.assertEqual( self.sql1('select substr_count(?, ?)', 'foodooboope', 'oo'), 3) self.assertEqual(self.sql1('select substr_count(?, ?)', 'xx', ''), 0) self.assertEqual(self.sql1('select substr_count(?, ?)', '', ''), 0) def test_strip_chars(self): self.assertEqual( self.sql1('select strip_chars(?, ?)', ' hey foo ', ' '), 'hey foo') @skip_unless(cython_ext is not None, 'requires sqlite c extension') @skip_unless(sqlite3.sqlite_version_info >= (3, 9), 'requires sqlite >= 3.9') class TestVirtualTableFunctions(ModelTestCase): database = database requires = MODELS def sqln(self, sql, *p): cursor = self.database.execute_sql(sql, p) return cursor.fetchall() def test_regex_search(self): usernames = [ 'charlie', 'hu3y17', 'zaizee2012', '1234.56789', 'hurr durr'] for username in usernames: User.create(username=username) rgx = '[0-9]+' results = self.sqln( ('SELECT user.username, regex_search.match ' 'FROM user, regex_search(?, user.username) ' 'ORDER BY regex_search.match'), rgx) self.assertEqual([row for row in results], [ ('1234.56789', '1234'), ('hu3y17', '17'), ('zaizee2012', '2012'), ('hu3y17', '3'), ('1234.56789', '56789'), ]) def test_date_series(self): ONE_DAY = 86400 def assertValues(start, stop, step_seconds, expected): results = self.sqln('select * from date_series(?, ?, ?)', start, stop, step_seconds) self.assertEqual(results, expected) assertValues('2015-01-01', '2015-01-05', 86400, [ ('2015-01-01',), ('2015-01-02',), ('2015-01-03',), ('2015-01-04',), ('2015-01-05',), ]) assertValues('2015-01-01', '2015-01-05', 86400 / 2, [ ('2015-01-01 00:00:00',), ('2015-01-01 12:00:00',), ('2015-01-02 00:00:00',), ('2015-01-02 12:00:00',), ('2015-01-03 00:00:00',), ('2015-01-03 12:00:00',), ('2015-01-04 00:00:00',), ('2015-01-04 12:00:00',), ('2015-01-05 00:00:00',), ]) assertValues('14:20:15', '14:24', 30, [ ('14:20:15',), ('14:20:45',), ('14:21:15',), ('14:21:45',), ('14:22:15',), ('14:22:45',), ('14:23:15',), ('14:23:45',), ]) peewee-3.17.7/tests/sqliteq.py000066400000000000000000000163151470346076600163040ustar00rootroot00000000000000import os import sys import threading import time import unittest from functools import partial try: import gevent from gevent.event import Event as GreenEvent except ImportError: gevent = None from peewee import * from playhouse.sqliteq import ResultTimeout from playhouse.sqliteq import SqliteQueueDatabase from playhouse.sqliteq 
import WriterPaused from .base import BaseTestCase from .base import TestModel from .base import db_loader from .base import skip_if get_db = partial(db_loader, 'sqlite', db_class=SqliteQueueDatabase) db = db_loader('sqlite') class User(TestModel): name = TextField(unique=True) class Meta: table_name = 'threaded_db_test_user' class BaseTestQueueDatabase(object): database_config = {} n_rows = 20 n_threads = 20 def setUp(self): super(BaseTestQueueDatabase, self).setUp() User._meta.database = db with db: db.create_tables([User], safe=True) User._meta.database = \ self.database = get_db(**self.database_config) # Sanity check at startup. self.assertEqual(self.database.queue_size(), 0) def tearDown(self): super(BaseTestQueueDatabase, self).tearDown() User._meta.database = db with db: User.drop_table() if not self.database.is_closed(): self.database.close() if not db.is_closed(): db.close() filename = db.database if os.path.exists(filename): os.unlink(filename) def test_query_error(self): self.database.start() curs = self.database.execute_sql('foo bar baz') self.assertRaises(OperationalError, curs.fetchone) self.database.stop() def test_integrity_error(self): self.database.start() u = User.create(name='u') self.assertRaises(IntegrityError, User.create, name='u') def test_query_execution(self): qr = User.select().execute() self.assertEqual(self.database.queue_size(), 0) self.database.start() try: users = list(qr) huey = User.create(name='huey') mickey = User.create(name='mickey') self.assertTrue(huey.id is not None) self.assertTrue(mickey.id is not None) self.assertEqual(self.database.queue_size(), 0) finally: self.database.stop() def create_thread(self, fn, *args): raise NotImplementedError def create_event(self): raise NotImplementedError def test_multiple_threads(self): def create_rows(idx, nrows): for i in range(idx, idx + nrows): User.create(name='u-%s' % i) total = self.n_threads * self.n_rows self.database.start() threads = [self.create_thread(create_rows, i, self.n_rows) for i in range(0, total, self.n_rows)] [t.start() for t in threads] [t.join() for t in threads] self.assertEqual(User.select().count(), total) self.database.stop() def test_pause(self): event_a = self.create_event() event_b = self.create_event() def create_user(name, event, expect_paused): event.wait() if expect_paused: self.assertRaises(WriterPaused, lambda: User.create(name=name)) else: User.create(name=name) self.database.start() t_a = self.create_thread(create_user, 'a', event_a, True) t_a.start() t_b = self.create_thread(create_user, 'b', event_b, False) t_b.start() User.create(name='c') self.assertEqual(User.select().count(), 1) # Pause operations but preserve the writer thread/connection. self.database.pause() event_a.set() self.assertEqual(User.select().count(), 1) t_a.join() self.database.unpause() self.assertEqual(User.select().count(), 1) event_b.set() t_b.join() self.assertEqual(User.select().count(), 2) self.database.stop() def test_restart(self): self.database.start() User.create(name='a') self.database.stop() self.database._results_timeout = 0.0001 self.assertRaises(ResultTimeout, User.create, name='b') self.assertEqual(User.select().count(), 1) self.database.start() # Will execute the pending "b" INSERT. 
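        # (Writes are serialized through a single queue: the INSERT for "b"
        # was enqueued before the timeout fired, so restarting the writer
        # drains the queue and the row shows up in the assertions below.)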
self.database._results_timeout = None User.create(name='c') self.assertEqual(User.select().count(), 3) self.assertEqual(sorted(u.name for u in User.select()), ['a', 'b', 'c']) def test_waiting(self): D = {} def create_user(name): D[name] = User.create(name=name).id threads = [self.create_thread(create_user, name) for name in ('huey', 'charlie', 'zaizee')] [t.start() for t in threads] def get_users(): D['users'] = [(user.id, user.name) for user in User.select()] tg = self.create_thread(get_users) tg.start() threads.append(tg) self.database.start() [t.join() for t in threads] self.database.stop() self.assertEqual(sorted(D), ['charlie', 'huey', 'users', 'zaizee']) def test_next_method(self): self.database.start() User.create(name='mickey') User.create(name='huey') query = iter(User.select().order_by(User.name)) self.assertEqual(next(query).name, 'huey') self.assertEqual(next(query).name, 'mickey') self.assertRaises(StopIteration, lambda: next(query)) self.assertEqual( next(self.database.execute_sql('PRAGMA journal_mode'))[0], 'wal') self.database.stop() class TestThreadedDatabaseThreads(BaseTestQueueDatabase, BaseTestCase): database_config = {'use_gevent': False} def tearDown(self): self.database._results_timeout = None super(TestThreadedDatabaseThreads, self).tearDown() def create_thread(self, fn, *args): t = threading.Thread(target=fn, args=args) t.daemon = True return t def create_event(self): return threading.Event() def test_timeout(self): @self.database.func() def slow(n): time.sleep(n) return 'slept %0.2f' % n self.database.start() # Make the result timeout very small, then call our function which # will cause the query results to time-out. self.database._results_timeout = 0.001 def do_query(): # Prepend a space so that we can force it through the threaded # pipeline, otherwise it would execute normally. 
cursor = self.database.execute_sql(' select slow(?)', (0.01,)) self.assertEqual(cursor.fetchone()[0], 'slept 0.01') self.assertRaises(ResultTimeout, do_query) self.database.stop() @skip_if(gevent is None, 'gevent not installed') class TestThreadedDatabaseGreenlets(BaseTestQueueDatabase, BaseTestCase): database_config = {'use_gevent': True} n_rows = 10 n_threads = 40 def create_thread(self, fn, *args): return gevent.Greenlet(fn, *args) def create_event(self): return GreenEvent() peewee-3.17.7/tests/test_utils.py000066400000000000000000000047551470346076600170260ustar00rootroot00000000000000import functools from .base import ModelTestCase from .base import TestModel from peewee import * from playhouse.test_utils import assert_query_count from playhouse.test_utils import count_queries class Data(TestModel): key = CharField() class Meta: order_by = ('key',) class DataItem(TestModel): data = ForeignKeyField(Data, backref='items') value = CharField() class Meta: order_by = ('value',) class TestQueryCounter(ModelTestCase): requires = [DataItem, Data] def test_count(self): with count_queries() as count: Data.create(key='k1') Data.create(key='k2') self.assertEqual(count.count, 2) with count_queries() as count: items = [item.key for item in Data.select().order_by(Data.key)] self.assertEqual(items, ['k1', 'k2']) Data.get(Data.key == 'k1') Data.get(Data.key == 'k2') self.assertEqual(count.count, 3) def test_only_select(self): with count_queries(only_select=True) as count: for i in range(10): Data.create(key=str(i)) items = [item.key for item in Data.select()] Data.get(Data.key == '0') Data.get(Data.key == '9') Data.delete().where( Data.key << ['1', '3', '5', '7', '9']).execute() items = [item.key for item in Data.select().order_by(Data.key)] self.assertEqual(items, ['0', '2', '4', '6', '8']) self.assertEqual(count.count, 4) def test_assert_query_count_decorator(self): @assert_query_count(2) def will_fail_under(): Data.create(key='x') @assert_query_count(2) def will_fail_over(): for i in range(3): Data.create(key=str(i)) @assert_query_count(4) def will_succeed(): for i in range(4): Data.create(key=str(i + 100)) will_succeed() self.assertRaises(AssertionError, will_fail_under) self.assertRaises(AssertionError, will_fail_over) def test_assert_query_count_ctx_mgr(self): with assert_query_count(3): for i in range(3): Data.create(key=str(i)) def will_fail(): with assert_query_count(2): Data.create(key='x') self.assertRaises(AssertionError, will_fail) @assert_query_count(3) def test_only_three(self): for i in range(3): Data.create(key=str(i)) peewee-3.17.7/tests/transactions.py000066400000000000000000000326171470346076600173350ustar00rootroot00000000000000from peewee import * from .base import DatabaseTestCase from .base import IS_CRDB from .base import IS_CRDB_NESTED_TX from .base import IS_MYSQL from .base import IS_POSTGRESQL from .base import IS_SQLITE from .base import ModelTestCase from .base import db from .base import new_connection from .base import skip_if from .base import skip_unless from .base_models import Register class BaseTransactionTestCase(ModelTestCase): requires = [Register] def assertRegister(self, vals): query = Register.select().order_by(Register.value) self.assertEqual([register.value for register in query], vals) def _save(self, *vals): Register.insert([{Register.value: val} for val in vals]).execute() def requires_nested(fn): return skip_if(IS_CRDB and not IS_CRDB_NESTED_TX, 'nested transaction support is required')(fn) class TestTransaction(BaseTransactionTestCase): def 
test_simple(self): self.assertFalse(db.in_transaction()) with db.atomic(): self.assertTrue(db.in_transaction()) self._save(1) self.assertFalse(db.in_transaction()) self.assertRegister([1]) # Explicit rollback, implicit commit. with db.atomic() as txn: self._save(2) txn.rollback() self.assertTrue(db.in_transaction()) self._save(3) self.assertFalse(db.in_transaction()) self.assertRegister([1, 3]) # Explicit rollbacks. with db.atomic() as txn: self._save(4) txn.rollback() self._save(5) txn.rollback() self.assertRegister([1, 3]) @requires_nested def test_transactions(self): self.assertFalse(db.in_transaction()) with db.atomic(): self.assertTrue(db.in_transaction()) self._save(1) self.assertRegister([1]) with db.atomic() as txn: self._save(2) txn.rollback() self._save(3) with db.atomic() as sp1: self._save(4) with db.atomic() as sp2: self._save(5) sp2.rollback() with db.atomic() as sp3: self._save(6) with db.atomic() as sp4: self._save(7) with db.atomic() as sp5: self._save(8) self.assertRegister([1, 3, 4, 6, 7, 8]) sp4.rollback() self.assertRegister([1, 3, 4, 6]) self.assertRegister([1, 3, 4, 6]) def test_commit_rollback(self): with db.atomic() as txn: self._save(1) txn.commit() self._save(2) txn.rollback() self.assertRegister([1]) with db.atomic() as txn: self._save(3) txn.rollback() self._save(4) self.assertRegister([1, 4]) @requires_nested def test_commit_rollback_nested(self): with db.atomic() as txn: self.test_commit_rollback() txn.rollback() self.assertRegister([]) with db.atomic(): self.test_commit_rollback() self.assertRegister([1, 4]) def test_nesting_transaction_obj(self): self.assertRegister([]) with db.transaction() as txn: self._save(1) with db.transaction() as txn2: self._save(2) txn2.rollback() # Actually issues a rollback. self.assertRegister([]) self._save(3) self.assertRegister([3]) with db.transaction() as txn: self._save(4) with db.transaction() as txn2: with db.transaction() as txn3: self._save(5) txn3.commit() # Actually commits. 
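                # (Unlike atomic(), transaction() does not create savepoints
                # when nested: commit() and rollback() act on the single
                # underlying transaction and immediately begin a new one, so
                # 4 and 5 survive the txn2.rollback() below while 6 does not.)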
                self._save(6)
                txn2.rollback()
        self.assertRegister([3, 4, 5])

        with db.transaction() as txn:
            self._save(6)
            try:
                with db.transaction() as txn2:
                    self._save(7)
                    raise ValueError()
            except ValueError:
                pass
        self.assertRegister([3, 4, 5, 6, 7])

    @requires_nested
    def test_savepoint_commit(self):
        with db.atomic() as txn:
            self._save(1)
            txn.rollback()
            self._save(2)
            txn.commit()
            with db.atomic() as sp:
                self._save(3)
                sp.rollback()
                self._save(4)
                sp.commit()

        self.assertRegister([2, 4])

    def test_atomic_decorator(self):
        @db.atomic()
        def save(i):
            self._save(i)

        save(1)
        self.assertRegister([1])

    def test_atomic_exception(self):
        def will_fail():
            with db.atomic():
                self._save(1)
                self._save(None)

        self.assertRaises(IntegrityError, will_fail)
        self.assertRegister([])

        def user_error():
            with db.atomic():
                self._save(2)
                raise ValueError

        self.assertRaises(ValueError, user_error)
        self.assertRegister([])

    def test_manual_commit(self):
        with db.manual_commit():
            db.begin()
            self._save(1)
            db.rollback()

            db.begin()
            self._save(2)
            db.commit()

        with db.manual_commit():
            db.begin()
            self._save(3)
            db.rollback()

            db.begin()
            self._save(4)
            db.commit()

        self.assertRegister([2, 4])

    def test_mixing_manual_atomic(self):
        @db.manual_commit()
        def will_fail():
            pass

        @db.atomic()
        def also_fails():
            pass

        with db.atomic():
            self.assertRaises(ValueError, will_fail)

        with db.manual_commit():
            self.assertRaises(ValueError, also_fails)

        with db.manual_commit():
            with self.assertRaises(ValueError):
                with db.atomic():
                    pass

        with db.atomic():
            with self.assertRaises(ValueError):
                with db.manual_commit():
                    pass

    def test_closing_db_in_transaction(self):
        with db.atomic():
            self.assertRaises(OperationalError, db.close)

    @requires_nested
    def test_db_context_manager(self):
        db.close()
        self.assertTrue(db.is_closed())

        with db:
            self.assertFalse(db.is_closed())
            self._save(1)

            with db:
                self._save(2)

            try:
                with db:
                    self._save(3)
                    raise ValueError('xxx')
            except ValueError:
                pass
            self._save(4)

            try:
                with db:
                    self._save(5)
                    with db:
                        self._save(6)
                        raise ValueError('yyy')
            except ValueError:
                pass

            self.assertFalse(db.is_closed())

        self.assertTrue(db.is_closed())
        self.assertRegister([1, 2, 4])


@requires_nested
class TestSession(BaseTransactionTestCase):
    def test_session(self):
        self.assertTrue(db.session_start())
        self.assertTrue(db.session_start())
        self.assertEqual(db.transaction_depth(), 2)
        self._save(1)
        self.assertTrue(db.session_commit())
        self.assertEqual(db.transaction_depth(), 1)

        self._save(2)  # Now we're in autocommit mode.
        self.assertTrue(db.session_rollback())
        self.assertEqual(db.transaction_depth(), 0)

        self.assertTrue(db.session_start())
        self._save(3)
        self.assertTrue(db.session_rollback())
        self.assertRegister([1])

    def test_session_with_closed_db(self):
        db.close()
        self.assertTrue(db.session_start())
        self.assertFalse(db.is_closed())
        self.assertRaises(OperationalError, db.close)
        self._save(1)
        self.assertTrue(db.session_rollback())
        self.assertRegister([])

    def test_session_inside_context_manager(self):
        with db.atomic():
            self.assertTrue(db.session_start())
            self._save(1)
            self.assertTrue(db.session_commit())
            self._save(2)
            self.assertTrue(db.session_rollback())
            db.session_start()
            self._save(3)

        self.assertRegister([1, 3])

    def test_commit_rollback_mix(self):
        db.session_start()
        with db.atomic() as txn:  # Will be a savepoint.
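            # (Inside an explicit session, atomic() blocks become savepoints:
            # txn.commit() releases and re-opens the savepoint, so the later
            # txn.rollback() discards only value 4 and session_commit()
            # persists just [1].)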
self._save(1) with db.atomic() as t2: self._save(2) with db.atomic() as t3: self._save(3) t2.rollback() txn.commit() self._save(4) txn.rollback() self.assertTrue(db.session_commit()) self.assertRegister([1]) def test_session_rollback(self): db.session_start() self._save(1) with db.atomic() as txn: self._save(2) with db.atomic() as t2: self._save(3) self.assertRegister([1, 2, 3]) self.assertTrue(db.session_rollback()) self.assertRegister([]) db.session_start() self._save(1) with db.transaction() as txn: self._save(2) with db.transaction() as t2: self._save(3) t2.rollback() # Rolls back everything, starts new txn. db.session_commit() self.assertRegister([]) def test_session_commit(self): db.session_start() self._save(1) with db.transaction() as txn: self._save(2) with db.transaction() as t2: self._save(3) t2.commit() # Saves everything, starts new txn. txn.rollback() self.assertTrue(db.session_rollback()) self.assertRegister([1, 2, 3]) @skip_unless(IS_SQLITE, 'requires sqlite for transaction lock type') class TestTransactionLockType(BaseTransactionTestCase): def test_lock_type(self): db2 = new_connection(timeout=0.001) db2.connect() with self.database.atomic(lock_type='EXCLUSIVE') as txn: with self.assertRaises(OperationalError): with db2.atomic(lock_type='IMMEDIATE') as t2: self._save(1) self._save(2) self.assertRegister([2]) with self.database.atomic('IMMEDIATE') as txn: with self.assertRaises(OperationalError): with db2.atomic('EXCLUSIVE') as t2: self._save(3) self._save(4) self.assertRegister([2, 4]) with self.database.transaction(lock_type='DEFERRED') as txn: self._save(5) # Deferred -> Exclusive after our write. with self.assertRaises(OperationalError): with db2.transaction(lock_type='IMMEDIATE') as t2: self._save(6) self.assertRegister([2, 4, 5]) class TestTransactionIsolationLevel(BaseTransactionTestCase): @skip_unless(IS_POSTGRESQL, 'requires postgresql') def test_isolation_level_pg(self): db2 = new_connection() db2.connect() with db2.atomic(isolation_level='SERIALIZABLE'): with db.atomic(isolation_level='SERIALIZABLE'): self._save(1) self.assertDB2(db2, []) self.assertDB2(db2, []) self.assertDB2(db2, [1]) with db2.atomic(isolation_level='READ COMMITTED'): with db.atomic(): self._save(2) self.assertDB2(db2, [1]) self.assertDB2(db2, [1, 2]) self.assertDB2(db2, [1, 2]) # NB: Read Uncommitted is treated as Read Committed by PG, so we don't # test it here. 
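        # (Under REPEATABLE READ each transaction reads from a snapshot taken
        # at its first statement, so db2 does not observe the new row until
        # its own transaction exits, as the assertions below verify.)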
with db2.atomic(isolation_level='REPEATABLE READ'): with db.atomic(isolation_level='REPEATABLE READ'): self._save(3) self.assertDB2(db2, [1, 2]) self.assertDB2(db2, [1, 2]) self.assertDB2(db2, [1, 2, 3]) @skip_unless(IS_MYSQL, 'requires mysql') def test_isolation_level_mysql(self): db2 = new_connection() db2.connect() with db2.atomic(): with db.atomic(isolation_level='SERIALIZABLE'): self._save(1) self.assertDB2(db2, []) self.assertDB2(db2, []) self.assertDB2(db2, [1]) with db2.atomic(isolation_level='READ COMMITTED'): with db.atomic(): self._save(2) self.assertDB2(db2, [1]) self.assertDB2(db2, [1, 2]) self.assertDB2(db2, [1, 2]) with db2.atomic(isolation_level='READ UNCOMMITTED'): with db.atomic(): self._save(3) self.assertDB2(db2, [1, 2, 3]) self.assertDB2(db2, [1, 2, 3]) self.assertDB2(db2, [1, 2, 3]) with db2.atomic(isolation_level='REPEATABLE READ'): with db.atomic(isolation_level='REPEATABLE READ'): self._save(4) self.assertDB2(db2, [1, 2, 3]) self.assertDB2(db2, [1, 2, 3]) self.assertDB2(db2, [1, 2, 3, 4]) def assertDB2(self, db2, vals): with Register.bind_ctx(db2): q = Register.select().order_by(Register.value) self.assertEqual([r.value for r in q], vals)
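# NB: Model.bind_ctx(), used by assertDB2 above, temporarily binds a model to
# a second connection so the same queries can be issued from the other side
# of an isolation test. A minimal sketch of the pattern (names hypothetical):
#
#   with SomeModel.bind_ctx(other_db):
#       rows = list(SomeModel.select())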