SQLAlchemy-1.0.11/0000775000175000017500000000000012636376632014640 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/setup.py0000664000175000017500000001301312636375552016350 0ustar classicclassic00000000000000"""setup.py Please see README for basic installation instructions. """ import os import re import sys from distutils.command.build_ext import build_ext from distutils.errors import (CCompilerError, DistutilsExecError, DistutilsPlatformError) has_feature = False try: from setuptools import setup, Extension try: # see # https://bitbucket.org/pypa/setuptools/issue/65/deprecate-and-remove-features, # where they may remove Feature. from setuptools import Feature has_feature = True except ImportError: pass except ImportError: from distutils.core import setup, Extension py3k = False cmdclass = {} extra = {} if sys.version_info < (2, 6): raise Exception("SQLAlchemy requires Python 2.6 or higher.") elif sys.version_info >= (3, 0): py3k = True import platform cpython = platform.python_implementation() == 'CPython' ext_modules = [ Extension('sqlalchemy.cprocessors', sources=['lib/sqlalchemy/cextension/processors.c']), Extension('sqlalchemy.cresultproxy', sources=['lib/sqlalchemy/cextension/resultproxy.c']), Extension('sqlalchemy.cutils', sources=['lib/sqlalchemy/cextension/utils.c']) ] ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError) if sys.platform == 'win32': # 2.6's distutils.msvc9compiler can raise an IOError when failing to # find the compiler ext_errors += (IOError,) class BuildFailed(Exception): def __init__(self): self.cause = sys.exc_info()[1] # work around py 2/3 different syntax class ve_build_ext(build_ext): # This class allows C extension building to fail. def run(self): try: build_ext.run(self) except DistutilsPlatformError: raise BuildFailed() def build_extension(self, ext): try: build_ext.build_extension(self, ext) except ext_errors: raise BuildFailed() except ValueError: # this can happen on Windows 64 bit, see Python issue 7511 if "'path'" in str(sys.exc_info()[1]): # works with both py 2/3 raise BuildFailed() raise cmdclass['build_ext'] = ve_build_ext def status_msgs(*msgs): print('*' * 75) for msg in msgs: print(msg) print('*' * 75) def find_packages(location): packages = [] for pkg in ['sqlalchemy']: for _dir, subdirectories, files in ( os.walk(os.path.join(location, pkg))): if '__init__.py' in files: tokens = _dir.split(os.sep)[len(location.split(os.sep)):] packages.append(".".join(tokens)) return packages v_file = open(os.path.join(os.path.dirname(__file__), 'lib', 'sqlalchemy', '__init__.py')) VERSION = re.compile(r".*__version__ = '(.*?)'", re.S).match(v_file.read()).group(1) v_file.close() r_file = open(os.path.join(os.path.dirname(__file__), 'README.rst')) readme = r_file.read() r_file.close() def run_setup(with_cext): kwargs = extra.copy() if with_cext: if has_feature: kwargs['features'] = {'cextensions': Feature( "optional C speed-enhancements", standard=True, ext_modules=ext_modules )} else: kwargs['ext_modules'] = ext_modules setup(name="SQLAlchemy", version=VERSION, description="Database Abstraction Library", author="Mike Bayer", author_email="mike_mp@zzzcomputing.com", url="http://www.sqlalchemy.org", packages=find_packages('lib'), package_dir={'': 'lib'}, license="MIT License", cmdclass=cmdclass, tests_require=['pytest >= 2.5.2', 'mock', 'pytest-xdist'], test_suite="sqlalchemy.testing.distutils_run", long_description=readme, classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: 
OSI Approved :: MIT License", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: Jython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database :: Front-Ends", "Operating System :: OS Independent", ], **kwargs ) if not cpython: run_setup(False) status_msgs( "WARNING: C extensions are not supported on " + "this Python platform, speedups are not enabled.", "Plain-Python build succeeded." ) elif os.environ.get('DISABLE_SQLALCHEMY_CEXT'): run_setup(False) status_msgs( "DISABLE_SQLALCHEMY_CEXT is set; " + "not attempting to build C extensions.", "Plain-Python build succeeded." ) else: try: run_setup(True) except BuildFailed as exc: status_msgs( exc.cause, "WARNING: The C extension could not be compiled, " + "speedups are not enabled.", "Failure information, if any, is above.", "Retrying the build without the C extension now." ) run_setup(False) status_msgs( "WARNING: The C extension could not be compiled, " + "speedups are not enabled.", "Plain-Python build succeeded." ) SQLAlchemy-1.0.11/tox.ini0000664000175000017500000000242112636375552016152 0ustar classicclassic00000000000000[tox] envlist = full,py26,py27,py33,py34,py35 [testenv] deps=pytest mock # -E : ignore PYTHON* environment variables (such as PYTHONPATH) # -s : don't add user site directory to sys.path; also PYTHONNOUSERSITE # the latter is picked up by conftest.py setenv= PYTHONPATH= PYTHONNOUSERSITE=1 # we need this because our CI has all the DBAPIs and such # pre-installed in individual site-packages directories. sitepackages=True # always install fully and use that; this way options like # DISABLE_SQLALCHEMY_CEXT are honored usedevelop=False # tox as of 2.0 blocks all environment variables from the # outside, unless they are here (or in TOX_TESTENV_PASSENV, # wildcards OK). Need at least these passenv=ORACLE_HOME NLS_LANG commands= python -m pytest {posargs} [testenv:full] [testenv:coverage] setenv= DISABLE_SQLALCHEMY_CEXT=1 # see also .coveragerc deps=pytest-cov coverage mock commands= python -m pytest --cov=sqlalchemy --cov-report term --cov-report xml \ --exclude-tag memory-intensive \ --exclude-tag timing-intensive \ -k "not aaa_profiling" \ {posargs} [testenv:pep8] deps=flake8 commands = python -m flake8 {posargs} [flake8] show-source = True ignore = E711,E712,E721,N806 exclude=.venv,.git,.tox,dist,doc,*egg,build SQLAlchemy-1.0.11/README.rst0000664000175000017500000001162312636375552016332 0ustar classicclassic00000000000000SQLAlchemy ========== The Python SQL Toolkit and Object Relational Mapper Introduction ------------- SQLAlchemy is the Python SQL toolkit and Object Relational Mapper that gives application developers the full power and flexibility of SQL. SQLAlchemy provides a full suite of well known enterprise-level persistence patterns, designed for efficient and high-performing database access, adapted into a simple and Pythonic domain language. Major SQLAlchemy features include: * An industrial strength ORM, built from the core on the identity map, unit of work, and data mapper patterns. These patterns allow transparent persistence of objects using a declarative configuration system. Domain models can be constructed and manipulated naturally, and changes are synchronized with the current transaction automatically. 
* A relationally-oriented query system, exposing the full range of SQL's capabilities explicitly, including joins, subqueries, correlation, and most everything else, in terms of the object model. Writing queries with the ORM uses the same techniques of relational composition you use when writing SQL. While you can drop into literal SQL at any time, it's virtually never needed. * A comprehensive and flexible system of eager loading for related collections and objects. Collections are cached within a session, and can be loaded on individual access, all at once using joins, or by query per collection across the full result set. * A Core SQL construction system and DBAPI interaction layer. The SQLAlchemy Core is separate from the ORM and is a full database abstraction layer in its own right, and includes an extensible Python-based SQL expression language, schema metadata, connection pooling, type coercion, and custom types. * All primary and foreign key constraints are assumed to be composite and natural. Surrogate integer primary keys are of course still the norm, but SQLAlchemy never assumes or hardcodes to this model. * Database introspection and generation. Database schemas can be "reflected" in one step into Python structures representing database metadata; those same structures can then generate CREATE statements right back out - all within the Core, independent of the ORM. SQLAlchemy's philosophy: * SQL databases behave less and less like object collections the more size and performance start to matter; object collections behave less and less like tables and rows the more abstraction starts to matter. SQLAlchemy aims to accommodate both of these principles. * An ORM doesn't need to hide the "R". A relational database provides rich, set-based functionality that should be fully exposed. SQLAlchemy's ORM provides an open-ended set of patterns that allow a developer to construct a custom mediation layer between a domain model and a relational schema, turning the so-called "object relational impedance" issue into a distant memory. * The developer, in all cases, makes all decisions regarding the design, structure, and naming conventions of both the object model as well as the relational schema. SQLAlchemy only provides the means to automate the execution of these decisions. * With SQLAlchemy, there's no such thing as "the ORM generated a bad query" - you retain full control over the structure of queries, including how joins are organized, how subqueries and correlation is used, what columns are requested. Everything SQLAlchemy does is ultimately the result of a developer- initiated decision. * Don't use an ORM if the problem doesn't need one. SQLAlchemy consists of a Core and separate ORM component. The Core offers a full SQL expression language that allows Pythonic construction of SQL constructs that render directly to SQL strings for a target database, returning result sets that are essentially enhanced DBAPI cursors. * Transactions should be the norm. With SQLAlchemy's ORM, nothing goes to permanent storage until commit() is called. SQLAlchemy encourages applications to create a consistent means of delineating the start and end of a series of operations. * Never render a literal value in a SQL statement. Bound parameters are used to the greatest degree possible, allowing query optimizers to cache query plans effectively and making SQL injection attacks a non-issue. 
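As a brief illustration of the last point, here is a minimal sketch using the
Core expression language (the table and value here are illustrative only); the
compared value travels as a bound parameter, never as part of the SQL string::

    from sqlalchemy import select
    from sqlalchemy.sql import table, column

    user = table("user", column("name"))
    stmt = select([user.c.name]).where(user.c.name == "ed")

    # renders: SELECT "user".name FROM "user" WHERE "user".name = :name_1
    # the value "ed" is sent separately as the :name_1 bound parameter
    print(stmt)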
Documentation ------------- Latest documentation is at: http://www.sqlalchemy.org/docs/ Installation / Requirements --------------------------- Full documentation for installation is at `Installation `_. Getting Help / Development / Bug reporting ------------------------------------------ Please refer to the `SQLAlchemy Community Guide `_. License ------- SQLAlchemy is distributed under the `MIT license `_. SQLAlchemy-1.0.11/sqla_nose.py0000775000175000017500000000144612636375552017206 0ustar classicclassic00000000000000#!/usr/bin/env python """ nose runner script. This script is a front-end to "nosetests" which installs SQLAlchemy's testing plugin into the local environment. """ import sys import nose import os if not sys.flags.no_user_site: sys.path.insert( 0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib') ) # use bootstrapping so that test plugins are loaded # without touching the main library before coverage starts bootstrap_file = os.path.join( os.path.dirname(__file__), "lib", "sqlalchemy", "testing", "plugin", "bootstrap.py" ) with open(bootstrap_file) as f: code = compile(f.read(), "bootstrap.py", 'exec') to_bootstrap = "nose" exec(code, globals(), locals()) from noseplugin import NoseSQLAlchemy nose.main(addplugins=[NoseSQLAlchemy()]) SQLAlchemy-1.0.11/MANIFEST.in0000664000175000017500000000077112636375552016403 0ustar classicclassic00000000000000# any kind of "*" pulls in __init__.pyc files, # so all extensions are explicit. recursive-include doc *.html *.css *.txt *.js *.jpg *.png *.py Makefile *.rst *.mako *.sty recursive-include examples *.py *.xml recursive-include test *.py *.dat # include the c extensions, which otherwise # don't come in if --with-cextensions isn't specified. recursive-include lib *.c *.txt include README* AUTHORS LICENSE distribute_setup.py sa2to3.py ez_setup.py sqla_nose.py CHANGES* tox.ini prune doc/build/output SQLAlchemy-1.0.11/AUTHORS0000664000175000017500000000104112636375552015704 0ustar classicclassic00000000000000SQLAlchemy was created by Michael Bayer. Major contributing authors include: - Michael Bayer - Jason Kirtland - Gaetan de Menten - Diana Clarke - Michael Trier - Philip Jenvey - Ants Aasma - Paul Johnston - Jonathan Ellis For a larger list of SQLAlchemy contributors over time, see: http://www.sqlalchemy.org/trac/wiki/Contributors SQLAlchemy-1.0.11/PKG-INFO0000664000175000017500000001533512636376632015744 0ustar classicclassic00000000000000Metadata-Version: 1.1 Name: SQLAlchemy Version: 1.0.11 Summary: Database Abstraction Library Home-page: http://www.sqlalchemy.org Author: Mike Bayer Author-email: mike_mp@zzzcomputing.com License: MIT License Description: SQLAlchemy ========== The Python SQL Toolkit and Object Relational Mapper Introduction ------------- SQLAlchemy is the Python SQL toolkit and Object Relational Mapper that gives application developers the full power and flexibility of SQL. SQLAlchemy provides a full suite of well known enterprise-level persistence patterns, designed for efficient and high-performing database access, adapted into a simple and Pythonic domain language. Major SQLAlchemy features include: * An industrial strength ORM, built from the core on the identity map, unit of work, and data mapper patterns. These patterns allow transparent persistence of objects using a declarative configuration system. Domain models can be constructed and manipulated naturally, and changes are synchronized with the current transaction automatically. 
* A relationally-oriented query system, exposing the full range of SQL's capabilities explicitly, including joins, subqueries, correlation, and most everything else, in terms of the object model. Writing queries with the ORM uses the same techniques of relational composition you use when writing SQL. While you can drop into literal SQL at any time, it's virtually never needed. * A comprehensive and flexible system of eager loading for related collections and objects. Collections are cached within a session, and can be loaded on individual access, all at once using joins, or by query per collection across the full result set. * A Core SQL construction system and DBAPI interaction layer. The SQLAlchemy Core is separate from the ORM and is a full database abstraction layer in its own right, and includes an extensible Python-based SQL expression language, schema metadata, connection pooling, type coercion, and custom types. * All primary and foreign key constraints are assumed to be composite and natural. Surrogate integer primary keys are of course still the norm, but SQLAlchemy never assumes or hardcodes to this model. * Database introspection and generation. Database schemas can be "reflected" in one step into Python structures representing database metadata; those same structures can then generate CREATE statements right back out - all within the Core, independent of the ORM. SQLAlchemy's philosophy: * SQL databases behave less and less like object collections the more size and performance start to matter; object collections behave less and less like tables and rows the more abstraction starts to matter. SQLAlchemy aims to accommodate both of these principles. * An ORM doesn't need to hide the "R". A relational database provides rich, set-based functionality that should be fully exposed. SQLAlchemy's ORM provides an open-ended set of patterns that allow a developer to construct a custom mediation layer between a domain model and a relational schema, turning the so-called "object relational impedance" issue into a distant memory. * The developer, in all cases, makes all decisions regarding the design, structure, and naming conventions of both the object model as well as the relational schema. SQLAlchemy only provides the means to automate the execution of these decisions. * With SQLAlchemy, there's no such thing as "the ORM generated a bad query" - you retain full control over the structure of queries, including how joins are organized, how subqueries and correlation is used, what columns are requested. Everything SQLAlchemy does is ultimately the result of a developer- initiated decision. * Don't use an ORM if the problem doesn't need one. SQLAlchemy consists of a Core and separate ORM component. The Core offers a full SQL expression language that allows Pythonic construction of SQL constructs that render directly to SQL strings for a target database, returning result sets that are essentially enhanced DBAPI cursors. * Transactions should be the norm. With SQLAlchemy's ORM, nothing goes to permanent storage until commit() is called. SQLAlchemy encourages applications to create a consistent means of delineating the start and end of a series of operations. * Never render a literal value in a SQL statement. Bound parameters are used to the greatest degree possible, allowing query optimizers to cache query plans effectively and making SQL injection attacks a non-issue. 
Documentation
-------------

Latest documentation is at:

http://www.sqlalchemy.org/docs/

Installation / Requirements
---------------------------

Full documentation for installation is at `Installation `_.

Getting Help / Development / Bug reporting
------------------------------------------

Please refer to the `SQLAlchemy Community Guide `_.

License
-------

SQLAlchemy is distributed under the `MIT license `_.

Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: Jython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Database :: Front-Ends
Classifier: Operating System :: OS Independent

SQLAlchemy-1.0.11/README.unittests.rst

=====================
SQLALCHEMY UNIT TESTS
=====================

**NOTE:** SQLAlchemy as of 0.9.4 standardizes on `pytest `_ for test
running!  However, the existing support for Nose **still remains**!  That is,
you can now run the tests via pytest or nose.  We hope to keep the suite
nose-compatible indefinitely; however, this might change at some point.

SQLAlchemy unit tests by default run using Python's built-in sqlite3 module.
If running on a Python installation that doesn't include this module, then
pysqlite or a compatible driver must be installed.

Unit tests can be run with pytest or nose:

    py.test: http://pytest.org/

    nose: https://pypi.python.org/pypi/nose/

The suite includes enhanced support when running with pytest.

SQLAlchemy implements plugins for both pytest and nose that must be present
when tests are run.  In the case of pytest, this plugin is automatically used
when pytest is run against the SQLAlchemy source tree.  However, for Nose
support, a special test runner script must be used.

The test suite also requires the mock library.  While mock is part of the
Python standard library as of 3.3, earlier versions will need to have it
installed; it is available at::

    https://pypi.python.org/pypi/mock

RUNNING TESTS VIA SETUP.PY
--------------------------

A plain vanilla run of all tests using sqlite can be run via setup.py, and
requires that pytest is installed::

    $ python setup.py test

RUNNING ALL TESTS - PYTEST
--------------------------

To run all tests::

    $ py.test

The pytest configuration in setup.cfg will point the runner at the test/
directory, where it consumes a conftest.py file that gets everything else up
and running.

RUNNING ALL TESTS - NOSE
------------------------

When using Nose, a bootstrap script is provided which sets up sys.path as
well as installs the nose plugin::

    $ ./sqla_nose.py

Assuming all tests pass, this is a very unexciting output.
To make it more interesting::

    $ ./sqla_nose.py -v

RUNNING INDIVIDUAL TESTS
------------------------

Any directory of test modules can be run at once by specifying the directory
path, and a specific file can be specified as well::

    $ py.test test/dialect

    $ py.test test/orm/test_mapper.py

When using nose, the setup.cfg currently sets "where" to "test/", so the
"test/" prefix is omitted::

    $ ./sqla_nose.py dialect/

    $ ./sqla_nose.py orm/test_mapper.py

With Nose, it is often more intuitive to specify tests as module paths::

    $ ./sqla_nose.py test.orm.test_mapper

Nose can also specify a test class and optional method using this syntax::

    $ ./sqla_nose.py test.orm.test_mapper:MapperTest.test_utils

With pytest, the -k flag is used to limit tests::

    $ py.test test/orm/test_mapper.py -k "MapperTest and test_utils"

COMMAND LINE OPTIONS
--------------------

SQLAlchemy-specific options are added to both runners, which are viewable
within the help screen.  With pytest, these options are easier to locate, as
they are underneath the "sqlalchemy" grouping::

    $ py.test --help

    $ ./sqla_nose.py --help

The --help screen is a combination of common nose options and options which
the SQLAlchemy nose plugin adds.  The most commonly used SQLAlchemy-specific
options are '--db' and '--dburi'.  Both pytest and nose support the same set
of SQLAlchemy options, though pytest features a bit more capability with
them.

DATABASE TARGETS
----------------

Tests will target an in-memory SQLite database by default.  To test against
another database, use the --dburi option with any standard SQLAlchemy URL::

    --dburi=postgresql://user:password@localhost/test

If you'll be running the tests frequently, database aliases can save a lot
of typing.  The --dbs option lists the built-in aliases and their matching
URLs::

    $ py.test --dbs
    Available --db options (use --dburi to override)
        mysql        mysql://scott:tiger@127.0.0.1:3306/test
        oracle       oracle://scott:tiger@127.0.0.1:1521
        postgresql   postgresql://scott:tiger@127.0.0.1:5432/test
    [...]

To run tests against an aliased database::

    $ py.test --db postgresql

This list of database URLs is present in the setup.cfg file.  The list can
be modified/extended by adding a file ``test.cfg`` at the top level of the
SQLAlchemy source distribution which includes additional entries::

    [db]
    postgresql=postgresql://myuser:mypass@localhost/mydb

Your custom entries will override the defaults and you'll see them reflected
in the output of --dbs.

MULTIPLE DATABASE TARGETS
-------------------------

As of SQLAlchemy 0.9.4, the test runner supports **multiple databases at
once**.  This doesn't mean that the entire test suite runs for each database,
but instead specific test suites may do so, while other tests may choose to
run on a specific target out of those available.

For example, if the tests underneath test/dialect/ are run, the majority of
these tests are either specific to a particular backend, or are marked as
"multiple", meaning they will run repeatedly for each database in use.  If
one runs the test suite as follows::

    $ py.test test/dialect --db sqlite --db postgresql --db mysql

the tests underneath test/dialect/test_suite.py will be tripled up, running
as appropriate for each target database, whereas dialect-specific tests
within test/dialect/mysql/, test/dialect/postgresql/, and
test/dialect/test_sqlite.py should run fully with no skips, as each suite
has its target database available.
The multiple targets feature is available under both pytest and nose;
however, when running nose, the "multiple runner" feature won't be available;
instead, the first database target will be used.

When running with multiple targets, tests that don't prefer a specific target
will be run against the first target specified.  Putting sqlite first in the
list will lead to a much faster suite, as the in-memory database is extremely
fast for setting up and tearing down tables.

DATABASE CONFIGURATION
----------------------

Use an empty database and a database user with general DBA privileges.  The
test suite will be creating and dropping many tables and other DDL, and
preexisting tables will interfere with the tests.

Several tests require alternate usernames or schemas to be present, which are
used to test dotted-name access scenarios.  On some databases such as Oracle
or Sybase, these are usernames, while on others such as PostgreSQL and MySQL
they are schemas.  The requirement applies to all backends except SQLite and
Firebird.  The names are::

    test_schema
    test_schema_2 (only used on PostgreSQL)

Please refer to your vendor documentation for the proper syntax to create
these namespaces - the database user must have permission to create and drop
tables within these schemas.  It's perfectly fine to run the test suite
without these namespaces present; it only means that a handful of tests which
expect them to be present will fail.

Additional steps specific to individual databases are as follows::

    POSTGRESQL: To enable unicode testing with JSONB, create the database
    with UTF8 encoding::

        postgres=# create database test with owner=scott encoding='utf8' template=template0;

    To include tests for HSTORE, create the HSTORE type engine::

        postgres=# \c test;
        You are now connected to database "test" as user "postgresql".
        test=# create extension hstore;
        CREATE EXTENSION

    MYSQL: Default storage engine should be "MyISAM".  Tests that require
    "InnoDB" as the engine will specify this explicitly.

    ORACLE: a user named "test_schema" is created.  The primary database user
    needs to be able to create and drop tables, synonyms, and constraints
    within the "test_schema" user.  For this to work fully, including that
    the user has the "REFERENCES" role in a remote schema for tables not yet
    defined (REFERENCES is per-table), it is required that the test user be
    present in the "DBA" role:

        grant dba to scott;

    SYBASE: Similar to Oracle, "test_schema" is created as a user, and the
    primary test user needs to have the "sa_role".  It's also recommended to
    turn on "trunc log on chkpt" and to use a separate transaction log
    device - Sybase basically seizes up when the transaction log is full
    otherwise.  A full series of setup assuming sa/master:

        disk init name="translog", physname="/opt/sybase/data/translog.dat", size="10M"
        create database sqlalchemy on default log on translog="10M"
        sp_dboption sqlalchemy, "trunc log on chkpt", true
        sp_addlogin scott, "tiger7"
        sp_addlogin test_schema, "tiger7"
        use sqlalchemy
        sp_adduser scott
        sp_adduser test_schema
        grant all to scott
        sp_role "grant", sa_role, scott

    Sybase will still freeze for up to a minute when the log becomes full.
    To manually dump the log::

        dump tran sqlalchemy with truncate_only

    MSSQL: Tests that involve multiple connections require Snapshot Isolation
    ability implemented on the test database in order to prevent deadlocks
    that will occur with record locking isolation.  This feature is only
    available with MSSQL 2005 and greater.
    You must enable snapshot isolation at the database level and set the
    default cursor isolation with two SQL commands:

        ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON

        ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON

    MSSQL+zxJDBC: Trying to run the unit tests on Windows against SQL Server
    requires using a test.cfg configuration file as the cmd.exe shell won't
    properly pass the URL arguments into the nose test runner.

    POSTGRESQL: Full-text search configuration should be set to English,
    else several tests of ``.match()`` will fail.  This can be set (if it
    isn't so already) with:

        ALTER DATABASE test SET default_text_search_config = 'pg_catalog.english'

CONFIGURING LOGGING
-------------------

SQLAlchemy logs its activity and debugging through Python's logging package.
Any log target can be directed to the console with command line options, such
as::

    $ ./sqla_nose.py test.orm.unitofwork --log-info=sqlalchemy.orm.mapper \
        --log-debug=sqlalchemy.pool --log-info=sqlalchemy.engine

This would log mapper configuration, connection pool checkouts, and SQL
statement execution.

BUILT-IN COVERAGE REPORTING
---------------------------

Coverage is tracked using the coverage plugins built for pytest or nose::

    $ py.test test/sql/test_query --cov=sqlalchemy

    $ ./sqla_nose.py test.sql.test_query --with-coverage

BIG COVERAGE TIP !!!  There is an issue where existing .pyc files may store
the incorrect filepaths, which will break the coverage system.  If coverage
numbers are coming out as low/zero, try deleting all .pyc files.

DEVELOPING AND TESTING NEW DIALECTS
-----------------------------------

See the file README.dialects.rst for detail on dialects.

TESTING WITH MULTIPLE PYTHON VERSIONS USING TOX
-----------------------------------------------

If you want to test across multiple versions of Python, you may find `tox `_
useful.  SQLAlchemy includes a tox.ini file::

    tox -e full

SQLAlchemy uses tox mostly for pre-fab testing configurations, to simplify
configuration of Jenkins jobs, and *not* for testing different Python
interpreters simultaneously.  You can of course create whatever alternate
tox.ini file you want.  Environments include::

    "full" - runs a full py.test

    "coverage" - runs a py.test plus coverage, skipping memory/timing
    intensive tests

    "pep8" - runs flake8 against the codebase (useful with --diff to check
    against a patch)

PARALLEL TESTING
----------------

Parallel testing is supported using the Pytest xdist plugin.  Supported
databases currently include sqlite, postgresql, and mysql.  The username for
the database should have CREATE DATABASE and DROP DATABASE privileges.

After installing pytest-xdist, testing is run adding the -n option.  For
example, to run against sqlite, mysql, postgresql with four processes::

    tox -e -- -n 4 --db sqlite --db postgresql --db mysql

Each backend has a different scheme for setting up the database.  PostgreSQL
still needs the "test_schema" and "test_schema_2" schemas present, as the
parallel databases are created using the base database as a "template".
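The -n option can also be passed straight to py.test without going through
tox; a minimal sketch, assuming pytest-xdist is installed and the target
databases are configured as described above::

    $ py.test -n 4 --db sqlite --db postgresql --db mysql

As with the tox invocation, four worker processes are used, and each backend
sets up its per-process databases from the base targets.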
SQLAlchemy-1.0.11/examples/0000775000175000017500000000000012636376632016456 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/large_collection/0000775000175000017500000000000012636376632021763 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/large_collection/large_collection.py0000664000175000017500000000633312636375552025647 0ustar classicclassic00000000000000 from sqlalchemy import (MetaData, Table, Column, Integer, String, ForeignKey, create_engine) from sqlalchemy.orm import (mapper, relationship, sessionmaker) meta = MetaData() org_table = Table('organizations', meta, Column('org_id', Integer, primary_key=True), Column('org_name', String(50), nullable=False, key='name'), mysql_engine='InnoDB') member_table = Table('members', meta, Column('member_id', Integer, primary_key=True), Column('member_name', String(50), nullable=False, key='name'), Column('org_id', Integer, ForeignKey('organizations.org_id', ondelete="CASCADE")), mysql_engine='InnoDB') class Organization(object): def __init__(self, name): self.name = name class Member(object): def __init__(self, name): self.name = name mapper(Organization, org_table, properties = { 'members' : relationship(Member, # Organization.members will be a Query object - no loading # of the entire collection occurs unless requested lazy="dynamic", # Member objects "belong" to their parent, are deleted when # removed from the collection cascade="all, delete-orphan", # "delete, delete-orphan" cascade does not load in objects on delete, # allows ON DELETE CASCADE to handle it. # this only works with a database that supports ON DELETE CASCADE - # *not* sqlite or MySQL with MyISAM passive_deletes=True, ) }) mapper(Member, member_table) if __name__ == '__main__': engine = create_engine("postgresql://scott:tiger@localhost/test", echo=True) meta.create_all(engine) # expire_on_commit=False means the session contents # will not get invalidated after commit. sess = sessionmaker(engine, expire_on_commit=False)() # create org with some members org = Organization('org one') org.members.append(Member('member one')) org.members.append(Member('member two')) org.members.append(Member('member three')) sess.add(org) print("-------------------------\nflush one - save org + 3 members\n") sess.commit() # the 'members' collection is a Query. it issues # SQL as needed to load subsets of the collection. print("-------------------------\nload subset of members\n") members = org.members.filter(member_table.c.name.like('%member t%')).all() print(members) # new Members can be appended without any # SQL being emitted to load the full collection org.members.append(Member('member four')) org.members.append(Member('member five')) org.members.append(Member('member six')) print("-------------------------\nflush two - save 3 more members\n") sess.commit() # delete the object. Using ON DELETE CASCADE # SQL is only emitted for the head row - the Member rows # disappear automatically without the need for additional SQL. sess.delete(org) print("-------------------------\nflush three - delete org, delete members in one statement\n") sess.commit() print("-------------------------\nno Member rows should remain:\n") print(sess.query(Member).count()) sess.close() print("------------------------\ndone. dropping tables.") meta.drop_all(engine)SQLAlchemy-1.0.11/examples/large_collection/__init__.py0000664000175000017500000000061712636375552024100 0ustar classicclassic00000000000000"""Large collection example. 
Illustrates the options to use with :func:`~sqlalchemy.orm.relationship()` when the list of related objects is very large, including: * "dynamic" relationships which query slices of data as accessed * how to use ON DELETE CASCADE in conjunction with ``passive_deletes=True`` to greatly improve the performance of related collection deletion. .. autosource:: """ SQLAlchemy-1.0.11/examples/nested_sets/0000775000175000017500000000000012636376632020776 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/nested_sets/__init__.py0000664000175000017500000000021612636375552023106 0ustar classicclassic00000000000000""" Illustrates a rudimentary way to implement the "nested sets" pattern for hierarchical data using the SQLAlchemy ORM. .. autosource:: """SQLAlchemy-1.0.11/examples/nested_sets/nested_sets.py0000664000175000017500000000667512636375552023706 0ustar classicclassic00000000000000"""Celko's "Nested Sets" Tree Structure. http://www.intelligententerprise.com/001020/celko.jhtml """ from sqlalchemy import (create_engine, Column, Integer, String, select, case, func) from sqlalchemy.orm import Session, aliased from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import event Base = declarative_base() class Employee(Base): __tablename__ = 'personnel' __mapper_args__ = { 'batch': False # allows extension to fire for each # instance before going to the next. } parent = None emp = Column(String, primary_key=True) left = Column("lft", Integer, nullable=False) right = Column("rgt", Integer, nullable=False) def __repr__(self): return "Employee(%s, %d, %d)" % (self.emp, self.left, self.right) @event.listens_for(Employee, "before_insert") def before_insert(mapper, connection, instance): if not instance.parent: instance.left = 1 instance.right = 2 else: personnel = mapper.mapped_table right_most_sibling = connection.scalar( select([personnel.c.rgt]). where(personnel.c.emp == instance.parent.emp) ) connection.execute( personnel.update( personnel.c.rgt >= right_most_sibling).values( lft=case( [(personnel.c.lft > right_most_sibling, personnel.c.lft + 2)], else_=personnel.c.lft ), rgt=case( [(personnel.c.rgt >= right_most_sibling, personnel.c.rgt + 2)], else_=personnel.c.rgt ) ) ) instance.left = right_most_sibling instance.right = right_most_sibling + 1 # before_update() would be needed to support moving of nodes # after_delete() would be needed to support removal of nodes. engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = Session(bind=engine) albert = Employee(emp='Albert') bert = Employee(emp='Bert') chuck = Employee(emp='Chuck') donna = Employee(emp='Donna') eddie = Employee(emp='Eddie') fred = Employee(emp='Fred') bert.parent = albert chuck.parent = albert donna.parent = chuck eddie.parent = chuck fred.parent = chuck # the order of "add" is important here. elements must be added in # the order in which they should be INSERTed. session.add_all([albert, bert, chuck, donna, eddie, fred]) session.commit() print(session.query(Employee).all()) # 1. Find an employee and all their supervisors, no matter how deep the tree. ealias = aliased(Employee) print(session.query(Employee).\ filter(ealias.left.between(Employee.left, Employee.right)).\ filter(ealias.emp == 'Eddie').all()) #2. Find the employee and all their subordinates. # (This query has a nice symmetry with the first query.) print(session.query(Employee).\ filter(Employee.left.between(ealias.left, ealias.right)).\ filter(ealias.emp == 'Chuck').all()) #3. 
Find the level of each node, so you can print the tree # as an indented listing. for indentation, employee in session.query( func.count(Employee.emp).label('indentation') - 1, ealias).\ filter(ealias.left.between(Employee.left, Employee.right)).\ group_by(ealias.emp).\ order_by(ealias.left): print(" " * indentation + str(employee)) SQLAlchemy-1.0.11/examples/graphs/0000775000175000017500000000000012636376632017742 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/graphs/directed_graph.py0000664000175000017500000000437112636375552023265 0ustar classicclassic00000000000000"""a directed graph example.""" from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey, \ create_engine from sqlalchemy.orm import mapper, relationship, sessionmaker from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Node(Base): __tablename__ = 'node' node_id = Column(Integer, primary_key=True) def __init__(self, id): self.node_id = id def add_neighbors(self, *nodes): for node in nodes: Edge(self, node) return self def higher_neighbors(self): return [x.higher_node for x in self.lower_edges] def lower_neighbors(self): return [x.lower_node for x in self.higher_edges] class Edge(Base): __tablename__ = 'edge' lower_id = Column(Integer, ForeignKey('node.node_id'), primary_key=True) higher_id = Column(Integer, ForeignKey('node.node_id'), primary_key=True) lower_node = relationship(Node, primaryjoin=lower_id==Node.node_id, backref='lower_edges') higher_node = relationship(Node, primaryjoin=higher_id==Node.node_id, backref='higher_edges') # here we have lower.node_id <= higher.node_id def __init__(self, n1, n2): if n1.node_id < n2.node_id: self.lower_node = n1 self.higher_node = n2 else: self.lower_node = n2 self.higher_node = n1 engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = sessionmaker(engine)() # create a directed graph like this: # n1 -> n2 -> n5 # -> n7 # -> n3 -> n6 n1 = Node(1) n2 = Node(2) n3 = Node(3) n4 = Node(4) n5 = Node(5) n6 = Node(6) n7 = Node(7) n2.add_neighbors(n5, n1) n3.add_neighbors(n6) n7.add_neighbors(n2) n1.add_neighbors(n3) session.add_all([n1, n2, n3, n4, n5, n6, n7]) session.commit() assert [x.node_id for x in n3.higher_neighbors()] == [6] assert [x.node_id for x in n3.lower_neighbors()] == [1] assert [x.node_id for x in n2.lower_neighbors()] == [1] assert [x.node_id for x in n2.higher_neighbors()] == [5,7] SQLAlchemy-1.0.11/examples/graphs/__init__.py0000664000175000017500000000057012636375552022055 0ustar classicclassic00000000000000"""An example of persistence for a directed graph structure. The graph is stored as a collection of edges, each referencing both a "lower" and an "upper" node in a table of nodes. Basic persistence and querying for lower- and upper- neighbors are illustrated:: n2 = Node(2) n5 = Node(5) n2.add_neighbor(n5) print n2.higher_neighbors() .. autosource:: """SQLAlchemy-1.0.11/examples/custom_attributes/0000775000175000017500000000000012636376632022236 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/custom_attributes/custom_management.py0000664000175000017500000000573412636375552026327 0ustar classicclassic00000000000000"""Illustrates customized class instrumentation, using the :mod:`sqlalchemy.ext.instrumentation` extension package. In this example, mapped classes are modified to store their state in a dictionary attached to an attribute named "_goofy_dict", instead of using __dict__. 
this example illustrates how to replace SQLAlchemy's class descriptors with a user-defined system. """ from sqlalchemy import create_engine, MetaData, Table, Column, Integer, Text,\ ForeignKey from sqlalchemy.orm import mapper, relationship, Session from sqlalchemy.orm.attributes import set_attribute, get_attribute, \ del_attribute from sqlalchemy.orm.instrumentation import is_instrumented from sqlalchemy.ext.instrumentation import InstrumentationManager class MyClassState(InstrumentationManager): def get_instance_dict(self, class_, instance): return instance._goofy_dict def initialize_instance_dict(self, class_, instance): instance.__dict__['_goofy_dict'] = {} def install_state(self, class_, instance, state): instance.__dict__['_goofy_dict']['state'] = state def state_getter(self, class_): def find(instance): return instance.__dict__['_goofy_dict']['state'] return find class MyClass(object): __sa_instrumentation_manager__ = MyClassState def __init__(self, **kwargs): for k in kwargs: setattr(self, k, kwargs[k]) def __getattr__(self, key): if is_instrumented(self, key): return get_attribute(self, key) else: try: return self._goofy_dict[key] except KeyError: raise AttributeError(key) def __setattr__(self, key, value): if is_instrumented(self, key): set_attribute(self, key, value) else: self._goofy_dict[key] = value def __delattr__(self, key): if is_instrumented(self, key): del_attribute(self, key) else: del self._goofy_dict[key] if __name__ == '__main__': engine = create_engine('sqlite://') meta = MetaData() table1 = Table('table1', meta, Column('id', Integer, primary_key=True), Column('name', Text)) table2 = Table('table2', meta, Column('id', Integer, primary_key=True), Column('name', Text), Column('t1id', Integer, ForeignKey('table1.id'))) meta.create_all(engine) class A(MyClass): pass class B(MyClass): pass mapper(A, table1, properties={ 'bs': relationship(B) }) mapper(B, table2) a1 = A(name='a1', bs=[B(name='b1'), B(name='b2')]) assert a1.name == 'a1' assert a1.bs[0].name == 'b1' sess = Session(engine) sess.add(a1) sess.commit() a1 = sess.query(A).get(a1.id) assert a1.name == 'a1' assert a1.bs[0].name == 'b1' a1.bs.remove(a1.bs[0]) sess.commit() a1 = sess.query(A).get(a1.id) assert len(a1.bs) == 1 SQLAlchemy-1.0.11/examples/custom_attributes/listen_for_events.py0000664000175000017500000000400312636375552026335 0ustar classicclassic00000000000000""" Illustrates how to attach events to all instrumented attributes and listen for change events. 
""" from sqlalchemy import event def configure_listener(class_, key, inst): def append(instance, value, initiator): instance.receive_change_event("append", key, value, None) def remove(instance, value, initiator): instance.receive_change_event("remove", key, value, None) def set_(instance, value, oldvalue, initiator): instance.receive_change_event("set", key, value, oldvalue) event.listen(inst, 'append', append) event.listen(inst, 'remove', remove) event.listen(inst, 'set', set_) if __name__ == '__main__': from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship from sqlalchemy.ext.declarative import declarative_base class Base(object): def receive_change_event(self, verb, key, value, oldvalue): s = "Value '%s' %s on attribute '%s', " % (value, verb, key) if oldvalue: s += "which replaced the value '%s', " % oldvalue s += "on object %s" % self print(s) Base = declarative_base(cls=Base) event.listen(Base, 'attribute_instrument', configure_listener) class MyMappedClass(Base): __tablename__ = "mytable" id = Column(Integer, primary_key=True) data = Column(String(50)) related_id = Column(Integer, ForeignKey("related.id")) related = relationship("Related", backref="mapped") def __str__(self): return "MyMappedClass(data=%r)" % self.data class Related(Base): __tablename__ = "related" id = Column(Integer, primary_key=True) data = Column(String(50)) def __str__(self): return "Related(data=%r)" % self.data # classes are instrumented. Demonstrate the events ! m1 = MyMappedClass(data='m1', related=Related(data='r1')) m1.data = 'm1mod' m1.related.mapped.append(MyMappedClass(data='m2')) del m1.data SQLAlchemy-1.0.11/examples/custom_attributes/__init__.py0000664000175000017500000000015612636375552024351 0ustar classicclassic00000000000000""" Two examples illustrating modifications to SQLAlchemy's attribute management system. .. autosource:: """SQLAlchemy-1.0.11/examples/association/0000775000175000017500000000000012636376632020772 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/association/__init__.py0000664000175000017500000000032712636375552023105 0ustar classicclassic00000000000000""" Examples illustrating the usage of the "association object" pattern, where an intermediary class mediates the relationship between two classes that are associated in a many-to-many pattern. .. autosource:: """SQLAlchemy-1.0.11/examples/association/dict_of_sets_with_default.py0000664000175000017500000000535212636375552026555 0ustar classicclassic00000000000000"""dict_of_sets_with_default.py an advanced association proxy example which illustrates nesting of association proxies to produce multi-level Python collections, in this case a dictionary with string keys and sets of integers as values, which conceal the underlying mapped classes. This is a three table model which represents a parent table referencing a dictionary of string keys and sets as values, where each set stores a collection of integers. The association proxy extension is used to hide the details of this persistence. The dictionary also generates new collections upon access of a non-existent key, in the same manner as Python's "collections.defaultdict" object. 
""" from sqlalchemy import String, Integer, Column, create_engine, ForeignKey from sqlalchemy.orm import relationship, Session from sqlalchemy.orm.collections import MappedCollection from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.associationproxy import association_proxy import operator class Base(object): id = Column(Integer, primary_key=True) Base = declarative_base(cls=Base) class GenDefaultCollection(MappedCollection): def __missing__(self, key): self[key] = b = B(key) return b class A(Base): __tablename__ = "a" associations = relationship("B", collection_class=lambda: GenDefaultCollection(operator.attrgetter("key")) ) collections = association_proxy("associations", "values") """Bridge the association from 'associations' over to the 'values' association proxy of B. """ class B(Base): __tablename__ = "b" a_id = Column(Integer, ForeignKey("a.id"), nullable=False) elements = relationship("C", collection_class=set) key = Column(String) values = association_proxy("elements", "value") """Bridge the association from 'elements' over to the 'value' element of C.""" def __init__(self, key, values=None): self.key = key if values: self.values = values class C(Base): __tablename__ = "c" b_id = Column(Integer, ForeignKey("b.id"), nullable=False) value = Column(Integer) def __init__(self, value): self.value = value if __name__ == '__main__': engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = Session(engine) # only "A" is referenced explicitly. Using "collections", # we deal with a dict of key/sets of integers directly. session.add_all([ A(collections={ "1": set([1, 2, 3]), }) ]) session.commit() a1 = session.query(A).first() print(a1.collections["1"]) a1.collections["1"].add(4) session.commit() a1.collections["2"].update([7, 8, 9]) session.commit() print(a1.collections["2"]) SQLAlchemy-1.0.11/examples/association/proxied_association.py0000664000175000017500000000637412636375552025424 0ustar classicclassic00000000000000"""proxied_association.py same example as basic_association, adding in usage of :mod:`sqlalchemy.ext.associationproxy` to make explicit references to ``OrderItem`` optional. 
""" from datetime import datetime from sqlalchemy import (create_engine, MetaData, Table, Column, Integer, String, DateTime, Float, ForeignKey, and_) from sqlalchemy.orm import mapper, relationship, Session from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.associationproxy import association_proxy Base = declarative_base() class Order(Base): __tablename__ = 'order' order_id = Column(Integer, primary_key=True) customer_name = Column(String(30), nullable=False) order_date = Column(DateTime, nullable=False, default=datetime.now()) order_items = relationship("OrderItem", cascade="all, delete-orphan", backref='order') items = association_proxy("order_items", "item") def __init__(self, customer_name): self.customer_name = customer_name class Item(Base): __tablename__ = 'item' item_id = Column(Integer, primary_key=True) description = Column(String(30), nullable=False) price = Column(Float, nullable=False) def __init__(self, description, price): self.description = description self.price = price def __repr__(self): return 'Item(%r, %r)' % ( self.description, self.price ) class OrderItem(Base): __tablename__ = 'orderitem' order_id = Column(Integer, ForeignKey('order.order_id'), primary_key=True) item_id = Column(Integer, ForeignKey('item.item_id'), primary_key=True) price = Column(Float, nullable=False) def __init__(self, item, price=None): self.item = item self.price = price or item.price item = relationship(Item, lazy='joined') if __name__ == '__main__': engine = create_engine('sqlite://') Base.metadata.create_all(engine) session = Session(engine) # create catalog tshirt, mug, hat, crowbar = ( Item('SA T-Shirt', 10.99), Item('SA Mug', 6.50), Item('SA Hat', 8.99), Item('MySQL Crowbar', 16.99) ) session.add_all([tshirt, mug, hat, crowbar]) session.commit() # create an order order = Order('john smith') # add items via the association proxy. # the OrderItem is created automatically. order.items.append(mug) order.items.append(hat) # add an OrderItem explicitly. order.order_items.append(OrderItem(crowbar, 10.99)) session.add(order) session.commit() # query the order, print items order = session.query(Order).filter_by(customer_name='john smith').one() # print items based on the OrderItem collection directly print([(assoc.item.description, assoc.price, assoc.item.price) for assoc in order.order_items]) # print items based on the "proxied" items collection print([(item.description, item.price) for item in order.items]) # print customers who bought 'MySQL Crowbar' on sale orders = session.query(Order).\ join('order_items', 'item').\ filter(Item.description == 'MySQL Crowbar').\ filter(Item.price > OrderItem.price) print([o.customer_name for o in orders]) SQLAlchemy-1.0.11/examples/association/basic_association.py0000664000175000017500000000624112636375552025024 0ustar classicclassic00000000000000"""basic_association.py illustrate a many-to-many relationship between an "Order" and a collection of "Item" objects, associating a purchase price with each via an association object called "OrderItem" The association object pattern is a form of many-to-many which associates additional data with each association between parent/child. The example illustrates an "order", referencing a collection of "items", with a particular price paid associated with each "item". 
""" from datetime import datetime from sqlalchemy import (create_engine, MetaData, Table, Column, Integer, String, DateTime, Float, ForeignKey, and_) from sqlalchemy.orm import mapper, relationship, Session from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Order(Base): __tablename__ = 'order' order_id = Column(Integer, primary_key=True) customer_name = Column(String(30), nullable=False) order_date = Column(DateTime, nullable=False, default=datetime.now()) order_items = relationship("OrderItem", cascade="all, delete-orphan", backref='order') def __init__(self, customer_name): self.customer_name = customer_name class Item(Base): __tablename__ = 'item' item_id = Column(Integer, primary_key=True) description = Column(String(30), nullable=False) price = Column(Float, nullable=False) def __init__(self, description, price): self.description = description self.price = price def __repr__(self): return 'Item(%r, %r)' % ( self.description, self.price ) class OrderItem(Base): __tablename__ = 'orderitem' order_id = Column(Integer, ForeignKey('order.order_id'), primary_key=True) item_id = Column(Integer, ForeignKey('item.item_id'), primary_key=True) price = Column(Float, nullable=False) def __init__(self, item, price=None): self.item = item self.price = price or item.price item = relationship(Item, lazy='joined') if __name__ == '__main__': engine = create_engine('sqlite://') Base.metadata.create_all(engine) session = Session(engine) # create catalog tshirt, mug, hat, crowbar = ( Item('SA T-Shirt', 10.99), Item('SA Mug', 6.50), Item('SA Hat', 8.99), Item('MySQL Crowbar', 16.99) ) session.add_all([tshirt, mug, hat, crowbar]) session.commit() # create an order order = Order('john smith') # add three OrderItem associations to the Order and save order.order_items.append(OrderItem(mug)) order.order_items.append(OrderItem(crowbar, 10.99)) order.order_items.append(OrderItem(hat)) session.add(order) session.commit() # query the order, print items order = session.query(Order).filter_by(customer_name='john smith').one() print([(order_item.item.description, order_item.price) for order_item in order.order_items]) # print customers who bought 'MySQL Crowbar' on sale q = session.query(Order).join('order_items', 'item') q = q.filter(and_(Item.description == 'MySQL Crowbar', Item.price > OrderItem.price)) print([order.customer_name for order in q]) SQLAlchemy-1.0.11/examples/adjacency_list/0000775000175000017500000000000012636376632021432 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/adjacency_list/adjacency_list.py0000664000175000017500000000672212636375552024767 0ustar classicclassic00000000000000from sqlalchemy import Column, ForeignKey, Integer, String, create_engine from sqlalchemy.orm import Session, relationship, backref,\ joinedload_all from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.collections import attribute_mapped_collection Base = declarative_base() class TreeNode(Base): __tablename__ = 'tree' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey(id)) name = Column(String(50), nullable=False) children = relationship("TreeNode", # cascade deletions cascade="all, delete-orphan", # many to one + adjacency list - remote_side # is required to reference the 'remote' # column in the join condition. backref=backref("parent", remote_side=id), # children will be represented as a dictionary # on the "name" attribute. 
collection_class=attribute_mapped_collection('name'), ) def __init__(self, name, parent=None): self.name = name self.parent = parent def __repr__(self): return "TreeNode(name=%r, id=%r, parent_id=%r)" % ( self.name, self.id, self.parent_id ) def dump(self, _indent=0): return " " * _indent + repr(self) + \ "\n" + \ "".join([ c.dump(_indent + 1) for c in self.children.values()] ) if __name__ == '__main__': engine = create_engine('sqlite://', echo=True) def msg(msg, *args): msg = msg % args print("\n\n\n" + "-" * len(msg.split("\n")[0])) print(msg) print("-" * len(msg.split("\n")[0])) msg("Creating Tree Table:") Base.metadata.create_all(engine) session = Session(engine) node = TreeNode('rootnode') TreeNode('node1', parent=node) TreeNode('node3', parent=node) node2 = TreeNode('node2') TreeNode('subnode1', parent=node2) node.children['node2'] = node2 TreeNode('subnode2', parent=node.children['node2']) msg("Created new tree structure:\n%s", node.dump()) msg("flush + commit:") session.add(node) session.commit() msg("Tree After Save:\n %s", node.dump()) TreeNode('node4', parent=node) TreeNode('subnode3', parent=node.children['node4']) TreeNode('subnode4', parent=node.children['node4']) TreeNode('subsubnode1', parent=node.children['node4'].children['subnode3']) # remove node1 from the parent, which will trigger a delete # via the delete-orphan cascade. del node.children['node1'] msg("Removed node1. flush + commit:") session.commit() msg("Tree after save:\n %s", node.dump()) msg("Emptying out the session entirely, " "selecting tree on root, using eager loading to join four levels deep.") session.expunge_all() node = session.query(TreeNode).\ options(joinedload_all("children", "children", "children", "children")).\ filter(TreeNode.name == "rootnode").\ first() msg("Full Tree:\n%s", node.dump()) msg("Marking root node as deleted, flush + commit:") session.delete(node) session.commit() SQLAlchemy-1.0.11/examples/adjacency_list/__init__.py0000664000175000017500000000042112636375552023540 0ustar classicclassic00000000000000""" An example of a dictionary-of-dictionaries structure mapped using an adjacency list model. E.g.:: node = TreeNode('rootnode') node.append('node1') node.append('node3') session.add(node) session.commit() dump_tree(node) .. autosource:: """ SQLAlchemy-1.0.11/examples/dogpile_caching/0000775000175000017500000000000012636376632021555 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/dogpile_caching/model.py0000664000175000017500000000572212636375552023235 0ustar classicclassic00000000000000"""model.py The datamodel, which represents Person that has multiple Address objects, each with PostalCode, City, Country. 
Person --(1..n)--> Address Address --(has a)--> PostalCode PostalCode --(has a)--> City City --(has a)--> Country """ from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship from .caching_query import FromCache, RelationshipCache from .environment import Base, bootstrap class Country(Base): __tablename__ = 'country' id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) def __init__(self, name): self.name = name class City(Base): __tablename__ = 'city' id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) country_id = Column(Integer, ForeignKey('country.id'), nullable=False) country = relationship(Country) def __init__(self, name, country): self.name = name self.country = country class PostalCode(Base): __tablename__ = 'postal_code' id = Column(Integer, primary_key=True) code = Column(String(10), nullable=False) city_id = Column(Integer, ForeignKey('city.id'), nullable=False) city = relationship(City) @property def country(self): return self.city.country def __init__(self, code, city): self.code = code self.city = city class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) person_id = Column(Integer, ForeignKey('person.id'), nullable=False) street = Column(String(200), nullable=False) postal_code_id = Column(Integer, ForeignKey('postal_code.id')) postal_code = relationship(PostalCode) @property def city(self): return self.postal_code.city @property def country(self): return self.postal_code.country def __str__(self): return "%s\t"\ "%s, %s\t"\ "%s" % (self.street, self.city.name, self.postal_code.code, self.country.name) class Person(Base): __tablename__ = 'person' id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) addresses = relationship(Address, collection_class=set) def __init__(self, name, *addresses): self.name = name self.addresses = set(addresses) def __str__(self): return self.name def __repr__(self): return "Person(name=%r)" % self.name def format_full(self): return "\t".join([str(x) for x in [self] + list(self.addresses)]) # Caching options. A set of three RelationshipCache options # which can be applied to Query(), causing the "lazy load" # of these attributes to be loaded from cache. cache_address_bits = RelationshipCache(PostalCode.city, "default").\ and_( RelationshipCache(City.country, "default") ).and_( RelationshipCache(Address.postal_code, "default") ) bootstrap()SQLAlchemy-1.0.11/examples/dogpile_caching/advanced.py0000664000175000017500000000560612636375552023703 0ustar classicclassic00000000000000"""advanced.py Illustrate usage of Query combined with the FromCache option, including front-end loading, cache invalidation and collection caching. """ from .environment import Session from .model import Person, cache_address_bits from .caching_query import FromCache, RelationshipCache def load_name_range(start, end, invalidate=False): """Load Person objects on a range of names. start/end are integers, range is then "person " - "person ". The cache option we set up is called "name_range", indicating a range of names for the Person class. The `Person.addresses` collections are also cached. Its basically another level of tuning here, as that particular cache option can be transparently replaced with joinedload(Person.addresses). 
    The effect is that each Person and their Address collection
    is cached either together or separately, affecting the kind of
    SQL emitted for unloaded Person objects, as well as the
    distribution of data within the cache.

    """

    q = Session.query(Person).\
        filter(Person.name.between("person %.2d" % start,
                                   "person %.2d" % end)).\
        options(cache_address_bits).\
        options(FromCache("default", "name_range"))

    # have the "addresses" collection cached separately
    # each lazyload of Person.addresses loads from cache.
    q = q.options(RelationshipCache(Person.addresses, "default"))

    # alternatively, eagerly load the "addresses" collection, so that they'd
    # be cached together.   This issues a bigger SQL statement and caches
    # a single, larger value in the cache per person rather than two
    # separate ones.  (requires "from sqlalchemy.orm import joinedload")
    # q = q.options(joinedload(Person.addresses))

    # if requested, invalidate the cache on current criterion.
    if invalidate:
        q.invalidate()

    return q.all()


print("two through twelve, possibly from cache:\n")
print(", ".join([p.name for p in load_name_range(2, 12)]))

print("\ntwenty five through forty, possibly from cache:\n")
print(", ".join([p.name for p in load_name_range(25, 40)]))

# loading them again, no SQL is emitted
print("\ntwo through twelve, from the cache:\n")
print(", ".join([p.name for p in load_name_range(2, 12)]))

# but with invalidate, they are
print("\ntwenty five through forty, invalidate first:\n")
print(", ".join([p.name for p in load_name_range(25, 40, True)]))

# illustrate the address loading from either cache/already
# on the Person
print("\n\nPeople plus addresses, two through twelve, "
      "addresses possibly from cache")
for p in load_name_range(2, 12):
    print(p.format_full())

# illustrate the address loading from either cache/already
# on the Person
print("\n\nPeople plus addresses, two through twelve, addresses from cache")
for p in load_name_range(2, 12):
    print(p.format_full())

print("\n\nIf this was the first run of advanced.py, try "
      "a second run.  Only one SQL statement will be emitted.")
SQLAlchemy-1.0.11/examples/dogpile_caching/fixture_data.py0000664000175000017500000000321512636375552024607 0ustar classicclassic00000000000000"""fixture_data.py

Installs some sample data.   Here we have a handful of postal codes for
a few US/Canadian cities.   Then, 50 Person records are installed, each
with a randomly selected postal code.
""" from .environment import Session, Base from .model import City, Country, PostalCode, Person, Address import random def install(): Base.metadata.create_all(Session().bind) data = [ ('Chicago', 'United States', ('60601', '60602', '60603', '60604')), ('Montreal', 'Canada', ('H2S 3K9', 'H2B 1V4', 'H7G 2T8')), ('Edmonton', 'Canada', ('T5J 1R9', 'T5J 1Z4', 'T5H 1P6')), ('New York', 'United States', ('10001', '10002', '10003', '10004', '10005', '10006')), ('San Francisco', 'United States', ('94102', '94103', '94104', '94105', '94107', '94108')) ] countries = {} all_post_codes = [] for city, country, postcodes in data: try: country = countries[country] except KeyError: countries[country] = country = Country(country) city = City(city, country) pc = [PostalCode(code, city) for code in postcodes] Session.add_all(pc) all_post_codes.extend(pc) for i in range(1, 51): person = Person( "person %.2d" % i, Address( street="street %.2d" % i, postal_code=all_post_codes[ random.randint(0, len(all_post_codes) - 1)] ) ) Session.add(person) Session.commit() # start the demo fresh Session.remove()SQLAlchemy-1.0.11/examples/dogpile_caching/relationship_caching.py0000664000175000017500000000202612636375552026304 0ustar classicclassic00000000000000"""relationship_caching.py Illustrates how to add cache options on relationship endpoints, so that lazyloads load from cache. Load a set of Person and Address objects, specifying that related PostalCode, City, Country objects should be pulled from long term cache. """ from .environment import Session, root from .model import Person, cache_address_bits from sqlalchemy.orm import joinedload import os for p in Session.query(Person).options(joinedload(Person.addresses), cache_address_bits): print(p.format_full()) print("\n\nIf this was the first run of relationship_caching.py, SQL was likely emitted to "\ "load postal codes, cities, countries.\n"\ "If run a second time, assuming the cache is still valid, "\ "only a single SQL statement will run - all "\ "related data is pulled from cache.\n"\ "To clear the cache, delete the file %r. \n"\ "This will cause a re-load of cities, postal codes and countries on "\ "the next run.\n"\ % os.path.join(root, 'cache.dbm')) SQLAlchemy-1.0.11/examples/dogpile_caching/__init__.py0000664000175000017500000000410412636375552023665 0ustar classicclassic00000000000000""" Illustrates how to embed `dogpile.cache `_ functionality within the :class:`.Query` object, allowing full cache control as well as the ability to pull "lazy loaded" attributes from long term cache as well. .. versionchanged:: 0.8 The example was modernized to use dogpile.cache, replacing Beaker as the caching library in use. In this demo, the following techniques are illustrated: * Using custom subclasses of :class:`.Query` * Basic technique of circumventing Query to pull from a custom cache source instead of the database. * Rudimental caching with dogpile.cache, using "regions" which allow global control over a fixed set of configurations. * Using custom :class:`.MapperOption` objects to configure options on a Query, including the ability to invoke the options deep within an object graph when lazy loads occur. E.g.:: # query for Person objects, specifying cache q = Session.query(Person).options(FromCache("default")) # specify that each Person's "addresses" collection comes from # cache too q = q.options(RelationshipCache(Person.addresses, "default")) # query print q.all() To run, both SQLAlchemy and dogpile.cache must be installed or on the current PYTHONPATH. 
The demo will create a local directory for datafiles, insert initial data, and run. Running the demo a second time will utilize the cache files already present, and exactly one SQL statement against two tables will be emitted - the displayed result however will utilize dozens of lazyloads that all pull from cache. The demo scripts themselves, in order of complexity, are run as Python modules so that relative imports work:: python -m examples.dogpile_caching.helloworld python -m examples.dogpile_caching.relationship_caching python -m examples.dogpile_caching.advanced python -m examples.dogpile_caching.local_session_caching .. autosource:: :files: environment.py, caching_query.py, model.py, fixture_data.py, \ helloworld.py, relationship_caching.py, advanced.py, \ local_session_caching.py """ SQLAlchemy-1.0.11/examples/dogpile_caching/helloworld.py0000664000175000017500000000471412636375552024310 0ustar classicclassic00000000000000"""helloworld.py Illustrate how to load some data, and cache the results. """ from .environment import Session from .model import Person from .caching_query import FromCache # load Person objects. cache the result in the "default" cache region print("loading people....") people = Session.query(Person).options(FromCache("default")).all() # remove the Session. next query starts from scratch. Session.remove() # load again, using the same FromCache option. now they're cached, # so no SQL is emitted. print("loading people....again!") people = Session.query(Person).options(FromCache("default")).all() # Specifying a different query produces a different cache key, so # these results are independently cached. print("loading people two through twelve") people_two_through_twelve = Session.query(Person).\ options(FromCache("default")).\ filter(Person.name.between("person 02", "person 12")).\ all() # the data is cached under string structure of the SQL statement, *plus* # the bind parameters of the query. So this query, having # different literal parameters under "Person.name.between()" than the # previous one, issues new SQL... print("loading people five through fifteen") people_five_through_fifteen = Session.query(Person).\ options(FromCache("default")).\ filter(Person.name.between("person 05", "person 15")).\ all() # ... but using the same params as are already cached, no SQL print("loading people two through twelve...again!") people_two_through_twelve = Session.query(Person).\ options(FromCache("default")).\ filter(Person.name.between("person 02", "person 12")).\ all() # invalidate the cache for the three queries we've done. Recreate # each Query, which includes at the very least the same FromCache, # same list of objects to be loaded, and the same parameters in the # same order, then call invalidate(). print("invalidating everything") Session.query(Person).options(FromCache("default")).invalidate() Session.query(Person).\ options(FromCache("default")).\ filter(Person.name.between("person 02", "person 12")).invalidate() Session.query(Person).\ options(FromCache("default", "people_on_range")).\ filter(Person.name.between("person 05", "person 15")).invalidate() SQLAlchemy-1.0.11/examples/dogpile_caching/caching_query.py0000664000175000017500000002053012636375552024750 0ustar classicclassic00000000000000"""caching_query.py Represent functions and classes which allow the usage of Dogpile caching with SQLAlchemy. Introduces a query option called FromCache. 
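
For example, a minimal usage sketch (this assumes a dogpile region named
"default" has been configured, as in environment.py, and the Session /
Person objects from this example package)::

    q = Session.query(Person).options(FromCache("default"))
    people = q.all()    # cached after the first call
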
The four new concepts introduced here are:

 * CachingQuery - a Query subclass that caches and
   retrieves results in/from dogpile.cache.
 * FromCache - a query option that establishes caching
   parameters on a Query
 * RelationshipCache - a variant of FromCache which is specific
   to a query invoked during a lazy load.
 * _key_from_query - generates a cache key from a Query's
   statement and bound parameters.

The rest of what's here is standard SQLAlchemy and
dogpile.cache constructs.

"""
from sqlalchemy.orm.interfaces import MapperOption
from sqlalchemy.orm.query import Query
from sqlalchemy.sql import visitors
from dogpile.cache.api import NO_VALUE


class CachingQuery(Query):
    """A Query subclass which optionally loads full results from a dogpile
    cache region.

    The CachingQuery optionally stores additional state that allows it to
    consult a dogpile.cache cache before accessing the database, in the form
    of a FromCache or RelationshipCache object.   Each of these objects
    refers to the name of a :class:`dogpile.cache.Region` that's been
    configured and stored in a lookup dictionary.  When such an object has
    associated itself with the CachingQuery, the corresponding
    :class:`dogpile.cache.Region` is used to locate a cached result.  If
    none is present, then the Query is invoked normally, the results
    being cached.

    The FromCache and RelationshipCache mapper options below represent
    the "public" method of configuring this state upon the CachingQuery.

    """

    def __init__(self, regions, *args, **kw):
        self.cache_regions = regions
        Query.__init__(self, *args, **kw)

    def __iter__(self):
        """override __iter__ to pull results from dogpile
        if particular attributes have been configured.

        Note that this approach does *not* detach the loaded objects from
        the current session. If the cache backend is an in-process cache
        (like "memory") and lives beyond the scope of the current session's
        transaction, those objects may be expired. The method here can be
        modified to first expunge() each loaded item from the current
        session before returning the list of items, so that the items
        in the cache are not the same ones in the current Session.

        """
        if hasattr(self, '_cache_region'):
            return self.get_value(
                createfunc=lambda: list(Query.__iter__(self)))
        else:
            return Query.__iter__(self)

    def _get_cache_plus_key(self):
        """Return a cache region plus key."""

        dogpile_region = self.cache_regions[self._cache_region.region]
        if self._cache_region.cache_key:
            key = self._cache_region.cache_key
        else:
            key = _key_from_query(self)
        return dogpile_region, key

    def invalidate(self):
        """Invalidate the cache value represented by this Query."""

        dogpile_region, cache_key = self._get_cache_plus_key()
        dogpile_region.delete(cache_key)

    def get_value(self, merge=True, createfunc=None,
                  expiration_time=None, ignore_expiration=False):
        """Return the value from the cache for this query.

        Raise KeyError if no value present and no
        createfunc specified.

        """
        dogpile_region, cache_key = self._get_cache_plus_key()

        # ignore_expiration means, if the value is in the cache
        # but is expired, return it anyway.   This doesn't make sense
        # with createfunc, which says, if the value is expired, generate
        # a new value.
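        # (dogpile's Region.get() returns NO_VALUE both for a missing key
        # and - unless ignore_expiration is passed - for an expired one,
        # while get_or_create() regenerates expired values via createfunc.)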
assert not ignore_expiration or not createfunc, \ "Can't ignore expiration and also provide createfunc" if ignore_expiration or not createfunc: cached_value = dogpile_region.get(cache_key, expiration_time=expiration_time, ignore_expiration=ignore_expiration) else: cached_value = dogpile_region.get_or_create( cache_key, createfunc, expiration_time=expiration_time ) if cached_value is NO_VALUE: raise KeyError(cache_key) if merge: cached_value = self.merge_result(cached_value, load=False) return cached_value def set_value(self, value): """Set the value in the cache for this query.""" dogpile_region, cache_key = self._get_cache_plus_key() dogpile_region.set(cache_key, value) def query_callable(regions, query_cls=CachingQuery): def query(*arg, **kw): return query_cls(regions, *arg, **kw) return query def _key_from_query(query, qualifier=None): """Given a Query, create a cache key. There are many approaches to this; here we use the simplest, which is to create an md5 hash of the text of the SQL statement, combined with stringified versions of all the bound parameters within it. There's a bit of a performance hit with compiling out "query.statement" here; other approaches include setting up an explicit cache key with a particular Query, then combining that with the bound parameter values. """ stmt = query.with_labels().statement compiled = stmt.compile() params = compiled.params # here we return the key as a long string. our "key mangler" # set up with the region will boil it down to an md5. return " ".join( [str(compiled)] + [str(params[k]) for k in sorted(params)]) class FromCache(MapperOption): """Specifies that a Query should load results from a cache.""" propagate_to_loaders = False def __init__(self, region="default", cache_key=None): """Construct a new FromCache. :param region: the cache region. Should be a region configured in the dictionary of dogpile regions. :param cache_key: optional. A string cache key that will serve as the key to the query. Use this if your query has a huge amount of parameters (such as when using in_()) which correspond more simply to some other identifier. """ self.region = region self.cache_key = cache_key def process_query(self, query): """Process a Query during normal loading operation.""" query._cache_region = self class RelationshipCache(MapperOption): """Specifies that a Query as called within a "lazy load" should load results from a cache.""" propagate_to_loaders = True def __init__(self, attribute, region="default", cache_key=None): """Construct a new RelationshipCache. :param attribute: A Class.attribute which indicates a particular class relationship() whose lazy loader should be pulled from the cache. :param region: name of the cache region. :param cache_key: optional. A string cache key that will serve as the key to the query, bypassing the usual means of forming a key from the Query itself. """ self.region = region self.cache_key = cache_key self._relationship_options = { (attribute.property.parent.class_, attribute.property.key): self } def process_query_conditionally(self, query): """Process a Query that is used within a lazy loader. (the process_query_conditionally() method is a SQLAlchemy hook invoked only within lazyload.) 
""" if query._current_path: mapper, prop = query._current_path[-2:] key = prop.key for cls in mapper.class_.__mro__: if (cls, key) in self._relationship_options: relationship_option = self._relationship_options[(cls, key)] query._cache_region = relationship_option break def and_(self, option): """Chain another RelationshipCache option to this one. While many RelationshipCache objects can be specified on a single Query separately, chaining them together allows for a more efficient lookup during load. """ self._relationship_options.update(option._relationship_options) return self SQLAlchemy-1.0.11/examples/dogpile_caching/local_session_caching.py0000664000175000017500000000645412636375552026451 0ustar classicclassic00000000000000"""local_session_caching.py Grok everything so far ? This example creates a new dogpile.cache backend that will persist data in a dictionary which is local to the current session. remove() the session and the cache is gone. Create a new Dogpile cache backend that will store cached data local to the current Session. This is an advanced example which assumes familiarity with the basic operation of CachingQuery. """ from dogpile.cache.api import CacheBackend, NO_VALUE from dogpile.cache.region import register_backend class ScopedSessionBackend(CacheBackend): """A dogpile backend which will cache objects locally on the current session. When used with the query_cache system, the effect is that the objects in the cache are the same as that within the session - the merge() is a formality that doesn't actually create a second instance. This makes it safe to use for updates of data from an identity perspective (still not ideal for deletes though). When the session is removed, the cache is gone too, so the cache is automatically disposed upon session.remove(). """ def __init__(self, arguments): self.scoped_session = arguments['scoped_session'] def get(self, key): return self._cache_dictionary.get(key, NO_VALUE) def set(self, key, value): self._cache_dictionary[key] = value def delete(self, key): self._cache_dictionary.pop(key, None) @property def _cache_dictionary(self): """Return the cache dictionary linked to the current Session.""" sess = self.scoped_session() try: cache_dict = sess._cache_dictionary except AttributeError: sess._cache_dictionary = cache_dict = {} return cache_dict register_backend("sqlalchemy.session", __name__, "ScopedSessionBackend") if __name__ == '__main__': from .environment import Session, regions from .caching_query import FromCache from dogpile.cache import make_region # set up a region based on the ScopedSessionBackend, # pointing to the scoped_session declared in the example # environment. regions['local_session'] = make_region().configure( 'sqlalchemy.session', arguments={ "scoped_session": Session } ) from .model import Person # query to load Person by name, with criterion # of "person 10" q = Session.query(Person).\ options(FromCache("local_session")).\ filter(Person.name == "person 10") # load from DB person10 = q.one() # next call, the query is cached. person10 = q.one() # clear out the Session. The "_cache_dictionary" dictionary # disappears with it. Session.remove() # query calls from DB again person10 = q.one() # identity is preserved - person10 is the *same* object that's # ultimately inside the cache. 
So it is safe to manipulate # the not-queried-for attributes of objects when using such a # cache without the need to invalidate - however, any change # that would change the results of a cached query, such as # inserts, deletes, or modification to attributes that are # part of query criterion, still require careful invalidation. cache, key = q._get_cache_plus_key() assert person10 is cache.get(key)[0] SQLAlchemy-1.0.11/examples/dogpile_caching/environment.py0000664000175000017500000000452612636375552024502 0ustar classicclassic00000000000000"""environment.py Establish data / cache file paths, and configurations, bootstrap fixture data if necessary. """ from . import caching_query from sqlalchemy import create_engine from sqlalchemy.orm import scoped_session, sessionmaker from sqlalchemy.ext.declarative import declarative_base from dogpile.cache.region import make_region import os from hashlib import md5 import sys py2k = sys.version_info < (3, 0) if py2k: input = raw_input # dogpile cache regions. A home base for cache configurations. regions = {} # scoped_session. Apply our custom CachingQuery class to it, # using a callable that will associate the dictionary # of regions with the Query. Session = scoped_session( sessionmaker( query_cls=caching_query.query_callable(regions) ) ) # global declarative base class. Base = declarative_base() root = "./dogpile_data/" if not os.path.exists(root): input("Will create datafiles in %r.\n" "To reset the cache + database, delete this directory.\n" "Press enter to continue.\n" % root ) os.makedirs(root) dbfile = os.path.join(root, "dogpile_demo.db") engine = create_engine('sqlite:///%s' % dbfile, echo=True) Session.configure(bind=engine) def md5_key_mangler(key): """Receive cache keys as long concatenated strings; distill them into an md5 hash. """ return md5(key.encode('ascii')).hexdigest() # configure the "default" cache region. regions['default'] = make_region( # the "dbm" backend needs # string-encoded keys key_mangler=md5_key_mangler ).configure( # using type 'file' to illustrate # serialized persistence. Normally # memcached or similar is a better choice # for caching. 'dogpile.cache.dbm', expiration_time=3600, arguments={ "filename": os.path.join(root, "cache.dbm") } ) # optional; call invalidate() on the region # once created so that all data is fresh when # the app is restarted. Good for development, # on a production system needs to be used carefully # regions['default'].invalidate() installed = False def bootstrap(): global installed from . 
import fixture_data if not os.path.exists(dbfile): fixture_data.install() installed = TrueSQLAlchemy-1.0.11/examples/inheritance/0000775000175000017500000000000012636376632020747 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/inheritance/single.py0000664000175000017500000000634212636375552022607 0ustar classicclassic00000000000000"""Single-table inheritance example.""" from sqlalchemy import MetaData, Table, Column, Integer, String, \ ForeignKey, create_engine from sqlalchemy.orm import mapper, relationship, sessionmaker metadata = MetaData() # a table to store companies companies = Table('companies', metadata, Column('company_id', Integer, primary_key=True), Column('name', String(50))) employees_table = Table('employees', metadata, Column('employee_id', Integer, primary_key=True), Column('company_id', Integer, ForeignKey('companies.company_id')), Column('name', String(50)), Column('type', String(20)), Column('status', String(20)), Column('engineer_name', String(50)), Column('primary_language', String(50)), Column('manager_name', String(50)) ) class Person(object): def __init__(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) def __repr__(self): return "Ordinary person %s" % self.name class Engineer(Person): def __repr__(self): return "Engineer %s, status %s, engineer_name %s, "\ "primary_language %s" % \ (self.name, self.status, self.engineer_name, self.primary_language) class Manager(Person): def __repr__(self): return "Manager %s, status %s, manager_name %s" % \ (self.name, self.status, self.manager_name) class Company(object): def __init__(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) def __repr__(self): return "Company %s" % self.name person_mapper = mapper(Person, employees_table, polymorphic_on=employees_table.c.type, polymorphic_identity='person') manager_mapper = mapper(Manager, inherits=person_mapper, polymorphic_identity='manager') engineer_mapper = mapper(Engineer, inherits=person_mapper, polymorphic_identity='engineer') mapper(Company, companies, properties={ 'employees': relationship(Person, lazy=True, backref='company') }) engine = create_engine('sqlite:///', echo=True) metadata.create_all(engine) session = sessionmaker(engine)() c = Company(name='company1') c.employees.append(Manager(name='pointy haired boss', status='AAB', manager_name='manager1')) c.employees.append(Engineer(name='dilbert', status='BBA', engineer_name='engineer1', primary_language='java')) c.employees.append(Person(name='joesmith', status='HHH')) c.employees.append(Engineer(name='wally', status='CGG', engineer_name='engineer2', primary_language='python' )) c.employees.append(Manager(name='jsmith', status='ABA', manager_name='manager2')) session.add(c) session.commit() c = session.query(Company).get(1) for e in c.employees: print(e, e.company) print("\n") dilbert = session.query(Person).filter_by(name='dilbert').one() dilbert2 = session.query(Engineer).filter_by(name='dilbert').one() assert dilbert is dilbert2 dilbert.engineer_name = 'hes dibert!' 
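# the attribute change above is pending in the session; flush() emits the
# UPDATE against the single "employees" table, and expunge_all() empties
# the identity map so the objects below are loaded fresh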
session.flush() session.expunge_all() c = session.query(Company).get(1) for e in c.employees: print(e) session.delete(c) session.commit() SQLAlchemy-1.0.11/examples/inheritance/concrete.py0000664000175000017500000000414212636375552023124 0ustar classicclassic00000000000000"""Concrete (table-per-class) inheritance example.""" from sqlalchemy import create_engine, MetaData, Table, Column, Integer, \ String from sqlalchemy.orm import mapper, sessionmaker, polymorphic_union metadata = MetaData() managers_table = Table('managers', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), Column('manager_data', String(40)) ) engineers_table = Table('engineers', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), Column('engineer_info', String(40)) ) engine = create_engine('sqlite:///', echo=True) metadata.create_all(engine) class Employee(object): def __init__(self, name): self.name = name def __repr__(self): return self.__class__.__name__ + " " + self.name class Manager(Employee): def __init__(self, name, manager_data): self.name = name self.manager_data = manager_data def __repr__(self): return self.__class__.__name__ + " " + \ self.name + " " + self.manager_data class Engineer(Employee): def __init__(self, name, engineer_info): self.name = name self.engineer_info = engineer_info def __repr__(self): return self.__class__.__name__ + " " + \ self.name + " " + self.engineer_info pjoin = polymorphic_union({ 'manager':managers_table, 'engineer':engineers_table }, 'type', 'pjoin') employee_mapper = mapper(Employee, pjoin, polymorphic_on=pjoin.c.type) manager_mapper = mapper(Manager, managers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='manager') engineer_mapper = mapper(Engineer, engineers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='engineer') session = sessionmaker(engine)() m1 = Manager("pointy haired boss", "manager1") e1 = Engineer("wally", "engineer1") e2 = Engineer("dilbert", "engineer2") session.add(m1) session.add(e1) session.add(e2) session.commit() print(session.query(Employee).all()) SQLAlchemy-1.0.11/examples/inheritance/__init__.py0000664000175000017500000000022712636375552023061 0ustar classicclassic00000000000000"""Working examples of single-table, joined-table, and concrete-table inheritance as described in :ref:`datamapping_inheritance`. .. 
autosource:: """SQLAlchemy-1.0.11/examples/inheritance/joined.py0000664000175000017500000000755712636375552022607 0ustar classicclassic00000000000000"""Joined-table (table-per-subclass) inheritance example.""" from sqlalchemy import Table, Column, Integer, String, \ ForeignKey, create_engine, inspect, or_ from sqlalchemy.orm import relationship, Session, with_polymorphic from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Company(Base): __tablename__ = 'company' id = Column(Integer, primary_key=True) name = Column(String(50)) employees = relationship("Person", backref='company', cascade='all, delete-orphan') def __repr__(self): return "Company %s" % self.name class Person(Base): __tablename__ = 'person' id = Column(Integer, primary_key=True) company_id = Column(Integer, ForeignKey('company.id')) name = Column(String(50)) type = Column(String(50)) __mapper_args__ = { 'polymorphic_identity':'person', 'polymorphic_on':type } def __repr__(self): return "Ordinary person %s" % self.name class Engineer(Person): __tablename__ = 'engineer' id = Column(Integer, ForeignKey('person.id'), primary_key=True) status = Column(String(30)) engineer_name = Column(String(30)) primary_language = Column(String(30)) __mapper_args__ = { 'polymorphic_identity':'engineer', } def __repr__(self): return "Engineer %s, status %s, engineer_name %s, "\ "primary_language %s" % \ (self.name, self.status, self.engineer_name, self.primary_language) class Manager(Person): __tablename__ = 'manager' id = Column(Integer, ForeignKey('person.id'), primary_key=True) status = Column(String(30)) manager_name = Column(String(30)) __mapper_args__ = { 'polymorphic_identity':'manager', } def __repr__(self): return "Manager %s, status %s, manager_name %s" % \ (self.name, self.status, self.manager_name) engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = Session(engine) c = Company(name='company1', employees=[ Manager( name='pointy haired boss', status='AAB', manager_name='manager1'), Engineer(name='dilbert', status='BBA', engineer_name='engineer1', primary_language='java'), Person(name='joesmith'), Engineer(name='wally', status='CGG', engineer_name='engineer2', primary_language='python'), Manager(name='jsmith', status='ABA', manager_name='manager2') ]) session.add(c) session.commit() c = session.query(Company).get(1) for e in c.employees: print(e, inspect(e).key, e.company) assert set([e.name for e in c.employees]) == set(['pointy haired boss', 'dilbert', 'joesmith', 'wally', 'jsmith']) print("\n") dilbert = session.query(Person).filter_by(name='dilbert').one() dilbert2 = session.query(Engineer).filter_by(name='dilbert').one() assert dilbert is dilbert2 dilbert.engineer_name = 'hes dilbert!' session.commit() c = session.query(Company).get(1) for e in c.employees: print(e) # query using with_polymorphic. eng_manager = with_polymorphic(Person, [Engineer, Manager], aliased=True) print(session.query(eng_manager).\ filter( or_(eng_manager.Engineer.engineer_name=='engineer1', eng_manager.Manager.manager_name=='manager2' ) ).all()) # illustrate join from Company, # We use aliased=True # to help when the selectable is used as the target of a join. 
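# (with aliased=True, with_polymorphic() wraps the person/engineer/manager
# join in a subquery under an automatic alias; the Engineer/Manager
# criteria below then refer to that alias rather than to the unaliased join)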
eng_manager = with_polymorphic(Person, [Engineer, Manager], aliased=True) print(session.query(Company).\ join( eng_manager, Company.employees ).filter( or_(eng_manager.Engineer.engineer_name=='engineer1', eng_manager.Manager.manager_name=='manager2') ).all()) session.commit() SQLAlchemy-1.0.11/examples/sharding/0000775000175000017500000000000012636376632020255 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/sharding/__init__.py0000664000175000017500000000273312636375552022373 0ustar classicclassic00000000000000"""A basic example of using the SQLAlchemy Sharding API. Sharding refers to horizontally scaling data across multiple databases. The basic components of a "sharded" mapping are: * multiple databases, each assigned a 'shard id' * a function which can return a single shard id, given an instance to be saved; this is called "shard_chooser" * a function which can return a list of shard ids which apply to a particular instance identifier; this is called "id_chooser". If it returns all shard ids, all shards will be searched. * a function which can return a list of shard ids to try, given a particular Query ("query_chooser"). If it returns all shard ids, all shards will be queried and the results joined together. In this example, four sqlite databases will store information about weather data on a database-per-continent basis. We provide example shard_chooser, id_chooser and query_chooser functions. The query_chooser illustrates inspection of the SQL expression element in order to attempt to determine a single shard being requested. The construction of generic sharding routines is an ambitious approach to the issue of organizing instances among multiple databases. For a more plain-spoken alternative, the "distinct entity" approach is a simple method of assigning objects to different tables (and potentially database nodes) in an explicit way - described on the wiki at `EntityName `_. .. autosource:: """ SQLAlchemy-1.0.11/examples/sharding/attribute_shard.py0000664000175000017500000002237312636375552024022 0ustar classicclassic00000000000000 # step 1. imports from sqlalchemy import (create_engine, MetaData, Table, Column, Integer, String, ForeignKey, Float, DateTime, event) from sqlalchemy.orm import sessionmaker, mapper, relationship from sqlalchemy.ext.horizontal_shard import ShardedSession from sqlalchemy.sql import operators, visitors import datetime # step 2. databases. # db1 is used for id generation. The "pool_threadlocal" # causes the id_generator() to use the same connection as that # of an ongoing transaction within db1. echo = True db1 = create_engine('sqlite://', echo=echo, pool_threadlocal=True) db2 = create_engine('sqlite://', echo=echo) db3 = create_engine('sqlite://', echo=echo) db4 = create_engine('sqlite://', echo=echo) # step 3. create session function. this binds the shard ids # to databases within a ShardedSession and returns it. create_session = sessionmaker(class_=ShardedSession) create_session.configure(shards={ 'north_america':db1, 'asia':db2, 'europe':db3, 'south_america':db4 }) # step 4. table setup. meta = MetaData() # we need a way to create identifiers which are unique across all # databases. one easy way would be to just use a composite primary key, where one # value is the shard id. but here, we'll show something more "generic", an # id generation function. we'll use a simplistic "id table" stored in database # #1. Any other method will do just as well; UUID, hilo, application-specific, etc. 
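# for instance, a hypothetical UUID-based generator (not used below; it
# would also require a String primary key column rather than Integer):
#
#     import uuid
#
#     def id_generator(ctx):
#         return uuid.uuid4().hex
#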
ids = Table('ids', meta, Column('nextid', Integer, nullable=False)) def id_generator(ctx): # in reality, might want to use a separate transaction for this. c = db1.connect() nextid = c.execute(ids.select(for_update=True)).scalar() c.execute(ids.update(values={ids.c.nextid : ids.c.nextid + 1})) return nextid # table setup. we'll store a lead table of continents/cities, # and a secondary table storing locations. # a particular row will be placed in the database whose shard id corresponds to the # 'continent'. in this setup, secondary rows in 'weather_reports' will # be placed in the same DB as that of the parent, but this can be changed # if you're willing to write more complex sharding functions. weather_locations = Table("weather_locations", meta, Column('id', Integer, primary_key=True, default=id_generator), Column('continent', String(30), nullable=False), Column('city', String(50), nullable=False) ) weather_reports = Table("weather_reports", meta, Column('id', Integer, primary_key=True), Column('location_id', Integer, ForeignKey('weather_locations.id')), Column('temperature', Float), Column('report_time', DateTime, default=datetime.datetime.now), ) # create tables for db in (db1, db2, db3, db4): meta.drop_all(db) meta.create_all(db) # establish initial "id" in db1 db1.execute(ids.insert(), nextid=1) # step 5. define sharding functions. # we'll use a straight mapping of a particular set of "country" # attributes to shard id. shard_lookup = { 'North America':'north_america', 'Asia':'asia', 'Europe':'europe', 'South America':'south_america' } def shard_chooser(mapper, instance, clause=None): """shard chooser. looks at the given instance and returns a shard id note that we need to define conditions for the WeatherLocation class, as well as our secondary Report class which will point back to its WeatherLocation via its 'location' attribute. """ if isinstance(instance, WeatherLocation): return shard_lookup[instance.continent] else: return shard_chooser(mapper, instance.location) def id_chooser(query, ident): """id chooser. given a primary key, returns a list of shards to search. here, we don't have any particular information from a pk so we just return all shard ids. often, you'd want to do some kind of round-robin strategy here so that requests are evenly distributed among DBs. """ return ['north_america', 'asia', 'europe', 'south_america'] def query_chooser(query): """query chooser. this also returns a list of shard ids, which can just be all of them. but here we'll search into the Query in order to try to narrow down the list of shards to query. """ ids = [] # we'll grab continent names as we find them # and convert to shard ids for column, operator, value in _get_query_comparisons(query): # "shares_lineage()" returns True if both columns refer to the same # statement column, adjusting for any annotations present. # (an annotation is an internal clone of a Column object # and occur when using ORM-mapped attributes like # "WeatherLocation.continent"). A simpler comparison, though less accurate, # would be "column.key == 'continent'". if column.shares_lineage(weather_locations.c.continent): if operator == operators.eq: ids.append(shard_lookup[value]) elif operator == operators.in_op: ids.extend(shard_lookup[v] for v in value) if len(ids) == 0: return ['north_america', 'asia', 'europe', 'south_america'] else: return ids def _get_query_comparisons(query): """Search an orm.Query object for binary expressions. 
Returns expressions which match a Column against one or more literal values as a list of tuples of the form (column, operator, values). "values" is a single value or tuple of values depending on the operator. """ binds = {} clauses = set() comparisons = [] def visit_bindparam(bind): # visit a bind parameter. # check in _params for it first if bind.key in query._params: value = query._params[bind.key] elif bind.callable: # some ORM functions (lazy loading) # place the bind's value as a # callable for deferred evaluation. value = bind.callable() else: # just use .value value = bind.value binds[bind] = value def visit_column(column): clauses.add(column) def visit_binary(binary): # special handling for "col IN (params)" if binary.left in clauses and \ binary.operator == operators.in_op and \ hasattr(binary.right, 'clauses'): comparisons.append( (binary.left, binary.operator, tuple(binds[bind] for bind in binary.right.clauses) ) ) elif binary.left in clauses and binary.right in binds: comparisons.append( (binary.left, binary.operator,binds[binary.right]) ) elif binary.left in binds and binary.right in clauses: comparisons.append( (binary.right, binary.operator,binds[binary.left]) ) # here we will traverse through the query's criterion, searching # for SQL constructs. We will place simple column comparisons # into a list. if query._criterion is not None: visitors.traverse_depthfirst(query._criterion, {}, {'bindparam':visit_bindparam, 'binary':visit_binary, 'column':visit_column } ) return comparisons # further configure create_session to use these functions create_session.configure( shard_chooser=shard_chooser, id_chooser=id_chooser, query_chooser=query_chooser ) # step 6. mapped classes. class WeatherLocation(object): def __init__(self, continent, city): self.continent = continent self.city = city class Report(object): def __init__(self, temperature): self.temperature = temperature # step 7. mappers mapper(WeatherLocation, weather_locations, properties={ 'reports':relationship(Report, backref='location') }) mapper(Report, weather_reports) # step 8 (optional), events. The "shard_id" is placed # in the QueryContext where it can be intercepted and associated # with objects, if needed. def add_shard_id(instance, ctx): instance.shard_id = ctx.attributes["shard_id"] event.listen(WeatherLocation, "load", add_shard_id) event.listen(Report, "load", add_shard_id) # save and load objects! 
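# each instance below is routed to a shard at flush time via
# shard_chooser(); get() lookups consult id_chooser(), and the filter()
# queries further down are narrowed by query_chooser() where possible.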
tokyo = WeatherLocation('Asia', 'Tokyo') newyork = WeatherLocation('North America', 'New York') toronto = WeatherLocation('North America', 'Toronto') london = WeatherLocation('Europe', 'London') dublin = WeatherLocation('Europe', 'Dublin') brasilia = WeatherLocation('South America', 'Brasila') quito = WeatherLocation('South America', 'Quito') tokyo.reports.append(Report(80.0)) newyork.reports.append(Report(75)) quito.reports.append(Report(85)) sess = create_session() for c in [tokyo, newyork, toronto, london, dublin, brasilia, quito]: sess.add(c) sess.commit() tokyo_id = tokyo.id sess.close() t = sess.query(WeatherLocation).get(tokyo_id) assert t.city == tokyo.city assert t.reports[0].temperature == 80.0 north_american_cities = sess.query(WeatherLocation).filter(WeatherLocation.continent == 'North America') assert [c.city for c in north_american_cities] == ['New York', 'Toronto'] asia_and_europe = sess.query(WeatherLocation).filter(WeatherLocation.continent.in_(['Europe', 'Asia'])) assert set([c.city for c in asia_and_europe]) == set(['Tokyo', 'London', 'Dublin']) SQLAlchemy-1.0.11/examples/materialized_paths/0000775000175000017500000000000012636376632022327 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/materialized_paths/materialized_paths.py0000664000175000017500000000762512636375552026564 0ustar classicclassic00000000000000"""Illustrates the "materialized paths" pattern. Materialized paths is a way to represent a tree structure in SQL with fast descendant and ancestor queries at the expense of moving nodes (which require O(n) UPDATEs in the worst case, where n is the number of nodes in the tree). It is a good balance in terms of performance and simplicity between the nested sets model and the adjacency list model. It works by storing all nodes in a table with a path column, containing a string of delimited IDs. Think file system paths: 1 1.2 1.3 1.3.4 1.3.5 1.3.6 1.7 1.7.8 1.7.9 1.7.9.10 1.7.11 Descendant queries are simple left-anchored LIKE queries, and ancestors are already stored in the path itself. Updates require going through all descendants and changing the prefix. """ from sqlalchemy import Column, Integer, String, func, select, create_engine from sqlalchemy.orm import remote, foreign, relationship, Session from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.sql.expression import cast from sqlalchemy.dialects.postgresql import ARRAY Base = declarative_base() class Node(Base): __tablename__ = "node" id = Column(Integer, primary_key=True, autoincrement=False) path = Column(String(500), nullable=False, index=True) # To find the descendants of this node, we look for nodes whose path # starts with this node's path. descendants = relationship( "Node", viewonly=True, order_by=path, primaryjoin=remote(foreign(path)).like(path.concat(".%"))) # Finding the ancestors is a little bit trickier. We need to create a fake # secondary table since this behaves like a many-to-many join. 
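    # e.g. for the node with path "1.7.9.10", regexp_replace() strips the
    # trailing ".10", string_to_array() splits "1.7.9" on ".", and unnest()
    # expands the result to one ancestor_id row per ancestor: 1, 7, 9.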
secondary = select([ id.label("id"), func.unnest(cast(func.string_to_array( func.regexp_replace(path, r"\.?\d+$", ""), "."), ARRAY(Integer))).label("ancestor_id") ]).alias() ancestors = relationship("Node", viewonly=True, secondary=secondary, primaryjoin=id == secondary.c.id, secondaryjoin=secondary.c.ancestor_id == id, order_by=path) @property def depth(self): return len(self.path.split(".")) - 1 def __repr__(self): return "Node(id={})".format(self.id) def __str__(self): root_depth = self.depth s = [str(self.id)] s.extend(((n.depth - root_depth) * " " + str(n.id)) for n in self.descendants) return "\n".join(s) def move_to(self, new_parent): new_path = new_parent.path + "." + str(self.id) for n in self.descendants: n.path = new_path + n.path[len(self.path):] self.path = new_path if __name__ == "__main__": engine = create_engine("postgresql://scott:tiger@localhost/test", echo=True) Base.metadata.create_all(engine) session = Session(engine) print("-" * 80) print("create a tree") session.add_all([ Node(id=1, path="1"), Node(id=2, path="1.2"), Node(id=3, path="1.3"), Node(id=4, path="1.3.4"), Node(id=5, path="1.3.5"), Node(id=6, path="1.3.6"), Node(id=7, path="1.7"), Node(id=8, path="1.7.8"), Node(id=9, path="1.7.9"), Node(id=10, path="1.7.9.10"), Node(id=11, path="1.7.11"), ]) session.flush() print(str(session.query(Node).get(1))) print("-" * 80) print("move 7 under 3") session.query(Node).get(7).move_to(session.query(Node).get(3)) session.flush() print(str(session.query(Node).get(1))) print("-" * 80) print("move 3 under 2") session.query(Node).get(3).move_to(session.query(Node).get(2)) session.flush() print(str(session.query(Node).get(1))) print("-" * 80) print("find the ancestors of 10") print([n.id for n in session.query(Node).get(10).ancestors]) session.close() Base.metadata.drop_all(engine) SQLAlchemy-1.0.11/examples/materialized_paths/__init__.py0000664000175000017500000000016612636375552024443 0ustar classicclassic00000000000000"""Illustrates the "materialized paths" pattern for hierarchical data using the SQLAlchemy ORM. .. autosource:: """ SQLAlchemy-1.0.11/examples/performance/0000775000175000017500000000000012636376632020757 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/performance/bulk_inserts.py0000664000175000017500000000764512636375552024051 0ustar classicclassic00000000000000"""This series of tests illustrates different ways to INSERT a large number of rows in bulk. """ from . 
import Profiler from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, create_engine, bindparam from sqlalchemy.orm import Session Base = declarative_base() engine = None class Customer(Base): __tablename__ = "customer" id = Column(Integer, primary_key=True) name = Column(String(255)) description = Column(String(255)) Profiler.init("bulk_inserts", num=100000) @Profiler.setup def setup_database(dburl, echo, num): global engine engine = create_engine(dburl, echo=echo) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) @Profiler.profile def test_flush_no_pk(n): """Individual INSERT statements via the ORM, calling upon last row id""" session = Session(bind=engine) for chunk in range(0, n, 1000): session.add_all([ Customer( name='customer name %d' % i, description='customer description %d' % i) for i in range(chunk, chunk + 1000) ]) session.flush() session.commit() @Profiler.profile def test_bulk_save_return_pks(n): """Individual INSERT statements in "bulk", but calling upon last row id""" session = Session(bind=engine) session.bulk_save_objects([ Customer( name='customer name %d' % i, description='customer description %d' % i ) for i in range(n) ], return_defaults=True) session.commit() @Profiler.profile def test_flush_pk_given(n): """Batched INSERT statements via the ORM, PKs already defined""" session = Session(bind=engine) for chunk in range(0, n, 1000): session.add_all([ Customer( id=i + 1, name='customer name %d' % i, description='customer description %d' % i) for i in range(chunk, chunk + 1000) ]) session.flush() session.commit() @Profiler.profile def test_bulk_save(n): """Batched INSERT statements via the ORM in "bulk", discarding PKs.""" session = Session(bind=engine) session.bulk_save_objects([ Customer( name='customer name %d' % i, description='customer description %d' % i ) for i in range(n) ]) session.commit() @Profiler.profile def test_bulk_insert_mappings(n): """Batched INSERT statements via the ORM "bulk", using dictionaries.""" session = Session(bind=engine) session.bulk_insert_mappings(Customer, [ dict( name='customer name %d' % i, description='customer description %d' % i ) for i in range(n) ]) session.commit() @Profiler.profile def test_core_insert(n): """A single Core INSERT construct inserting mappings in bulk.""" conn = engine.connect() conn.execute( Customer.__table__.insert(), [ dict( name='customer name %d' % i, description='customer description %d' % i ) for i in range(n) ]) @Profiler.profile def test_dbapi_raw(n): """The DBAPI's API inserting rows in bulk.""" conn = engine.pool._creator() cursor = conn.cursor() compiled = Customer.__table__.insert().values( name=bindparam('name'), description=bindparam('description')).\ compile(dialect=engine.dialect) if compiled.positional: args = ( ('customer name %d' % i, 'customer description %d' % i) for i in range(n)) else: args = ( dict( name='customer name %d' % i, description='customer description %d' % i ) for i in range(n) ) cursor.executemany( str(compiled), list(args) ) conn.commit() conn.close() if __name__ == '__main__': Profiler.main() SQLAlchemy-1.0.11/examples/performance/bulk_updates.py0000664000175000017500000000264512636375552024022 0ustar classicclassic00000000000000"""This series of tests illustrates different ways to UPDATE a large number of rows in bulk. """ from . 
import Profiler from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, create_engine, bindparam from sqlalchemy.orm import Session Base = declarative_base() engine = None class Customer(Base): __tablename__ = "customer" id = Column(Integer, primary_key=True) name = Column(String(255)) description = Column(String(255)) Profiler.init("bulk_updates", num=100000) @Profiler.setup def setup_database(dburl, echo, num): global engine engine = create_engine(dburl, echo=echo) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) s = Session(engine) for chunk in range(0, num, 10000): s.bulk_insert_mappings(Customer, [ { 'name': 'customer name %d' % i, 'description': 'customer description %d' % i } for i in range(chunk, chunk + 10000) ]) s.commit() @Profiler.profile def test_orm_flush(n): """UPDATE statements via the ORM flush process.""" session = Session(bind=engine) for chunk in range(0, n, 1000): customers = session.query(Customer).\ filter(Customer.id.between(chunk, chunk + 1000)).all() for customer in customers: customer.description += "updated" session.flush() session.commit() SQLAlchemy-1.0.11/examples/performance/__main__.py0000664000175000017500000000021612636375552023050 0ustar classicclassic00000000000000"""Allows the examples/performance package to be run as a script.""" from . import Profiler if __name__ == '__main__': Profiler.main() SQLAlchemy-1.0.11/examples/performance/single_inserts.py0000664000175000017500000001073012636375552024362 0ustar classicclassic00000000000000"""In this series of tests, we're looking at a method that inserts a row within a distinct transaction, and afterwards returns to essentially a "closed" state. This would be analogous to an API call that starts up a database connection, inserts the row, commits and closes. """ from . 
import Profiler from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, create_engine, bindparam, pool from sqlalchemy.orm import Session Base = declarative_base() engine = None class Customer(Base): __tablename__ = "customer" id = Column(Integer, primary_key=True) name = Column(String(255)) description = Column(String(255)) Profiler.init("single_inserts", num=10000) @Profiler.setup def setup_database(dburl, echo, num): global engine engine = create_engine(dburl, echo=echo) if engine.dialect.name == 'sqlite': engine.pool = pool.StaticPool(creator=engine.pool._creator) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) @Profiler.profile def test_orm_commit(n): """Individual INSERT/COMMIT pairs via the ORM""" for i in range(n): session = Session(bind=engine) session.add( Customer( name='customer name %d' % i, description='customer description %d' % i) ) session.commit() @Profiler.profile def test_bulk_save(n): """Individual INSERT/COMMIT pairs using the "bulk" API """ for i in range(n): session = Session(bind=engine) session.bulk_save_objects([ Customer( name='customer name %d' % i, description='customer description %d' % i )]) session.commit() @Profiler.profile def test_bulk_insert_dictionaries(n): """Individual INSERT/COMMIT pairs using the "bulk" API with dictionaries""" for i in range(n): session = Session(bind=engine) session.bulk_insert_mappings(Customer, [ dict( name='customer name %d' % i, description='customer description %d' % i )]) session.commit() @Profiler.profile def test_core(n): """Individual INSERT/COMMIT pairs using Core.""" for i in range(n): with engine.begin() as conn: conn.execute( Customer.__table__.insert(), dict( name='customer name %d' % i, description='customer description %d' % i ) ) @Profiler.profile def test_core_query_caching(n): """Individual INSERT/COMMIT pairs using Core with query caching""" cache = {} ins = Customer.__table__.insert() for i in range(n): with engine.begin() as conn: conn.execution_options(compiled_cache=cache).execute( ins, dict( name='customer name %d' % i, description='customer description %d' % i ) ) @Profiler.profile def test_dbapi_raw_w_connect(n): """Individual INSERT/COMMIT pairs w/ DBAPI + connection each time""" _test_dbapi_raw(n, True) @Profiler.profile def test_dbapi_raw_w_pool(n): """Individual INSERT/COMMIT pairs w/ DBAPI + connection pool""" _test_dbapi_raw(n, False) def _test_dbapi_raw(n, connect): compiled = Customer.__table__.insert().values( name=bindparam('name'), description=bindparam('description')).\ compile(dialect=engine.dialect) if compiled.positional: args = ( ('customer name %d' % i, 'customer description %d' % i) for i in range(n)) else: args = ( dict( name='customer name %d' % i, description='customer description %d' % i ) for i in range(n) ) sql = str(compiled) if connect: for arg in args: # there's no connection pool, so if these were distinct # calls, we'd be connecting each time conn = engine.pool._creator() cursor = conn.cursor() cursor.execute(sql, arg) lastrowid = cursor.lastrowid conn.commit() conn.close() else: for arg in args: conn = engine.raw_connection() cursor = conn.cursor() cursor.execute(sql, arg) lastrowid = cursor.lastrowid conn.commit() conn.close() if __name__ == '__main__': Profiler.main() SQLAlchemy-1.0.11/examples/performance/large_resultsets.py0000664000175000017500000001242612636375552024725 0ustar classicclassic00000000000000"""In this series of tests, we are looking at time to load a large number of very small and simple 
rows. A special test here illustrates the difference between fetching the rows from the raw DBAPI and throwing them away, vs. assembling each row into a completely basic Python object and appending to a list. The time spent typically more than doubles. The point is that while DBAPIs will give you raw rows very fast if they are written in C, the moment you do anything with those rows, even something trivial, overhead grows extremely fast in cPython. SQLAlchemy's Core and lighter-weight ORM options add absolutely minimal overhead, and the full blown ORM doesn't do terribly either even though mapped objects provide a huge amount of functionality. """ from . import Profiler from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, create_engine from sqlalchemy.orm import Session, Bundle Base = declarative_base() engine = None class Customer(Base): __tablename__ = "customer" id = Column(Integer, primary_key=True) name = Column(String(255)) description = Column(String(255)) Profiler.init("large_resultsets", num=500000) @Profiler.setup_once def setup_database(dburl, echo, num): global engine engine = create_engine(dburl, echo=echo) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) s = Session(engine) for chunk in range(0, num, 10000): s.execute( Customer.__table__.insert(), params=[ { 'name': 'customer name %d' % i, 'description': 'customer description %d' % i } for i in range(chunk, chunk + 10000)]) s.commit() @Profiler.profile def test_orm_full_objects_list(n): """Load fully tracked ORM objects into one big list().""" sess = Session(engine) objects = list(sess.query(Customer).limit(n)) @Profiler.profile def test_orm_full_objects_chunks(n): """Load fully tracked ORM objects a chunk at a time using yield_per().""" sess = Session(engine) for obj in sess.query(Customer).yield_per(1000).limit(n): pass @Profiler.profile def test_orm_bundles(n): """Load lightweight "bundle" objects using the ORM.""" sess = Session(engine) bundle = Bundle('customer', Customer.id, Customer.name, Customer.description) for row in sess.query(bundle).yield_per(10000).limit(n): pass @Profiler.profile def test_orm_columns(n): """Load individual columns into named tuples using the ORM.""" sess = Session(engine) for row in sess.query( Customer.id, Customer.name, Customer.description).yield_per(10000).limit(n): pass @Profiler.profile def test_core_fetchall(n): """Load Core result rows using fetchall.""" with engine.connect() as conn: result = conn.execute(Customer.__table__.select().limit(n)).fetchall() for row in result: data = row['id'], row['name'], row['description'] @Profiler.profile def test_core_fetchmany_w_streaming(n): """Load Core result rows using fetchmany/streaming.""" with engine.connect() as conn: result = conn.execution_options(stream_results=True).\ execute(Customer.__table__.select().limit(n)) while True: chunk = result.fetchmany(10000) if not chunk: break for row in chunk: data = row['id'], row['name'], row['description'] @Profiler.profile def test_core_fetchmany(n): """Load Core result rows using Core / fetchmany.""" with engine.connect() as conn: result = conn.execute(Customer.__table__.select().limit(n)) while True: chunk = result.fetchmany(10000) if not chunk: break for row in chunk: data = row['id'], row['name'], row['description'] @Profiler.profile def test_dbapi_fetchall_plus_append_objects(n): """Load rows using DBAPI fetchall(), generate an object for each row.""" _test_dbapi_raw(n, True) @Profiler.profile def 
test_dbapi_fetchall_no_object(n): """Load rows using DBAPI fetchall(), don't make any objects.""" _test_dbapi_raw(n, False) def _test_dbapi_raw(n, make_objects): compiled = Customer.__table__.select().limit(n).\ compile( dialect=engine.dialect, compile_kwargs={"literal_binds": True}) if make_objects: # because if you're going to roll your own, you're probably # going to do this, so see how this pushes you right back into # ORM land anyway :) class SimpleCustomer(object): def __init__(self, id, name, description): self.id = id self.name = name self.description = description sql = str(compiled) conn = engine.raw_connection() cursor = conn.cursor() cursor.execute(sql) if make_objects: for row in cursor.fetchall(): # ensure that we fully fetch! customer = SimpleCustomer( id=row[0], name=row[1], description=row[2]) else: for row in cursor.fetchall(): # ensure that we fully fetch! data = row[0], row[1], row[2] conn.close() if __name__ == '__main__': Profiler.main() SQLAlchemy-1.0.11/examples/performance/__init__.py0000664000175000017500000003347512636375552023104 0ustar classicclassic00000000000000"""A performance profiling suite for a variety of SQLAlchemy use cases. Each suite focuses on a specific use case with a particular performance profile and associated implications: * bulk inserts * individual inserts, with or without transactions * fetching large numbers of rows * running lots of short queries All suites include a variety of use patterns illustrating both Core and ORM use, and are generally sorted in order of performance from worst to greatest, inversely based on amount of functionality provided by SQLAlchemy, greatest to least (these two things generally correspond perfectly). A command line tool is presented at the package level which allows individual suites to be run:: $ python -m examples.performance --help usage: python -m examples.performance [-h] [--test TEST] [--dburl DBURL] [--num NUM] [--profile] [--dump] [--runsnake] [--echo] {bulk_inserts,large_resultsets,single_inserts} positional arguments: {bulk_inserts,large_resultsets,single_inserts} suite to run optional arguments: -h, --help show this help message and exit --test TEST run specific test name --dburl DBURL database URL, default sqlite:///profile.db --num NUM Number of iterations/items/etc for tests; default is 0 module-specific --profile run profiling and dump call counts --dump dump full call profile (implies --profile) --runsnake invoke runsnakerun (implies --profile) --echo Echo SQL output An example run looks like:: $ python -m examples.performance bulk_inserts Or with options:: $ python -m examples.performance bulk_inserts \\ --dburl mysql+mysqldb://scott:tiger@localhost/test \\ --profile --num 1000 .. seealso:: :ref:`faq_how_to_profile` File Listing ------------- .. autosource:: Running all tests with time --------------------------- This is the default form of run:: $ python -m examples.performance single_inserts Tests to run: test_orm_commit, test_bulk_save, test_bulk_insert_dictionaries, test_core, test_core_query_caching, test_dbapi_raw_w_connect, test_dbapi_raw_w_pool test_orm_commit : Individual INSERT/COMMIT pairs via the ORM (10000 iterations); total time 13.690218 sec test_bulk_save : Individual INSERT/COMMIT pairs using the "bulk" API (10000 iterations); total time 11.290371 sec test_bulk_insert_dictionaries : Individual INSERT/COMMIT pairs using the "bulk" API with dictionaries (10000 iterations); total time 10.814626 sec test_core : Individual INSERT/COMMIT pairs using Core. 
(10000 iterations); total time 9.665620 sec test_core_query_caching : Individual INSERT/COMMIT pairs using Core with query caching (10000 iterations); total time 9.209010 sec test_dbapi_raw_w_connect : Individual INSERT/COMMIT pairs w/ DBAPI + connection each time (10000 iterations); total time 9.551103 sec test_dbapi_raw_w_pool : Individual INSERT/COMMIT pairs w/ DBAPI + connection pool (10000 iterations); total time 8.001813 sec Dumping Profiles for Individual Tests -------------------------------------- A Python profile output can be dumped for all tests, or more commonly individual tests:: $ python -m examples.performance single_inserts --test test_core --num 1000 --dump Tests to run: test_core test_core : Individual INSERT/COMMIT pairs using Core. (1000 iterations); total fn calls 186109 186109 function calls (186102 primitive calls) in 1.089 seconds Ordered by: internal time, call count ncalls tottime percall cumtime percall filename:lineno(function) 1000 0.634 0.001 0.634 0.001 {method 'commit' of 'sqlite3.Connection' objects} 1000 0.154 0.000 0.154 0.000 {method 'execute' of 'sqlite3.Cursor' objects} 1000 0.021 0.000 0.074 0.000 /Users/classic/dev/sqlalchemy/lib/sqlalchemy/sql/compiler.py:1950(_get_colparams) 1000 0.015 0.000 0.034 0.000 /Users/classic/dev/sqlalchemy/lib/sqlalchemy/engine/default.py:503(_init_compiled) 1 0.012 0.012 1.091 1.091 examples/performance/single_inserts.py:79(test_core) ... Using RunSnake -------------- This option requires the `RunSnake `_ command line tool be installed:: $ python -m examples.performance single_inserts --test test_core --num 1000 --runsnake A graphical RunSnake output will be displayed. .. _examples_profiling_writeyourown: Writing your Own Suites ----------------------- The profiler suite system is extensible, and can be applied to your own set of tests. This is a valuable technique to use in deciding upon the proper approach for some performance-critical set of routines. For example, if we wanted to profile the difference between several kinds of loading, we can create a file ``test_loads.py``, with the following content:: from examples.performance import Profiler from sqlalchemy import Integer, Column, create_engine, ForeignKey from sqlalchemy.orm import relationship, joinedload, subqueryload, Session from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() engine = None session = None class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) children = relationship("Child") class Child(Base): __tablename__ = 'child' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('parent.id')) # Init with name of file, default number of items Profiler.init("test_loads", 1000) @Profiler.setup_once def setup_once(dburl, echo, num): "setup once. create an engine, insert fixture data" global engine engine = create_engine(dburl, echo=echo) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) sess = Session(engine) sess.add_all([ Parent(children=[Child() for j in range(100)]) for i in range(num) ]) sess.commit() @Profiler.setup def setup(dburl, echo, num): "setup per test. create a new Session." global session session = Session(engine) # pre-connect so this part isn't profiled (if we choose) session.connection() @Profiler.profile def test_lazyload(n): "load everything, no eager loading." for parent in session.query(Parent): parent.children @Profiler.profile def test_joinedload(n): "load everything, joined eager loading." 
for parent in session.query(Parent).options(joinedload("children")): parent.children @Profiler.profile def test_subqueryload(n): "load everything, subquery eager loading." for parent in session.query(Parent).options(subqueryload("children")): parent.children if __name__ == '__main__': Profiler.main() We can run our new script directly:: $ python test_loads.py --dburl postgresql+psycopg2://scott:tiger@localhost/test Running setup once... Tests to run: test_lazyload, test_joinedload, test_subqueryload test_lazyload : load everything, no eager loading. (1000 iterations); total time 11.971159 sec test_joinedload : load everything, joined eager loading. (1000 iterations); total time 2.754592 sec test_subqueryload : load everything, subquery eager loading. (1000 iterations); total time 2.977696 sec As well as see RunSnake output for an individual test:: $ python test_loads.py --num 100 --runsnake --test test_joinedload """ import argparse import cProfile import pstats import os import time import re import sys class Profiler(object): tests = [] _setup = None _setup_once = None name = None num = 0 def __init__(self, options): self.test = options.test self.dburl = options.dburl self.runsnake = options.runsnake self.profile = options.profile self.dump = options.dump self.callers = options.callers self.num = options.num self.echo = options.echo self.stats = [] @classmethod def init(cls, name, num): cls.name = name cls.num = num @classmethod def profile(cls, fn): if cls.name is None: raise ValueError( "Need to call Profile.init(<suitename>, <default_num>) first.") cls.tests.append(fn) return fn @classmethod def setup(cls, fn): if cls._setup is not None: raise ValueError("setup function already set to %s" % cls._setup) cls._setup = staticmethod(fn) return fn @classmethod def setup_once(cls, fn): if cls._setup_once is not None: raise ValueError( "setup_once function already set to %s" % cls._setup_once) cls._setup_once = staticmethod(fn) return fn def run(self): if self.test: tests = [fn for fn in self.tests if fn.__name__ == self.test] if not tests: raise ValueError("No such test: %s" % self.test) else: tests = self.tests if self._setup_once: print("Running setup once...") self._setup_once(self.dburl, self.echo, self.num) print("Tests to run: %s" % ", ".join([t.__name__ for t in tests])) for test in tests: self._run_test(test) self.stats[-1].report() def _run_with_profile(self, fn): pr = cProfile.Profile() pr.enable() try: result = fn(self.num) finally: pr.disable() stats = pstats.Stats(pr).sort_stats('cumulative') self.stats.append(TestResult(self, fn, stats=stats)) return result def _run_with_time(self, fn): now = time.time() try: return fn(self.num) finally: total = time.time() - now self.stats.append(TestResult(self, fn, total_time=total)) def _run_test(self, fn): if self._setup: self._setup(self.dburl, self.echo, self.num) if self.profile or self.runsnake or self.dump: self._run_with_profile(fn) else: self._run_with_time(fn) @classmethod def main(cls): parser = argparse.ArgumentParser("python -m examples.performance") if cls.name is None: parser.add_argument( "name", choices=cls._suite_names(), help="suite to run") if len(sys.argv) > 1: potential_name = sys.argv[1] try: suite = __import__(__name__ + "."
+ potential_name) except ImportError: pass parser.add_argument( "--test", type=str, help="run specific test name" ) parser.add_argument( '--dburl', type=str, default="sqlite:///profile.db", help="database URL, default sqlite:///profile.db" ) parser.add_argument( '--num', type=int, default=cls.num, help="Number of iterations/items/etc for tests; " "default is %d module-specific" % cls.num ) parser.add_argument( '--profile', action='store_true', help='run profiling and dump call counts') parser.add_argument( '--dump', action='store_true', help='dump full call profile (implies --profile)') parser.add_argument( '--callers', action='store_true', help='print callers as well (implies --dump)') parser.add_argument( '--runsnake', action='store_true', help='invoke runsnakerun (implies --profile)') parser.add_argument( '--echo', action='store_true', help="Echo SQL output") args = parser.parse_args() args.dump = args.dump or args.callers args.profile = args.profile or args.dump or args.runsnake if cls.name is None: suite = __import__(__name__ + "." + args.name) Profiler(args).run() @classmethod def _suite_names(cls): suites = [] for file_ in os.listdir(os.path.dirname(__file__)): match = re.match(r'^([a-z].*).py$', file_) if match: suites.append(match.group(1)) return suites class TestResult(object): def __init__(self, profile, test, stats=None, total_time=None): self.profile = profile self.test = test self.stats = stats self.total_time = total_time def report(self): print(self._summary()) if self.profile.profile: self.report_stats() def _summary(self): summary = "%s : %s (%d iterations)" % ( self.test.__name__, self.test.__doc__, self.profile.num) if self.total_time: summary += "; total time %f sec" % self.total_time if self.stats: summary += "; total fn calls %d" % self.stats.total_calls return summary def report_stats(self): if self.profile.runsnake: self._runsnake() elif self.profile.dump: self._dump() def _dump(self): self.stats.sort_stats('time', 'calls') self.stats.print_stats() if self.profile.callers: self.stats.print_callers() def _runsnake(self): filename = "%s.profile" % self.test.__name__ try: self.stats.dump_stats(filename) os.system("runsnake %s" % filename) finally: os.remove(filename) SQLAlchemy-1.0.11/examples/performance/short_selects.py0000664000175000017500000000722512636375552024220 0ustar classicclassic00000000000000"""This series of tests illustrates different ways to SELECT a single record by primary key. """ from .
import Profiler from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, create_engine, \ bindparam, select from sqlalchemy.orm import Session, deferred from sqlalchemy.ext import baked import random Base = declarative_base() engine = None ids = range(1, 11000) class Customer(Base): __tablename__ = "customer" id = Column(Integer, primary_key=True) name = Column(String(255)) description = Column(String(255)) q = Column(Integer) p = Column(Integer) x = deferred(Column(Integer)) y = deferred(Column(Integer)) z = deferred(Column(Integer)) Profiler.init("short_selects", num=10000) @Profiler.setup def setup_database(dburl, echo, num): global engine engine = create_engine(dburl, echo=echo) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) sess = Session(engine) sess.add_all([ Customer( id=i, name='c%d' % i, description="c%d" % i, q=i * 10, p=i * 20, x=i * 30, y=i * 40, ) for i in ids ]) sess.commit() @Profiler.profile def test_orm_query(n): """test a straight ORM query of the full entity.""" session = Session(bind=engine) for id_ in random.sample(ids, n): session.query(Customer).filter(Customer.id == id_).one() @Profiler.profile def test_orm_query_cols_only(n): """test an ORM query of only the entity columns.""" session = Session(bind=engine) for id_ in random.sample(ids, n): session.query( Customer.id, Customer.name, Customer.description ).filter(Customer.id == id_).one() @Profiler.profile def test_baked_query(n): """test a baked query of the full entity.""" bakery = baked.bakery() s = Session(bind=engine) for id_ in random.sample(ids, n): q = bakery(lambda s: s.query(Customer)) q += lambda q: q.filter(Customer.id == bindparam('id')) q(s).params(id=id_).one() @Profiler.profile def test_baked_query_cols_only(n): """test a baked query of only the entity columns.""" bakery = baked.bakery() s = Session(bind=engine) for id_ in random.sample(ids, n): q = bakery( lambda s: s.query( Customer.id, Customer.name, Customer.description)) q += lambda q: q.filter(Customer.id == bindparam('id')) q(s).params(id=id_).one() @Profiler.profile def test_core_new_stmt_each_time(n): """test core, creating a new statement each time.""" with engine.connect() as conn: for id_ in random.sample(ids, n): stmt = select([Customer.__table__]).where(Customer.id == id_) row = conn.execute(stmt).first() tuple(row) @Profiler.profile def test_core_reuse_stmt(n): """test core, reusing the same statement (but recompiling each time).""" stmt = select([Customer.__table__]).where(Customer.id == bindparam('id')) with engine.connect() as conn: for id_ in random.sample(ids, n): row = conn.execute(stmt, id=id_).first() tuple(row) @Profiler.profile def test_core_reuse_stmt_compiled_cache(n): """test core, reusing the same statement + compiled cache.""" compiled_cache = {} stmt = select([Customer.__table__]).where(Customer.id == bindparam('id')) with engine.connect().\ execution_options(compiled_cache=compiled_cache) as conn: for id_ in random.sample(ids, n): row = conn.execute(stmt, id=id_).first() tuple(row) if __name__ == '__main__': Profiler.main() SQLAlchemy-1.0.11/examples/postgis/0000775000175000017500000000000012636376632020146 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/postgis/__init__.py0000664000175000017500000000220712636375552022260 0ustar classicclassic00000000000000"""A naive example illustrating techniques to help embed PostGIS functionality. 
This example was originally developed in the hopes that it would be extrapolated into a comprehensive PostGIS integration layer. We are pleased to announce that this has come to fruition as `GeoAlchemy `_. The example illustrates: * a DDL extension which allows CREATE/DROP to work in conjunction with AddGeometryColumn/DropGeometryColumn * a Geometry type, as well as a few subtypes, which convert result row values to a GIS-aware object, and also integrates with the DDL extension. * a GIS-aware object which stores a raw geometry value and provides a factory for functions such as AsText(). * an ORM comparator which can override standard column methods on mapped objects to produce GIS operators. * an attribute event listener that intercepts strings and converts to GeomFromText(). * a standalone operator example. The implementation is limited to only public, well known and simple to use extension points. E.g.:: print session.query(Road).filter(Road.road_geom.intersects(r1.road_geom)).all() .. autosource:: """ SQLAlchemy-1.0.11/examples/postgis/postgis.py0000664000175000017500000002145312636375552022215 0ustar classicclassic00000000000000from sqlalchemy.types import UserDefinedType, _Binary, TypeDecorator from sqlalchemy.sql import expression, type_coerce from sqlalchemy import event, Table import binascii # Python datatypes class GisElement(object): """Represents a geometry value.""" def __str__(self): return self.desc def __repr__(self): return "<%s at 0x%x; %r>" % (self.__class__.__name__, id(self), self.desc) class BinaryGisElement(GisElement, expression.Function): """Represents a Geometry value expressed as binary.""" def __init__(self, data): self.data = data expression.Function.__init__(self, "ST_GeomFromEWKB", data, type_=Geometry(coerce_="binary")) @property def desc(self): return self.as_hex @property def as_hex(self): return binascii.hexlify(self.data) class TextualGisElement(GisElement, expression.Function): """Represents a Geometry value expressed as text.""" def __init__(self, desc, srid=-1): self.desc = desc expression.Function.__init__(self, "ST_GeomFromText", desc, srid, type_=Geometry) # SQL datatypes. class Geometry(UserDefinedType): """Base PostGIS Geometry column type.""" name = "GEOMETRY" def __init__(self, dimension=None, srid=-1, coerce_="text"): self.dimension = dimension self.srid = srid self.coerce = coerce_ class comparator_factory(UserDefinedType.Comparator): """Define custom operations for geometry types.""" # override the __eq__() operator def __eq__(self, other): return self.op('~=')(other) # add a custom operator def intersects(self, other): return self.op('&&')(other) # any number of GIS operators can be overridden/added here # using the techniques above. 
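            # An illustrative sketch (not part of the original recipe):
            # any further PostGIS operator can be exposed the same way
            # via self.op(). '&<' is PostGIS's "overlaps or is to the
            # left of" bounding-box operator.
            def overlaps_or_left(self, other):
                return self.op('&<')(other)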
def _coerce_compared_value(self, op, value): return self def get_col_spec(self): return self.name def bind_expression(self, bindvalue): if self.coerce == "text": return TextualGisElement(bindvalue) elif self.coerce == "binary": return BinaryGisElement(bindvalue) else: assert False def column_expression(self, col): if self.coerce == "text": return func.ST_AsText(col, type_=self) elif self.coerce == "binary": return func.ST_AsBinary(col, type_=self) else: assert False def bind_processor(self, dialect): def process(value): if isinstance(value, GisElement): return value.desc else: return value return process def result_processor(self, dialect, coltype): if self.coerce == "text": fac = TextualGisElement elif self.coerce == "binary": fac = BinaryGisElement else: assert False def process(value): if value is not None: return fac(value) else: return value return process def adapt(self, impltype): return impltype(dimension=self.dimension, srid=self.srid, coerce_=self.coerce) # other datatypes can be added as needed. class Point(Geometry): name = 'POINT' class Curve(Geometry): name = 'CURVE' class LineString(Curve): name = 'LINESTRING' # ... etc. # DDL integration # Postgis historically has required AddGeometryColumn/DropGeometryColumn # and other management methods in order to create Postgis columns. Newer # versions don't appear to require these special steps anymore. However, # here we illustrate how to set up these features in any case. def setup_ddl_events(): @event.listens_for(Table, "before_create") def before_create(target, connection, **kw): dispatch("before-create", target, connection) @event.listens_for(Table, "after_create") def after_create(target, connection, **kw): dispatch("after-create", target, connection) @event.listens_for(Table, "before_drop") def before_drop(target, connection, **kw): dispatch("before-drop", target, connection) @event.listens_for(Table, "after_drop") def after_drop(target, connection, **kw): dispatch("after-drop", target, connection) def dispatch(event, table, bind): if event in ('before-create', 'before-drop'): regular_cols = [c for c in table.c if not isinstance(c.type, Geometry)] gis_cols = set(table.c).difference(regular_cols) table.info["_saved_columns"] = table.c # temporarily patch a set of columns not including the # Geometry columns table.columns = expression.ColumnCollection(*regular_cols) if event == 'before-drop': for c in gis_cols: bind.execute( select([ func.DropGeometryColumn( 'public', table.name, c.name)], autocommit=True) ) elif event == 'after-create': table.columns = table.info.pop('_saved_columns') for c in table.c: if isinstance(c.type, Geometry): bind.execute( select([ func.AddGeometryColumn( table.name, c.name, c.type.srid, c.type.name, c.type.dimension)], autocommit=True) ) elif event == 'after-drop': table.columns = table.info.pop('_saved_columns') setup_ddl_events() # illustrate usage if __name__ == '__main__': from sqlalchemy import (create_engine, MetaData, Column, Integer, String, func, select) from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base engine = create_engine('postgresql://scott:tiger@localhost/test', echo=True) metadata = MetaData(engine) Base = declarative_base(metadata=metadata) class Road(Base): __tablename__ = 'roads' road_id = Column(Integer, primary_key=True) road_name = Column(String) road_geom = Column(Geometry(2)) metadata.drop_all() metadata.create_all() session = sessionmaker(bind=engine)() # Add objects. We can use strings... 
session.add_all([ Road(road_name='Jeff Rd', road_geom='LINESTRING(191232 243118,191108 243242)'), Road(road_name='Geordie Rd', road_geom='LINESTRING(189141 244158,189265 244817)'), Road(road_name='Paul St', road_geom='LINESTRING(192783 228138,192612 229814)'), Road(road_name='Graeme Ave', road_geom='LINESTRING(189412 252431,189631 259122)'), Road(road_name='Phil Tce', road_geom='LINESTRING(190131 224148,190871 228134)'), ]) # or use an explicit TextualGisElement (similar to saying func.GeomFromText()) r = Road(road_name='Dave Cres', road_geom=TextualGisElement('LINESTRING(198231 263418,198213 268322)', -1)) session.add(r) # pre flush, the TextualGisElement represents the string we sent. assert str(r.road_geom) == 'LINESTRING(198231 263418,198213 268322)' session.commit() # after flush and/or commit, all the TextualGisElements become PersistentGisElements. assert str(r.road_geom) == "LINESTRING(198231 263418,198213 268322)" r1 = session.query(Road).filter(Road.road_name == 'Graeme Ave').one() # illustrate the overridden __eq__() operator. # strings come in as TextualGisElements r2 = session.query(Road).filter(Road.road_geom == 'LINESTRING(189412 252431,189631 259122)').one() r3 = session.query(Road).filter(Road.road_geom == r1.road_geom).one() assert r1 is r2 is r3 # core usage just fine: road_table = Road.__table__ stmt = select([road_table]).where(road_table.c.road_geom.intersects(r1.road_geom)) print(session.execute(stmt).fetchall()) # TODO: for some reason the auto-generated labels have the internal replacement # strings exposed, even though PG doesn't complain # look up the hex binary version, using SQLAlchemy casts as_binary = session.scalar(select([type_coerce(r.road_geom, Geometry(coerce_="binary"))])) assert as_binary.as_hex == \ '01020000000200000000000000b832084100000000e813104100000000283208410000000088601041' # back again, same method ! as_text = session.scalar(select([type_coerce(as_binary, Geometry(coerce_="text"))])) assert as_text.desc == "LINESTRING(198231 263418,198213 268322)" session.rollback() metadata.drop_all() SQLAlchemy-1.0.11/examples/join_conditions/0000775000175000017500000000000012636376632021646 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/join_conditions/threeway.py0000664000175000017500000000741112636375552024053 0ustar classicclassic00000000000000"""Illustrate a "three way join" - where a primary table joins to a remote table via an association table, but then the primary table also needs to refer to some columns in the remote table directly. E.g.:: first.first_id -> second.first_id second.other_id --> partitioned.other_id first.partition_key ---------------------> partitioned.partition_key For a relationship like this, "second" is a lot like a "secondary" table, but the mechanics aren't present within the "secondary" feature to allow for the join directly between first and partitioned. Instead, we will derive a selectable from partitioned and second combined together, then link first to that derived selectable. If we define the derived selectable as:: second JOIN partitioned ON second.other_id = partitioned.other_id A JOIN from first to this derived selectable is then:: first JOIN (second JOIN partitioned ON second.other_id = partitioned.other_id) ON first.first_id = second.first_id AND first.partition_key = partitioned.partition_key We will use the "non primary mapper" feature in order to produce this. 
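In outline, the pattern looks like the following (a preview of the working
mapping set up at the bottom of this file)::

    j = join(Partitioned, Second,
             Partitioned.other_id == Second.other_id)
    partitioned_second = mapper(Partitioned, j, non_primary=True)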
A non primary mapper is essentially an "extra" :func:`.mapper` that we can use to associate a particular class with some selectable that is not its usual mapped table. It is used only when called upon within a Query (or a :func:`.relationship`). """ from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class First(Base): __tablename__ = 'first' first_id = Column(Integer, primary_key=True) partition_key = Column(String) def __repr__(self): return ("First(%s, %s)" % (self.first_id, self.partition_key)) class Second(Base): __tablename__ = 'second' first_id = Column(Integer, primary_key=True) other_id = Column(Integer, primary_key=True) class Partitioned(Base): __tablename__ = 'partitioned' other_id = Column(Integer, primary_key=True) partition_key = Column(String, primary_key=True) def __repr__(self): return ("Partitioned(%s, %s)" % (self.other_id, self.partition_key)) j = join(Partitioned, Second, Partitioned.other_id == Second.other_id) partitioned_second = mapper(Partitioned, j, non_primary=True, properties={ # note we need to disambiguate columns here - the join() # will provide them as j.c.<tablename>_<colname> for access, # but they retain their real names in the mapping "other_id": [j.c.partitioned_other_id, j.c.second_other_id], }) First.partitioned = relationship( partitioned_second, primaryjoin=and_( First.partition_key == partitioned_second.c.partition_key, First.first_id == foreign(partitioned_second.c.first_id) ), innerjoin=True) # when using any database other than SQLite, we will get a nested # join, e.g. "first JOIN (partitioned JOIN second ON ..) ON ..". # On SQLite, SQLAlchemy needs to render a full subquery. e = create_engine("sqlite://", echo=True) Base.metadata.create_all(e) s = Session(e) s.add_all([ First(first_id=1, partition_key='p1'), First(first_id=2, partition_key='p1'), First(first_id=3, partition_key='p2'), Second(first_id=1, other_id=1), Second(first_id=2, other_id=1), Second(first_id=3, other_id=2), Partitioned(partition_key='p1', other_id=1), Partitioned(partition_key='p1', other_id=2), Partitioned(partition_key='p2', other_id=2), ]) s.commit() for row in s.query(First, Partitioned).join(First.partitioned): print(row) for f in s.query(First): for p in f.partitioned: print(f.partition_key, p.partition_key) SQLAlchemy-1.0.11/examples/join_conditions/cast.py0000664000175000017500000000520612636375552023155 0ustar classicclassic00000000000000"""Illustrate a :func:`.relationship` that joins two columns where those columns are not of the same type, and a CAST must be used on the SQL side in order to match them. When complete, we'd like to see a load of the relationship to look like:: -- load the primary row, a_id is a string SELECT a.id AS a_id_1, a.a_id AS a_a_id FROM a WHERE a.a_id = '2' -- then load the collection using CAST, b.a_id is an integer SELECT b.id AS b_id, b.a_id AS b_a_id FROM b WHERE CAST('2' AS INTEGER) = b.a_id The relationship is essentially configured as follows:: class B(Base): # ... a = relationship(A, primaryjoin=cast(A.a_id, Integer) == foreign(B.a_id), backref="bs") Where above, we are making use of the :func:`.cast` function in order to produce CAST, as well as the :func:`.foreign` :term:`annotation` function in order to note to the ORM that ``B.a_id`` should be treated like the "foreign key" column.
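Loading the collection then emits the CAST query shown above; for example
(a sketch mirroring the demo at the end of this file)::

    a1 = session.query(A).filter_by(a_id="2").first()
    print(a1.bs)  # lazy load: ... WHERE CAST('2' AS INTEGER) = b.a_id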
""" from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class StringAsInt(TypeDecorator): """Coerce string->integer type. This is needed only if the relationship() from int to string is writable, as SQLAlchemy will copy the string parent values into the integer attribute on the child during a flush. """ impl = Integer def process_bind_param(self, value, dialect): if value is not None: value = int(value) return value class A(Base): """Parent. The referenced column is a string type.""" __tablename__ = 'a' id = Column(Integer, primary_key=True) a_id = Column(String) class B(Base): """Child. The column we reference 'A' with is an integer.""" __tablename__ = 'b' id = Column(Integer, primary_key=True) a_id = Column(StringAsInt) a = relationship("A", # specify primaryjoin. The string form is optional # here, but note that Declarative makes available all # of the built-in functions we might need, including # cast() and foreign(). primaryjoin="cast(A.a_id, Integer) == foreign(B.a_id)", backref="bs") # we demonstrate with SQLite, but the important part # is the CAST rendered in the SQL output. e = create_engine('sqlite://', echo=True) Base.metadata.create_all(e) s = Session(e) s.add_all([ A(a_id="1"), A(a_id="2", bs=[B(), B()]), A(a_id="3", bs=[B()]), ]) s.commit() b1 = s.query(B).filter_by(a_id="2").first() print(b1.a) a1 = s.query(A).filter_by(a_id="2").first() print(a1.bs)SQLAlchemy-1.0.11/examples/join_conditions/__init__.py0000664000175000017500000000026212636375552023757 0ustar classicclassic00000000000000"""Examples of various :func:`.orm.relationship` configurations, which make use of the ``primaryjoin`` argument to compose special types of join conditions. .. autosource:: """SQLAlchemy-1.0.11/examples/generic_associations/0000775000175000017500000000000012636376632022651 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/generic_associations/generic_fk.py0000664000175000017500000001036612636375552025325 0ustar classicclassic00000000000000"""generic_fk.py Illustrates a so-called "generic foreign key", in a similar fashion to that of popular frameworks such as Django, ROR, etc. This approach bypasses standard referential integrity practices, in that the "foreign key" column is not actually constrained to refer to any particular table; instead, in-application logic is used to determine which table is referenced. This approach is not in line with SQLAlchemy's usual style, as foregoing foreign key integrity means that the tables can easily contain invalid references and also have no ability to use in-database cascade functionality. However, due to the popularity of these systems, as well as that it uses the fewest number of tables (which doesn't really offer any "advantage", though seems to be comforting to many) this recipe remains in high demand, so in the interests of having an easy StackOverflow answer queued up, here it is. The author recommends "table_per_related" or "table_per_association" instead of this approach. .. versionadded:: 0.8.3 """ from sqlalchemy.ext.declarative import as_declarative, declared_attr from sqlalchemy import create_engine, Integer, Column, \ String, and_ from sqlalchemy.orm import Session, relationship, foreign, remote, backref from sqlalchemy import event @as_declarative() class Base(object): """Base class which provides automated table name and surrogate primary key column. 
""" @declared_attr def __tablename__(cls): return cls.__name__.lower() id = Column(Integer, primary_key=True) class Address(Base): """The Address class. This represents all address records in a single table. """ street = Column(String) city = Column(String) zip = Column(String) discriminator = Column(String) """Refers to the type of parent.""" parent_id = Column(Integer) """Refers to the primary key of the parent. This could refer to any table. """ @property def parent(self): """Provides in-Python access to the "parent" by choosing the appropriate relationship. """ return getattr(self, "parent_%s" % self.discriminator) def __repr__(self): return "%s(street=%r, city=%r, zip=%r)" % \ (self.__class__.__name__, self.street, self.city, self.zip) class HasAddresses(object): """HasAddresses mixin, creates a relationship to the address_association table for each parent. """ @event.listens_for(HasAddresses, "mapper_configured", propagate=True) def setup_listener(mapper, class_): name = class_.__name__ discriminator = name.lower() class_.addresses = relationship(Address, primaryjoin=and_( class_.id == foreign(remote(Address.parent_id)), Address.discriminator == discriminator ), backref=backref( "parent_%s" % discriminator, primaryjoin=remote(class_.id) == foreign(Address.parent_id) ) ) @event.listens_for(class_.addresses, "append") def append_address(target, value, initiator): value.discriminator = discriminator class Customer(HasAddresses, Base): name = Column(String) class Supplier(HasAddresses, Base): company_name = Column(String) engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = Session(engine) session.add_all([ Customer( name='customer 1', addresses=[ Address( street='123 anywhere street', city="New York", zip="10110"), Address( street='40 main street', city="San Francisco", zip="95732") ] ), Supplier( company_name="Ace Hammers", addresses=[ Address( street='2569 west elm', city="Detroit", zip="56785") ] ), ]) session.commit() for customer in session.query(Customer): for address in customer.addresses: print(address) print(address.parent)SQLAlchemy-1.0.11/examples/generic_associations/__init__.py0000664000175000017500000000135412636375552024765 0ustar classicclassic00000000000000""" Illustrates various methods of associating multiple types of parents with a particular child object. The examples all use the declarative extension along with declarative mixins. Each one presents the identical use case at the end - two classes, ``Customer`` and ``Supplier``, both subclassing the ``HasAddresses`` mixin, which ensures that the parent class is provided with an ``addresses`` collection which contains ``Address`` objects. The :viewsource:`.discriminator_on_association` and :viewsource:`.generic_fk` scripts are modernized versions of recipes presented in the 2007 blog post `Polymorphic Associations with SQLAlchemy `_. .. autosource:: """SQLAlchemy-1.0.11/examples/generic_associations/table_per_association.py0000664000175000017500000000565612636375552027570 0ustar classicclassic00000000000000"""table_per_association.py Illustrates a mixin which provides a generic association via a individually generated association tables for each parent class. The associated objects themselves are persisted in a single table shared among all parents. This configuration has the advantage that all Address rows are in one table, so that the definition of "Address" can be maintained in one place. 
The association table contains the foreign key to Address so that Address has no dependency on the system. """ from sqlalchemy.ext.declarative import as_declarative, declared_attr from sqlalchemy import create_engine, Integer, Column, \ String, ForeignKey, Table from sqlalchemy.orm import Session, relationship @as_declarative() class Base(object): """Base class which provides automated table name and surrogate primary key column. """ @declared_attr def __tablename__(cls): return cls.__name__.lower() id = Column(Integer, primary_key=True) class Address(Base): """The Address class. This represents all address records in a single table. """ street = Column(String) city = Column(String) zip = Column(String) def __repr__(self): return "%s(street=%r, city=%r, zip=%r)" % \ (self.__class__.__name__, self.street, self.city, self.zip) class HasAddresses(object): """HasAddresses mixin, creates a new address_association table for each parent. """ @declared_attr def addresses(cls): address_association = Table( "%s_addresses" % cls.__tablename__, cls.metadata, Column("address_id", ForeignKey("address.id"), primary_key=True), Column("%s_id" % cls.__tablename__, ForeignKey("%s.id" % cls.__tablename__), primary_key=True), ) return relationship(Address, secondary=address_association) class Customer(HasAddresses, Base): name = Column(String) class Supplier(HasAddresses, Base): company_name = Column(String) engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = Session(engine) session.add_all([ Customer( name='customer 1', addresses=[ Address( street='123 anywhere street', city="New York", zip="10110"), Address( street='40 main street', city="San Francisco", zip="95732") ] ), Supplier( company_name="Ace Hammers", addresses=[ Address( street='2569 west elm', city="Detroit", zip="56785") ] ), ]) session.commit() for customer in session.query(Customer): for address in customer.addresses: print(address) # no parent hereSQLAlchemy-1.0.11/examples/generic_associations/table_per_related.py0000664000175000017500000000635212636375552026666 0ustar classicclassic00000000000000"""table_per_related.py Illustrates a generic association which persists association objects within individual tables, each one generated to persist those objects on behalf of a particular parent class. This configuration has the advantage that each type of parent maintains its "Address" rows separately, so that collection size for one type of parent will have no impact on other types of parent. Navigation between parent and "Address" is simple, direct, and bidirectional. This recipe is the most efficient (speed wise and storage wise) and simple of all of them. The creation of many related tables may seem at first like an issue but there really isn't any - the management and targeting of these tables is completely automated. """ from sqlalchemy.ext.declarative import as_declarative, declared_attr from sqlalchemy import create_engine, Integer, Column, String, ForeignKey from sqlalchemy.orm import Session, relationship @as_declarative() class Base(object): """Base class which provides automated table name and surrogate primary key column. """ @declared_attr def __tablename__(cls): return cls.__name__.lower() id = Column(Integer, primary_key=True) class Address(object): """Define columns that will be present in each 'Address' table. This is a declarative mixin, so additional mapped attributes beyond simple columns specified here should be set up using @declared_attr. 
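    For example (a hypothetical sketch; a ``geocode`` attribute is not
    part of this recipe, and ``deferred`` would need to be imported
    from ``sqlalchemy.orm``)::

        @declared_attr
        def geocode(cls):
            # anything beyond a plain Column needs declared_attr, so
            # that each generated Address subclass gets its own copy
            return deferred(Column(String(50)))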
""" street = Column(String) city = Column(String) zip = Column(String) def __repr__(self): return "%s(street=%r, city=%r, zip=%r)" % \ (self.__class__.__name__, self.street, self.city, self.zip) class HasAddresses(object): """HasAddresses mixin, creates a new Address class for each parent. """ @declared_attr def addresses(cls): cls.Address = type( "%sAddress" % cls.__name__, (Address, Base,), dict( __tablename__="%s_address" % cls.__tablename__, parent_id=Column(Integer, ForeignKey("%s.id" % cls.__tablename__)), parent=relationship(cls) ) ) return relationship(cls.Address) class Customer(HasAddresses, Base): name = Column(String) class Supplier(HasAddresses, Base): company_name = Column(String) engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = Session(engine) session.add_all([ Customer( name='customer 1', addresses=[ Customer.Address( street='123 anywhere street', city="New York", zip="10110"), Customer.Address( street='40 main street', city="San Francisco", zip="95732") ] ), Supplier( company_name="Ace Hammers", addresses=[ Supplier.Address( street='2569 west elm', city="Detroit", zip="56785") ] ), ]) session.commit() for customer in session.query(Customer): for address in customer.addresses: print(address) print(address.parent)SQLAlchemy-1.0.11/examples/generic_associations/discriminator_on_association.py0000664000175000017500000001025212636375552031162 0ustar classicclassic00000000000000"""discriminator_on_related.py Illustrates a mixin which provides a generic association using a single target table and a single association table, referred to by all parent tables. The association table contains a "discriminator" column which determines what type of parent object associates to each particular row in the association table. SQLAlchemy's single-table-inheritance feature is used to target different association types. This configuration attempts to simulate a so-called "generic foreign key" as closely as possible without actually foregoing the use of real foreign keys. Unlike table-per-related and table-per-association, it uses a fixed number of tables to serve any number of potential parent objects, but is also slightly more complex. """ from sqlalchemy.ext.declarative import as_declarative, declared_attr from sqlalchemy import create_engine, Integer, Column, \ String, ForeignKey from sqlalchemy.orm import Session, relationship, backref from sqlalchemy.ext.associationproxy import association_proxy @as_declarative() class Base(object): """Base class which provides automated table name and surrogate primary key column. """ @declared_attr def __tablename__(cls): return cls.__name__.lower() id = Column(Integer, primary_key=True) class AddressAssociation(Base): """Associates a collection of Address objects with a particular parent. """ __tablename__ = "address_association" discriminator = Column(String) """Refers to the type of parent.""" __mapper_args__ = {"polymorphic_on": discriminator} class Address(Base): """The Address class. This represents all address records in a single table. 
""" association_id = Column(Integer, ForeignKey("address_association.id")) street = Column(String) city = Column(String) zip = Column(String) association = relationship("AddressAssociation", backref="addresses") parent = association_proxy("association", "parent") def __repr__(self): return "%s(street=%r, city=%r, zip=%r)" % \ (self.__class__.__name__, self.street, self.city, self.zip) class HasAddresses(object): """HasAddresses mixin, creates a relationship to the address_association table for each parent. """ @declared_attr def address_association_id(cls): return Column(Integer, ForeignKey("address_association.id")) @declared_attr def address_association(cls): name = cls.__name__ discriminator = name.lower() assoc_cls = type( "%sAddressAssociation" % name, (AddressAssociation, ), dict( __tablename__=None, __mapper_args__={ "polymorphic_identity": discriminator } ) ) cls.addresses = association_proxy( "address_association", "addresses", creator=lambda addresses: assoc_cls(addresses=addresses) ) return relationship(assoc_cls, backref=backref("parent", uselist=False)) class Customer(HasAddresses, Base): name = Column(String) class Supplier(HasAddresses, Base): company_name = Column(String) engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = Session(engine) session.add_all([ Customer( name='customer 1', addresses=[ Address( street='123 anywhere street', city="New York", zip="10110"), Address( street='40 main street', city="San Francisco", zip="95732") ] ), Supplier( company_name="Ace Hammers", addresses=[ Address( street='2569 west elm', city="Detroit", zip="56785") ] ), ]) session.commit() for customer in session.query(Customer): for address in customer.addresses: print(address) print(address.parent)SQLAlchemy-1.0.11/examples/__init__.py0000664000175000017500000000000012636375552020555 0ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/vertical/0000775000175000017500000000000012636376632020267 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/vertical/dictlike.py0000664000175000017500000001165212636375552022436 0ustar classicclassic00000000000000"""Mapping a vertical table as a dictionary. This example illustrates accessing and modifying a "vertical" (or "properties", or pivoted) table via a dict-like interface. These are tables that store free-form object properties as rows instead of columns. For example, instead of:: # A regular ("horizontal") table has columns for 'species' and 'size' Table('animal', metadata, Column('id', Integer, primary_key=True), Column('species', Unicode), Column('size', Unicode)) A vertical table models this as two tables: one table for the base or parent entity, and another related table holding key/value pairs:: Table('animal', metadata, Column('id', Integer, primary_key=True)) # The properties table will have one row for a 'species' value, and # another row for the 'size' value. Table('properties', metadata Column('animal_id', Integer, ForeignKey('animal.id'), primary_key=True), Column('key', UnicodeText), Column('value', UnicodeText)) Because the key/value pairs in a vertical scheme are not fixed in advance, accessing them like a Python dict can be very convenient. The example below can be used with many common vertical schemas as-is or with minor adaptations. """ from __future__ import unicode_literals class ProxiedDictMixin(object): """Adds obj[key] access to a mapped class. This class basically proxies dictionary access to an attribute called ``_proxied``. 
The class which inherits this class should have an attribute called ``_proxied`` which points to a dictionary. """ def __len__(self): return len(self._proxied) def __iter__(self): return iter(self._proxied) def __getitem__(self, key): return self._proxied[key] def __contains__(self, key): return key in self._proxied def __setitem__(self, key, value): self._proxied[key] = value def __delitem__(self, key): del self._proxied[key] if __name__ == '__main__': from sqlalchemy import (Column, Integer, Unicode, ForeignKey, UnicodeText, and_, create_engine) from sqlalchemy.orm import relationship, Session from sqlalchemy.orm.collections import attribute_mapped_collection from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.associationproxy import association_proxy Base = declarative_base() class AnimalFact(Base): """A fact about an animal.""" __tablename__ = 'animal_fact' animal_id = Column(ForeignKey('animal.id'), primary_key=True) key = Column(Unicode(64), primary_key=True) value = Column(UnicodeText) class Animal(ProxiedDictMixin, Base): """an Animal""" __tablename__ = 'animal' id = Column(Integer, primary_key=True) name = Column(Unicode(100)) facts = relationship("AnimalFact", collection_class=attribute_mapped_collection('key')) _proxied = association_proxy("facts", "value", creator= lambda key, value: AnimalFact(key=key, value=value)) def __init__(self, name): self.name = name def __repr__(self): return "Animal(%r)" % self.name @classmethod def with_characteristic(self, key, value): return self.facts.any(key=key, value=value) engine = create_engine("sqlite://") Base.metadata.create_all(engine) session = Session(bind=engine) stoat = Animal('stoat') stoat['color'] = 'reddish' stoat['cuteness'] = 'somewhat' # dict-like assignment transparently creates entries in the # stoat.facts collection: print(stoat.facts['color']) session.add(stoat) session.commit() critter = session.query(Animal).filter(Animal.name == 'stoat').one() print(critter['color']) print(critter['cuteness']) critter['cuteness'] = 'very' print('changing cuteness:') marten = Animal('marten') marten['color'] = 'brown' marten['cuteness'] = 'somewhat' session.add(marten) shrew = Animal('shrew') shrew['cuteness'] = 'somewhat' shrew['poisonous-part'] = 'saliva' session.add(shrew) loris = Animal('slow loris') loris['cuteness'] = 'fairly' loris['poisonous-part'] = 'elbows' session.add(loris) q = (session.query(Animal). filter(Animal.facts.any( and_(AnimalFact.key == 'color', AnimalFact.value == 'reddish')))) print('reddish animals', q.all()) q = session.query(Animal).\ filter(Animal.with_characteristic("color", 'brown')) print('brown animals', q.all()) q = session.query(Animal).\ filter(~Animal.with_characteristic("poisonous-part", 'elbows')) print('animals without poisonous-part == elbows', q.all()) q = (session.query(Animal). filter(Animal.facts.any(value='somewhat'))) print('any animal with any .value of "somewhat"', q.all()) SQLAlchemy-1.0.11/examples/vertical/dictlike-polymorphic.py0000664000175000017500000001601512636375552024777 0ustar classicclassic00000000000000"""Mapping a polymorphic-valued vertical table as a dictionary. 
Builds upon the dictlike.py example to also add differently typed columns to the "fact" table, e.g.:: Table('properties', metadata, Column('owner_id', Integer, ForeignKey('owner.id'), primary_key=True), Column('key', UnicodeText), Column('type', Unicode(16)), Column('int_value', Integer), Column('char_value', UnicodeText), Column('bool_value', Boolean), Column('decimal_value', Numeric(10,2))) For any given properties row, the value of the 'type' column will point to the '_value' column active for that row. This example approach uses exactly the same dict mapping approach as the 'dictlike' example. It only differs in the mapping for vertical rows. Here, we'll use a @hybrid_property to build a smart '.value' attribute that wraps up reading and writing those various '_value' columns and keeps the '.type' up to date. """ from sqlalchemy.orm.interfaces import PropComparator from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy import event from sqlalchemy import literal_column from .dictlike import ProxiedDictMixin class PolymorphicVerticalProperty(object): """A key/value pair with polymorphic value storage. The class which is mapped should indicate typing information within the "info" dictionary of mapped Column objects; see the AnimalFact mapping below for an example. """ def __init__(self, key, value=None): self.key = key self.value = value @hybrid_property def value(self): fieldname, discriminator = self.type_map[self.type] if fieldname is None: return None else: return getattr(self, fieldname) @value.setter def value(self, value): py_type = type(value) fieldname, discriminator = self.type_map[py_type] self.type = discriminator if fieldname is not None: setattr(self, fieldname, value) @value.deleter def value(self): # route through the setter; this nulls out the storage column # and sets the discriminator to the 'none' type self.value = None @value.comparator class value(PropComparator): """A comparator for .value, builds a polymorphic comparison via CASE.
""" def __init__(self, cls): self.cls = cls def _case(self): pairs = set(self.cls.type_map.values()) whens = [ ( literal_column("'%s'" % discriminator), cast(getattr(self.cls, attribute), String) ) for attribute, discriminator in pairs if attribute is not None ] return case(whens, self.cls.type, null()) def __eq__(self, other): return self._case() == cast(other, String) def __ne__(self, other): return self._case() != cast(other, String) def __repr__(self): return '<%s %r=%r>' % (self.__class__.__name__, self.key, self.value) @event.listens_for(PolymorphicVerticalProperty, "mapper_configured", propagate=True) def on_new_class(mapper, cls_): """Look for Column objects with type info in them, and work up a lookup table.""" info_dict = {} info_dict[type(None)] = (None, 'none') info_dict['none'] = (None, 'none') for k in mapper.c.keys(): col = mapper.c[k] if 'type' in col.info: python_type, discriminator = col.info['type'] info_dict[python_type] = (k, discriminator) info_dict[discriminator] = (k, discriminator) cls_.type_map = info_dict if __name__ == '__main__': from sqlalchemy import (Column, Integer, Unicode, ForeignKey, UnicodeText, and_, or_, String, Boolean, cast, null, case, create_engine) from sqlalchemy.orm import relationship, Session from sqlalchemy.orm.collections import attribute_mapped_collection from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.associationproxy import association_proxy Base = declarative_base() class AnimalFact(PolymorphicVerticalProperty, Base): """A fact about an animal.""" __tablename__ = 'animal_fact' animal_id = Column(ForeignKey('animal.id'), primary_key=True) key = Column(Unicode(64), primary_key=True) type = Column(Unicode(16)) # add information about storage for different types # in the info dictionary of Columns int_value = Column(Integer, info={'type': (int, 'integer')}) char_value = Column(UnicodeText, info={'type': (str, 'string')}) boolean_value = Column(Boolean, info={'type': (bool, 'boolean')}) class Animal(ProxiedDictMixin._base_class(Base)): """an Animal""" __tablename__ = 'animal' id = Column(Integer, primary_key=True) name = Column(Unicode(100)) facts = relationship("AnimalFact", collection_class=attribute_mapped_collection('key')) _proxied = association_proxy("facts", "value", creator= lambda key, value: AnimalFact(key=key, value=value)) def __init__(self, name): self.name = name def __repr__(self): return "Animal(%r)" % self.name @classmethod def with_characteristic(self, key, value): return self.facts.any(key=key, value=value) engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = Session(engine) stoat = Animal('stoat') stoat['color'] = 'red' stoat['cuteness'] = 7 stoat['weasel-like'] = True session.add(stoat) session.commit() critter = session.query(Animal).filter(Animal.name == 'stoat').one() print(critter['color']) print(critter['cuteness']) print("changing cuteness value and type:") critter['cuteness'] = 'very cute' session.commit() marten = Animal('marten') marten['cuteness'] = 5 marten['weasel-like'] = True marten['poisonous'] = False session.add(marten) shrew = Animal('shrew') shrew['cuteness'] = 5 shrew['weasel-like'] = False shrew['poisonous'] = True session.add(shrew) session.commit() q = (session.query(Animal). filter(Animal.facts.any( and_(AnimalFact.key == 'weasel-like', AnimalFact.value == True)))) print('weasel-like animals', q.all()) q = (session.query(Animal). 
filter(Animal.with_characteristic('weasel-like', True))) print('weasel-like animals again', q.all()) q = (session.query(Animal). filter(Animal.with_characteristic('poisonous', False))) print('animals with poisonous=False', q.all()) q = (session.query(Animal). filter(or_( Animal.with_characteristic('poisonous', False), ~Animal.facts.any(AnimalFact.key == 'poisonous') ) ) ) print('non-poisonous animals', q.all()) q = (session.query(Animal). filter(Animal.facts.any(AnimalFact.value == 5))) print('any animal with a .value of 5', q.all()) SQLAlchemy-1.0.11/examples/vertical/__init__.py0000664000175000017500000000202312636375552022375 0ustar classicclassic00000000000000""" Illustrates "vertical table" mappings. A "vertical table" refers to a technique where individual attributes of an object are stored as distinct rows in a table. The "vertical table" technique is used to persist objects which can have a varied set of attributes, at the expense of simple query control and brevity. It is commonly found in content/document management systems in order to represent user-created structures flexibly. Two variants on the approach are given. In the second, each row references a "datatype" which contains information about the type of information stored in the attribute, such as integer, string, or date. Example:: shrew = Animal(u'shrew') shrew[u'cuteness'] = 5 shrew[u'weasel-like'] = False shrew[u'poisonous'] = True session.add(shrew) session.flush() q = (session.query(Animal). filter(Animal.facts.any( and_(AnimalFact.key == u'weasel-like', AnimalFact.value == True)))) print 'weasel-like animals', q.all() .. autosource:: """SQLAlchemy-1.0.11/examples/versioned_history/0000775000175000017500000000000012636376632022235 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/versioned_history/test_versioning.py0000664000175000017500000005045012636375552026035 0ustar classicclassic00000000000000"""Unit tests illustrating usage of the ``history_meta.py`` module functions.""" from unittest import TestCase from sqlalchemy.ext.declarative import declarative_base from .history_meta import Versioned, versioned_session from sqlalchemy import create_engine, Column, Integer, String, \ ForeignKey, Boolean, select from sqlalchemy.orm import clear_mappers, Session, deferred, relationship, \ column_property from sqlalchemy.testing import AssertsCompiledSQL, eq_, assert_raises from sqlalchemy.testing.entities import ComparableEntity from sqlalchemy.orm import exc as orm_exc import warnings warnings.simplefilter("error") engine = None def setup_module(): global engine engine = create_engine('sqlite://', echo=True) class TestVersioning(TestCase, AssertsCompiledSQL): __dialect__ = 'default' def setUp(self): self.session = Session(engine) self.Base = declarative_base() versioned_session(self.session) def tearDown(self): self.session.close() clear_mappers() self.Base.metadata.drop_all(engine) def create_tables(self): self.Base.metadata.create_all(engine) def test_plain(self): class SomeClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'sometable' id = Column(Integer, primary_key=True) name = Column(String(50)) self.create_tables() sess = self.session sc = SomeClass(name='sc1') sess.add(sc) sess.commit() sc.name = 'sc1modified' sess.commit() assert sc.version == 2 SomeClassHistory = SomeClass.__history_mapper__.class_ eq_( sess.query(SomeClassHistory).filter( SomeClassHistory.version == 1).all(), [SomeClassHistory(version=1, name='sc1')] ) sc.name = 'sc1modified2' eq_( sess.query(SomeClassHistory).order_by( 
SomeClassHistory.version).all(), [ SomeClassHistory(version=1, name='sc1'), SomeClassHistory(version=2, name='sc1modified') ] ) assert sc.version == 3 sess.commit() sc.name = 'temp' sc.name = 'sc1modified2' sess.commit() eq_( sess.query(SomeClassHistory).order_by( SomeClassHistory.version).all(), [ SomeClassHistory(version=1, name='sc1'), SomeClassHistory(version=2, name='sc1modified') ] ) sess.delete(sc) sess.commit() eq_( sess.query(SomeClassHistory).order_by( SomeClassHistory.version).all(), [ SomeClassHistory(version=1, name='sc1'), SomeClassHistory(version=2, name='sc1modified'), SomeClassHistory(version=3, name='sc1modified2') ] ) def test_w_mapper_versioning(self): class SomeClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'sometable' id = Column(Integer, primary_key=True) name = Column(String(50)) SomeClass.__mapper__.version_id_col = SomeClass.__table__.c.version self.create_tables() sess = self.session sc = SomeClass(name='sc1') sess.add(sc) sess.commit() s2 = Session(sess.bind) sc2 = s2.query(SomeClass).first() sc2.name = 'sc1modified' sc.name = 'sc1modified_again' sess.commit() eq_(sc.version, 2) assert_raises( orm_exc.StaleDataError, s2.flush ) def test_from_null(self): class SomeClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'sometable' id = Column(Integer, primary_key=True) name = Column(String(50)) self.create_tables() sess = self.session sc = SomeClass() sess.add(sc) sess.commit() sc.name = 'sc1' sess.commit() assert sc.version == 2 def test_insert_null(self): class SomeClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'sometable' id = Column(Integer, primary_key=True) boole = Column(Boolean, default=False) self.create_tables() sess = self.session sc = SomeClass(boole=True) sess.add(sc) sess.commit() sc.boole = None sess.commit() sc.boole = False sess.commit() SomeClassHistory = SomeClass.__history_mapper__.class_ eq_( sess.query(SomeClassHistory.boole).order_by( SomeClassHistory.id).all(), [(True, ), (None, )] ) eq_(sc.version, 3) def test_deferred(self): """test versioning of unloaded, deferred columns.""" class SomeClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'sometable' id = Column(Integer, primary_key=True) name = Column(String(50)) data = deferred(Column(String(25))) self.create_tables() sess = self.session sc = SomeClass(name='sc1', data='somedata') sess.add(sc) sess.commit() sess.close() sc = sess.query(SomeClass).first() assert 'data' not in sc.__dict__ sc.name = 'sc1modified' sess.commit() assert sc.version == 2 SomeClassHistory = SomeClass.__history_mapper__.class_ eq_( sess.query(SomeClassHistory).filter( SomeClassHistory.version == 1).all(), [SomeClassHistory(version=1, name='sc1', data='somedata')] ) def test_joined_inheritance(self): class BaseClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'basetable' id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(20)) __mapper_args__ = { 'polymorphic_on': type, 'polymorphic_identity': 'base'} class SubClassSeparatePk(BaseClass): __tablename__ = 'subtable1' id = column_property( Column(Integer, primary_key=True), BaseClass.id ) base_id = Column(Integer, ForeignKey('basetable.id')) subdata1 = Column(String(50)) __mapper_args__ = {'polymorphic_identity': 'sep'} class SubClassSamePk(BaseClass): __tablename__ = 'subtable2' id = Column( Integer, ForeignKey('basetable.id'), primary_key=True) subdata2 = Column(String(50)) __mapper_args__ = {'polymorphic_identity': 'same'} self.create_tables() sess = self.session sep1 
= SubClassSeparatePk(name='sep1', subdata1='sep1subdata') base1 = BaseClass(name='base1') same1 = SubClassSamePk(name='same1', subdata2='same1subdata') sess.add_all([sep1, base1, same1]) sess.commit() base1.name = 'base1mod' same1.subdata2 = 'same1subdatamod' sep1.name = 'sep1mod' sess.commit() BaseClassHistory = BaseClass.__history_mapper__.class_ SubClassSeparatePkHistory = \ SubClassSeparatePk.__history_mapper__.class_ SubClassSamePkHistory = SubClassSamePk.__history_mapper__.class_ eq_( sess.query(BaseClassHistory).order_by(BaseClassHistory.id).all(), [ SubClassSeparatePkHistory( id=1, name='sep1', type='sep', version=1), BaseClassHistory(id=2, name='base1', type='base', version=1), SubClassSamePkHistory( id=3, name='same1', type='same', version=1) ] ) same1.subdata2 = 'same1subdatamod2' eq_( sess.query(BaseClassHistory).order_by( BaseClassHistory.id, BaseClassHistory.version).all(), [ SubClassSeparatePkHistory( id=1, name='sep1', type='sep', version=1), BaseClassHistory(id=2, name='base1', type='base', version=1), SubClassSamePkHistory( id=3, name='same1', type='same', version=1), SubClassSamePkHistory( id=3, name='same1', type='same', version=2) ] ) base1.name = 'base1mod2' eq_( sess.query(BaseClassHistory).order_by( BaseClassHistory.id, BaseClassHistory.version).all(), [ SubClassSeparatePkHistory( id=1, name='sep1', type='sep', version=1), BaseClassHistory(id=2, name='base1', type='base', version=1), BaseClassHistory( id=2, name='base1mod', type='base', version=2), SubClassSamePkHistory( id=3, name='same1', type='same', version=1), SubClassSamePkHistory( id=3, name='same1', type='same', version=2) ] ) def test_joined_inheritance_multilevel(self): class BaseClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'basetable' id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(20)) __mapper_args__ = { 'polymorphic_on': type, 'polymorphic_identity': 'base'} class SubClass(BaseClass): __tablename__ = 'subtable' id = column_property( Column(Integer, primary_key=True), BaseClass.id ) base_id = Column(Integer, ForeignKey('basetable.id')) subdata1 = Column(String(50)) __mapper_args__ = {'polymorphic_identity': 'sub'} class SubSubClass(SubClass): __tablename__ = 'subsubtable' id = Column(Integer, ForeignKey('subtable.id'), primary_key=True) subdata2 = Column(String(50)) __mapper_args__ = {'polymorphic_identity': 'subsub'} self.create_tables() SubSubHistory = SubSubClass.__history_mapper__.class_ sess = self.session q = sess.query(SubSubHistory) self.assert_compile( q, "SELECT " "subsubtable_history.id AS subsubtable_history_id, " "subtable_history.id AS subtable_history_id, " "basetable_history.id AS basetable_history_id, " "subsubtable_history.changed AS subsubtable_history_changed, " "subtable_history.changed AS subtable_history_changed, " "basetable_history.changed AS basetable_history_changed, " "basetable_history.name AS basetable_history_name, " "basetable_history.type AS basetable_history_type, " "subsubtable_history.version AS subsubtable_history_version, " "subtable_history.version AS subtable_history_version, " "basetable_history.version AS basetable_history_version, " "subtable_history.base_id AS subtable_history_base_id, " "subtable_history.subdata1 AS subtable_history_subdata1, " "subsubtable_history.subdata2 AS subsubtable_history_subdata2 " "FROM basetable_history " "JOIN subtable_history " "ON basetable_history.id = subtable_history.base_id " "AND basetable_history.version = subtable_history.version " "JOIN subsubtable_history ON 
subtable_history.id = " "subsubtable_history.id AND subtable_history.version = " "subsubtable_history.version" ) ssc = SubSubClass(name='ss1', subdata1='sd1', subdata2='sd2') sess.add(ssc) sess.commit() eq_( sess.query(SubSubHistory).all(), [] ) ssc.subdata1 = 'sd11' ssc.subdata2 = 'sd22' sess.commit() eq_( sess.query(SubSubHistory).all(), [SubSubHistory(name='ss1', subdata1='sd1', subdata2='sd2', type='subsub', version=1)] ) eq_(ssc, SubSubClass( name='ss1', subdata1='sd11', subdata2='sd22', version=2)) def test_joined_inheritance_changed(self): class BaseClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'basetable' id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(20)) __mapper_args__ = { 'polymorphic_on': type, 'polymorphic_identity': 'base' } class SubClass(BaseClass): __tablename__ = 'subtable' id = Column(Integer, ForeignKey('basetable.id'), primary_key=True) __mapper_args__ = {'polymorphic_identity': 'sep'} self.create_tables() BaseClassHistory = BaseClass.__history_mapper__.class_ SubClassHistory = SubClass.__history_mapper__.class_ sess = self.session s1 = SubClass(name='s1') sess.add(s1) sess.commit() s1.name = 's2' sess.commit() actual_changed_base = sess.scalar( select([BaseClass.__history_mapper__.local_table.c.changed])) actual_changed_sub = sess.scalar( select([SubClass.__history_mapper__.local_table.c.changed])) h1 = sess.query(BaseClassHistory).first() eq_(h1.changed, actual_changed_base) eq_(h1.changed, actual_changed_sub) h1 = sess.query(SubClassHistory).first() eq_(h1.changed, actual_changed_base) eq_(h1.changed, actual_changed_sub) def test_single_inheritance(self): class BaseClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'basetable' id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) __mapper_args__ = { 'polymorphic_on': type, 'polymorphic_identity': 'base'} class SubClass(BaseClass): subname = Column(String(50), unique=True) __mapper_args__ = {'polymorphic_identity': 'sub'} self.create_tables() sess = self.session b1 = BaseClass(name='b1') sc = SubClass(name='s1', subname='sc1') sess.add_all([b1, sc]) sess.commit() b1.name = 'b1modified' BaseClassHistory = BaseClass.__history_mapper__.class_ SubClassHistory = SubClass.__history_mapper__.class_ eq_( sess.query(BaseClassHistory).order_by( BaseClassHistory.id, BaseClassHistory.version).all(), [BaseClassHistory(id=1, name='b1', type='base', version=1)] ) sc.name = 's1modified' b1.name = 'b1modified2' eq_( sess.query(BaseClassHistory).order_by( BaseClassHistory.id, BaseClassHistory.version).all(), [ BaseClassHistory(id=1, name='b1', type='base', version=1), BaseClassHistory( id=1, name='b1modified', type='base', version=2), SubClassHistory(id=2, name='s1', type='sub', version=1) ] ) # test the unique constraint on the subclass # column sc.name = "modifyagain" sess.flush() def test_unique(self): class SomeClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'sometable' id = Column(Integer, primary_key=True) name = Column(String(50), unique=True) data = Column(String(50)) self.create_tables() sess = self.session sc = SomeClass(name='sc1', data='sc1') sess.add(sc) sess.commit() sc.data = 'sc1modified' sess.commit() assert sc.version == 2 sc.data = 'sc1modified2' sess.commit() assert sc.version == 3 def test_relationship(self): class SomeRelated(self.Base, ComparableEntity): __tablename__ = 'somerelated' id = Column(Integer, primary_key=True) class SomeClass(Versioned, self.Base, ComparableEntity): __tablename__ = 
'sometable' id = Column(Integer, primary_key=True) name = Column(String(50)) related_id = Column(Integer, ForeignKey('somerelated.id')) related = relationship("SomeRelated", backref='classes') SomeClassHistory = SomeClass.__history_mapper__.class_ self.create_tables() sess = self.session sc = SomeClass(name='sc1') sess.add(sc) sess.commit() assert sc.version == 1 sr1 = SomeRelated() sc.related = sr1 sess.commit() assert sc.version == 2 eq_( sess.query(SomeClassHistory).filter( SomeClassHistory.version == 1).all(), [SomeClassHistory(version=1, name='sc1', related_id=None)] ) sc.related = None eq_( sess.query(SomeClassHistory).order_by( SomeClassHistory.version).all(), [ SomeClassHistory(version=1, name='sc1', related_id=None), SomeClassHistory(version=2, name='sc1', related_id=sr1.id) ] ) assert sc.version == 3 def test_backref_relationship(self): class SomeRelated(self.Base, ComparableEntity): __tablename__ = 'somerelated' id = Column(Integer, primary_key=True) name = Column(String(50)) related_id = Column(Integer, ForeignKey('sometable.id')) related = relationship("SomeClass", backref='related') class SomeClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'sometable' id = Column(Integer, primary_key=True) self.create_tables() sess = self.session sc = SomeClass() sess.add(sc) sess.commit() assert sc.version == 1 sr = SomeRelated(name='sr', related=sc) sess.add(sr) sess.commit() assert sc.version == 1 sr.name = 'sr2' sess.commit() assert sc.version == 1 sess.delete(sr) sess.commit() assert sc.version == 1 def test_create_double_flush(self): class SomeClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'sometable' id = Column(Integer, primary_key=True) name = Column(String(30)) other = Column(String(30)) self.create_tables() sc = SomeClass() self.session.add(sc) self.session.flush() sc.name = 'Foo' self.session.flush() assert sc.version == 2 def test_mutate_plain_column(self): class Document(self.Base, Versioned): __tablename__ = 'document' id = Column(Integer, primary_key=True, autoincrement=True) name = Column(String, nullable=True) description_ = Column('description', String, nullable=True) self.create_tables() document = Document() self.session.add(document) document.name = 'Foo' self.session.commit() document.name = 'Bar' self.session.commit() DocumentHistory = Document.__history_mapper__.class_ v2 = self.session.query(Document).one() v1 = self.session.query(DocumentHistory).one() self.assertEqual(v1.id, v2.id) self.assertEqual(v2.name, 'Bar') self.assertEqual(v1.name, 'Foo') def test_mutate_named_column(self): class Document(self.Base, Versioned): __tablename__ = 'document' id = Column(Integer, primary_key=True, autoincrement=True) name = Column(String, nullable=True) description_ = Column('description', String, nullable=True) self.create_tables() document = Document() self.session.add(document) document.description_ = 'Foo' self.session.commit() document.description_ = 'Bar' self.session.commit() DocumentHistory = Document.__history_mapper__.class_ v2 = self.session.query(Document).one() v1 = self.session.query(DocumentHistory).one() self.assertEqual(v1.id, v2.id) self.assertEqual(v2.description_, 'Bar') self.assertEqual(v1.description_, 'Foo') SQLAlchemy-1.0.11/examples/versioned_history/history_meta.py0000664000175000017500000002017112636375552025317 0ustar classicclassic00000000000000"""Versioned mixin class and other utilities.""" from sqlalchemy.ext.declarative import declared_attr from sqlalchemy.orm import mapper, attributes, object_mapper from 
sqlalchemy.orm.exc import UnmappedColumnError from sqlalchemy import Table, Column, ForeignKeyConstraint, Integer, DateTime from sqlalchemy import event, util import datetime from sqlalchemy.orm.properties import RelationshipProperty def col_references_table(col, table): for fk in col.foreign_keys: if fk.references(table): return True return False def _is_versioning_col(col): return "version_meta" in col.info def _history_mapper(local_mapper): cls = local_mapper.class_ # set the "active_history" flag # on column-mapped attributes so that the old version # of the info is always loaded (currently sets it on all attributes) for prop in local_mapper.iterate_properties: getattr(local_mapper.class_, prop.key).impl.active_history = True super_mapper = local_mapper.inherits super_history_mapper = getattr(cls, '__history_mapper__', None) polymorphic_on = None super_fks = [] def _col_copy(col): orig = col col = col.copy() orig.info['history_copy'] = col col.unique = False col.default = col.server_default = None return col properties = util.OrderedDict() if not super_mapper or \ local_mapper.local_table is not super_mapper.local_table: cols = [] version_meta = {"version_meta": True} # add column.info to identify # columns specific to versioning for column in local_mapper.local_table.c: if _is_versioning_col(column): continue col = _col_copy(column) if super_mapper and \ col_references_table(column, super_mapper.local_table): super_fks.append( ( col.key, list(super_history_mapper.local_table.primary_key)[0] ) ) cols.append(col) if column is local_mapper.polymorphic_on: polymorphic_on = col orig_prop = local_mapper.get_property_by_column(column) # carry over column re-mappings if len(orig_prop.columns) > 1 or \ orig_prop.columns[0].key != orig_prop.key: properties[orig_prop.key] = tuple( col.info['history_copy'] for col in orig_prop.columns) if super_mapper: super_fks.append( ( 'version', super_history_mapper.local_table.c.version ) ) # "version" stores the integer version id. This column is # required. cols.append( Column( 'version', Integer, primary_key=True, autoincrement=False, info=version_meta)) # "changed" column stores the UTC timestamp of when the # history row was created. # This column is optional and can be omitted. cols.append(Column( 'changed', DateTime, default=datetime.datetime.utcnow, info=version_meta)) if super_fks: cols.append(ForeignKeyConstraint(*zip(*super_fks))) table = Table( local_mapper.local_table.name + '_history', local_mapper.local_table.metadata, *cols, schema=local_mapper.local_table.schema ) else: # single table inheritance. take any additional columns that may have # been added and add them to the history table.
for column in local_mapper.local_table.c: if column.key not in super_history_mapper.local_table.c: col = _col_copy(column) super_history_mapper.local_table.append_column(col) table = None if super_history_mapper: bases = (super_history_mapper.class_,) if table is not None: properties['changed'] = ( (table.c.changed, ) + tuple(super_history_mapper.attrs.changed.columns) ) else: bases = local_mapper.base_mapper.class_.__bases__ versioned_cls = type.__new__(type, "%sHistory" % cls.__name__, bases, {}) m = mapper( versioned_cls, table, inherits=super_history_mapper, polymorphic_on=polymorphic_on, polymorphic_identity=local_mapper.polymorphic_identity, properties=properties ) cls.__history_mapper__ = m if not super_history_mapper: local_mapper.local_table.append_column( Column('version', Integer, default=1, nullable=False) ) local_mapper.add_property( "version", local_mapper.local_table.c.version) class Versioned(object): @declared_attr def __mapper_cls__(cls): def map(cls, *arg, **kw): mp = mapper(cls, *arg, **kw) _history_mapper(mp) return mp return map def versioned_objects(iter): for obj in iter: if hasattr(obj, '__history_mapper__'): yield obj def create_version(obj, session, deleted=False): obj_mapper = object_mapper(obj) history_mapper = obj.__history_mapper__ history_cls = history_mapper.class_ obj_state = attributes.instance_state(obj) attr = {} obj_changed = False for om, hm in zip( obj_mapper.iterate_to_root(), history_mapper.iterate_to_root() ): if hm.single: continue for hist_col in hm.local_table.c: if _is_versioning_col(hist_col): continue obj_col = om.local_table.c[hist_col.key] # get the value of the # attribute based on the MapperProperty related to the # mapped column. this will allow usage of MapperProperties # that have a different keyname than that of the mapped column. try: prop = obj_mapper.get_property_by_column(obj_col) except UnmappedColumnError: # in the case of single table inheritance, there may be # columns on the mapped table intended for the subclass only. # the "unmapped" status of the subclass column on the # base class is a feature of the declarative module. continue # expired object attributes and also deferred cols might not # be in the dict. force it to load no matter what by # using getattr(). if prop.key not in obj_state.dict: getattr(obj, prop.key) a, u, d = attributes.get_history(obj, prop.key) if d: attr[prop.key] = d[0] obj_changed = True elif u: attr[prop.key] = u[0] elif a: # if the attribute had no value. attr[prop.key] = a[0] obj_changed = True if not obj_changed: # not changed, but we have relationships. 
OK # check those too for prop in obj_mapper.iterate_properties: if isinstance(prop, RelationshipProperty) and \ attributes.get_history( obj, prop.key, passive=attributes.PASSIVE_NO_INITIALIZE).has_changes(): for p in prop.local_columns: if p.foreign_keys: obj_changed = True break if obj_changed is True: break if not obj_changed and not deleted: return attr['version'] = obj.version hist = history_cls() for key, value in attr.items(): setattr(hist, key, value) session.add(hist) obj.version += 1 def versioned_session(session): @event.listens_for(session, 'before_flush') def before_flush(session, flush_context, instances): for obj in versioned_objects(session.dirty): create_version(obj, session) for obj in versioned_objects(session.deleted): create_version(obj, session, deleted=True) SQLAlchemy-1.0.11/examples/versioned_history/__init__.py0000664000175000017500000000307312636375552024351 0ustar classicclassic00000000000000""" Illustrates an extension which creates version tables for entities and stores records for each change. The given extensions generate an anonymous "history" class which represents historical versions of the target object. Usage is illustrated via a unit test module ``test_versioning.py``, which can be run via nose:: cd examples/versioned_history nosetests -v A fragment of example usage, using declarative:: from history_meta import Versioned, versioned_session Base = declarative_base() class SomeClass(Versioned, Base): __tablename__ = 'sometable' id = Column(Integer, primary_key=True) name = Column(String(50)) def __eq__(self, other): assert type(other) is SomeClass and other.id == self.id Session = sessionmaker(bind=engine) versioned_session(Session) sess = Session() sc = SomeClass(name='sc1') sess.add(sc) sess.commit() sc.name = 'sc1modified' sess.commit() assert sc.version == 2 SomeClassHistory = SomeClass.__history_mapper__.class_ assert sess.query(SomeClassHistory).\\ filter(SomeClassHistory.version == 1).\\ all() \\ == [SomeClassHistory(version=1, name='sc1')] The ``Versioned`` mixin is designed to work with declarative. To use the extension with classical mappers, the ``_history_mapper`` function can be applied:: from history_meta import _history_mapper m = mapper(SomeClass, sometable) _history_mapper(m) SomeHistoryClass = SomeClass.__history_mapper__.class_ .. autosource:: """SQLAlchemy-1.0.11/examples/elementtree/test3.xml0000664000175000017500000000025112636375552022551 0ustar classicclassic00000000000000 test3
one there
SQLAlchemy-1.0.11/examples/elementtree/adjacency_list.py0000664000175000017500000001726112636375552024324 0ustar classicclassic00000000000000"""Illustrates an explicit way to persist an XML document expressed using ElementTree. Each DOM node is stored in an individual table row, with attributes represented in a separate table. The nodes are associated in a hierarchy using an adjacency list structure. A query function is introduced which can search for nodes along any path with a given structure of attributes, basically a (very narrow) subset of xpath. This example explicitly marshals/unmarshals the ElementTree document into mapped entities which have their own tables. Compare to pickle.py which uses pickle to accomplish the same task. Note that the usage of both styles of persistence is identical, as is the structure of the main Document class. """ ################################# PART I - Imports/Configuration #################################### from sqlalchemy import (MetaData, Table, Column, Integer, String, ForeignKey, Unicode, and_, create_engine) from sqlalchemy.orm import mapper, relationship, Session, lazyload import sys, os, io, re from xml.etree import ElementTree e = create_engine('sqlite://') meta = MetaData() ################################# PART II - Table Metadata ######################################### # stores a top level record of an XML document. documents = Table('documents', meta, Column('document_id', Integer, primary_key=True), Column('filename', String(30), unique=True), Column('element_id', Integer, ForeignKey('elements.element_id')) ) # stores XML nodes in an adjacency list model. This corresponds to # Element and SubElement objects. elements = Table('elements', meta, Column('element_id', Integer, primary_key=True), Column('parent_id', Integer, ForeignKey('elements.element_id')), Column('tag', Unicode(30), nullable=False), Column('text', Unicode), Column('tail', Unicode) ) # stores attributes. This corresponds to the dictionary of attributes # stored by an Element or SubElement. attributes = Table('attributes', meta, Column('element_id', Integer, ForeignKey('elements.element_id'), primary_key=True), Column('name', Unicode(100), nullable=False, primary_key=True), Column('value', Unicode(255))) meta.create_all(e) #################################### PART III - Model ############################################# # our document class. contains a string name, # and the ElementTree root element. class Document(object): def __init__(self, name, element): self.filename = name self.element = element def __str__(self): buf = io.StringIO() self.element.write(buf) return buf.getvalue() #################################### PART IV - Persistence Mapping ################################# # Node class. a non-public class which will represent # the DB-persisted Element/SubElement object. We cannot create mappers for # ElementTree elements directly because they are at the very least not new-style # classes, and also may be backed by native implementations. # so here we construct an adapter. class _Node(object): pass # Attribute class. also internal, this will represent the key/value attributes stored for # a particular Node. class _Attribute(object): def __init__(self, name, value): self.name = name self.value = value # setup mappers. Document will eagerly load a list of _Node objects. mapper(Document, documents, properties={ '_root':relationship(_Node, lazy='joined', cascade="all") }) mapper(_Node, elements, properties={ 'children':relationship(_Node, cascade="all"), # eagerly load attributes 'attributes':relationship(_Attribute, lazy='joined', cascade="all, delete-orphan"), }) mapper(_Attribute, attributes) # define marshalling functions that convert from _Node/_Attribute to/from ElementTree objects. # this will set the ElementTree element as "document._element", and append the root _Node # object to the "_root" mapped collection. class ElementTreeMarshal(object): def __get__(self, document, owner): if document is None: return self if hasattr(document, '_element'): return document._element def traverse(node, parent=None): if parent is not None: elem = ElementTree.SubElement(parent, node.tag) else: elem = ElementTree.Element(node.tag) elem.text = node.text elem.tail = node.tail for attr in node.attributes: elem.attrib[attr.name] = attr.value for child in node.children: traverse(child, parent=elem) return elem document._element = ElementTree.ElementTree(traverse(document._root)) return document._element def __set__(self, document, element): def traverse(node): n = _Node() n.tag = str(node.tag) n.text = str(node.text) n.tail = str(node.tail) n.children = [traverse(n2) for n2 in node] n.attributes = [_Attribute(str(k), str(v)) for k, v in node.attrib.items()] return n document._root = traverse(element.getroot()) document._element = element def __delete__(self, document): del document._element document._root = [] # override Document's "element" attribute with the marshaller. Document.element = ElementTreeMarshal() ########################################### PART V - Basic Persistence Example ##################### line = "\n--------------------------------------------------------" # save to DB session = Session(e) # get ElementTree documents for file in ('test.xml', 'test2.xml', 'test3.xml'): filename = os.path.join(os.path.dirname(__file__), file) doc = ElementTree.parse(filename) session.add(Document(file, doc)) print("\nSaving three documents...", line) session.commit() print("Done.") print("\nFull text of document 'test.xml':", line) document = session.query(Document).filter_by(filename="test.xml").first() print(document) ############################################ PART VI - Searching for Paths ######################### # manually search for a document which contains "/somefile/header/field1:hi" d = session.query(Document).join('_root', aliased=True).filter(_Node.tag=='somefile').\ join('children', aliased=True, from_joinpoint=True).filter(_Node.tag=='header').\ join('children', aliased=True, from_joinpoint=True).filter( and_(_Node.tag=='field1', _Node.text=='hi')).one() print(d) # generalize the above approach into an extremely impoverished xpath function: def find_document(path, compareto): query = session.query(Document) attribute = '_root' for i, match in enumerate(re.finditer(r'/([\w_]+)(?:\[@([\w_]+)(?:=(.*))?\])?', path)): (token, attrname, attrvalue) = match.group(1, 2, 3) query = query.join(attribute, aliased=True, from_joinpoint=True).filter(_Node.tag==token) attribute = 'children' if attrname: if attrvalue: query = query.join('attributes', aliased=True, from_joinpoint=True).filter( and_(_Attribute.name==attrname, _Attribute.value==attrvalue)) else: query = query.join('attributes', aliased=True, from_joinpoint=True).filter( _Attribute.name==attrname) return query.options(lazyload('_root')).filter(_Node.text==compareto).all()
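# As an illustrative walkthrough of the query construction above: a call such as
# find_document('/somefile/header/field2[@attr=foo]', 'there') (one of the paths
# exercised below) joins Document to its root _Node and filters on tag 'somefile',
# joins through 'children' once per remaining path segment, adds an 'attributes'
# join whenever an [@name=value] qualifier is present, and finally compares the
# text of the innermost node against the given value.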
for path, compareto in ( ('/somefile/header/field1', 'hi'), ('/somefile/field1', 'hi'), ('/somefile/header/field2', 'there'), ('/somefile/header/field2[@attr=foo]', 'there') ): print("\nDocuments containing '%s=%s':" % (path, compareto), line) print([d.filename for d in find_document(path, compareto)]) SQLAlchemy-1.0.11/examples/elementtree/test.xml0000664000175000017500000000037112636375552022471 0ustar classicclassic00000000000000 This is somefile.
hi there Some additional text within the header.
Some more text within somefile.
SQLAlchemy-1.0.11/examples/elementtree/test2.xml0000664000175000017500000000011112636375552022543 0ustar classicclassic00000000000000 hi there SQLAlchemy-1.0.11/examples/elementtree/optimized_al.py0000664000175000017500000001773512636375552024036 0ustar classicclassic00000000000000"""Uses the same strategy as ``adjacency_list.py``, but associates each DOM row with its owning document row, so that a full document of DOM nodes can be loaded using O(1) queries - the construction of the "hierarchy" is performed after the load in a non-recursive fashion and is more efficient. """ ##################### PART I - Imports/Configuration ######################### from sqlalchemy import (MetaData, Table, Column, Integer, String, ForeignKey, Unicode, and_, create_engine) from sqlalchemy.orm import mapper, relationship, Session, lazyload import sys, os, io, re from xml.etree import ElementTree e = create_engine('sqlite://', echo=True) meta = MetaData() ####################### PART II - Table Metadata ############################# # stores a top level record of an XML document. documents = Table('documents', meta, Column('document_id', Integer, primary_key=True), Column('filename', String(30), unique=True), ) # stores XML nodes in an adjacency list model. This corresponds to # Element and SubElement objects. elements = Table('elements', meta, Column('element_id', Integer, primary_key=True), Column('parent_id', Integer, ForeignKey('elements.element_id')), Column('document_id', Integer, ForeignKey('documents.document_id')), Column('tag', Unicode(30), nullable=False), Column('text', Unicode), Column('tail', Unicode) ) # stores attributes. This corresponds to the dictionary of attributes # stored by an Element or SubElement. attributes = Table('attributes', meta, Column('element_id', Integer, ForeignKey('elements.element_id'), primary_key=True), Column('name', Unicode(100), nullable=False, primary_key=True), Column('value', Unicode(255))) meta.create_all(e) ########################### PART III - Model ################################# # our document class. contains a string name, # and the ElementTree root element. class Document(object): def __init__(self, name, element): self.filename = name self.element = element def __str__(self): buf = io.StringIO() self.element.write(buf) return buf.getvalue() ########################## PART IV - Persistence Mapping ##################### # Node class. a non-public class which will represent # the DB-persisted Element/SubElement object. We cannot create mappers for # ElementTree elements directly because they are at the very least not new-style # classes, and also may be backed by native implementations. # so here we construct an adapter. class _Node(object): pass # Attribute class. also internal, this will represent the key/value attributes stored for # a particular Node. class _Attribute(object): def __init__(self, name, value): self.name = name self.value = value # setup mappers. Document will eagerly load a list of _Node objects. # they will be ordered in primary key/insert order, so that we can reconstruct # an ElementTree structure from the list. mapper(Document, documents, properties={ '_nodes':relationship(_Node, lazy='joined', cascade="all, delete-orphan") }) # the _Node objects change the way they load so that a list of _Nodes will organize # themselves hierarchically using the ElementTreeMarshal. this depends on the ordering of # nodes being hierarchical as well; relationship() always applies at least ROWID/primary key # ordering to rows which will suffice. 
mapper(_Node, elements, properties={ 'children':relationship(_Node, lazy=None), # doesn't load; used only for the save relationship 'attributes':relationship(_Attribute, lazy='joined', cascade="all, delete-orphan"), # eagerly load attributes }) mapper(_Attribute, attributes) # define marshalling functions that convert from _Node/_Attribute to/from ElementTree objects. # this will set the ElementTree element as "document._element", and append the root _Node # object to the "_nodes" mapped collection. class ElementTreeMarshal(object): def __get__(self, document, owner): if document is None: return self if hasattr(document, '_element'): return document._element nodes = {} root = None for node in document._nodes: if node.parent_id is not None: parent = nodes[node.parent_id] elem = ElementTree.SubElement(parent, node.tag) nodes[node.element_id] = elem else: parent = None elem = root = ElementTree.Element(node.tag) nodes[node.element_id] = root for attr in node.attributes: elem.attrib[attr.name] = attr.value elem.text = node.text elem.tail = node.tail document._element = ElementTree.ElementTree(root) return document._element def __set__(self, document, element): def traverse(node): n = _Node() n.tag = str(node.tag) n.text = str(node.text) n.tail = str(node.tail) document._nodes.append(n) n.children = [traverse(n2) for n2 in node] n.attributes = [_Attribute(str(k), str(v)) for k, v in node.attrib.items()] return n traverse(element.getroot()) document._element = element def __delete__(self, document): del document._element document._nodes = [] # override Document's "element" attribute with the marshaller. Document.element = ElementTreeMarshal() ###################### PART V - Basic Persistence Example #################### line = "\n--------------------------------------------------------" # save to DB session = Session(e) # get ElementTree documents for file in ('test.xml', 'test2.xml', 'test3.xml'): filename = os.path.join(os.path.dirname(__file__), file) doc = ElementTree.parse(filename) session.add(Document(file, doc)) print("\nSaving three documents...", line) session.commit() print("Done.") print("\nFull text of document 'test.xml':", line) document = session.query(Document).filter_by(filename="test.xml").first() print(document) ######################## PART VI - Searching for Paths ####################### # manually search for a document which contains "/somefile/header/field1:hi" print("\nManual search for /somefile/header/field1=='hi':", line) d = session.query(Document).join('_nodes', aliased=True).\ filter(and_(_Node.parent_id==None, _Node.tag=='somefile')).\ join('children', aliased=True, from_joinpoint=True).\ filter(_Node.tag=='header').\ join('children', aliased=True, from_joinpoint=True).\ filter(and_(_Node.tag=='field1', _Node.text=='hi')).\ one() print(d) # generalize the above approach into an extremely impoverished xpath function: def find_document(path, compareto): query = session.query(Document) first = True for i, match in enumerate(re.finditer(r'/([\w_]+)(?:\[@([\w_]+)(?:=(.*))?\])?', path)): (token, attrname, attrvalue) = match.group(1, 2, 3) if first: query = query.join('_nodes', aliased=True).filter(_Node.parent_id==None) first = False else: query = query.join('children', aliased=True, from_joinpoint=True) query = query.filter(_Node.tag==token) if attrname: query = query.join('attributes', aliased=True, from_joinpoint=True) if attrvalue: query = query.filter(and_(_Attribute.name==attrname, _Attribute.value==attrvalue)) else: query = query.filter(_Attribute.name==attrname) return query.options(lazyload('_nodes')).filter(_Node.text==compareto).all() for path, compareto in ( ('/somefile/header/field1', 'hi'), ('/somefile/field1', 'hi'), ('/somefile/header/field2', 'there'), ('/somefile/header/field2[@attr=foo]', 'there') ): print("\nDocuments containing '%s=%s':" % (path, compareto), line) print([d.filename for d in find_document(path, compareto)]) SQLAlchemy-1.0.11/examples/elementtree/__init__.py0000664000175000017500000000146212636375552023103 0ustar classicclassic00000000000000""" Illustrates three strategies for persisting and querying XML documents as represented by ElementTree in a relational database. The techniques do not apply any mappings to the ElementTree objects directly, so are compatible with the native cElementTree as well as lxml, and can be adapted to suit any kind of DOM representation system. Querying along xpath-like strings is illustrated as well. E.g.:: # parse an XML file and persist in the database doc = ElementTree.parse("test.xml") session.add(Document(file, doc)) session.commit() # locate documents with a certain path/attribute structure for document in find_document('/somefile/header/field2[@attr=foo]'): # dump the XML print(document) .. autosource:: :files: pickle.py, adjacency_list.py, optimized_al.py """SQLAlchemy-1.0.11/examples/elementtree/pickle.py0000664000175000017500000000343512636375552022615 0ustar classicclassic00000000000000"""Illustrates a quick and dirty way to persist an XML document expressed using ElementTree and pickle. This is a trivial example using PickleType to marshal/unmarshal the ElementTree document into a binary column. Compare to adjacency_list.py which stores the individual components of the ElementTree structure in distinct rows using two additional mapped entities. Note that the usage of both styles of persistence is identical, as is the structure of the main Document class. """ from sqlalchemy import (create_engine, MetaData, Table, Column, Integer, String, PickleType) from sqlalchemy.orm import mapper, Session import sys, os from xml.etree import ElementTree e = create_engine('sqlite://') meta = MetaData() # setup a comparator for the PickleType since it's a mutable # element. def are_elements_equal(x, y): return x == y # stores a top level record of an XML document. # the "element" column will store the ElementTree document as a BLOB. documents = Table('documents', meta, Column('document_id', Integer, primary_key=True), Column('filename', String(30), unique=True), Column('element', PickleType(comparator=are_elements_equal)) ) meta.create_all(e) # our document class. contains a string name, # and the ElementTree root element. class Document(object): def __init__(self, name, element): self.filename = name self.element = element # setup mapper. mapper(Document, documents) ###### time to test ! ######### # get ElementTree document filename = os.path.join(os.path.dirname(__file__), "test.xml") doc = ElementTree.parse(filename) # save to DB session = Session(e) session.add(Document("test.xml", doc)) session.commit() # restore document = session.query(Document).filter_by(filename="test.xml").first() # print document.element.write(sys.stdout) SQLAlchemy-1.0.11/examples/versioned_rows/0000775000175000017500000000000012636376632021526 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/versioned_rows/versioned_map.py0000664000175000017500000002161612636375552024741 0ustar classicclassic00000000000000"""A variant of the versioned_rows example.
Here we store a dictionary of key/value pairs, storing the k/v's in a "vertical" fashion where each key gets a row. The value is split out into two separate datatypes, string and int - the range of datatype storage can be adjusted for individual needs. Changes to the "data" attribute of a ConfigData object result in the ConfigData object being copied into a new one, and new associations to its data are created. Values which aren't changed between versions are referenced by both the former and the newer ConfigData object. Overall, only INSERT statements are emitted - no rows are UPDATed or DELETEd. An optional feature is also illustrated which associates individual key/value pairs with the ConfigData object in which it first originated. Since a new row is only persisted when a new value is created for a particular key, the recipe provides a way to query among the full series of changes which occurred for any particular key in the dictionary. The set of all ConfigData in a particular table represents a single series of versions. By adding additional columns to ConfigData, the system can be made to store multiple version streams distinguished by those additional values. """ from sqlalchemy import Column, String, Integer, ForeignKey, \ create_engine from sqlalchemy.orm.interfaces import SessionExtension from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import attributes, relationship, backref, \ sessionmaker, make_transient, validates from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.orm.collections import attribute_mapped_collection class VersionExtension(SessionExtension): """Apply the new_version() method of objects which are marked as dirty during a flush. See http://www.sqlalchemy.org/trac/wiki/UsageRecipes/VersionedRows """ def before_flush(self, session, flush_context, instances): for instance in session.dirty: if hasattr(instance, 'new_version') and \ session.is_modified(instance, passive=True): # make it transient instance.new_version(session) # re-add session.add(instance) Base = declarative_base() class ConfigData(Base): """Represent a series of key/value pairs. ConfigData will generate a new version of itself upon change. The "data" dictionary provides access via string name mapped to a string/int value. """ __tablename__ = 'config' id = Column(Integer, primary_key=True) """Primary key column of this ConfigData.""" elements = relationship("ConfigValueAssociation", collection_class=attribute_mapped_collection("name"), backref=backref("config_data"), lazy="subquery" ) """Dictionary-backed collection of ConfigValueAssociation objects, keyed to the name of the associated ConfigValue. Note there's no "cascade" here. ConfigValueAssociation objects are never deleted or changed. """ def _new_value(name, value): """Create a new entry for usage in the 'elements' dictionary.""" return ConfigValueAssociation(ConfigValue(name, value)) data = association_proxy("elements", "value", creator=_new_value) """Proxy to the 'value' elements of each related ConfigValue, via the 'elements' dictionary. """ def __init__(self, data): self.data = data @validates('elements') def _associate_with_element(self, key, element): """Associate incoming ConfigValues with this ConfigData, if not already associated. This is an optional feature which allows more comprehensive history tracking. 
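Only the first ConfigData to receive a given ConfigValue is recorded as that value's originating_config; later ConfigData versions which carry the same ConfigValue forward leave the association unchanged.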
""" if element.config_value.originating_config is None: element.config_value.originating_config = self return element def new_version(self, session): # convert to an INSERT make_transient(self) self.id = None # history of the 'elements' collection. # this is a tuple of groups: (added, unchanged, deleted) hist = attributes.get_history(self, 'elements') # rewrite the 'elements' collection # from scratch, removing all history attributes.set_committed_value(self, 'elements', {}) # new elements in the "added" group # are moved to our new collection. for elem in hist.added: self.elements[elem.name] = elem # copy elements in the 'unchanged' group. # the new ones associate with the new ConfigData, # the old ones stay associated with the old ConfigData for elem in hist.unchanged: self.elements[elem.name] = ConfigValueAssociation(elem.config_value) # we also need to expire changes on each ConfigValueAssociation # that is to remain associated with the old ConfigData. # Here, each one takes care of that in its new_version() # method, though we could do that here as well. class ConfigValueAssociation(Base): """Relate ConfigData objects to associated ConfigValue objects.""" __tablename__ = 'config_value_association' config_id = Column(ForeignKey('config.id'), primary_key=True) """Reference the primary key of the ConfigData object.""" config_value_id = Column(ForeignKey('config_value.id'), primary_key=True) """Reference the primary key of the ConfigValue object.""" config_value = relationship("ConfigValue", lazy="joined", innerjoin=True) """Reference the related ConfigValue object.""" def __init__(self, config_value): self.config_value = config_value def new_version(self, session): """Expire all pending state, as ConfigValueAssociation is immutable.""" session.expire(self) @property def name(self): return self.config_value.name @property def value(self): return self.config_value.value @value.setter def value(self, value): """Intercept set events. Create a new ConfigValueAssociation upon change, replacing this one in the parent ConfigData's dictionary. If no net change, do nothing. """ if value != self.config_value.value: self.config_data.elements[self.name] = \ ConfigValueAssociation( ConfigValue(self.config_value.name, value) ) class ConfigValue(Base): """Represent an individual key/value pair at a given point in time. ConfigValue is immutable. """ __tablename__ = 'config_value' id = Column(Integer, primary_key=True) name = Column(String(50), nullable=False) originating_config_id = Column(Integer, ForeignKey('config.id'), nullable=False) int_value = Column(Integer) string_value = Column(String(255)) def __init__(self, name, value): self.name = name self.value = value originating_config = relationship("ConfigData") """Reference to the originating ConfigData. This is optional, and allows history tracking of individual values. 
""" def new_version(self, session): raise NotImplementedError("ConfigValue is immutable.") @property def value(self): for k in ('int_value', 'string_value'): v = getattr(self, k) if v is not None: return v else: return None @value.setter def value(self, value): if isinstance(value, int): self.int_value = value self.string_value = None else: self.string_value = str(value) self.int_value = None if __name__ == '__main__': engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) Session = sessionmaker(bind=engine, extension=VersionExtension()) sess = Session() config = ConfigData({ 'user_name':'twitter', 'hash_id':'4fedffca37eaf', 'x':27, 'y':450 }) sess.add(config) sess.commit() version_one = config.id config.data['user_name'] = 'yahoo' sess.commit() version_two = config.id assert version_one != version_two # two versions have been created. assert config.data == { 'user_name':'yahoo', 'hash_id':'4fedffca37eaf', 'x':27, 'y':450 } old_config = sess.query(ConfigData).get(version_one) assert old_config.data == { 'user_name':'twitter', 'hash_id':'4fedffca37eaf', 'x':27, 'y':450 } # the history of any key can be acquired using # the originating_config_id attribute history = sess.query(ConfigValue).\ filter(ConfigValue.name=='user_name').\ order_by(ConfigValue.originating_config_id).\ all() assert [(h.value, h.originating_config_id) for h in history] == \ [('twitter', version_one), ('yahoo', version_two)] SQLAlchemy-1.0.11/examples/versioned_rows/versioned_rows.py0000664000175000017500000000540512636375552025154 0ustar classicclassic00000000000000"""Illustrates a method to intercept changes on objects, turning an UPDATE statement on a single row into an INSERT statement, so that a new row is inserted with the new data, keeping the old row intact. """ from sqlalchemy.orm import * from sqlalchemy import * from sqlalchemy.orm.interfaces import SessionExtension from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import attributes class Versioned(object): def new_version(self, session): # if on SQLA 0.6.1 or earlier, # make sure 'id' isn't expired. # self.id # make us transient (removes persistent # identity). make_transient(self) # set 'id' to None. # a new PK will be generated on INSERT. 
self.id = None class VersionExtension(SessionExtension): def before_flush(self, session, flush_context, instances): for instance in session.dirty: if not isinstance(instance, Versioned): continue if not session.is_modified(instance, passive=True): continue if not attributes.instance_state(instance).has_identity: continue # make it transient instance.new_version(session) # re-add session.add(instance) Base = declarative_base() engine = create_engine('sqlite://', echo=True) Session = sessionmaker(engine, extension=[VersionExtension()]) # example 1, simple versioning class Example(Versioned, Base): __tablename__ = 'example' id = Column(Integer, primary_key=True) data = Column(String) Base.metadata.create_all(engine) session = Session() e1 = Example(data='e1') session.add(e1) session.commit() e1.data = 'e2' session.commit() assert session.query(Example.id, Example.data).order_by(Example.id).all() == \ [(1, 'e1'), (2, 'e2')] # example 2, versioning with a parent class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) child_id = Column(Integer, ForeignKey('child.id')) child = relationship("Child", backref=backref('parent', uselist=False)) class Child(Versioned, Base): __tablename__ = 'child' id = Column(Integer, primary_key=True) data = Column(String) def new_version(self, session): # expire parent's reference to us session.expire(self.parent, ['child']) # create new version Versioned.new_version(self, session) # re-add ourselves to the parent self.parent.child = self Base.metadata.create_all(engine) session = Session() p1 = Parent(child=Child(data='c1')) session.add(p1) session.commit() p1.child.data = 'c2' session.commit() assert p1.child_id == 2 assert session.query(Child.id, Child.data).order_by(Child.id).all() == \ [(1, 'c1'), (2, 'c2')]SQLAlchemy-1.0.11/examples/versioned_rows/__init__.py0000664000175000017500000000025012636375552023634 0ustar classicclassic00000000000000""" Illustrates an extension which versions data by storing new rows for each change; that is, what would normally be an UPDATE becomes an INSERT. .. 
autosource:: """SQLAlchemy-1.0.11/examples/dynamic_dict/0000775000175000017500000000000012636376632021105 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/examples/dynamic_dict/dynamic_dict.py0000664000175000017500000000462112636375552024111 0ustar classicclassic00000000000000class ProxyDict(object): def __init__(self, parent, collection_name, childclass, keyname): self.parent = parent self.collection_name = collection_name self.childclass = childclass self.keyname = keyname @property def collection(self): return getattr(self.parent, self.collection_name) def keys(self): descriptor = getattr(self.childclass, self.keyname) return [x[0] for x in self.collection.values(descriptor)] def __getitem__(self, key): x = self.collection.filter_by(**{self.keyname:key}).first() if x: return x else: raise KeyError(key) def __setitem__(self, key, value): try: existing = self[key] self.collection.remove(existing) except KeyError: pass self.collection.append(value) from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import create_engine, Column, Integer, String, ForeignKey from sqlalchemy.orm import sessionmaker, relationship engine = create_engine('sqlite://', echo=True) Base = declarative_base(engine) class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) name = Column(String(50)) _collection = relationship("Child", lazy="dynamic", cascade="all, delete-orphan") @property def child_map(self): return ProxyDict(self, '_collection', Child, 'key') class Child(Base): __tablename__ = 'child' id = Column(Integer, primary_key=True) key = Column(String(50)) parent_id = Column(Integer, ForeignKey('parent.id')) def __repr__(self): return "Child(key=%r)" % self.key Base.metadata.create_all() sess = sessionmaker()() p1 = Parent(name='p1') sess.add(p1) print("\n---------begin setting nodes, autoflush occurs\n") p1.child_map['k1'] = Child(key='k1') p1.child_map['k2'] = Child(key='k2') # this will autoflush the current map. # ['k1', 'k2'] print("\n---------print keys - flushes first\n") print(list(p1.child_map.keys())) # k1 print("\n---------print 'k1' node\n") print(p1.child_map['k1']) print("\n---------update 'k2' node - must find existing, and replace\n") p1.child_map['k2'] = Child(key='k2') print("\n---------print 'k2' key - flushes first\n") # k2 print(p1.child_map['k2']) print("\n---------print all child nodes\n") # [k1, k2] print(sess.query(Child).all()) SQLAlchemy-1.0.11/examples/dynamic_dict/__init__.py0000664000175000017500000000037012636375552023216 0ustar classicclassic00000000000000""" Illustrates how to place a dictionary-like facade on top of a "dynamic" relation, so that dictionary operations (assuming simple string keys) can operate upon a large collection without loading the full collection at once. .. autosource:: """SQLAlchemy-1.0.11/LICENSE0000664000175000017500000000231512636375552015646 0ustar classicclassic00000000000000This is the MIT license: http://www.opensource.org/licenses/mit-license.php Copyright (c) 2005-2015 the SQLAlchemy authors and contributors <see AUTHORS file>. SQLAlchemy is a trademark of Michael Bayer.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. SQLAlchemy-1.0.11/doc/0000775000175000017500000000000012636376632015405 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/doc/changelog/0000775000175000017500000000000012636376632017334 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/doc/build/0000775000175000017500000000000012636376632016504 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/doc/build/changelog/0000775000175000017500000000000012636376632020433 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/doc/build/changelog/migration_09.rst0000664000175000017500000020645012636375552023475 0ustar classicclassic00000000000000============================== What's New in SQLAlchemy 0.9? ============================== .. admonition:: About this Document This document describes changes between SQLAlchemy version 0.8, undergoing maintenance releases as of May, 2013, and SQLAlchemy version 0.9, which had its first production release on December 30, 2013. Document last updated: June 10, 2015 Introduction ============ This guide introduces what's new in SQLAlchemy version 0.9, and also documents changes which affect users migrating their applications from the 0.8 series of SQLAlchemy to 0.9. Please carefully review :ref:`behavioral_changes_orm_09` and :ref:`behavioral_changes_core_09` for potentially backwards-incompatible changes. Platform Support ================ Targeting Python 2.6 and Up Now, Python 3 without 2to3 ------------------------------------------------------- The first achievement of the 0.9 release is to remove the dependency on the 2to3 tool for Python 3 compatibility. To make this more straightforward, the lowest Python release targeted now is 2.6, which features a wide degree of cross-compatibility with Python 3. All SQLAlchemy modules and unit tests are now interpreted equally well with any Python interpreter from 2.6 forward, including the 3.1 and 3.2 interpreters. :ticket:`2671` C Extensions Supported on Python 3 ----------------------------------- The C extensions have been ported to support Python 3 and now build in both Python 2 and Python 3 environments. :ticket:`2161` .. _behavioral_changes_orm_09: Behavioral Changes - ORM ======================== .. 
_migration_2824: Composite attributes are now returned as their object form when queried on a per-attribute basis ------------------------------------------------------------------------------------------------ Using a :class:`.Query` in conjunction with a composite attribute now returns the object type maintained by that composite, rather than being broken out into individual columns. Using the mapping setup at :ref:`mapper_composite`:: >>> session.query(Vertex.start, Vertex.end).\ ... filter(Vertex.start == Point(3, 4)).all() [(Point(x=3, y=4), Point(x=5, y=6))] This change is backwards-incompatible with code that expects the individual attribute to be expanded into individual columns. To get that behavior, use the ``.clauses`` accessor:: >>> session.query(Vertex.start.clauses, Vertex.end.clauses).\ ... filter(Vertex.start == Point(3, 4)).all() [(3, 4, 5, 6)] .. seealso:: :ref:`change_2824` :ticket:`2824` .. _migration_2736: :meth:`.Query.select_from` no longer applies the clause to corresponding entities --------------------------------------------------------------------------------- The :meth:`.Query.select_from` method has been popularized in recent versions as a means of controlling the first thing that a :class:`.Query` object "selects from", typically for the purposes of controlling how a JOIN will render. Consider the following example against the usual ``User`` mapping:: select_stmt = select([User]).where(User.id == 7).alias() q = session.query(User).\ join(select_stmt, User.id == select_stmt.c.id).\ filter(User.name == 'ed') The above statement predictably renders SQL like the following:: SELECT "user".id AS user_id, "user".name AS user_name FROM "user" JOIN (SELECT "user".id AS id, "user".name AS name FROM "user" WHERE "user".id = :id_1) AS anon_1 ON "user".id = anon_1.id WHERE "user".name = :name_1 If we wanted to reverse the order of the left and right elements of the JOIN, the documentation would lead us to believe we could use :meth:`.Query.select_from` to do so:: q = session.query(User).\ select_from(select_stmt).\ join(User, User.id == select_stmt.c.id).\ filter(User.name == 'ed') However, in version 0.8 and earlier, the above use of :meth:`.Query.select_from` would apply the ``select_stmt`` to **replace** the ``User`` entity, as it selects from the ``user`` table which is compatible with ``User``:: -- SQLAlchemy 0.8 and earlier... SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name FROM (SELECT "user".id AS id, "user".name AS name FROM "user" WHERE "user".id = :id_1) AS anon_1 JOIN "user" ON anon_1.id = anon_1.id WHERE anon_1.name = :name_1 The above statement is a mess: the ON clause refers to ``anon_1.id = anon_1.id``, and our WHERE clause has been replaced with ``anon_1`` as well. This behavior is quite intentional, but has a different use case from that which has become popular for :meth:`.Query.select_from`. The above behavior is now available by a new method known as :meth:`.Query.select_entity_from`.
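For applications that want to keep the old "replace the entity" behavior when moving to 0.9, a minimal sketch (assuming the same ``select_stmt`` and ``User`` mapping as above; an illustrative snippet, not from the original document) is simply to swap the method call::

    # 0.9: the 0.8-era "replace the entity" behavior, spelled explicitly
    q = session.query(User).\
        select_entity_from(select_stmt).\
        filter(User.name == 'ed')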
This is a lesser used behavior that in modern SQLAlchemy is roughly equivalent to selecting from a customized :func:`.aliased` construct:: select_stmt = select([User]).where(User.id == 7) user_from_stmt = aliased(User, select_stmt.alias()) q = session.query(user_from_stmt).filter(user_from_stmt.name == 'ed') So with SQLAlchemy 0.9, our query that selects from ``select_stmt`` produces the SQL we expect:: -- SQLAlchemy 0.9 SELECT "user".id AS user_id, "user".name AS user_name FROM (SELECT "user".id AS id, "user".name AS name FROM "user" WHERE "user".id = :id_1) AS anon_1 JOIN "user" ON "user".id = id WHERE "user".name = :name_1 The :meth:`.Query.select_entity_from` method will be available in SQLAlchemy **0.8.2**, so applications which rely on the old behavior can transition to this method first, ensure all tests continue to function, then upgrade to 0.9 without issue. :ticket:`2736` .. _migration_2833: ``viewonly=True`` on ``relationship()`` prevents history from taking effect --------------------------------------------------------------------------- The ``viewonly`` flag on :func:`.relationship` is applied to prevent changes to the target attribute from having any effect within the flush process. This is achieved by eliminating the attribute from being considered during the flush. However, up until now, changes to the attribute would still register the parent object as "dirty" and trigger a potential flush. The change is that the ``viewonly`` flag now prevents history from being set for the target attribute as well. Attribute events like backrefs and user-defined events still continue to function normally. The change is illustrated as follows:: from sqlalchemy import Column, Integer, ForeignKey, create_engine from sqlalchemy.orm import backref, relationship, Session from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import inspect Base = declarative_base() class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) class B(Base): __tablename__ = 'b' id = Column(Integer, primary_key=True) a_id = Column(Integer, ForeignKey('a.id')) a = relationship("A", backref=backref("bs", viewonly=True)) e = create_engine("sqlite://") Base.metadata.create_all(e) a = A() b = B() sess = Session(e) sess.add_all([a, b]) sess.commit() b.a = a assert b in sess.dirty # before 0.9.0 # assert a in sess.dirty # assert inspect(a).attrs.bs.history.has_changes() # after 0.9.0 assert a not in sess.dirty assert not inspect(a).attrs.bs.history.has_changes() :ticket:`2833` .. _migration_2751: Association Proxy SQL Expression Improvements and Fixes ------------------------------------------------------- The ``==`` and ``!=`` operators as implemented by an association proxy that refers to a scalar value on a scalar relationship now produces a more complete SQL expression, intended to take into account the "association" row being present or not when the comparison is against ``None``. 
Consider this mapping:: class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) b_id = Column(Integer, ForeignKey('b.id'), primary_key=True) b = relationship("B") b_value = association_proxy("b", "value") class B(Base): __tablename__ = 'b' id = Column(Integer, primary_key=True) value = Column(String) Up through 0.8, a query like the following:: s.query(A).filter(A.b_value == None).all() would produce:: SELECT a.id AS a_id, a.b_id AS a_b_id FROM a WHERE EXISTS (SELECT 1 FROM b WHERE b.id = a.b_id AND b.value IS NULL) In 0.9, it now produces:: SELECT a.id AS a_id, a.b_id AS a_b_id FROM a WHERE (EXISTS (SELECT 1 FROM b WHERE b.id = a.b_id AND b.value IS NULL)) OR a.b_id IS NULL The difference being, it not only checks ``b.value``, it also checks if ``a`` refers to no ``b`` row at all. This will return different results versus prior versions, for a system that uses this type of comparison where some parent rows have no association row. More critically, a correct expression is emitted for ``A.b_value != None``. In 0.8, this would return ``True`` for ``A`` rows that had no ``b``:: SELECT a.id AS a_id, a.b_id AS a_b_id FROM a WHERE NOT (EXISTS (SELECT 1 FROM b WHERE b.id = a.b_id AND b.value IS NULL)) Now in 0.9, the check has been reworked so that it ensures the A.b_id row is present, in addition to ``B.value`` being non-NULL:: SELECT a.id AS a_id, a.b_id AS a_b_id FROM a WHERE EXISTS (SELECT 1 FROM b WHERE b.id = a.b_id AND b.value IS NOT NULL) In addition, the ``has()`` operator is enhanced such that you can call it against a scalar column value with no criterion only, and it will produce criteria that checks for the association row being present or not:: s.query(A).filter(A.b_value.has()).all() output:: SELECT a.id AS a_id, a.b_id AS a_b_id FROM a WHERE EXISTS (SELECT 1 FROM b WHERE b.id = a.b_id) This is equivalent to ``A.b.has()``, but allows one to query against ``b_value`` directly. :ticket:`2751` .. _migration_2810: Association Proxy Missing Scalar returns None --------------------------------------------- An association proxy from a scalar attribute to a scalar will now return ``None`` if the proxied object isn't present. This is consistent with the fact that missing many-to-ones return None in SQLAlchemy, so should the proxied value. E.g.:: from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.associationproxy import association_proxy Base = declarative_base() class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) b = relationship("B", uselist=False) bname = association_proxy("b", "name") class B(Base): __tablename__ = 'b' id = Column(Integer, primary_key=True) a_id = Column(Integer, ForeignKey('a.id')) name = Column(String) a1 = A() # this is how m2o's always have worked assert a1.b is None # but prior to 0.9, this would raise AttributeError, # now returns None just like the proxied value. assert a1.bname is None :ticket:`2810` .. _change_2787: attributes.get_history() will query from the DB by default if value not present ------------------------------------------------------------------------------- A bugfix regarding :func:`.attributes.get_history` allows a column-based attribute to query out to the database for an unloaded value, assuming the ``passive`` flag is left at its default of ``PASSIVE_OFF``. Previously, this flag would not be honored. 
Additionally, a new method :meth:`.AttributeState.load_history` is added to complement the :attr:`.AttributeState.history` attribute, which will emit loader callables for an unloaded attribute. This is a small change demonstrated as follows:: from sqlalchemy import Column, Integer, String, create_engine, inspect from sqlalchemy.orm import Session, attributes from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) data = Column(String) e = create_engine("sqlite://", echo=True) Base.metadata.create_all(e) sess = Session(e) a1 = A(data='a1') sess.add(a1) sess.commit() # a1 is now expired # history doesn't emit loader callables assert inspect(a1).attrs.data.history == (None, None, None) # in 0.8, this would fail to load the unloaded state. assert attributes.get_history(a1, 'data') == ((), ['a1',], ()) # load_history() is now equivalent to get_history() with # passive=PASSIVE_OFF ^ INIT_OK assert inspect(a1).attrs.data.load_history() == ((), ['a1',], ()) :ticket:`2787` .. _behavioral_changes_core_09: Behavioral Changes - Core ========================= Type objects no longer accept ignored keyword arguments ------------------------------------------------------- Up through the 0.8 series, most type objects accepted arbitrary keyword arguments which were silently ignored:: from sqlalchemy import Date, Integer # storage_format argument here has no effect on any backend; # it needs to be on the SQLite-specific type d = Date(storage_format="%(day)02d.%(month)02d.%(year)04d") # display_width argument here has no effect on any backend; # it needs to be on the MySQL-specific type i = Integer(display_width=5) This was a very old bug for which a deprecation warning was added to the 0.8 series, but because nobody ever runs Python with the "-W" flag, it was mostly never seen:: $ python -W always::DeprecationWarning ~/dev/sqlalchemy/test.py /Users/classic/dev/sqlalchemy/test.py:5: SADeprecationWarning: Passing arguments to type object constructor is deprecated d = Date(storage_format="%(day)02d.%(month)02d.%(year)04d") /Users/classic/dev/sqlalchemy/test.py:9: SADeprecationWarning: Passing arguments to type object constructor is deprecated i = Integer(display_width=5) As of the 0.9 series the "catch all" constructor is removed from :class:`.TypeEngine`, and these meaningless arguments are no longer accepted. The correct way to make use of dialect-specific arguments such as ``storage_format`` and ``display_width`` is to use the appropriate dialect-specific types:: from sqlalchemy.dialects.sqlite import DATE from sqlalchemy.dialects.mysql import INTEGER d = DATE(storage_format="%(day)02d.%(month)02d.%(year)04d") i = INTEGER(display_width=5) What about the case where we want the dialect-agnostic type also? We use the :meth:`.TypeEngine.with_variant` method:: from sqlalchemy import Date, Integer from sqlalchemy.dialects.sqlite import DATE from sqlalchemy.dialects.mysql import INTEGER d = Date().with_variant( DATE(storage_format="%(day)02d.%(month)02d.%(year)04d"), "sqlite" ) i = Integer().with_variant( INTEGER(display_width=5), "mysql" ) :meth:`.TypeEngine.with_variant` isn't new; it was added in SQLAlchemy 0.7.2. So code that is running on the 0.8 series can be corrected to use this approach and tested before upgrading to 0.9.
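As a quick sanity check (a hypothetical snippet, not taken from the changelog), the removal of the catch-all constructor means a stray keyword on a generic type now fails immediately with a standard Python ``TypeError``, rather than being silently ignored::

    from sqlalchemy import Integer

    try:
        Integer(display_width=5)
    except TypeError:
        # 0.9: no catch-all constructor, so the bogus keyword
        # is rejected at construction time
        print("rejected, as expected on 0.9")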
``None`` can no longer be used as a "partial AND" constructor -------------------------------------------------------------- ``None`` can no longer be used as the "backstop" to form an AND condition piecemeal. This pattern was not a documented pattern even though some SQLAlchemy internals made use of it:: condition = None for cond in conditions: condition = condition & cond if condition is not None: stmt = stmt.where(condition) The above sequence, when ``conditions`` is non-empty, will on 0.9 produce ``SELECT .. WHERE <condition> AND NULL``. The ``None`` is no longer implicitly ignored, and is instead consistent with when ``None`` is interpreted in other contexts besides that of a conjunction. The correct code for both 0.8 and 0.9 should read:: from sqlalchemy.sql import and_ if conditions: stmt = stmt.where(and_(*conditions)) Another variant that works on all backends on 0.9, but on 0.8 only works on backends that support boolean constants:: from sqlalchemy.sql import true condition = true() for cond in conditions: condition = cond & condition stmt = stmt.where(condition) On 0.8, this will produce a SELECT statement that always has ``AND true`` in the WHERE clause, which is not accepted by backends that don't support boolean constants (MySQL, MSSQL). On 0.9, the ``true`` constant will be dropped within an ``and_()`` conjunction. .. seealso:: :ref:`migration_2804` .. _migration_2873: The "password" portion of a ``create_engine()`` no longer considers the ``+`` sign as an encoded space ------------------------------------------------------------------------------------------------------ For whatever reason, the Python function ``unquote_plus()`` was applied to the "password" field of a URL, which is an incorrect application of the encoding rules described in `RFC 1738 <http://www.ietf.org/rfc/rfc1738.txt>`_ in that it escaped spaces as plus signs. The stringification of a URL now only encodes ":", "@", or "/" and nothing else, and is now applied to both the ``username`` and ``password`` fields (previously it only applied to the password). On parsing, encoded characters are converted, but plus signs and spaces are passed through as is:: # password: "pass word + other:words" dbtype://user:pass word + other%3Awords@host/dbname # password: "apples/oranges" dbtype://username:apples%2Foranges@hostspec/database # password: "apples@oranges@@" dbtype://username:apples%40oranges%40%40@hostspec/database # password: '', username is "username@" dbtype://username%40:@hostspec/database :ticket:`2873` .. _migration_2879: The precedence rules for COLLATE have been changed -------------------------------------------------- Previously, an expression like the following:: print (column('x') == 'somevalue').collate("en_EN") would produce an expression like this:: -- 0.8 behavior (x = :x_1) COLLATE en_EN The above is misunderstood by MSSQL and is generally not the syntax suggested for any database.
The expression will now produce the syntax illustrated by that of most database documentation:: -- 0.9 behavior x = :x_1 COLLATE en_EN The potentially backwards incompatible change arises if the :meth:`.collate` operator is being applied to the right-hand column, as follows:: print column('x') == literal('somevalue').collate("en_EN") In 0.8, this produces:: x = :param_1 COLLATE en_EN However in 0.9, will now produce the more accurate, but probably not what you want, form of:: x = (:param_1 COLLATE en_EN) The :meth:`.ColumnOperators.collate` operator now works more appropriately within an ``ORDER BY`` expression as well, as a specific precedence has been given to the ``ASC`` and ``DESC`` operators which will again ensure no parentheses are generated:: >>> # 0.8 >>> print column('x').collate('en_EN').desc() (x COLLATE en_EN) DESC >>> # 0.9 >>> print column('x').collate('en_EN').desc() x COLLATE en_EN DESC :ticket:`2879` .. _migration_2878: Postgresql CREATE TYPE AS ENUM now applies quoting to values ---------------------------------------------------------------- The :class:`.postgresql.ENUM` type will now apply escaping to single quote signs within the enumerated values:: >>> from sqlalchemy.dialects import postgresql >>> type = postgresql.ENUM('one', 'two', "three's", name="myenum") >>> from sqlalchemy.dialects.postgresql import base >>> print base.CreateEnumType(type).compile(dialect=postgresql.dialect()) CREATE TYPE myenum AS ENUM ('one','two','three''s') Existing workarounds which already escape single quote signs will need to be modified, else they will now double-escape. :ticket:`2878` New Features ============ .. _feature_2268: Event Removal API ----------------- Events established using :func:`.event.listen` or :func:`.event.listens_for` can now be removed using the new :func:`.event.remove` function. The ``target``, ``identifier`` and ``fn`` arguments sent to :func:`.event.remove` need to match exactly those which were sent for listening, and the event will be removed from all locations in which it had been established:: @event.listens_for(MyClass, "before_insert", propagate=True) def my_before_insert(mapper, connection, target): """listen for before_insert""" # ... event.remove(MyClass, "before_insert", my_before_insert) In the example above, the ``propagate=True`` flag is set. This means ``my_before_insert()`` is established as a listener for ``MyClass`` as well as all subclasses of ``MyClass``. The system tracks everywhere that the ``my_before_insert()`` listener function had been placed as a result of this call and removes it as a result of calling :func:`.event.remove`. The removal system uses a registry to associate arguments passed to :func:`.event.listen` with collections of event listeners, which are in many cases wrapped versions of the original user-supplied function. This registry makes heavy use of weak references in order to allow all the contained contents, such as listener targets, to be garbage collected when they go out of scope. :ticket:`2268` .. _feature_1418: New Query Options API; ``load_only()`` option --------------------------------------------- The system of loader options such as :func:`.orm.joinedload`, :func:`.orm.subqueryload`, :func:`.orm.lazyload`, :func:`.orm.defer`, etc. all build upon a new system known as :class:`.Load`. :class:`.Load` provides a "method chained" (a.k.a. 
:term:`generative`) approach to loader options, so that instead of joining together long paths using dots or multiple attribute names, an explicit loader style is given for each path. While the new way is slightly more verbose, it is simpler to understand in that there is no ambiguity in what options are being applied to which paths; it simplifies the method signatures of the options and provides greater flexibility particularly for column-based options. The old systems are to remain functional indefinitely as well and all styles can be mixed. **Old Way** To set a certain style of loading along every link in a multi-element path, the ``_all()`` option has to be used:: query(User).options(joinedload_all("orders.items.keywords")) **New Way** Loader options are now chainable, so the same ``joinedload(x)`` method is applied equally to each link, without the need to keep straight between :func:`.joinedload` and :func:`.joinedload_all`:: query(User).options(joinedload("orders").joinedload("items").joinedload("keywords")) **Old Way** Setting an option on a path that is based on a subclass requires that all links in the path be spelled out as class bound attributes, since the :meth:`.PropComparator.of_type` method needs to be called:: session.query(Company).\ options( subqueryload_all( Company.employees.of_type(Engineer), Engineer.machines ) ) **New Way** Only those elements in the path that actually need :meth:`.PropComparator.of_type` need to be set as a class-bound attribute; string-based names can be resumed afterwards:: session.query(Company).\ options( subqueryload(Company.employees.of_type(Engineer)). subqueryload("machines") ) **Old Way** Setting the loader option on the last link in a long path uses a syntax that looks a lot like it should be setting the option for all links in the path, causing confusion:: query(User).options(subqueryload("orders.items.keywords")) **New Way** A path can now be spelled out using :func:`.defaultload` for entries in the path where the existing loader style should be unchanged.
More verbose but the intent is clearer:: query(User).options(defaultload("orders").defaultload("items").subqueryload("keywords")) The dotted style can still be taken advantage of, particularly in the case of skipping over several path elements:: query(User).options(defaultload("orders.items").subqueryload("keywords")) **Old Way** The :func:`.defer` option on a path needed to be spelled out with the full path for each column:: query(User).options(defer("orders.description"), defer("orders.isopen")) **New Way** A single :class:`.Load` object that arrives at the target path can have :meth:`.Load.defer` called upon it repeatedly:: query(User).options(defaultload("orders").defer("description").defer("isopen")) The Load Class ^^^^^^^^^^^^^^^ The :class:`.Load` class can be used directly to provide a "bound" target, especially when multiple parent entities are present:: from sqlalchemy.orm import Load query(User, Address).options(Load(Address).joinedload("entries")) Load Only ^^^^^^^^^ A new option :func:`.load_only` achieves a "defer everything but" style of load, loading only the given columns and deferring the rest:: from sqlalchemy.orm import load_only query(User).options(load_only("name", "fullname")) # specify explicit parent entity query(User, Address).options(Load(User).load_only("name", "fullname")) # specify path query(User).options(joinedload(User.addresses).load_only("email_address")) Class-specific Wildcards ^^^^^^^^^^^^^^^^^^^^^^^^^ Using :class:`.Load`, a wildcard may be used to set the loading for all relationships (or perhaps columns) on a given entity, without affecting any others:: # lazyload all User relationships query(User).options(Load(User).lazyload("*")) # undefer all User columns query(User).options(Load(User).undefer("*")) # lazyload all Address relationships query(User).options(defaultload(User.addresses).lazyload("*")) # undefer all Address columns query(User).options(defaultload(User.addresses).undefer("*")) :ticket:`1418` .. _feature_2877: New ``text()`` Capabilities --------------------------- The :func:`.text` construct gains new methods: * :meth:`.TextClause.bindparams` allows bound parameter types and values to be set flexibly:: # setup values stmt = text("SELECT id, name FROM user " "WHERE name=:name AND timestamp=:timestamp").\ bindparams(name="ed", timestamp=datetime(2012, 11, 10, 15, 12, 35)) # setup types and/or values stmt = text("SELECT id, name FROM user " "WHERE name=:name AND timestamp=:timestamp").\ bindparams( bindparam("name", value="ed"), bindparam("timestamp", type_=DateTime()) ).bindparams(timestamp=datetime(2012, 11, 10, 15, 12, 35)) * :meth:`.TextClause.columns` supersedes the ``typemap`` option of :func:`.text`, returning a new construct :class:`.TextAsFrom`:: # turn a text() into an alias(), with a .c. collection: stmt = text("SELECT id, name FROM user").columns(id=Integer, name=String) stmt = stmt.alias() stmt = select([addresses]).select_from( addresses.join(stmt, addresses.c.user_id == stmt.c.id)) # or into a cte(): stmt = text("SELECT id, name FROM user").columns(id=Integer, name=String) stmt = stmt.cte("x") stmt = select([addresses]).select_from( addresses.join(stmt, addresses.c.user_id == stmt.c.id)) :ticket:`2877` .. _feature_722: INSERT from SELECT ------------------ After literally years of pointless procrastination this relatively minor syntactical feature has been added, and is also backported to 0.8.3, so technically isn't "new" in 0.9.
A :func:`.select` construct or other compatible construct can be passed to the new method :meth:`.Insert.from_select` where it will be used to render an ``INSERT .. SELECT`` construct:: >>> from sqlalchemy.sql import table, column >>> t1 = table('t1', column('a'), column('b')) >>> t2 = table('t2', column('x'), column('y')) >>> print(t1.insert().from_select(['a', 'b'], t2.select().where(t2.c.y == 5))) INSERT INTO t1 (a, b) SELECT t2.x, t2.y FROM t2 WHERE t2.y = :y_1 The construct is smart enough to also accommodate ORM objects such as classes and :class:`.Query` objects:: s = Session() q = s.query(User.id, User.name).filter_by(name='ed') ins = insert(Address).from_select((Address.id, Address.email_address), q) rendering:: INSERT INTO addresses (id, email_address) SELECT users.id AS users_id, users.name AS users_name FROM users WHERE users.name = :name_1 :ticket:`722` .. _feature_github_42: New FOR UPDATE support on ``select()``, ``Query()`` --------------------------------------------------- An attempt is made to simplify the specification of the ``FOR UPDATE`` clause on ``SELECT`` statements made within Core and ORM, and support is added for the ``FOR UPDATE OF`` SQL supported by Postgresql and Oracle. Using the core :meth:`.GenerativeSelect.with_for_update`, options like ``FOR SHARE`` and ``NOWAIT`` can be specified individually, rather than linking to arbitrary string codes:: stmt = select([table]).with_for_update(read=True, nowait=True, of=table) On Postgresql the above statement might render like:: SELECT table.a, table.b FROM table FOR SHARE OF table NOWAIT The :class:`.Query` object gains a similar method :meth:`.Query.with_for_update` which behaves in the same way. This method supersedes the existing :meth:`.Query.with_lockmode` method, which translated ``FOR UPDATE`` clauses using a different system. At the moment, the "lockmode" string argument is still accepted by the :meth:`.Session.refresh` method. .. _feature_2867: Floating Point String-Conversion Precision Configurable for Native Floating Point Types --------------------------------------------------------------------------------------- The conversion which SQLAlchemy does whenever a DBAPI returns a Python floating point type which is to be converted into a Python ``Decimal()`` necessarily involves an intermediary step which converts the floating point value to a string. The scale used for this string conversion was previously hardcoded to 10, and is now configurable. The setting is available on both the :class:`.Numeric` as well as the :class:`.Float` type, as well as all SQL- and dialect-specific descendant types, using the parameter ``decimal_return_scale``. If the type supports a ``.scale`` parameter, as is the case with :class:`.Numeric` and some float types such as :class:`.mysql.DOUBLE`, the value of ``.scale`` is used as the default for ``.decimal_return_scale`` if it is not otherwise specified. If both ``.scale`` and ``.decimal_return_scale`` are absent, then the default of 10 takes place. E.g.:: from sqlalchemy.dialects.mysql import DOUBLE import decimal data = Table('data', metadata, Column('double_value', DOUBLE(decimal_return_scale=12, asdecimal=True)) ) conn.execute( data.insert(), double_value=45.768392065789, ) result = conn.scalar(select([data.c.double_value])) # previously, this would typically be Decimal("45.7683920658"), # e.g.
trimmed to 10 decimal places # now we get 12, as requested, as MySQL can support this # much precision for DOUBLE assert result == decimal.Decimal("45.768392065789") :ticket:`2867` .. _change_2824: Column Bundles for ORM queries ------------------------------ The :class:`.Bundle` allows for querying of sets of columns, which are then grouped into one name under the tuple returned by the query. The initial purposes of :class:`.Bundle` are 1. to allow "composite" ORM columns to be returned as a single value in a column-based result set, rather than expanding them out into individual columns and 2. to allow the creation of custom result-set constructs within the ORM, using ad-hoc columns and return types, without involving the more heavyweight mechanics of mapped classes. .. seealso:: :ref:`migration_2824` :ref:`bundles` :ticket:`2824` Server Side Version Counting ----------------------------- The versioning feature of the ORM (now also documented at :ref:`mapper_version_counter`) can now make use of server-side version counting schemes, such as those produced by triggers or database system columns, as well as conditional programmatic schemes outside of the version_id_counter function itself. By providing the value ``False`` to the ``version_id_generator`` parameter, the ORM will use the already-set version identifier, or alternatively fetch the version identifier from each row at the same time the INSERT or UPDATE is emitted. When using a server-generated version identifier, it is strongly recommended that this feature be used only on a backend with strong RETURNING support (Postgresql, SQL Server; Oracle also supports RETURNING but the cx_oracle driver has only limited support), else the additional SELECT statements will add significant performance overhead. The example provided at :ref:`server_side_version_counter` illustrates the usage of the Postgresql ``xmin`` system column in order to integrate it with the ORM's versioning feature. .. seealso:: :ref:`server_side_version_counter` :ticket:`2793` .. _feature_1535: ``include_backrefs=False`` option for ``@validates`` ---------------------------------------------------- The :func:`.validates` function now accepts an option ``include_backrefs=False``, which will bypass firing the validator for the case where the event initiated from a backref:: from sqlalchemy import Column, Integer, ForeignKey from sqlalchemy.orm import relationship, validates from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) bs = relationship("B", backref="a") @validates("bs") def validate_bs(self, key, item): print("A.bs validator") return item class B(Base): __tablename__ = 'b' id = Column(Integer, primary_key=True) a_id = Column(Integer, ForeignKey('a.id')) @validates("a", include_backrefs=False) def validate_a(self, key, item): print("B.a validator") return item a1 = A() a1.bs.append(B()) # prints only "A.bs validator" :ticket:`1535` Postgresql JSON Type -------------------- The Postgresql dialect now features a :class:`.postgresql.JSON` type to complement the :class:`.postgresql.HSTORE` type. .. seealso:: :class:`.postgresql.JSON` :ticket:`2581` .. _feature_automap: Automap Extension ----------------- A new extension is added in **0.9.1** known as :mod:`sqlalchemy.ext.automap`. This is an **experimental** extension which expands upon the functionality of Declarative as well as the :class:`.DeferredReflection` class.
Essentially, the extension provides a base class :class:`.AutomapBase` which automatically generates mapped classes and relationships between them based on given table metadata. The :class:`.MetaData` in use normally might be produced via reflection, but there is no requirement that reflection is used. The most basic usage illustrates how :mod:`sqlalchemy.ext.automap` is able to deliver mapped classes, including relationships, based on a reflected schema:: from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine Base = automap_base() # engine, suppose it has two tables 'user' and 'address' set up engine = create_engine("sqlite:///mydatabase.db") # reflect the tables Base.prepare(engine, reflect=True) # mapped classes are now created with names matching that of the table # name. User = Base.classes.user Address = Base.classes.address session = Session(engine) # rudimentary relationships are produced u1 = User(name="foo") session.add(Address(email_address="foo@bar.com", user=u1)) session.commit() # collection-based relationships are by default named "<classname>_collection" print (u1.address_collection) Beyond that, the :class:`.AutomapBase` class is a declarative base, and supports all the features that declarative does. The "automapping" feature can be used with an existing, explicitly declared schema to generate relationships and missing classes only. Naming schemes and relationship-production routines can be dropped in using callable functions. It is hoped that the :class:`.AutomapBase` system provides a quick and modernized solution to the problem that the very famous `SQLSoup <https://sqlsoup.readthedocs.org/>`_ also tries to solve, that of generating a quick and rudimentary object model from an existing database on the fly. By addressing the issue strictly at the mapper configuration level, and integrating fully with existing Declarative class techniques, :class:`.AutomapBase` seeks to provide a well-integrated approach to the issue of expediently auto-generating ad-hoc mappings. .. seealso:: :ref:`automap_toplevel` Behavioral Improvements ======================= Improvements that should produce no compatibility issues except in exceedingly rare and unusual hypothetical cases, but are good to be aware of in case there are unexpected issues. .. _feature_joins_09: Many JOIN and LEFT OUTER JOIN expressions will no longer be wrapped in (SELECT * FROM ..) 
AS ANON_1 --------------------------------------------------------------------------------------------------- For many years, the SQLAlchemy ORM has been held back from being able to nest a JOIN inside the right side of an existing JOIN (typically a LEFT OUTER JOIN, as INNER JOINs could always be flattened):: SELECT a.*, b.*, c.* FROM a LEFT OUTER JOIN (b JOIN c ON b.id = c.id) ON a.id = b.id This was due to the fact that SQLite, even today, cannot parse a statement of the above format:: SQLite version 3.7.15.2 2013-01-09 11:53:05 Enter ".help" for instructions Enter SQL statements terminated with a ";" sqlite> create table a(id integer); sqlite> create table b(id integer); sqlite> create table c(id integer); sqlite> select a.id, b.id, c.id from a left outer join (b join c on b.id=c.id) on b.id=a.id; Error: no such column: b.id Right-outer-joins are of course another way to work around right-side parenthesization; this would be significantly complicated and visually unpleasant to implement, but fortunately SQLite doesn't support RIGHT OUTER JOIN either :):: sqlite> select a.id, b.id, c.id from b join c on b.id=c.id ...> right outer join a on b.id=a.id; Error: RIGHT and FULL OUTER JOINs are not currently supported Back in 2005, it wasn't clear if other databases had trouble with this form, but today it seems clear every database tested except SQLite now supports it (Oracle 8, a very old database, doesn't support the JOIN keyword at all, but SQLAlchemy has always had a simple rewriting scheme in place for Oracle's syntax). To make matters worse, SQLAlchemy's usual workaround of applying a SELECT often degrades performance on platforms like Postgresql and MySQL:: SELECT a.*, anon_1.* FROM a LEFT OUTER JOIN ( SELECT b.id AS b_id, c.id AS c_id FROM b JOIN c ON b.id = c.id ) AS anon_1 ON a.id=anon_1.b_id A JOIN like the above form is commonplace when working with joined-table inheritance structures; any time :meth:`.Query.join` is used to join from some parent to a joined-table subclass, or when :func:`.joinedload` is used similarly, SQLAlchemy's ORM would always make sure a nested JOIN was never rendered, lest the query wouldn't be able to run on SQLite. Even though the Core has always supported a JOIN of the more compact form, the ORM had to avoid it. An additional issue would arise when producing joins across many-to-many relationships where special criteria is present in the ON clause. Consider an eager load join like the following:: session.query(Order).outerjoin(Order.items) Assuming a many-to-many from ``Order`` to ``Item`` which actually refers to a subclass like ``Subitem``, the SQL for the above would look like:: SELECT order.id, order.name FROM order LEFT OUTER JOIN order_item ON order.id = order_item.order_id LEFT OUTER JOIN item ON order_item.item_id = item.id AND item.type = 'subitem' What's wrong with the above query? Basically, that it will load many ``order`` / ``order_item`` rows where the criteria of ``item.type == 'subitem'`` is not true. As of SQLAlchemy 0.9, an entirely new approach has been taken. The ORM no longer worries about nesting JOINs in the right side of an enclosing JOIN, and it now will render these as often as possible while still returning the correct results. When the SQL statement is passed to be compiled, the **dialect compiler** will **rewrite the join** to suit the target backend, if that backend is known to not support a right-nested JOIN (which currently is only SQLite - if other backends have this issue please let us know!). 
So a regular ``query(Parent).join(Subclass)`` will now usually produce a simpler expression:: SELECT parent.id AS parent_id FROM parent JOIN ( base_table JOIN subclass_table ON base_table.id = subclass_table.id) ON parent.id = base_table.parent_id Joined eager loads like ``query(Parent).options(joinedload(Parent.subclasses))`` will alias the individual tables instead of wrapping in an ``ANON_1``:: SELECT parent.*, base_table_1.*, subclass_table_1.* FROM parent LEFT OUTER JOIN ( base_table AS base_table_1 JOIN subclass_table AS subclass_table_1 ON base_table_1.id = subclass_table_1.id) ON parent.id = base_table_1.parent_id Many-to-many joins and eagerloads will right nest the "secondary" and "right" tables:: SELECT order.id, order.name FROM order LEFT OUTER JOIN (order_item JOIN item ON order_item.item_id = item.id AND item.type = 'subitem') ON order_item.order_id = order.id All of these joins, when rendered with a :class:`.Select` statement that specifically specifies ``use_labels=True``, which is true for all the queries the ORM emits, are candidates for "join rewriting", which is the process of rewriting all those right-nested joins into nested SELECT statements, while maintaining the identical labeling used by the :class:`.Select`. So SQLite, the one database that won't support this very common SQL syntax even in 2013, shoulders the extra complexity itself, with the above queries rewritten as:: -- sqlite only! SELECT parent.id AS parent_id FROM parent JOIN ( SELECT base_table.id AS base_table_id, base_table.parent_id AS base_table_parent_id, subclass_table.id AS subclass_table_id FROM base_table JOIN subclass_table ON base_table.id = subclass_table.id ) AS anon_1 ON parent.id = anon_1.base_table_parent_id -- sqlite only! SELECT parent.id AS parent_id, anon_1.subclass_table_1_id AS subclass_table_1_id, anon_1.base_table_1_id AS base_table_1_id, anon_1.base_table_1_parent_id AS base_table_1_parent_id FROM parent LEFT OUTER JOIN ( SELECT base_table_1.id AS base_table_1_id, base_table_1.parent_id AS base_table_1_parent_id, subclass_table_1.id AS subclass_table_1_id FROM base_table AS base_table_1 JOIN subclass_table AS subclass_table_1 ON base_table_1.id = subclass_table_1.id ) AS anon_1 ON parent.id = anon_1.base_table_1_parent_id -- sqlite only! SELECT "order".id AS order_id FROM "order" LEFT OUTER JOIN ( SELECT order_item_1.order_id AS order_item_1_order_id, order_item_1.item_id AS order_item_1_item_id, item.id AS item_id, item.type AS item_type FROM order_item AS order_item_1 JOIN item ON item.id = order_item_1.item_id AND item.type IN (?) ) AS anon_1 ON "order".id = anon_1.order_item_1_order_id The :meth:`.Join.alias`, :func:`.aliased` and :func:`.with_polymorphic` functions now support a new argument, ``flat=True``, which is used to construct aliases of joined-table entities without embedding into a SELECT. 
This flag is not on by default, to help with backwards compatibility - but now a "polymorphic" selectable can be joined as a target without any subqueries generated:: employee_alias = with_polymorphic(Person, [Engineer, Manager], flat=True) session.query(Company).join( Company.employees.of_type(employee_alias) ).filter( or_( Engineer.primary_language == 'python', Manager.manager_name == 'dilbert' ) ) Generates (everywhere except SQLite):: SELECT companies.company_id AS companies_company_id, companies.name AS companies_name FROM companies JOIN ( people AS people_1 LEFT OUTER JOIN engineers AS engineers_1 ON people_1.person_id = engineers_1.person_id LEFT OUTER JOIN managers AS managers_1 ON people_1.person_id = managers_1.person_id ) ON companies.company_id = people_1.company_id WHERE engineers.primary_language = %(primary_language_1)s OR managers.manager_name = %(manager_name_1)s :ticket:`2369` :ticket:`2587` .. _feature_2976: Right-nested inner joins available in joined eager loads --------------------------------------------------------- As of version 0.9.4, the above mentioned right-nested joining can be enabled in the case of a joined eager load where an "outer" join is linked to an "inner" on the right side. Normally, a joined eager load chain like the following:: query(User).options(joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True)) Would not produce an inner join; because of the LEFT OUTER JOIN from user->order, joined eager loading could not use an INNER join from order->items without changing the user rows that are returned, and would instead ignore the "chained" ``innerjoin=True`` directive. How 0.9.0 should have delivered this would be that instead of:: FROM users LEFT OUTER JOIN orders ON <onclause> LEFT OUTER JOIN items ON <onclause> the new "right-nested joins are OK" logic would kick in, and we'd get:: FROM users LEFT OUTER JOIN (orders JOIN items ON <onclause>) ON <onclause> Since we missed the boat on that, to avoid further regressions we've added the above functionality by specifying the string ``"nested"`` to :paramref:`.joinedload.innerjoin`:: query(User).options(joinedload("orders", innerjoin=False).joinedload("items", innerjoin="nested")) This feature is new in 0.9.4. :ticket:`2976` ORM can efficiently fetch just-generated INSERT/UPDATE defaults using RETURNING ------------------------------------------------------------------------------- The :class:`.Mapper` has long supported an undocumented flag known as ``eager_defaults=True``. The effect of this flag is that when an INSERT or UPDATE proceeds, and the row is known to have server-generated default values, a SELECT would immediately follow it in order to "eagerly" load those new values. Normally, the server-generated columns are marked as "expired" on the object, so that no overhead is incurred unless the application actually accesses these columns soon after the flush. The ``eager_defaults`` flag was therefore not of much use as it could only decrease performance, and was present only to support exotic event schemes where users needed default values to be available immediately within the flush process. In 0.9, as a result of the version id enhancements, ``eager_defaults`` can now emit a RETURNING clause for these values, so on a backend with strong RETURNING support, in particular Postgresql, the ORM can fetch newly generated default and SQL expression values inline with the INSERT or UPDATE.
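A minimal declarative sketch of opting in (a hypothetical model; the ``eager_defaults`` mapper argument is the relevant piece)::

    from sqlalchemy import Column, DateTime, Integer, func
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Widget(Base):
        __tablename__ = 'widget'
        id = Column(Integer, primary_key=True)
        # server-generated; normally re-loaded by a SELECT after the flush
        timestamp = Column(DateTime, server_default=func.now())
        # with eager_defaults, a RETURNING-capable backend fetches the
        # value inline with the INSERT/UPDATE instead
        __mapper_args__ = {'eager_defaults': True}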
``eager_defaults``, when enabled, makes use of RETURNING automatically when the target backend and :class:`.Table` support "implicit returning". .. _change_2836: Subquery Eager Loading will apply DISTINCT to the innermost SELECT for some queries ------------------------------------------------------------------------------------ In an effort to reduce the number of duplicate rows that can be generated by subquery eager loading when a many-to-one relationship is involved, a DISTINCT keyword will be applied to the innermost SELECT when the join is targeting columns that do not comprise the primary key, as when loading along a many-to-one. That is, when subquery loading on a many-to-one from A->B:: SELECT b.id AS b_id, b.name AS b_name, anon_1.a_b_id AS a_b_id FROM (SELECT DISTINCT a.b_id AS a_b_id FROM a) AS anon_1 JOIN b ON b.id = anon_1.a_b_id Since ``a.b_id`` is a non-distinct foreign key, DISTINCT is applied so that redundant ``a.b_id`` are eliminated. The behavior can be turned on or off unconditionally for a particular :func:`.relationship` using the flag ``distinct_target_key``, setting the value to ``True`` for unconditionally on, ``False`` for unconditionally off, and ``None`` for the feature to take effect when the target SELECT is against columns that do not comprise a full primary key. In 0.9, ``None`` is the default. The option is also backported to 0.8 where the ``distinct_target_key`` option defaults to ``False``. While the feature here is designed to help performance by eliminating duplicate rows, the ``DISTINCT`` keyword in SQL itself can have a negative performance impact. If columns in the SELECT are not indexed, ``DISTINCT`` will likely perform an ``ORDER BY`` on the rowset which can be expensive. By keeping the feature limited just to foreign keys which are hopefully indexed in any case, it's expected that the new defaults are reasonable. The feature also does not eliminate every possible dupe-row scenario; if a many-to-one is present elsewhere in the chain of joins, dupe rows may still be present. :ticket:`2836` .. _migration_2789: Backref handlers can now propagate more than one level deep ----------------------------------------------------------- The mechanism by which attribute events pass along their "initiator", that is the object associated with the start of the event, has been changed; instead of a :class:`.AttributeImpl` being passed, a new object :class:`.attributes.Event` is passed; this object refers to the :class:`.AttributeImpl` as well as to an "operation token", representing if the operation is an append, remove, or replace operation. The attribute event system no longer looks at this "initiator" object in order to halt a recursive series of attribute events. Instead, the system of preventing endless recursion due to mutually-dependent backref handlers has been moved to the ORM backref event handlers specifically, which now take over the role of ensuring that a chain of mutually-dependent events (such as append to collection A.bs, set many-to-one attribute B.a in response) doesn't go into an endless recursion stream.
The rationale here is that the backref system, given more detail and control over event propagation, can finally allow operations more than one level deep to occur; the typical scenario is when a collection append results in a many-to-one replacement operation, which in turn should cause the item to be removed from a previous collection:: class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) children = relationship("Child", backref="parent") class Child(Base): __tablename__ = 'child' id = Column(Integer, primary_key=True) parent_id = Column(ForeignKey('parent.id')) p1 = Parent() p2 = Parent() c1 = Child() p1.children.append(c1) assert c1.parent is p1 # backref event establishes c1.parent as p1 p2.children.append(c1) assert c1.parent is p2 # backref event establishes c1.parent as p2 assert c1 not in p1.children # second backref event removes c1 from p1.children Above, prior to this change, the ``c1`` object would still have been present in ``p1.children``, even though it is also present in ``p2.children`` at the same time; the backref handlers would have stopped at replacing ``c1.parent`` with ``p2`` instead of ``p1``. In 0.9, using the more detailed :class:`.Event` object as well as letting the backref handlers make more detailed decisions about these objects, the propagation can continue onto removing ``c1`` from ``p1.children`` while maintaining a check against the propagation from going into an endless recursive loop. End-user code which a. makes use of the :meth:`.AttributeEvents.set`, :meth:`.AttributeEvents.append`, or :meth:`.AttributeEvents.remove` events, and b. initiates further attribute modification operations as a result of these events may need to be modified to prevent recursive loops, as the attribute system no longer stops a chain of events from propagating endlessly in the absence of the backref event handlers. Additionally, code which depends upon the value of the ``initiator`` will need to be adjusted to the new API, and furthermore must be ready for the value of ``initiator`` to change from its original value within a string of backref-initiated events, as the backref handlers may now swap in a new ``initiator`` value for some operations. :ticket:`2789` .. _change_2838: The typing system now handles the task of rendering "literal bind" values ------------------------------------------------------------------------- A new method is added to :class:`.TypeEngine`, :meth:`.TypeEngine.literal_processor`, as well as :meth:`.TypeDecorator.process_literal_param` for :class:`.TypeDecorator`, which take on the task of rendering so-called "inline literal parameters" - parameters that normally render as "bound" values, but are instead being rendered inline into the SQL statement due to the compiler configuration. This feature is used when generating DDL for constructs such as :class:`.CheckConstraint`, as well as by Alembic when using constructs such as ``op.inline_literal()``. Previously, a simple "isinstance" check was performed against a few basic types, and the "bind processor" was used unconditionally, leading to such issues as strings being encoded into utf-8 prematurely. Custom types written with :class:`.TypeDecorator` should continue to work in "inline literal" scenarios, as the :meth:`.TypeDecorator.process_literal_param` falls back to :meth:`.TypeDecorator.process_bind_param` by default, as these methods usually handle a data manipulation, not as much how the data is presented to the database.
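For example, a rough sketch of a :class:`.TypeDecorator` that customizes its inline rendering (a hypothetical type; the simple quote-doubling shown is an assumption that may not suit every backend)::

    from sqlalchemy.types import TypeDecorator, String

    class UpperString(TypeDecorator):
        impl = String

        def process_bind_param(self, value, dialect):
            # the normal bound-parameter path
            return value.upper()

        def process_literal_param(self, value, dialect):
            # called when the compiler must render the value inline,
            # e.g. within DDL; if not overridden, this falls back to
            # process_bind_param
            return "'%s'" % value.upper().replace("'", "''")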
:meth:`.TypeDecorator.process_literal_param` can be specified to specifically produce a string representing how a value should be rendered into an inline DDL statement. :ticket:`2838` .. _change_2812: Schema identifiers now carry along their own quoting information --------------------------------------------------------------------- This change simplifies the Core's usage of so-called "quote" flags, such as the ``quote`` flag passed to :class:`.Table` and :class:`.Column`. The flag is now internalized within the string name itself, which is now represented as an instance of :class:`.quoted_name`, a string subclass. The :class:`.IdentifierPreparer` now relies solely on the quoting preferences reported by the :class:`.quoted_name` object rather than checking for any explicit ``quote`` flags in most cases. The issue resolved here includes that various case-sensitive methods such as :meth:`.Engine.has_table` as well as similar methods within dialects now function with explicitly quoted names, without the need to complicate or introduce backwards-incompatible changes to those APIs (many of which are 3rd party) with the details of quoting flags - in particular, a wider range of identifiers now function correctly with the so-called "uppercase" backends like Oracle, Firebird, and DB2 (backends that store and report upon table and column names using all uppercase for case insensitive names). The :class:`.quoted_name` object is used internally as needed; however if other keywords require fixed quoting preferences, the class is available publicly. :ticket:`2812` .. _migration_2804: Improved rendering of Boolean constants, NULL constants, conjunctions ---------------------------------------------------------------------- New capabilities have been added to the :func:`.true` and :func:`.false` constants, in particular in conjunction with :func:`.and_` and :func:`.or_` functions as well as the behavior of the WHERE/HAVING clauses in conjunction with these types, boolean types overall, and the :func:`.null` constant. Starting with a table such as this:: from sqlalchemy import Table, Boolean, Integer, Column, MetaData t1 = Table('t', MetaData(), Column('x', Boolean()), Column('y', Integer)) A select construct will now render the boolean column as a binary expression on backends that don't feature ``true``/``false`` constant behavior:: >>> from sqlalchemy import select, and_, false, true >>> from sqlalchemy.dialects import mysql, postgresql >>> print select([t1]).where(t1.c.x).compile(dialect=mysql.dialect()) SELECT t.x, t.y FROM t WHERE t.x = 1 The :func:`.and_` and :func:`.or_` constructs will now exhibit quasi "short circuit" behavior, that is truncating a rendered expression, when a :func:`.true` or :func:`.false` constant is present:: >>> print select([t1]).where(and_(t1.c.y > 5, false())).compile( ... dialect=postgresql.dialect()) SELECT t.x, t.y FROM t WHERE false :func:`.true` can be used as the base to build up an expression:: >>> expr = true() >>> expr = expr & (t1.c.y > 5) >>> print select([t1]).where(expr) SELECT t.x, t.y FROM t WHERE t.y > :y_1 The boolean constants :func:`.true` and :func:`.false` themselves render as ``1 = 1`` and ``0 = 1`` for a backend with no boolean constants:: >>> print select([t1]).where(and_(t1.c.y > 5, false())).compile( ... 
dialect=mysql.dialect()) SELECT t.x, t.y FROM t WHERE 0 = 1 Interpretation of ``None``, while not particularly valid SQL, is at least now consistent:: >>> print select([t1.c.x]).where(None) SELECT t.x FROM t WHERE NULL >>> print select([t1.c.x]).where(None).where(None) SELECT t.x FROM t WHERE NULL AND NULL >>> print select([t1.c.x]).where(and_(None, None)) SELECT t.x FROM t WHERE NULL AND NULL :ticket:`2804` .. _migration_1068: Label constructs can now render as their name alone in an ORDER BY ------------------------------------------------------------------ For the case where a :class:`.Label` is used in both the columns clause as well as the ORDER BY clause of a SELECT, the label will render as just its name in the ORDER BY clause, assuming the underlying dialect reports support of this feature. E.g. an example like:: from sqlalchemy.sql import table, column, select, func t = table('t', column('c1'), column('c2')) expr = (func.foo(t.c.c1) + t.c.c2).label("expr") stmt = select([expr]).order_by(expr) print stmt Prior to 0.9 would render as:: SELECT foo(t.c1) + t.c2 AS expr FROM t ORDER BY foo(t.c1) + t.c2 And now renders as:: SELECT foo(t.c1) + t.c2 AS expr FROM t ORDER BY expr The ORDER BY only renders the label if the label isn't further embedded into an expression within the ORDER BY, other than a simple ``ASC`` or ``DESC``. The above format works on all databases tested, but might have compatibility issues with older database versions (MySQL 4? Oracle 8? etc.). Based on user reports we can add rules that will disable the feature based on database version detection. :ticket:`1068` .. _migration_2848: ``RowProxy`` now has tuple-sorting behavior ------------------------------------------- The :class:`.RowProxy` object acts much like a tuple, but up until now would not sort as a tuple if a list of them were sorted using ``sorted()``. The ``__eq__()`` method now compares both sides as a tuple and also an ``__lt__()`` method has been added:: users.insert().execute( dict(user_id=1, user_name='foo'), dict(user_id=2, user_name='bar'), dict(user_id=3, user_name='def'), ) rows = users.select().order_by(users.c.user_name).execute().fetchall() eq_(rows, [(2, 'bar'), (3, 'def'), (1, 'foo')]) eq_(sorted(rows), [(1, 'foo'), (2, 'bar'), (3, 'def')]) :ticket:`2848` .. _migration_2850: A bindparam() construct with no type gets upgraded via copy when a type is available ------------------------------------------------------------------------------------ The logic which "upgrades" a :func:`.bindparam` construct to take on the type of the enclosing expression has been improved in two ways. First, the :func:`.bindparam` object is **copied** before the new type is assigned, so that the given :func:`.bindparam` is not mutated in place. Secondly, this same operation occurs when an :class:`.Insert` or :class:`.Update` construct is compiled, regarding the "values" that were set in the statement via the :meth:`.ValuesBase.values` method. If given an untyped :func:`.bindparam`:: bp = bindparam("some_col") If we use this parameter as follows:: expr = mytable.c.col == bp The type for ``bp`` remains as ``NullType``, however if ``mytable.c.col`` is of type ``String``, then ``expr.right``, that is the right side of the binary expression, will take on the ``String`` type. Previously, ``bp`` itself would have been changed in place to have ``String`` as its type. 
Similarly, this operation occurs in an :class:`.Insert` or :class:`.Update`:: stmt = mytable.update().values(col=bp) Above, ``bp`` remains unchanged, but the ``String`` type will be used when the statement is executed, which we can see by examining the ``binds`` dictionary:: >>> compiled = stmt.compile() >>> compiled.binds['some_col'].type String The feature allows custom types to take their expected effect within INSERT/UPDATE statements without needing to explicitly specify those types within every :func:`.bindparam` expression. The potentially backwards-incompatible changes involve two unlikely scenarios. Since the bound parameter is **cloned**, users should not be relying upon making in-place changes to a :func:`.bindparam` construct once created. Additionally, code which uses :func:`.bindparam` within an :class:`.Insert` or :class:`.Update` statement and relies on the fact that the :func:`.bindparam` is not typed according to the column being assigned will no longer function in that way. :ticket:`2850` .. _migration_1765: Columns can reliably get their type from a column referred to via ForeignKey ---------------------------------------------------------------------------- There's a long-standing behavior which says that a :class:`.Column` can be declared without a type, as long as that :class:`.Column` is referred to by a :class:`.ForeignKeyConstraint`, and the type from the referenced column will be copied into this one. The problem has been that this feature never worked very well and wasn't maintained. The core issue was that the :class:`.ForeignKey` object doesn't know what target :class:`.Column` it refers to until it is asked, typically the first time the foreign key is used to construct a :class:`.Join`. So until that time, the parent :class:`.Column` would not have a type, or more specifically, it would have a default type of :class:`.NullType`. While it's taken a long time, the work to reorganize the initialization of :class:`.ForeignKey` objects has been completed such that this feature can finally work acceptably. At the core of the change is that the :attr:`.ForeignKey.column` attribute no longer lazily initializes the location of the target :class:`.Column`; the issue with this system was that the owning :class:`.Column` would be stuck with :class:`.NullType` as its type until the :class:`.ForeignKey` happened to be used. In the new version, the :class:`.ForeignKey` coordinates with the eventual :class:`.Column` it will refer to using internal attachment events, so that the moment the referencing :class:`.Column` is associated with the :class:`.MetaData`, all :class:`.ForeignKey` objects that refer to it will be sent a message that they need to initialize their parent column. This system is more complicated but works more solidly; as a bonus, there are now tests in place for a wide variety of :class:`.Column` / :class:`.ForeignKey` configuration scenarios and error messages have been improved to be very specific to no fewer than seven different error conditions. Scenarios which now work correctly include: 1.
The type on a :class:`.Column` is immediately present as soon as the target :class:`.Column` becomes associated with the same :class:`.MetaData`; this works no matter which side is configured first:: >>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKey >>> metadata = MetaData() >>> t2 = Table('t2', metadata, Column('t1id', ForeignKey('t1.id'))) >>> t2.c.t1id.type NullType() >>> t1 = Table('t1', metadata, Column('id', Integer, primary_key=True)) >>> t2.c.t1id.type Integer() 2. The system now works with :class:`.ForeignKeyConstraint` as well:: >>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKeyConstraint >>> metadata = MetaData() >>> t2 = Table('t2', metadata, ... Column('t1a'), Column('t1b'), ... ForeignKeyConstraint(['t1a', 't1b'], ['t1.a', 't1.b'])) >>> t2.c.t1a.type NullType() >>> t2.c.t1b.type NullType() >>> t1 = Table('t1', metadata, ... Column('a', Integer, primary_key=True), ... Column('b', Integer, primary_key=True)) >>> t2.c.t1a.type Integer() >>> t2.c.t1b.type Integer() 3. It even works for "multiple hops" - that is, a :class:`.ForeignKey` that refers to a :class:`.Column` that refers to another :class:`.Column`:: >>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKey >>> metadata = MetaData() >>> t2 = Table('t2', metadata, Column('t1id', ForeignKey('t1.id'))) >>> t3 = Table('t3', metadata, Column('t2t1id', ForeignKey('t2.t1id'))) >>> t2.c.t1id.type NullType() >>> t3.c.t2t1id.type NullType() >>> t1 = Table('t1', metadata, Column('id', Integer, primary_key=True)) >>> t2.c.t1id.type Integer() >>> t3.c.t2t1id.type Integer() :ticket:`1765` Dialect Changes =============== Firebird ``fdb`` is now the default Firebird dialect. ----------------------------------------------------- The ``fdb`` dialect is now used if an engine is created without a dialect specifier, i.e. ``firebird://``. ``fdb`` is a ``kinterbasdb``-compatible DBAPI which, per the Firebird project, is now their official Python driver. :ticket:`2504` Firebird ``fdb`` and ``kinterbasdb`` set ``retaining=False`` by default ----------------------------------------------------------------------- Both the ``fdb`` and ``kinterbasdb`` DBAPIs support a flag ``retaining=True`` which can be passed to the ``commit()`` and ``rollback()`` methods of its connection. The documented rationale for this flag is so that the DBAPI can re-use internal transaction state for subsequent transactions, for the purposes of improving performance. However, newer documentation refers to analyses of Firebird's "garbage collection" which express that this flag can have a negative effect on the database's ability to process cleanup tasks, and it has been reported as *lowering* performance as a result. It's not clear how this flag is actually usable given this information, and as it appears to be only a performance-enhancing feature, it now defaults to ``False``. The value can be controlled by passing the flag ``retaining=True`` to the :func:`.create_engine` call. This is a new flag which is added as of 0.8.2, so applications on 0.8.2 can begin setting this to ``True`` or ``False`` as desired. .. seealso:: :mod:`sqlalchemy.dialects.firebird.fdb` :mod:`sqlalchemy.dialects.firebird.kinterbasdb` http://pythonhosted.org/fdb/usage-guide.html#retaining-transactions - information on the "retaining" flag. :ticket:`2763` SQLAlchemy-1.0.11/doc/build/changelog/changelog_01.rst0000664000175000017500000006444712636375552023423 0ustar classicclassic00000000000000 ============== 0.1 Changelog ============== ..
changelog:: :version: 0.1.7 :released: Fri May 05 2006 .. change:: :tags: :tickets: some fixes to topological sort algorithm .. change:: :tags: :tickets: added DISTINCT ON support to Postgres (just supply distinct=[col1,col2..]) .. change:: :tags: :tickets: added __mod__ (% operator) to sql expressions .. change:: :tags: :tickets: "order_by" mapper property inherited from inheriting mapper .. change:: :tags: :tickets: fix to column type used when mapper UPDATES/DELETEs .. change:: :tags: :tickets: with convert_unicode=True, reflection was failing, has been fixed .. change:: :tags: :tickets: types types types! still weren't working....have to use TypeDecorator again :( .. change:: :tags: :tickets: mysql binary type converts array output to buffer, fixes PickleType .. change:: :tags: :tickets: fixed the attributes.py memory leak once and for all .. change:: :tags: :tickets: unittests are qualified based on the databases that support each one .. change:: :tags: :tickets: fixed bug where column defaults would clobber VALUES clause of insert objects .. change:: :tags: :tickets: fixed bug where table def w/ schema name would force engine connection .. change:: :tags: :tickets: fix for parentheses to work correctly with subqueries in INSERT/UPDATE .. change:: :tags: :tickets: HistoryArraySet gets extend() method .. change:: :tags: :tickets: fixed lazyload support for other comparison operators besides = .. change:: :tags: :tickets: lazyload fix where two comparisons in the join condition point to the same column .. change:: :tags: :tickets: added "construct_new" flag to mapper, will use __new__ to create instances instead of __init__ (standard in 0.2) .. change:: :tags: :tickets: added selectresults.py to SVN, missed it last time .. change:: :tags: :tickets: tweak to allow a many-to-many relationship from a table to itself via an association table .. change:: :tags: :tickets: small fix to "translate_row" function used by polymorphic example .. change:: :tags: :tickets: create_engine uses cgi.parse_qsl to read query string (out the window in 0.2) .. change:: :tags: :tickets: tweaks to CAST operator .. change:: :tags: :tickets: fixed function names LOCAL_TIME/LOCAL_TIMESTAMP -> LOCALTIME/LOCALTIMESTAMP .. change:: :tags: :tickets: fixed order of ORDER BY/HAVING in compile .. changelog:: :version: 0.1.6 :released: Wed Apr 12 2006 .. change:: :tags: :tickets: support for MS-SQL added courtesy Rick Morrison, Runar Petursson .. change:: :tags: :tickets: the latest SQLSoup from J. Ellis .. change:: :tags: :tickets: ActiveMapper has preliminary support for inheritance (Jeff Watkins) .. change:: :tags: :tickets: added a "mods" system which allows pluggable modules that modify/augment core functionality, using the function "install_mods(\*modnames)". .. change:: :tags: :tickets: added the first "mod", SelectResults, which modifies mapper selects to return generators that turn ranges into LIMIT/OFFSET queries (Jonas Borgström) .. change:: :tags: :tickets: factored out querying capabilities of Mapper into a separate Query object which is Session-centric. this improves the performance of mapper.using(session) and makes other things possible. .. change:: :tags: :tickets: objectstore/Session refactored, the official way to save objects is now via the flush() method. The begin/commit functionality of Session is factored into LegacySession which is still established as the default behavior, until the 0.2 series. ..
change:: :tags: :tickets: types system is bound to an engine at query compile time, not schema construction time. this simplifies the types system as well as the ProxyEngine. .. change:: :tags: :tickets: added 'version_id' keyword argument to mapper. this keyword should reference a Column object with type Integer, preferably non-nullable, which will be used on the mapped table to track version numbers. this number is incremented on each save operation and is specified in the UPDATE/DELETE conditions so that it factors into the returned row count, which results in a ConcurrencyError if the value received is not the expected count. .. change:: :tags: :tickets: added 'entity_name' keyword argument to mapper. a mapper is now associated with a class via the class object as well as an optional entity_name parameter, which is a string defaulting to None. any number of primary mappers can be created for a class, qualified by the entity name. instances of those classes will issue all of their load and save operations through their entity_name-qualified mapper, and maintain a separate identity in the identity map for an otherwise equivalent object. .. change:: :tags: :tickets: overhaul to the attributes system. code has been clarified, and also fixed to support proper polymorphic behavior on object attributes. .. change:: :tags: :tickets: added "for_update" flag to Select objects .. change:: :tags: :tickets: some fixes for backrefs .. change:: :tags: :tickets: fix for postgres1 DateTime type .. change:: :tags: :tickets: documentation pages mostly switched over to Markdown syntax .. changelog:: :version: 0.1.5 :released: Mon Mar 27 2006 .. change:: :tags: :tickets: added SQLSession concept to SQLEngine. this object keeps track of retrieving a connection from the connection pool as well as an in-progress transaction. methods push_session() and pop_session() added to SQLEngine which push/pop a new SQLSession onto the engine, allowing operation upon a second connection "nested" within the previous one, allowing nested transactions. Other tricks are sure to come later regarding SQLSession. .. change:: :tags: :tickets: added nest_on argument to objectstore.Session. This is a single SQLEngine or list of engines for which push_session()/pop_session() will be called each time this Session becomes the active session (via objectstore.push_session() or equivalent). This allows a unit of work Session to take advantage of the nested transaction feature without explicitly calling push_session/pop_session on the engine. .. change:: :tags: :tickets: factored apart objectstore/unitofwork to separate "Session scoping" from "uow commit heavy lifting" .. change:: :tags: :tickets: added populate_instance() method to MapperExtension. allows an extension to modify the population of object attributes. this method can call the populate_instance() method on another mapper to proxy the attribute population from one mapper to another; some row translation logic is also built in to help with this. .. change:: :tags: :tickets: fixed Oracle8-compatibility "use_ansi" flag which converts JOINs to comparisons with the = and (+) operators, passes basic unittests .. change:: :tags: :tickets: tweaks to Oracle LIMIT/OFFSET support .. change:: :tags: :tickets: Oracle reflection uses ALL_** views instead of USER_** to get larger list of stuff to reflect from .. change:: :tags: :tickets: 105 fixes to Oracle foreign key reflection .. change:: :tags: :tickets: objectstore.commit(obj1, obj2,...)
adds an extra step to seek out private relations on properties and delete child objects, even though it's not a global commit .. change:: :tags: :tickets: lots and lots of fixes to mappers which use inheritance, strengthened the concept of relations on a mapper being made towards the "local" table for that mapper, not the tables it inherits. allows more complex compositional patterns to work with lazy/eager loading. .. change:: :tags: :tickets: added support for mappers to inherit from others based on the same table, just specify the same table as that of both parent/child mapper. .. change:: :tags: :tickets: some minor speed improvements to the attributes system with regards to instantiating and populating new objects. .. change:: :tags: :tickets: fixed MySQL binary unit test .. change:: :tags: :tickets: INSERTs can receive clause elements as VALUES arguments, not just literal values .. change:: :tags: :tickets: support for calling multi-tokened functions, i.e. schema.mypkg.func() .. change:: :tags: :tickets: added J. Ellis' SQLSoup module to extensions package .. change:: :tags: :tickets: added "polymorphic" examples illustrating methods to load multiple object types from one mapper, the second of which uses the new populate_instance() method. small improvements to mapper, UNION construct to help the examples along .. change:: :tags: :tickets: improvements/fixes to session.refresh()/session.expire() (which may have been called "invalidate" earlier..) .. change:: :tags: :tickets: added session.expunge() which totally removes an object from the current session .. change:: :tags: :tickets: added \*args, \**kwargs pass-thru to engine.transaction(func) allowing easier creation of transactionalizing decorator functions .. change:: :tags: :tickets: added iterator interface to ResultProxy: "for row in result:..." .. change:: :tags: :tickets: added assertion to tx = session.begin(); tx.rollback(); tx.begin(), i.e. can't use it after a rollback() .. change:: :tags: :tickets: added date conversion on bind parameter fix to SQLite enabling dates to work with pysqlite1 .. change:: :tags: :tickets: 116 improvements to subqueries to more intelligently construct their FROM clauses .. change:: :tags: :tickets: added PickleType to types. .. change:: :tags: :tickets: fixed two bugs with column labels with regards to bind parameters: bind param keynames are now generated from a column "label" in all relevant cases to take advantage of excess-name-length rules, and a check was added for a peculiar collision against a column named the same as "tablename_colname" .. change:: :tags: :tickets: major overhaul to unit of work documentation, other documentation sections. .. change:: :tags: :tickets: fixed attributes bug where if an object is committed, its lazy-loaded list got blown away if it hadn't been loaded .. change:: :tags: :tickets: added unique_connection() method to engine, connection pool to return a connection that is not part of the thread-local context or any current transaction .. change:: :tags: :tickets: added invalidate() function to pooled connection. will remove the connection from the pool. still need work for engines to auto-reconnect to a stale DB though. .. change:: :tags: :tickets: added distinct() function to column elements so you can do func.count(mycol.distinct()) .. change:: :tags: :tickets: added "always_refresh" flag to Mapper, creates a mapper that will always refresh the attributes of objects it gets/selects from the DB, overwriting any changes made.
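For illustration, in the classical mapping style of that era (``MyClass`` and ``mytable`` here are hypothetical), the flag reads as::

    # every get()/select() of MyClass re-reads the row's attributes,
    # overwriting pending in-memory changes
    m = mapper(MyClass, mytable, always_refresh=True)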
.. changelog:: :version: 0.1.4 :released: Mon Mar 13 2006 .. change:: :tags: :tickets: create_engine() now uses genericized parameters; host/hostname, db/dbname/database, password/passwd, etc. for all engine connections. makes engine URIs much more "universal" .. change:: :tags: :tickets: added support for SELECT statements embedded into a column clause, using the flag "scalar=True" .. change:: :tags: :tickets: another overhaul to EagerLoading when used in conjunction with mappers that inherit; improvements to eager loads figuring out their aliased queries correctly, also relations set up against a mapper with inherited mappers will create joins against the table that is specific to the mapper itself (i.e. and not any tables that are inherited/are further down the inheritance chain), this can be overridden by using custom primary/secondary joins. .. change:: :tags: :tickets: added J.Ellis patch to mapper.py so that selectone() throws an exception if query returns more than one object row, selectfirst() to not throw the exception. also adds selectfirst_by (synonymous with get_by) and selectone_by .. change:: :tags: :tickets: added onupdate parameter to Column, will exec SQL/python upon an update statement. Also adds "for_update=True" to all DefaultGenerator subclasses .. change:: :tags: :tickets: added support for Oracle table reflection contributed by Andrija Zaric; still some bugs to work out regarding composite primary keys/dictionary selection .. change:: :tags: :tickets: checked in an initial Firebird module, awaiting testing. .. change:: :tags: :tickets: added sql.ClauseParameters dictionary object as the result for compiled.get_params(), does late type-processing of bind parameters so that the original values are easier to access .. change:: :tags: :tickets: more docs for indexes, column defaults, connection pooling, engine construction .. change:: :tags: :tickets: overhaul to the construction of the types system. uses a simpler inheritance pattern so that any of the generic types can be easily subclassed, with no need for TypeDecorator. .. change:: :tags: :tickets: added "convert_unicode=False" parameter to SQLEngine, will cause all String types to perform unicode encoding/decoding (makes Strings act like Unicodes) .. change:: :tags: :tickets: added 'encoding="utf8"' parameter to engine. the given encoding will be used for all encode/decode calls within Unicode types as well as Strings when convert_unicode=True. .. change:: :tags: :tickets: improved support for mapping against UNIONs, added polymorph.py example to illustrate multi-class mapping against a UNION .. change:: :tags: :tickets: fix to SQLite LIMIT/OFFSET syntax .. change:: :tags: :tickets: fix to Oracle LIMIT syntax .. change:: :tags: :tickets: added backref() function, allows backreferences to have keyword arguments that will be passed to the backref. .. change:: :tags: :tickets: Sequences and ColumnDefault objects can do execute()/scalar() standalone .. change:: :tags: :tickets: SQL functions (i.e. func.foo()) can do execute()/scalar() standalone .. change:: :tags: :tickets: fix to SQL functions so that the ANSI-standard functions, i.e. current_timestamp etc., do not specify parenthesis. all other functions do. .. change:: :tags: :tickets: added setattr_clean and append_clean to SmartProperty, which set attributes without triggering a "dirty" event or any history. used as: myclass.prop1.setattr_clean(myobject, 'hi') ..
change:: :tags: :tickets: improved support for column defaults when used by mappers; mappers will pull pre-executed defaults from statement's executed bind parameters (pre-conversion) to populate them into a saved object's attributes; if any PassiveDefaults have fired off, will instead post-fetch the row from the DB to populate the object. .. change:: :tags: :tickets: added 'get_session().invalidate(\*obj)' method to objectstore, instances will refresh() themselves upon the next attribute access. .. change:: :tags: :tickets: improvements to SQL func calls including an "engine" keyword argument so they can be execute()d or scalar()ed standalone, also added func accessor to SQLEngine .. change:: :tags: :tickets: fix to MySQL4 custom table engines, i.e. TYPE instead of ENGINE .. change:: :tags: :tickets: slightly enhanced logging, includes timestamps and a somewhat configurable formatting system, in lieu of a full-blown logging system .. change:: :tags: :tickets: improvements to the ActiveMapper class from the TG gang, including many-to-many relationships .. change:: :tags: :tickets: added Double and TinyInt support to mysql .. changelog:: :version: 0.1.3 :released: Thu Mar 02 2006 .. change:: :tags: :tickets: completed "post_update" feature, will add a second update statement before inserts and after deletes in order to reconcile a relationship without any dependencies being created; used when persisting two rows that are dependent on each other .. change:: :tags: :tickets: completed mapper.using(session) function, localized per-object Session functionality; objects can be declared and manipulated as local to any user-defined Session .. change:: :tags: :tickets: fix to Oracle "row_number over" clause with multiple tables .. change:: :tags: :tickets: mapper.get() was not selecting multiple-keyed objects if the mapper's table was a join, such as in an inheritance relationship; this is fixed. .. change:: :tags: :tickets: overhaul to sql/schema packages so that the sql package can run all on its own, producing selects, inserts, etc. without any engine dependencies. builds upon new TableClause/ColumnClause lexical objects. Schema's Table/Column objects are the "physical" subclasses of them. simplifies schema/sql relationship, extensions (like proxyengine), and speeds overall performance by a large margin. removes the entire getattr() behavior that plagued 0.1.1. .. change:: :tags: :tickets: refactoring of how the mapper "synchronizes" data between two objects into a separate module, works better with properties attached to a mapper that has an additional inheritance relationship to one of the related tables, also the same methodology used to synchronize parent/child objects now used by mapper to synchronize between inherited and inheriting mappers. .. change:: :tags: :tickets: made objectstore "check for out-of-identitymap" more aggressive, will perform the check when object attributes are modified or the object is deleted .. change:: :tags: :tickets: Index object fully implemented, can be constructed standalone, or via "index" and "unique" arguments on Columns. .. change:: :tags: :tickets: added "convert_unicode" flag to SQLEngine, will treat all String/CHAR types as Unicode types, with raw-byte/utf-8 translation on the bind parameter and result set side. .. change:: :tags: :tickets: postgres maintains a list of ANSI functions that must have no parentheses so function calls with no arguments work consistently .. change:: :tags: :tickets: tables can be created with no engine specified.
this will default their engine to a module-scoped "default engine" which is a ProxyEngine. this engine can be connected via the function "global_connect". .. change:: :tags: :tickets: added "refresh(\*obj)" method to objectstore / Session to reload the attributes of any set of objects from the database unconditionally .. changelog:: :version: 0.1.2 :released: Fri Feb 24 2006 .. change:: :tags: :tickets: fixed a recursive call in schema that was somehow running 994 times then returning normally. broke nothing, slowed down everything. thanks to jpellerin for finding this. .. changelog:: :version: 0.1.1 :released: Thu Feb 23 2006 .. change:: :tags: :tickets: small fix to Function class so that expressions with a func.foo() use the type of the Function object (i.e. the left side) as the type of the boolean expression, not the other side which is more of a moving target (changeset 1020). .. change:: :tags: :tickets: creating self-referring mappers with backrefs slightly easier (but still not that easy - changeset 1019) .. change:: :tags: :tickets: fixes to one-to-one mappings (changeset 1015) .. change:: :tags: :tickets: psycopg1 date/time issue with None fixed (changeset 1005) .. change:: :tags: :tickets: two issues related to postgres, which doesn't want to give you the "lastrowid" since oids are deprecated: * postgres database-side defaults that are on primary key cols *do* execute explicitly beforehand, even though that's not the idea of a PassiveDefault. this is because sequences on columns get reflected as PassiveDefaults, but need to be explicitly executed on a primary key col so we know what we just inserted. * if you did add a row that has a bunch of database-side defaults on it, and the PassiveDefault thing was working the old way, i.e. they just execute on the DB side, the "cant get the row back without an OID" exception that occurred also will not happen unless someone (usually the ORM) explicitly asks for it. .. change:: :tags: :tickets: fixed a glitch with engine.execute_compiled where it was making a second ResultProxy that just got thrown away. .. change:: :tags: :tickets: began to implement newer logic in object properties. you can now say myclass.attr.property, which will give you the PropertyLoader corresponding to that attribute, i.e. myclass.mapper.props['attr'] .. change:: :tags: :tickets: eager loading has been internally overhauled to use aliases at all times. more complicated chains of eager loads can now be created without any need for explicit "use aliases"-type instructions. EagerLoader code is also much simpler now. .. change:: :tags: :tickets: a new somewhat experimental flag "use_update" added to relations, indicates that this relationship should be handled by a second UPDATE statement, either after a primary INSERT or before a primary DELETE. handles circular row dependencies. .. change:: :tags: :tickets: added exceptions module, all raised exceptions (except for some KeyError/AttributeError exceptions) descend from these classes. .. change:: :tags: :tickets: fix to date types with MySQL, returned timedelta converted to datetime.time .. change:: :tags: :tickets: two-phase objectstore.commit operations (i.e. begin/commit) now return a transactional object (SessionTrans), to more clearly indicate transaction boundaries. .. change:: :tags: :tickets: Index object with create/drop support added to schema ..
change:: :tags: :tickets: fix to postgres, where it will explicitly pre-execute a PassiveDefault on a table if it is a primary key column, pursuant to the ongoing "we can't get inserted rows back from postgres" issue .. change:: :tags: :tickets: change to information_schema query that gets back postgres table defs, now uses explicit JOIN keyword, since one user had faster performance with 8.1 .. change:: :tags: :tickets: fix to engine.process_defaults so it works correctly with a table that has different column name/column keys (changeset 982) .. change:: :tags: :tickets: a column can only be attached to one table - this is now asserted .. change:: :tags: :tickets: postgres time types descend from Time type .. change:: :tags: :tickets: fix to alltests so that it runs types test (now named testtypes) .. change:: :tags: :tickets: fix to Join object so that it correctly exports its foreign keys (cs 973) .. change:: :tags: :tickets: creating relationships against mappers that use inheritance fixed (cs 973) SQLAlchemy-1.0.11/doc/build/changelog/changelog_07.rst0000664000175000017500000041255212636375552023431 0ustar classicclassic00000000000000 ============== 0.7 Changelog ============== .. changelog:: :version: 0.7.11 .. change:: :tags: bug, engine :tickets: 2851 :versions: 0.8.3, 0.9.0b1 The regexp used by the :func:`~sqlalchemy.engine.url.make_url` function now parses IPv6 addresses, e.g. surrounded by brackets. .. change:: :tags: bug, orm :tickets: 2807 :versions: 0.8.3, 0.9.0b1 Fixed bug where list instrumentation would fail to represent a setslice of ``[0:0]`` correctly, which in particular could occur when using ``insert(0, item)`` with the association proxy. Due to some quirk in Python collections, the issue was much more likely with Python 3 rather than 2. .. change:: :tags: bug, sql :tickets: 2801 :versions: 0.8.3, 0.9.0b1 Fixed regression dating back to 0.7.9 whereby the name of a CTE might not be properly quoted if it was referred to in multiple FROM clauses. .. change:: :tags: mysql, bug :tickets: 2791 :versions: 0.8.3, 0.9.0b1 Updates to MySQL reserved words for versions 5.5, 5.6, courtesy Hanno Schlichting. .. change:: :tags: sql, bug, cte :tickets: 2783 :versions: 0.8.3, 0.9.0b1 Fixed bug in common table expression system where if the CTE were used only as an ``alias()`` construct, it would not render using the WITH keyword. .. change:: :tags: bug, sql :tickets: 2784 :versions: 0.8.3, 0.9.0b1 Fixed bug in :class:`.CheckConstraint` DDL where the "quote" flag from a :class:`.Column` object would not be propagated. .. change:: :tags: bug, orm :tickets: 2699 :versions: 0.8.1 Fixed bug when a query of the form: ``query(SubClass).options(subqueryload(Baseclass.attrname))``, where ``SubClass`` is a joined-inheritance subclass of ``BaseClass``, would fail to apply the ``JOIN`` inside the subquery on the attribute load, producing a cartesian product. The populated results still tended to be correct as additional rows are just ignored, so this issue may be present as a performance degradation in applications that are otherwise working correctly. .. change:: :tags: bug, orm :tickets: 2689 :versions: 0.8.1 Fixed bug in unit of work whereby a joined-inheritance subclass could insert the row for the "sub" table before the parent table, if the two tables had no ForeignKey constraints set up between them. ..
change:: :tags: feature, postgresql :tickets: 2676 :versions: 0.8.0 Added support for Postgresql's traditional SUBSTRING function syntax, renders as "SUBSTRING(x FROM y FOR z)" when regular ``func.substring()`` is used. Courtesy Gunnlaugur Þór Briem. .. change:: :tags: bug, tests :tickets: 2669 :pullreq: 41 Fixed an import of "logging" in test_execute which was not working on some Linux platforms. .. change:: :tags: bug, orm :tickets: 2674 Improved the error message emitted when a "backref loop" is detected, that is when an attribute event triggers a bidirectional assignment between two other attributes with no end. This condition can occur not just when an object of the wrong type is assigned, but also when an attribute is mis-configured to backref into an existing backref pair. .. change:: :tags: bug, orm :tickets: 2674 A warning is emitted when a MapperProperty is assigned to a mapper that replaces an existing property, if the properties in question aren't plain column-based properties. Replacement of relationship properties is rarely (ever?) what is intended and usually refers to a mapper mis-configuration. This will also warn if a backref configures itself on top of an existing one in an inheritance relationship (which is an error in 0.8). .. changelog:: :version: 0.7.10 :released: Thu Feb 7 2013 .. change:: :tags: engine, bug :tickets: 2604 :versions: 0.8.0b2 Fixed :meth:`.MetaData.reflect` to correctly use the given :class:`.Connection`, if given, without opening a second connection from that connection's :class:`.Engine`. .. change:: :tags: mssql, bug :tickets: 2607 :versions: 0.8.0b2 Fixed bug whereby using "key" with Column in conjunction with "schema" for the owning Table would fail to locate result rows due to the MSSQL dialect's "schema rendering" logic's failure to take .key into account. .. change:: :tags: sql, mysql, gae :tickets: 2649 Added a conditional import to the ``gaerdbms`` dialect which attempts to import rdbms_apiproxy vs. rdbms_googleapi to work on both dev and production platforms. Also now honors the ``instance`` attribute. Courtesy Sean Lynch. Also backported enhancements to allow username/password as well as fixing error code interpretation from 0.8. .. change:: :tags: sql, bug :tickets: 2594, 2584 Backported adjustment to ``__repr__`` for :class:`.TypeDecorator` to 0.7, allows :class:`.PickleType` to produce a clean ``repr()`` to help with Alembic. .. change:: :tags: sql, bug :tickets: 2643 Fixed bug where :meth:`.Table.tometadata` would fail if a :class:`.Column` had both a foreign key as well as an alternate ".key" name for the column. .. change:: :tags: mssql, bug :tickets: 2638 Added a Py3K conditional around unnecessary .decode() call in mssql information schema, fixes reflection in Py3k. .. change:: :tags: orm, bug :tickets: 2650 Fixed potential memory leak which could occur if an arbitrary number of :class:`.sessionmaker` objects were created. The anonymous subclass created by the sessionmaker, when dereferenced, would not be garbage collected due to remaining class-level references from the event package. This issue also applies to any custom system that made use of ad-hoc subclasses in conjunction with an event dispatcher. .. change:: :tags: orm, bug :tickets: 2640 :meth:`.Query.merge_result` can now load rows from an outer join where an entity may be ``None`` without throwing an error. ..
change:: :tags: sqlite, bug :tickets: 2568 :versions: 0.8.0b2 More adjustment to this SQLite-related issue which was released in 0.7.9, to intercept legacy SQLite quoting characters when reflecting foreign keys. In addition to intercepting double quotes, other quoting characters such as brackets, backticks, and single quotes are now also intercepted. .. change:: :tags: sql, bug :tickets: 2631 :versions: 0.8.0b2 Fixed bug where using server_onupdate= without passing the "for_update=True" flag would apply the default object to the server_default, blowing away whatever was there. The explicit for_update=True argument shouldn't be needed with this usage (especially since the documentation shows an example without it being used) so it is now arranged internally using a copy of the given default object, if the flag isn't set to what corresponds to that argument. .. change:: :tags: oracle, bug :tickets: 2620 The Oracle LONG type, while an unbounded text type, does not appear to use the cx_Oracle.LOB type when result rows are returned, so the dialect has been repaired to exclude LONG from having cx_Oracle.LOB filtering applied. .. change:: :tags: oracle, bug :tickets: 2611 Repaired the usage of ``.prepare()`` in conjunction with cx_Oracle so that a return value of ``False`` will result in no call to ``connection.commit()``, hence avoiding "no transaction" errors. Two-phase transactions have now been shown to work in a rudimentary fashion with SQLAlchemy and cx_oracle; however, they are subject to caveats observed with the driver; check the documentation for details. .. change:: :tags: orm, bug :tickets: 2624 The :class:`.MutableComposite` type did not allow for the :meth:`.MutableBase.coerce` method to be used, even though the code seemed to indicate this intent, so this now works and a brief example is added. As a side-effect, the mechanics of this event handler have been changed so that new :class:`.MutableComposite` types no longer add per-type global event handlers. Also in 0.8.0b2. .. change:: :tags: orm, bug :tickets: 2583 Fixed Session accounting bug whereby replacing a deleted object in the identity map with another object of the same primary key would raise a "conflicting state" error on rollback(), if the replaced primary key were established either via non-unitofwork-established INSERT statement or by primary key switch of another instance. .. change:: :tags: oracle, bug :tickets: 2561 changed the list of cx_oracle types that are excluded from the setinputsizes() step to only include STRING and UNICODE; CLOB and NCLOB are removed. This is to work around cx_oracle behavior which is broken for the executemany() call. In 0.8, this same change is applied however it is also configurable via the exclude_setinputsizes argument. .. change:: :tags: feature, mysql :tickets: 2523 Added "raise_on_warnings" flag to OurSQL dialect. .. change:: :tags: feature, mysql :tickets: 2554 Added "read_timeout" flag to MySQLdb dialect. .. changelog:: :version: 0.7.9 :released: Mon Oct 01 2012 .. change:: :tags: orm, bug :tickets: Fixed bug mostly local to new AbstractConcreteBase helper where the "type" attribute from the superclass would not be overridden on the subclass to produce the "reserved for base" error message, instead placing a do-nothing attribute there. This was inconsistent vs. using ConcreteBase as well as all the behavior of classical concrete mappings, where the "type" column from the polymorphic base would be explicitly disabled on subclasses, unless overridden explicitly. ..
change:: :tags: orm, bug :tickets: A warning is emitted when lazy='dynamic' is combined with uselist=False. This raises an exception in 0.8. .. change:: :tags: orm, bug :tickets: Fixed bug whereby user error in related-object assignment could cause recursion overflow if the assignment triggered a backref of the same name as a bi-directional attribute on the incorrect class to the same target. An informative error is raised now. .. change:: :tags: orm, bug :tickets: 2539 Fixed bug where incorrect type information would be passed when the ORM would bind the "version" column, when using the "version" feature. Tests courtesy Daniel Miller. .. change:: :tags: orm, bug :tickets: 2566 Extra logic has been added to the "flush" that occurs within Session.commit(), such that the extra state added by an after_flush() or after_flush_postexec() hook is also flushed in a subsequent flush, before the "commit" completes. Subsequent calls to flush() will continue until the after_flush hooks stop adding new state. An "overflow" counter of 100 is also in place, in the event of a broken after_flush() hook adding new content each time. .. change:: :tags: bug, sql :tickets: 2571 Fixed the DropIndex construct to support an Index associated with a Table in a remote schema. .. change:: :tags: bug, sql :tickets: 2574 Fixed bug in over() construct whereby passing an empty list for either partition_by or order_by, as opposed to None, would fail to generate correctly. Courtesy Gunnlaugur Þór Briem. .. change:: :tags: bug, sql :tickets: 2521 Fixed CTE bug whereby positional bound parameters present in the CTEs themselves would corrupt the overall ordering of bound parameters. This primarily affected SQL Server as the platform with positional binds + CTE support. .. change:: :tags: bug, sql :tickets: Fixed more un-intuitivenesses in CTEs which prevented referring to a CTE in a union of itself without it being aliased. CTEs now render uniquely on name, rendering the outermost CTE of a given name only - all other references are rendered just as the name. This even includes other CTE/SELECTs that refer to different versions of the same CTE object, such as a SELECT or a UNION ALL of that SELECT. We are somewhat loosening the usual link between object identity and lexical identity in this case. A true name conflict between two unrelated CTEs now raises an error. .. change:: :tags: bug, sql :tickets: 2512 quoting is applied to the column names inside the WITH RECURSIVE clause of a common table expression according to the quoting rules for the originating Column. .. change:: :tags: bug, sql :tickets: 2518 Fixed regression introduced in 0.7.6 whereby the FROM list of a SELECT statement could be incorrect in certain "clone+replace" scenarios. .. change:: :tags: bug, sql :tickets: 2552 Fixed bug whereby usage of a UNION or similar inside of an embedded subquery would interfere with result-column targeting, in the case that a result-column had the same ultimate name as a name inside the embedded UNION. .. change:: :tags: bug, sql :tickets: 2558 Fixed a regression since 0.6 regarding result-row targeting. It should be possible to use a select() statement with string-based columns in it, that is select(['id', 'name']).select_from('mytable'), and have this statement be targetable by Column objects with those names; this is the mechanism by which query(MyClass).from_statement(some_statement) works.
At some point the specific case of using select(['id']), which is equivalent to select([literal_column('id')]), stopped working here, so this has been reinstated and of course tested. .. change:: :tags: bug, sql :tickets: 2544 Added missing operators is_(), isnot() to the ColumnOperators base, so that these long-available operators are present as methods like all the other operators. .. change:: :tags: engine, bug :tickets: 2522 Fixed bug whereby a disconnect detect + dispose that occurs when the QueuePool has threads waiting for connections would leave those threads waiting for the duration of the timeout on the old pool (or indefinitely if timeout was disabled). The fix now notifies those waiters with a special exception case and has them move onto the new pool. .. change:: :tags: engine, feature :tickets: 2516 Dramatic improvement in memory usage of the event system; instance-level collections are no longer created for a particular type of event until instance-level listeners are established for that event. .. change:: :tags: engine, bug :tickets: 2529 Added gaerdbms import to mysql/__init__.py, the absence of which was preventing the new GAE dialect from being loaded. .. change:: :tags: engine, bug :tickets: 2553 Fixed cextension bug whereby the "ambiguous column error" would fail to function properly if the given index were a Column object and not a string. Note there are still some column-targeting issues here which are fixed in 0.8. .. change:: :tags: engine, bug :tickets: Fixed the repr() of Enum to include the "name" and "native_enum" flags. Helps Alembic autogenerate. .. change:: :tags: sqlite, bug :tickets: 2568 Adjusted a very old bugfix which attempted to work around a SQLite issue that itself was "fixed" as of sqlite 3.6.14, regarding quotes surrounding a table name when using the "foreign_key_list" pragma. The fix has been adjusted to not interfere with quotes that are *actually in the name* of a column or table, to as great a degree as possible; sqlite still doesn't return the correct result for foreign_key_list() if the target table actually has quotes surrounding its name, as *part* of its name (i.e. """mytable"""). .. change:: :tags: sqlite, bug :tickets: 2265 Adjusted column default reflection code to convert non-string values to string, to accommodate old SQLite versions that don't deliver default info as a string. .. change:: :tags: sqlite, feature :tickets: Added support for the localtimestamp() SQL function implemented in SQLite, courtesy Richard Mitchell. .. change:: :tags: postgresql, bug :tickets: 2531 Columns in reflected primary key constraint are now returned in the order in which the constraint itself defines them, rather than how the table orders them. Courtesy Gunnlaugur Þór Briem. .. change:: :tags: postgresql, bug :tickets: 2570 Added 'terminating connection' to the list of messages we use to detect a disconnect with PG, which appears to be present in some versions when the server is restarted. .. change:: :tags: bug, mysql :tickets: Updated mysqlconnector interface to use updated "client flag" and "charset" APIs, courtesy David McNelis. .. change:: :tags: mssql, bug :tickets: 2538 Fixed compiler bug whereby using a correlated subquery within an ORDER BY would fail to render correctly if the statement also used LIMIT/OFFSET, due to mis-rendering within the ROW_NUMBER() OVER clause. Fix courtesy sayap ..
change:: :tags: mssql, bug :tickets: 2545 Fixed compiler bug whereby a given select() would be modified if it had an "offset" attribute, causing the construct to not compile correctly a second time. .. change:: :tags: mssql, bug :tickets: Fixed bug where reflection of primary key constraint would double up columns if the same constraint/table existed in multiple schemas. .. changelog:: :version: 0.7.8 :released: Sat Jun 16 2012 .. change:: :tags: orm, bug :tickets: 2480 Fixed bug whereby subqueryload() from a polymorphic mapping to a target would incur a new invocation of the query for each distinct class encountered in the polymorphic result. .. change:: :tags: orm, bug :tickets: 2491, 1892 Fixed bug in declarative whereby the precedence of columns in a joined-table, composite column (typically for id) would fail to be correct if the columns contained names distinct from their attribute names. This would cause things like primaryjoin conditions made against the entity attributes to be incorrect. Related to :ticket:`1892`, as this was supposed to be part of that fix. .. change:: :tags: orm, feature :tickets: The 'objects' argument to flush() is no longer deprecated, as some valid use cases have been identified. .. change:: :tags: orm, bug :tickets: 2508 Fixed identity_key() function which was not accepting a scalar argument for the identity. .. change:: :tags: orm, bug :tickets: 2497 Fixed bug whereby populate_existing option would not propagate to subquery eager loaders. .. change:: :tags: bug, sql :tickets: 2499 added BIGINT to types.__all__, BIGINT, BINARY, VARBINARY to sqlalchemy module namespace, plus test to ensure this breakage doesn't occur again. .. change:: :tags: bug, sql :tickets: 2490 Repaired common table expression rendering to function correctly when the SELECT statement contains UNION or other compound expressions, courtesy btbuilder. .. change:: :tags: bug, sql :tickets: 2482 Fixed bug whereby append_column() wouldn't function correctly on a cloned select() construct, courtesy Gunnlaugur Þór Briem. .. change:: :tags: engine, bug :tickets: 2489 Fixed memory leak in C version of result proxy whereby DBAPIs which don't deliver pure Python tuples for result rows would fail to decrement refcounts correctly. The most prominently affected DBAPI is pyodbc. .. change:: :tags: engine, bug :tickets: 2503 Fixed bug affecting Py3K whereby string positional parameters passed to engine/connection execute() would fail to be interpreted correctly, due to __iter__ being present on Py3K string. .. change:: :tags: postgresql, bug :tickets: 2510 removed unnecessary table clause when reflecting enums. Courtesy Gunnlaugur Þór Briem. .. change:: :tags: oracle, bug :tickets: 2483 Added ROWID to oracle.*. .. change:: :tags: feature, mysql :tickets: 2484 Added a new dialect for Google App Engine. Courtesy Richie Foreman. .. changelog:: :version: 0.7.7 :released: Sat May 05 2012 .. change:: :tags: orm, bug :tickets: 2477 Fixed issue in unit of work whereby setting a non-None self-referential many-to-one relationship to None would fail to persist the change if the former value was not already loaded. .. change:: :tags: orm, feature :tickets: 2443 Added prefix_with() method to Query, which calls upon select().prefix_with() to allow placement of MySQL SELECT directives in statements. Courtesy Diana Clarke .. change:: :tags: orm, bug :tickets: 2409 Fixed bug introduced in 0.7.6 whereby column_mapped_collection used against columns that were mapped as joins or other indirect selectables would fail to function.
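For reference, a minimal classical-mapping sketch of the ``column_mapped_collection`` pattern against a plain column (the ``Parent``/``Note`` model here is hypothetical; the fix above extends this pattern to columns mapped against joins and other indirect selectables)::

    from sqlalchemy import (Table, Column, Integer, String, ForeignKey,
                            MetaData)
    from sqlalchemy.orm import mapper, relationship
    from sqlalchemy.orm.collections import column_mapped_collection

    metadata = MetaData()
    parent = Table('parent', metadata,
                   Column('id', Integer, primary_key=True))
    note = Table('note', metadata,
                 Column('id', Integer, primary_key=True),
                 Column('parent_id', Integer, ForeignKey('parent.id')),
                 Column('keyword', String(50)))

    class Parent(object):
        pass

    class Note(object):
        pass

    mapper(Note, note)
    mapper(Parent, parent, properties={
        # dict collection keyed on the value of the note.c.keyword column
        'notes': relationship(
            Note, collection_class=column_mapped_collection(note.c.keyword))
    })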
.. change:: :tags: orm, feature :tickets: Added new flag include_removes to @validates. When True, collection remove and attribute del events will also be sent to the validation function, which accepts an additional argument "is_remove" when this flag is used. .. change:: :tags: orm, bug :tickets: 2449 Fixed bug whereby polymorphic_on column that's not otherwise mapped on the class would be incorrectly included in a merge() operation, raising an error. .. change:: :tags: orm, bug :tickets: 2453 Fixed bug in expression annotation mechanics which could lead to incorrect rendering of SELECT statements with aliases and joins, particularly when using column_property(). .. change:: :tags: orm, bug :tickets: 2454 Fixed bug which would prevent OrderingList from being pickleable. Courtesy Jeff Dairiki .. change:: :tags: orm, bug :tickets: Fixed bug in relationship comparisons whereby calling unimplemented methods like SomeClass.somerelationship.like() would produce a recursion overflow, instead of NotImplementedError. .. change:: :tags: bug, sql :tickets: Removed warning when Index is created with no columns; while this might not be what the user intended, it is a valid use case as an Index could be a placeholder for just an index of a certain name. .. change:: :tags: feature, sql :tickets: Added new connection event dbapi_error(). It is called for all DBAPI-level errors, passing the original DBAPI exception before SQLAlchemy modifies the state of the cursor. .. change:: :tags: bug, sql :tickets: If conn.begin() fails when calling "with engine.begin()", the newly acquired Connection is closed explicitly before propagating the exception onward normally. .. change:: :tags: bug, sql :tickets: 2474 Add BINARY, VARBINARY to types.__all__. .. change:: :tags: mssql, feature :tickets: Added interim create_engine flag supports_unicode_binds to PyODBC dialect, to force whether or not the dialect passes Python unicode literals to PyODBC. .. change:: :tags: mssql, bug :tickets: Repaired the use_scope_identity create_engine() flag when using the pyodbc dialect. Previously this flag would be ignored if set to False. When set to False, you'll get "SELECT @@identity" after each INSERT to get at the last inserted ID, for those tables which have "implicit_returning" set to False. .. change:: :tags: mssql, bug :tickets: 2468 UPDATE..FROM syntax with SQL Server requires that the updated table be present in the FROM clause when an alias of that table is also present in the FROM clause. The updated table is now always present in the FROM, when FROM is present in the first place. Courtesy sayap. .. change:: :tags: postgresql, feature :tickets: 2445 Added new for_update/with_lockmode() options for Postgresql: for_update="read"/ with_lockmode("read"), for_update="read_nowait"/ with_lockmode("read_nowait"). These emit "FOR SHARE" and "FOR SHARE NOWAIT", respectively. Courtesy Diana Clarke .. change:: :tags: postgresql, bug :tickets: 2473 removed unnecessary table clause when reflecting domains. .. change:: :tags: bug, mysql :tickets: 2460 Fixed bug whereby column name inside of "KEY" clause for autoincrement composite column with InnoDB would double quote a name that's a reserved word. Courtesy Jeff Dairiki. .. change:: :tags: bug, mysql :tickets: Fixed bug whereby get_view_names() for "information_schema" schema would fail to retrieve views marked as "SYSTEM VIEW". Courtesy Matthew Turland. ..
change:: :tags: bug, mysql :tickets: 2467 Fixed bug whereby if cast() is used on a SQL expression whose type is not supported by cast() and therefore CAST isn't rendered by the dialect, the order of evaluation could change if the casted expression required that it be grouped; grouping is now applied to those expressions. .. change:: :tags: sqlite, feature :tickets: 2475 Added SQLite execution option "sqlite_raw_colnames=True", which will bypass attempts to remove "." from column names returned by SQLite cursor.description. .. change:: :tags: sqlite, bug :tickets: 2525 When the primary key column of a Table is replaced, such as via extend_existing, the "auto increment" column used by insert() constructs is reset. Previously it would remain referring to the previous primary key column. .. changelog:: :version: 0.7.6 :released: Wed Mar 14 2012 .. change:: :tags: orm, bug :tickets: 2424 Fixed event registration bug which would primarily show up as events not being registered with sessionmaker() instances created after the event was associated with the Session class. .. change:: :tags: orm, bug :tickets: 2425 Fixed bug whereby a primaryjoin condition with a "literal" in it would raise an error on compile with certain kinds of deeply nested expressions which also needed to render the same bound parameter name more than once. .. change:: :tags: orm, feature :tickets: Added "no_autoflush" context manager to Session; used as a ``with:`` block, it will temporarily disable autoflush. .. change:: :tags: orm, feature :tickets: 1859 Added cte() method to Query, invokes common table expression support from the Core (see below). .. change:: :tags: orm, bug :tickets: 2403 Removed the check for number of rows affected when doing a multi-delete against mapped objects. If an ON DELETE CASCADE exists between two rows, we can't get an accurate rowcount from the DBAPI; this particular count is not supported on most DBAPIs in any case, MySQLdb is the notable case where it is. .. change:: :tags: orm, bug :tickets: 2409 Fixed bug whereby objects using attribute_mapped_collection or column_mapped_collection could not be pickled. .. change:: :tags: orm, bug :tickets: 2406 Fixed bug whereby MappedCollection would not get the appropriate collection instrumentation if it were only used in a custom subclass that used @collection.internally_instrumented. .. change:: :tags: orm, bug :tickets: 2419 Fixed bug whereby SQL adaption mechanics would fail in a very nested scenario involving joined-inheritance, joinedload(), limit(), and a derived function in the columns clause. .. change:: :tags: orm, bug :tickets: 2417 Fixed the repr() for CascadeOptions to include refresh-expire. Also reworked CascadeOptions to be a ``frozenset``. .. change:: :tags: orm, feature :tickets: 2400 Added the ability to query for Table-bound column names when using query(sometable).filter_by(colname=value). .. change:: :tags: orm, bug :tickets: Improved the "declarative reflection" example to support single-table inheritance, multiple calls to prepare(), tables that are present in alternate schemas, establishing only a subset of classes as reflected. .. change:: :tags: orm, bug :tickets: 2390 Scaled back the test applied within flush() to check for UPDATE against partially NULL PK within one table to only actually happen if there's really an UPDATE to occur. .. change:: :tags: orm, bug :tickets: 2352 Fixed bug whereby if a method name conflicted with a column name, a TypeError would be raised when the mapper tried to inspect the __get__() method on the method object. ..
change:: :tags: bug, sql :tickets: 2427 Fixed memory leak in core which would occur when C extensions were used with particular types of result fetches, in particular when orm query.count() was called. .. change:: :tags: bug, sql :tickets: 2398 Fixed issue whereby attribute-based column access on a row would raise AttributeError with non-C version, NoSuchColumnError with C version. Now raises AttributeError in both cases. .. change:: :tags: feature, sql :tickets: 1859 Added support for SQL standard common table expressions (CTE), allowing SELECT objects as the CTE source (DML not yet supported). This is invoked via the cte() method on any select() construct. .. change:: :tags: bug, sql :tickets: 2392 Added support for using the .key of a Column as a string identifier in a result set row. The .key is currently listed as an "alternate" name for a column, and is superseded by the name of a column which has that key value as its regular name. For the next major release of SQLAlchemy we may reverse this precedence so that .key takes precedence, but this is not decided on yet. .. change:: :tags: bug, sql :tickets: 2413 A warning is emitted when a not-present column is stated in the values() clause of an insert() or update() construct. Will move to an exception in 0.8. .. change:: :tags: bug, sql :tickets: 2396 A significant change to how labeling is applied to columns in SELECT statements allows "truncated" labels, that is label names that are generated in Python which exceed the maximum identifier length (note this is configurable via label_length on create_engine()), to be properly referenced when rendered inside of a subquery, as well as to be present in a result set row using their original in-Python names. .. change:: :tags: bug, sql :tickets: 2402 Fixed bug in new "autoload_replace" flag which would fail to preserve the primary key constraint of the reflected table. .. change:: :tags: bug, sql :tickets: 2380 Index will raise when arguments passed cannot be interpreted as columns or expressions. Will warn when Index is created with no columns at all. .. change:: :tags: engine, feature :tickets: 2407 Added "no_parameters=True" execution option for connections. If no parameters are present, will pass the statement as cursor.execute(statement), thereby invoking the DBAPI's behavior when no parameter collection is present; for psycopg2 and mysql-python, this means not interpreting % signs in the string. This only occurs with this option, and not just if the param list is blank, as otherwise this would produce inconsistent behavior of SQL expressions that normally escape percent signs (and while compiling, can't know ahead of time if parameters will be present in some cases). .. change:: :tags: engine, bug :tickets: Added execution_options() call to MockConnection (i.e., that used with strategy="mock") which acts as a pass-through for arguments. .. change:: :tags: engine, feature :tickets: 2378 Added pool_reset_on_return argument to create_engine, allows control over "connection return" behavior. Also added new arguments 'rollback', 'commit', None to pool.reset_on_return to allow more control over connection return activity. .. change:: :tags: engine, feature :tickets: Added some decent context managers to Engine, Connection:: with engine.begin() as conn: and:: with engine.connect() as conn: Both close out the connection when done; with engine.begin(), the transaction is committed on success and rolled back if an error is raised.
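As a quick illustration of the two patterns (a sketch; the ``users`` table is hypothetical)::

    from sqlalchemy import create_engine

    engine = create_engine("sqlite://")
    engine.execute("CREATE TABLE users (name VARCHAR(50))")

    # the transaction is committed on success, rolled back on exception
    with engine.begin() as conn:
        conn.execute("INSERT INTO users (name) VALUES ('ed')")

    # the connection is returned to the pool when the block ends
    with engine.connect() as conn:
        print conn.execute("SELECT name FROM users").fetchall()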
change:: :tags: sqlite, bug :tickets: 2432 Fixed bug in C extensions whereby string format would not be applied to a Numeric value returned as integer; this affected primarily SQLite which does not maintain numeric scale settings. .. change:: :tags: mssql, feature :tickets: 2430 Added support for MSSQL INSERT, UPDATE, and DELETE table hints, using new with_hint() method on UpdateBase. .. change:: :tags: feature, mysql :tickets: 2386 Added support for MySQL index and primary key constraint types (i.e. USING) via new mysql_using parameter to Index and PrimaryKeyConstraint, courtesy Diana Clarke. .. change:: :tags: feature, mysql :tickets: 2394 Added support for the "isolation_level" parameter to all MySQL dialects. Thanks to mu_mind for the patch here. .. change:: :tags: oracle, feature :tickets: 2399 Added a new create_engine() flag coerce_to_decimal=False, disables the precision numeric handling which can add lots of overhead by converting all numeric values to Decimal. .. change:: :tags: oracle, bug :tickets: 2401 Added missing compilation support for LONG .. change:: :tags: oracle, bug :tickets: 2435 Added 'LEVEL' to the list of reserved words for Oracle. .. change:: :tags: examples, bug :tickets: Altered _params_from_query() function in Beaker example to pull bindparams from the fully compiled statement, as a quick means to get everything including subqueries in the columns clause, etc. .. changelog:: :version: 0.7.5 :released: Sat Jan 28 2012 .. change:: :tags: orm, bug :tickets: 2389 Fixed issue where modified session state established after a failed flush would be committed as part of the subsequent transaction that begins automatically after manual call to rollback(). The state of the session is checked within rollback(), and if new state is present, a warning is emitted and restore_snapshot() is called a second time, discarding those changes. .. change:: :tags: orm, bug :tickets: 2345 Fixed regression from 0.7.4 whereby using an already instrumented column from a superclass as "polymorphic_on" failed to resolve the underlying Column. .. change:: :tags: orm, bug :tickets: 2370 Raise an exception if xyzload_all() is used inappropriately with two non-connected relationships. .. change:: :tags: orm, feature :tickets: Added "class_registry" argument to declarative_base(). Allows two or more declarative bases to share the same registry of class names. .. change:: :tags: orm, feature :tickets: query.filter() accepts multiple criteria which will join via AND, i.e. query.filter(x==y, z>q, ...) .. change:: :tags: orm, feature :tickets: 2351 Added new capability to relationship loader options to allow "default" loader strategies. Pass '*' to any of joinedload(), lazyload(), subqueryload(), or noload() and that becomes the loader strategy used for all relationships, except for those explicitly stated in the Query. Thanks to up-and-coming contributor Kent Bower for an exhaustive and well written test suite ! .. change:: :tags: orm, bug :tickets: 2367 Fixed bug whereby event.listen(SomeClass) forced an entirely unnecessary compile of the mapper, making events very hard to set up at module import time (nobody noticed this ??) .. change:: :tags: orm, bug :tickets: Fixed bug whereby hybrid_property didn't work as a kw arg in any(), has(). .. change:: :tags: orm :tickets: Fixed regression from 0.6 whereby if "load_on_pending" relationship() flag were used where a non-"get()" lazy clause needed to be emitted on a pending object, it would fail to load. .. 
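A minimal sketch of the "isolation_level" parameter described above, assuming a MySQL server is reachable at the (hypothetical) URL given::

    from sqlalchemy import create_engine

    # the isolation level is applied to each newly procured DBAPI connection
    engine = create_engine(
        'mysql://scott:tiger@localhost/test',
        isolation_level='READ COMMITTED')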
change:: :tags: orm, bug :tickets: 2371 ensure pickleability of all ORM exceptions for multiprocessing compatibility. .. change:: :tags: orm, bug :tickets: 2353 implemented standard "can't set attribute" / "can't delete attribute" AttributeError when setattr/delattr used on a hybrid that doesn't define fset or fdel. .. change:: :tags: orm, bug :tickets: 2362 Fixed bug where unpickled object didn't have enough of its state set up to work correctly within the unpickle() event established by the mutable object extension, if the object needed ORM attribute access within __eq__() or similar. .. change:: :tags: orm, bug :tickets: 2374 Fixed bug where "merge" cascade could mis-interpret an unloaded attribute, if the load_on_pending flag were used with relationship(). Thanks to Kent Bower for tests. .. change:: :tags: orm, feature :tickets: 2356 New declarative reflection example added, illustrates how best to mix table reflection with declarative as well as uses some new features from. .. change:: :tags: feature, sql :tickets: 2356 New reflection feature "autoload_replace"; when set to False on Table, the Table can be autoloaded without existing columns being replaced. Allows more flexible chains of Table construction/reflection to be constructed, including that it helps with combining Declarative with table reflection. See the new example on the wiki. .. change:: :tags: bug, sql :tickets: 2356 Improved the API for add_column() such that if the same column is added to its own table, an error is not raised and the constraints don't get doubled up. Also helps with some reflection/declarative patterns. .. change:: :tags: feature, sql :tickets: Added "false()" and "true()" expression constructs to sqlalchemy.sql namespace, though not part of __all__ as of yet. .. change:: :tags: feature, sql :tickets: 2361 Dialect-specific compilers now raise CompileError for all type/statement compilation issues, instead of InvalidRequestError or ArgumentError. The DDL for CREATE TABLE will re-raise CompileError to include table/column information for the problematic column. .. change:: :tags: bug, sql :tickets: 2381 Fixed issue where the "required" exception would not be raised for bindparam() with required=True, if the statement were given no parameters at all. .. change:: :tags: engine, bug :tickets: 2371 Added __reduce__ to StatementError, DBAPIError, column errors so that exceptions are pickleable, as when using multiprocessing. However, not all DBAPIs support this yet, such as psycopg2. .. change:: :tags: engine, bug :tickets: 2382 Improved error messages when a non-string or invalid string is passed to any of the date/time processors used by SQLite, including C and Python versions. .. change:: :tags: engine, bug :tickets: 2377 Fixed bug whereby a table-bound Column object named "_" which matched a column labeled as "_" could match inappropriately when targeting in a result set row. .. change:: :tags: engine, bug :tickets: 2384 Fixed bug in "mock" strategy whereby correct DDL visit method wasn't called, resulting in "CREATE/DROP SEQUENCE" statements being duplicated .. change:: :tags: sqlite, bug :tickets: 2364 the "name" of an FK constraint in SQLite is reflected as "None", not "0" or other integer value. SQLite does not appear to support constraint naming in any case. .. change:: :tags: sqlite, bug :tickets: 2368 sql.false() and sql.true() compile to 0 and 1, respectively in sqlite .. 
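To illustrate the "default" loader strategy feature described above, a condensed sketch; the Parent/Child mapping is hypothetical::

    from sqlalchemy import Column, ForeignKey, Integer, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session, joinedload, lazyload, relationship

    Base = declarative_base()

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        children = relationship('Child')

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = Session(engine)

    # '*' establishes lazy loading as the default for all relationships;
    # the explicit joinedload() for Parent.children then overrides it
    query = session.query(Parent).options(
        lazyload('*'), joinedload(Parent.children))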
change:: :tags: sqlite, bug :tickets: removed an erroneous "raise" in the SQLite dialect when getting table names and view names, where logic is in place to fall back to an older version of SQLite that doesn't have the "sqlite_temp_master" table. .. change:: :tags: bug, mysql :tickets: 2376 fixed regexp that filters out warnings for non-reflected "PARTITION" directives, thanks to George Reilly .. change:: :tags: mssql, bug :tickets: 2340 Adjusted the regexp used in the mssql.TIME type to ensure only six digits are received for the "microseconds" portion of the value, which is expected by Python's datetime.time(). Note that support for sending microseconds doesn't seem to be possible yet with pyodbc at least. .. change:: :tags: mssql, bug :tickets: 2347 Dropped the "30 char" limit on pymssql, based on reports that it's doing things better these days. pymssql hasn't been well tested and as the DBAPI is in flux it's still not clear what the status is on this driver and how SQLAlchemy's implementation should adapt. .. change:: :tags: oracle, bug :tickets: 2388 Added ORA-03135 to the never ending list of oracle "connection lost" errors .. change:: :tags: core, bug :tickets: 2379 Changed LRUCache, used by the mapper to cache INSERT/UPDATE/DELETE statements, to use an incrementing counter instead of a timestamp to track entries, for greater reliability versus using time.time(), which can cause test failures on some platforms. .. change:: :tags: core, bug :tickets: 2383 Added a boolean check for the "finalize" function within the pool connection proxy's weakref callback before calling it, so that a warning isn't emitted that this function is None when the application is exiting and gc has removed the function from the module before the weakref callback was invoked. .. change:: :tags: bug, py3k :tickets: 2348 Fixed inappropriate usage of util.py3k flag and renamed it to util.py3k_warning, since this flag is intended to detect the -3 flag series of import restrictions only. .. change:: :tags: examples, feature :tickets: 2313 Simplified the versioning example a bit to use a declarative mixin as well as an event listener, instead of a metaclass + SessionExtension. .. change:: :tags: examples, bug :tickets: 2346 Fixed large_collection.py to close the session before dropping tables. .. changelog:: :version: 0.7.4 :released: Fri Dec 09 2011 .. change:: :tags: orm, bug :tickets: 2315 Fixed backref behavior when "popping" the value off of a many-to-one in response to a removal from a stale one-to-many - the operation is skipped, since the many-to-one has since been updated. .. change:: :tags: orm, bug :tickets: 2264 After some years of not doing this, added more granularity to the "is X a parent of Y" functionality, which is used when determining if the FK on "Y" needs to be "nulled out" as well as if "Y" should be deleted with delete-orphan cascade. The test now takes into account the Python identity of the parent as well its identity key, to see if the last known parent of Y is definitely X. If a decision can't be made, a StaleDataError is raised. The conditions where this error is raised are fairly rare, requiring that the previous parent was garbage collected, and previously could very well inappropriately update/delete a record that's since moved onto a new parent, though there may be some cases where "silent success" occurred previously that will now raise in the face of ambiguity. 
Expiring "Y" resets the "parent" tracker, meaning X.remove(Y) could then end up deleting Y even if X is stale, but this is the same behavior as before; it's advised to expire X also in that case. .. change:: :tags: orm, bug :tickets: 2310 fixed inappropriate evaluation of user-mapped object in a boolean context within query.get(). Also in 0.6.9. .. change:: :tags: orm, bug :tickets: 2304 Added missing comma to PASSIVE_RETURN_NEVER_SET symbol. .. change:: :tags: orm, bug :tickets: 1776 Cls.column.collate("some collation") now works. Also in 0.6.9. .. change:: :tags: orm, bug :tickets: 2309 the value of a composite attribute is now expired after an insert or update operation, instead of regenerated in place. This ensures that a column value which is expired within a flush will be loaded first, before the composite is regenerated using that value. .. change:: :tags: orm, bug :tickets: 2309, 2308 The preceding fix also emits the "refresh" event when the composite value is loaded on access, even if all column values were already present, as is appropriate. This fixes the "mutable" extension, which relies upon the "load" event to ensure the _parents dictionary is up to date. Thanks to Scott Torborg for the test case here. .. change:: :tags: orm, bug :tickets: 2312 Fixed bug whereby a subclass of a subclass using concrete inheritance in conjunction with the new ConcreteBase or AbstractConcreteBase would fail to apply the subclasses deeper than one level to the "polymorphic loader" of each base. .. change:: :tags: orm, bug :tickets: 2312 Fixed bug whereby a subclass of a subclass using the new AbstractConcreteBase would fail to acquire the correct "base_mapper" attribute when the "base" mapper was generated, thereby causing failures later on. .. change:: :tags: orm, bug :tickets: 2316 Fixed bug whereby column_property() created against an ORM-level column could be treated as a distinct entity when producing certain kinds of joined-inh joins. .. change:: :tags: orm, bug :tickets: 2297 Fixed the formatting of the error raised when a tuple is inadvertently passed to session.query(). Also in 0.6.9. .. change:: :tags: orm, bug :tickets: 2328 Calls to query.join() to a single-table inheritance subclass are now tracked, and are used to eliminate the additional WHERE.. IN criterion normally tacked on with single table inheritance, since the join should accommodate it. This allows OUTER JOIN to a single table subclass to produce the correct results, and overall will produce fewer WHERE criteria when dealing with single table inheritance joins. .. change:: :tags: orm, bug :tickets: 2339 __table_args__ can now be passed as an empty tuple as well as an empty dict. Thanks to Fayaz Yusuf Khan for the patch. .. change:: :tags: orm, bug :tickets: 2325 Updated warning message when setting delete-orphan without delete to no longer refer to 0.6, as we never got around to upgrading this to an exception. Ideally this might be better as an exception but it's not critical either way. .. change:: :tags: orm, feature :tickets: 2345, 2238 polymorphic_on now accepts many new kinds of values:

* standalone expressions that aren't otherwise mapped
* column_property() objects
* string names of any column_property() or attribute name of a mapped Column

The docs include an example using the case() construct, which is likely to be a common construct used here.
Also as part of this change, standalone expressions in polymorphic_on propagate to single-table inheritance subclasses so that they are used in the WHERE/JOIN clause to limit rows to that subclass, as is the usual behavior. .. change:: :tags: orm, feature :tickets: 2301 IdentitySet supports the - operator, the same as difference(); handy when dealing with Session.dirty etc. .. change:: :tags: orm, feature :tickets: Added new value for Column autoincrement called "ignore_fk", can be used to force autoincrement on a column that's still part of a ForeignKeyConstraint. New example in the relationship docs illustrates its use. .. change:: :tags: orm, bug :tickets: Fixed bug in get_history() when referring to a composite attribute that has no value; added coverage for get_history() regarding composites, which is otherwise just a userland function. .. change:: :tags: bug, sql :tickets: 2316, 2261 Related to the above, made some adjustments to the earlier change regarding the "from" list on a select(). The _froms collection is no longer memoized, as this simplifies various use cases and removes the need for a "warning" if a column is attached to a table after it was already used in an expression - the select() construct will now always produce the correct expression. There's probably no real-world performance hit here; select() objects are almost always made ad-hoc, and systems that wish to optimize the re-use of a select() would be using the "compiled_cache" feature. A hit which would occur when calling select.bind has been reduced, but the vast majority of users shouldn't be using "bound metadata" anyway :). .. change:: :tags: feature, sql :tickets: 2166, 1944 The update() construct can now accommodate multiple tables in the WHERE clause, which will render an "UPDATE..FROM" construct, recognized by Postgresql and MSSQL. When compiled on MySQL, will instead generate "UPDATE t1, t2, ..". MySQL additionally can render against multiple tables in the SET clause, if Column objects are used as keys in the "values" parameter or generative method. .. change:: :tags: feature, sql :tickets: 77 Added accessor to types called "python_type", returns the rudimentary Python type object for a particular TypeEngine instance, if known, else raises NotImplementedError. .. change:: :tags: bug, sql :tickets: 2261, 2319 A further tweak to the preceding fix, so that generative methods work a bit better off of cloned constructs (this is almost a non-use case though). In particular this allows with_only_columns() to behave more consistently. Added additional documentation to with_only_columns() to clarify expected behavior, which changed as a result of these fixes. .. change:: :tags: engine, bug :tickets: 2317 Fixed bug whereby transaction.rollback() would throw an error on an invalidated connection if the transaction were a two-phase or savepoint transaction. For plain transactions, rollback() is a no-op if the connection is invalidated, so while it wasn't 100% clear if it should be a no-op, at least now the interface is consistent. .. change:: :tags: feature, schema :tickets: Added new support for remote "schemas": .. change:: :tags: schema :tickets: MetaData() accepts "schema" and "quote_schema" arguments, which will be applied to the same-named arguments of a Table or Sequence which leaves these at their default of ``None``. .. change:: :tags: schema :tickets: Sequence accepts a "quote_schema" argument. .. change:: :tags: schema :tickets: tometadata() for Table will use the "schema" of the incoming MetaData for the new Table if the schema argument is explicitly "None". ..
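To make the polymorphic_on enhancement above concrete, a condensed sketch along the lines of the documented case() approach; all names here are illustrative::

    from sqlalchemy import Column, Integer, String, case
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Employee(Base):
        __tablename__ = 'employee'
        id = Column(Integer, primary_key=True)
        discriminator = Column(String(50))

        __mapper_args__ = {
            # a standalone SQL expression as the polymorphic discriminator
            'polymorphic_on': case(
                [(discriminator == 'manager', 'manager')],
                else_='employee'),
            'polymorphic_identity': 'employee'
        }

    class Manager(Employee):
        __mapper_args__ = {'polymorphic_identity': 'manager'}

And a sketch of the multi-table UPDATE..FROM feature, again with hypothetical tables::

    from sqlalchemy import (Column, ForeignKey, Integer, MetaData,
                            String, Table)

    metadata = MetaData()
    users = Table('users', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(50)))
    addresses = Table('addresses', metadata,
                      Column('id', Integer, primary_key=True),
                      Column('user_id', Integer, ForeignKey('users.id')),
                      Column('email', String(100)))

    # referencing a second table in the WHERE clause renders UPDATE..FROM
    # on Postgresql and MSSQL, "UPDATE t1, t2" on MySQL
    stmt = users.update().\
        values(name='fred').\
        where(users.c.id == addresses.c.user_id).\
        where(addresses.c.email == 'fred@example.com')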
change:: :tags: schema :tickets: Added CreateSchema and DropSchema DDL constructs - these accept just the string name of a schema and a "quote" flag. .. change:: :tags: schema :tickets: When using default "schema" with MetaData, ForeignKey will also assume the "default" schema when locating a remote table. This allows the "schema" argument on MetaData to be applied to any set of Table objects that otherwise don't have a "schema". .. change:: :tags: schema :tickets: 1679 a "has_schema" method has been implemented on the dialect, but only works on Postgresql so far. Courtesy Manlio Perillo. .. change:: :tags: feature, schema :tickets: 1410 The "extend_existing" flag on Table now allows for the reflection process to take effect for a Table object that's already been defined; when autoload=True and extend_existing=True are both set, the full set of columns will be reflected from the Table which will then *overwrite* those columns already present, rather than no activity occurring. Columns that are present directly in the autoload run will be used as always, however. .. change:: :tags: bug, schema :tickets: Fixed bug whereby TypeDecorator would return a stale value for _type_affinity, when using a TypeDecorator that "switches" types, like the CHAR/UUID type. .. change:: :tags: bug, schema :tickets: Fixed bug whereby "order_by='foreign_key'" option to Inspector.get_table_names wasn't implementing the sort properly; replaced with the existing sort algorithm. .. change:: :tags: bug, schema :tickets: 2305 the "name" of a column-level CHECK constraint, if present, is now rendered in the CREATE TABLE statement using "CONSTRAINT <name> CHECK <expression>". .. change:: :tags: pyodbc, bug :tickets: 2318 pyodbc-based dialects now parse the pyodbc version string accurately as far as observed pyodbc strings go, including such gems as "py3-3.0.1-beta4". .. change:: :tags: postgresql, bug :tickets: 2311 Postgresql dialect memoizes that an ENUM of a particular name was processed during a create/drop sequence. This allows a create/drop sequence to work without any calls to "checkfirst", and also means with "checkfirst" turned on it only needs to check for the ENUM once. .. change:: :tags: postgresql, feature :tickets: Added create_type constructor argument to pg.ENUM. When False, no CREATE/DROP or checking for the type will be performed as part of a table create/drop event; only the create()/drop() methods called directly will do this. Helps with Alembic "offline" scripts. .. change:: :tags: mssql, feature :tickets: 822 lifted the restriction on SAVEPOINT for SQL Server. All tests pass using it; it's not known if there are deeper issues, however. .. change:: :tags: mssql, bug :tickets: 2336 repaired the with_hint() feature which wasn't implemented correctly on MSSQL - usually used for the "WITH (NOLOCK)" hint (which you shouldn't be using anyway! use snapshot isolation instead :) ). .. change:: :tags: mssql, bug :tickets: 2318 use new pyodbc version detection for the _need_decimal_fix option. .. change:: :tags: mssql, bug :tickets: 2343 don't cast "table name" as NVARCHAR on SQL Server 2000. Still mostly in the dark what incantations are needed to make PyODBC work fully with FreeTDS 0.91 here, however. .. change:: :tags: mssql, bug :tickets: 2269 Decode incoming values when retrieving the list of index names and the names of columns within those indexes. .. change:: :tags: bug, mysql :tickets: Unicode adjustments allow the latest pymysql (post 0.4) to pass 100% on Python 2. ..
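A short sketch of the CreateSchema / DropSchema constructs noted above; the connection URL is hypothetical::

    from sqlalchemy import create_engine
    from sqlalchemy.schema import CreateSchema, DropSchema

    engine = create_engine('postgresql://scott:tiger@localhost/test')

    # emit CREATE SCHEMA / DROP SCHEMA as plain DDL
    engine.execute(CreateSchema('some_schema'))
    engine.execute(DropSchema('some_schema'))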
change:: :tags: ext, feature :tickets: Added an example to the hybrid docs of a "transformer" - a hybrid that returns a query-transforming callable in combination with a custom comparator. Uses a new method on Query called with_transformation(). The use case here is fairly experimental, but only adds one line of code to Query. .. change:: :tags: ext, bug :tickets: the @compiles decorator raises an informative error message when no "default" compilation handler is present, rather than KeyError. .. change:: :tags: examples, bug :tickets: Fixed bug in history_meta.py example where the "unique" flag was not removed from a single-table-inheritance subclass which generates columns to put up onto the base. .. changelog:: :version: 0.7.3 :released: Sun Oct 16 2011 .. change:: :tags: general :tickets: 2279 Adjusted the "importlater" mechanism, which is used internally to resolve import cycles, such that the usage of __import__ is completed when the import of sqlalchemy or sqlalchemy.orm is done, thereby avoiding any usage of __import__ after the application starts new threads, fixes. Also in 0.6.9. .. change:: :tags: orm :tickets: 2298 Improved query.join() such that the "left" side can more flexibly be a non-ORM selectable, such as a subquery. A selectable placed in select_from() will now be used as the left side, favored over implicit usage of a mapped entity. If the join still fails based on lack of foreign keys, the error message includes this detail. Thanks to brianrhude on IRC for the test case. .. change:: :tags: orm :tickets: 2241 Added after_soft_rollback() Session event. This event fires unconditionally whenever rollback() is called, regardless of if an actual DBAPI level rollback occurred. This event is specifically designed to allow operations with the Session to proceed after a rollback when the Session.is_active is True. .. change:: :tags: orm :tickets: added "adapt_on_names" boolean flag to orm.aliased() construct. Allows an aliased() construct to link the ORM entity to a selectable that contains aggregates or other derived forms of a particular attribute, provided the name is the same as that of the entity mapped column. .. change:: :tags: orm :tickets: Added new flag expire_on_flush=False to column_property(), marks those properties that would otherwise be considered to be "readonly", i.e. derived from SQL expressions, to retain their value after a flush has occurred, including if the parent object itself was involved in an update. .. change:: :tags: orm :tickets: 2237 Enhanced the instrumentation in the ORM to support Py3K's new argument style of "required kw arguments", i.e. fn(a, b, \*, c, d), fn(a, b, \*args, c, d). Argument signatures of mapped object's __init__ method will be preserved, including required kw rules. .. change:: :tags: orm :tickets: 2282 Fixed bug in unit of work whereby detection of "cycles" among classes in highly interlinked patterns would not produce a deterministic result; thereby sometimes missing some nodes that should be considered cycles and causing further issues down the road. Note this bug is in 0.6 also; not backported at the moment. .. change:: :tags: orm :tickets: Fixed a variety of synonym()-related regressions from 0.6: * making a synonym against a synonym now works. * synonyms made against a relationship() can be passed to query.join(), options sent to query.options(), passed by name to query.with_parent(). .. 
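The after_soft_rollback() event described above can be wired up along these lines; a minimal hedged sketch::

    from sqlalchemy import event
    from sqlalchemy.orm import Session

    @event.listens_for(Session, 'after_soft_rollback')
    def on_soft_rollback(session, previous_transaction):
        # fires for every rollback(), whether or not an actual
        # DBAPI-level rollback occurred
        if session.is_active:
            print("session remains active after rollback")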
change:: :tags: orm :tickets: 2287 Fixed bug whereby the mapper.order_by attribute would be ignored in the "inner" query within a subquery eager load. Also in 0.6.9. .. change:: :tags: orm :tickets: 2267 Identity map .discard() uses dict.pop(key, None) internally instead of "del" to avoid KeyError/warning during a non-determinate gc teardown. .. change:: :tags: orm :tickets: 2253 Fixed regression in new composite rewrite where the deferred=True option failed due to a missing import. .. change:: :tags: orm :tickets: 2248 Reinstated "comparator_factory" argument to composite(), removed when 0.7 was released. .. change:: :tags: orm :tickets: 2247 Fixed bug in query.join() which would occur in a complex multiple-overlapping path scenario, where the same table could be joined to twice. Thanks *much* to Dave Vitek for the excellent fix here. .. change:: :tags: orm :tickets: Query will convert an OFFSET of zero when slicing into None, so that needless OFFSET clauses are not invoked. .. change:: :tags: orm :tickets: Repaired edge case where mapper would fail to fully update internal state when a relationship on a new mapper would establish a backref on the first mapper. .. change:: :tags: orm :tickets: 2260 Fixed bug whereby if __eq__() was redefined, a relationship many-to-one lazyload would hit the __eq__() and fail. Does not apply to 0.6.9. .. change:: :tags: orm :tickets: 2196 Calling class_mapper() and passing in an object that is not a "type" (i.e. a class that could potentially be mapped) now raises an informative ArgumentError, rather than UnmappedClassError. .. change:: :tags: orm :tickets: New event hook, MapperEvents.after_configured(). Called after a configure() step has completed and mappers were in fact affected. Theoretically this event is called once per application, unless new mappings are constructed after existing ones have been used already. .. change:: :tags: orm :tickets: 2281 When an open Session is garbage collected, the objects within it which remain are considered detached again when they are add()-ed to a new Session. This is accomplished by an extra check that the previous "session_key" doesn't actually exist among the pool of Sessions. .. change:: :tags: orm :tickets: 2239 New declarative features:

* __declare_last__() method, establishes an event listener for the class method that will be called when mappers are completed with the final "configure" step.
* __abstract__ flag. The class will not be mapped at all when this flag is present on the class.
* New helper classes ConcreteBase, AbstractConcreteBase. Allow concrete mappings using declarative which automatically set up the "polymorphic_union" when the "configure" mapper step is invoked.
* The mapper itself has semi-private methods that allow the "with_polymorphic" selectable to be assigned to the mapper after it has already been configured.

.. change:: :tags: orm :tickets: 2283 Declarative will warn when a subclass' base uses @declared_attr for a regular column - this attribute does not propagate to subclasses. .. change:: :tags: orm :tickets: 2280 The integer "id" used to link a mapped instance with its owning Session is now generated by a sequence generation function rather than id(Session), to eliminate the possibility of recycled id() values causing an incorrect result; no need to check that the object is actually in the session. .. change:: :tags: orm :tickets: 2257 Behavioral improvement: empty conjunctions such as and_() and or_() will be flattened in the context of an enclosing conjunction, i.e.
and_(x, or_()) will produce 'X' and not 'X AND ()'.. .. change:: :tags: orm :tickets: 2261 Fixed bug regarding calculation of "from" list for a select() element. The "from" calc is now delayed, so that if the construct uses a Column object that is not yet attached to a Table, but is later associated with a Table, it generates SQL using the table as a FROM. This change impacted fairly deeply the mechanics of how the FROM list as well as the "correlates" collection is calculated, as some "clause adaption" schemes (these are used very heavily in the ORM) were relying upon the fact that the "froms" collection would typically be cached before the adaption completed. The rework allows it such that the "froms" collection can be cleared and re-generated at any time. .. change:: :tags: orm :tickets: 2270 Fixed bug whereby with_only_columns() method of Select would fail if a selectable were passed.. Also in 0.6.9. .. change:: :tags: schema :tickets: 2284 Modified Column.copy() to use _constructor(), which defaults to self.__class__, in order to create the new object. This allows easier support of subclassing Column. .. change:: :tags: schema :tickets: 2223 Added a slightly nicer __repr__() to SchemaItem classes. Note the repr here can't fully support the "repr is the constructor" idea since schema items can be very deeply nested/cyclical, have late initialization of some things, etc. .. change:: :tags: engine :tickets: 2254 The recreate() method in all pool classes uses self.__class__ to get at the type of pool to produce, in the case of subclassing. Note there's no usual need to subclass pools. .. change:: :tags: engine :tickets: 2243 Improvement to multi-param statement logging, long lists of bound parameter sets will be compressed with an informative indicator of the compression taking place. Exception messages use the same improved formatting. .. change:: :tags: engine :tickets: Added optional "sa_pool_key" argument to pool.manage(dbapi).connect() so that serialization of args is not necessary. .. change:: :tags: engine :tickets: 2286 The entry point resolution supported by create_engine() now supports resolution of individual DBAPI drivers on top of a built-in or entry point-resolved dialect, using the standard '+' notation - it's converted to a '.' before being resolved as an entry point. .. change:: :tags: engine :tickets: 2299 Added an exception catch + warning for the "return unicode detection" step within connect, allows databases that crash on NVARCHAR to continue initializing, assuming no NVARCHAR type implemented. .. change:: :tags: types :tickets: 2258 Extra keyword arguments to the base Float type beyond "precision" and "asdecimal" are ignored; added a deprecation warning here and additional docs, related to .. change:: :tags: sqlite :tickets: Ensured that the same ValueError is raised for illegal date/time/datetime string parsed from the database regardless of whether C extensions are in use or not. .. change:: :tags: postgresql :tickets: 2290 Added "postgresql_using" argument to Index(), produces USING clause to specify index implementation for PG. . Thanks to Ryan P. Kelly for the patch. .. change:: :tags: postgresql :tickets: 1839 Added client_encoding parameter to create_engine() when the postgresql+psycopg2 dialect is used; calls the psycopg2 set_client_encoding() method with the value upon connect. .. change:: :tags: postgresql :tickets: 2291, 2141 Fixed bug related to whereby the same modified index behavior in PG 9 affected primary key reflection on a renamed column.. 
Also in 0.6.9. .. change:: :tags: postgresql :tickets: 2256 Reflection functions for Table, Sequence are no longer case insensitive. Names that differ only in case will be correctly distinguished. .. change:: :tags: postgresql :tickets: Use an atomic counter as the "random number" source for server side cursor names; conflicts have been reported in rare cases. .. change:: :tags: postgresql :tickets: 2249 Narrowed the assumption made when reflecting a foreign-key referenced table with schema in the current search path; an explicit schema will be applied to the referenced table only if it actually matches that of the referencing table, which also has an explicit schema. Previously it was assumed that "current" schema was synonymous with the full search_path. .. change:: :tags: mysql :tickets: 2225 a CREATE TABLE will put the COLLATE option after CHARSET, which appears to be part of MySQL's arbitrary rules regarding whether it will actually work or not. Also in 0.6.9. .. change:: :tags: mysql :tickets: 2293 Added mysql_length parameter to the Index construct, specifies "length" for indexes. .. change:: :tags: mssql :tickets: 2273 Changes to attempt support of FreeTDS 0.91 with Pyodbc. This includes that string binds are sent as Python unicode objects when FreeTDS 0.91 is detected, and a CAST(? AS NVARCHAR) is used when we detect for a table. However, I'd continue to characterize Pyodbc + FreeTDS 0.91 behavior as pretty crappy; there are still many queries, such as those used in reflection, which cause a core dump on Linux, and it is not really usable at all on OSX, with MemoryErrors abounding and just plain broken unicode support. .. change:: :tags: mssql :tickets: 2277 The behavior of =/!= when comparing a scalar select to a value will no longer produce IN/NOT IN as of 0.8; this behavior is a little too heavy handed (use in_() if you want to emit IN) and now emits a deprecation warning. To get the 0.8 behavior immediately and remove the warning, a compiler recipe is given at http://www.sqlalchemy.org/docs/07/dialects/mssql.html#scalar-select-comparisons to override the behavior of visit_binary(). .. change:: :tags: mssql :tickets: 2222 "0" is accepted as an argument for limit() which will produce "TOP 0". .. change:: :tags: oracle :tickets: 2272 Fixed ReturningResultProxy for the zxjdbc dialect. Regression from 0.6. .. change:: :tags: oracle :tickets: 2252 The String type now generates VARCHAR2 on Oracle, which is recommended as the default VARCHAR. Added an explicit VARCHAR2 and NVARCHAR2 to the Oracle dialect as well. Using NVARCHAR still generates "NVARCHAR2" - there is no "NVARCHAR" on Oracle - this remains a slight breakage of the "uppercase types always give exactly that" policy. VARCHAR still generates "VARCHAR", keeping with the policy. If Oracle were to ever define "VARCHAR" as something different, as they claim (IMHO this will never happen), the type would be available. .. change:: :tags: ext :tickets: 2262 SQLSoup will not be included in version 0.8 of SQLAlchemy; while useful, we would like to keep SQLAlchemy itself focused on one ORM usage paradigm. SQLSoup will hopefully soon be superseded by a third party project. .. change:: :tags: ext :tickets: 2236 Added local_attr, remote_attr, attr accessors to AssociationProxy, providing quick access to the proxied attributes at the class level. .. change:: :tags: ext :tickets: 2275 Changed the update() method on the association proxy dictionary to use a duck typing approach, i.e. checks for "keys", to discern between update({}) and update((a, b)).
Previously, passing a dictionary that had tuples as keys would be misinterpreted as a sequence. .. change:: :tags: examples :tickets: 2266 Adjusted dictlike-polymorphic.py example to apply the CAST such that it works on PG, other databases. Also in 0.6.9. .. changelog:: :version: 0.7.2 :released: Sun Jul 31 2011 .. change:: :tags: orm :tickets: 2213 Feature enhancement: joined and subquery loading will now traverse already-present related objects and collections in search of unpopulated attributes throughout the scope of the eager load being defined, so that the eager loading that is specified via mappings or query options unconditionally takes place for the full depth, populating whatever is not already populated. Previously, this traversal would stop if a related object or collection were already present leading to inconsistent behavior (though would save on loads/cycles for an already-loaded graph). For a subqueryload, this means that the additional SELECT statements emitted by subqueryload will invoke unconditionally, no matter how much of the existing graph is already present (hence the controversy). The previous behavior of "stopping" is still in effect when a query is the result of an attribute-initiated lazyload, as otherwise an "N+1" style of collection iteration can become needlessly expensive when the same related object is encountered repeatedly. There's also an as-yet-not-public generative Query method _with_invoke_all_eagers() which selects old/new behavior .. change:: :tags: orm :tickets: 2195 A rework of "replacement traversal" within the ORM as it alters selectables to be against aliases of things (i.e. clause adaption) includes a fix for multiply-nested any()/has() constructs against a joined table structure. .. change:: :tags: orm :tickets: 2234 Fixed bug where query.join() + aliased=True from a joined-inh structure to itself on relationship() with join condition on the child table would convert the lead entity into the joined one inappropriately. Also in 0.6.9. .. change:: :tags: orm :tickets: 2205 Fixed regression from 0.6 where Session.add() against an object which contained None in a collection would raise an internal exception. Reverted this to 0.6's behavior which is to accept the None but obviously nothing is persisted. Ideally, collections with None present or on append() should at least emit a warning, which is being considered for 0.8. .. change:: :tags: orm :tickets: 2191 Load of a deferred() attribute on an object where row can't be located raises ObjectDeletedError instead of failing later on; improved the message in ObjectDeletedError to include other conditions besides a simple "delete". .. change:: :tags: orm :tickets: 2224 Fixed regression from 0.6 where a get history operation on some relationship() based attributes would fail when a lazyload would emit; this could trigger within a flush() under certain conditions. Thanks to the user who submitted the great test for this. .. change:: :tags: orm :tickets: 2228 Fixed bug apparent only in Python 3 whereby sorting of persistent + pending objects during flush would produce an illegal comparison, if the persistent object primary key is not a single integer. Also in 0.6.9 .. change:: :tags: orm :tickets: 2197 Fixed bug whereby the source clause used by query.join() would be inconsistent if against a column expression that combined multiple entities together. Also in 0.6.9 .. 
change:: :tags: orm :tickets: 2215 Fixed bug whereby if a mapped class redefined __hash__() or __eq__() to something non-standard, which is a supported use case as SQLA should never consult these, the methods would be consulted if the class was part of a "composite" (i.e. non-single-entity) result set. Also in 0.6.9. .. change:: :tags: orm :tickets: 2240 Added public attribute ".validators" to Mapper, an immutable dictionary view of all attributes that have been decorated with the @validates decorator. courtesy Stefano Fontanelli .. change:: :tags: orm :tickets: 2188 Fixed subtle bug that caused SQL to blow up if: column_property() against subquery + joinedload + LIMIT + order by the column property() occurred. . Also in 0.6.9 .. change:: :tags: orm :tickets: 2207 The join condition produced by with_parent as well as when using a "dynamic" relationship against a parent will generate unique bindparams, rather than incorrectly repeating the same bindparam. . Also in 0.6.9. .. change:: :tags: orm :tickets: Added the same "columns-only" check to mapper.polymorphic_on as used when receiving user arguments to relationship.order_by, foreign_keys, remote_side, etc. .. change:: :tags: orm :tickets: 2190 Fixed bug whereby comparison of column expression to a Query() would not call as_scalar() on the underlying SELECT statement to produce a scalar subquery, in the way that occurs if you called it on Query().subquery(). .. change:: :tags: orm :tickets: 2194 Fixed declarative bug where a class inheriting from a superclass of the same name would fail due to an unnecessary lookup of the name in the _decl_class_registry. .. change:: :tags: orm :tickets: 2199 Repaired the "no statement condition" assertion in Query which would attempt to raise if a generative method were called after from_statement() were called.. Also in 0.6.9. .. change:: :tags: sql :tickets: 2188 Fixed two subtle bugs involving column correspondence in a selectable, one with the same labeled subquery repeated, the other when the label has been "grouped" and loses itself. Affects. .. change:: :tags: schema :tickets: 2187 New feature: with_variant() method on all types. Produces an instance of Variant(), a special TypeDecorator which will select the usage of a different type based on the dialect in use. .. change:: :tags: schema :tickets: Added an informative error message when ForeignKeyConstraint refers to a column name in the parent that is not found. Also in 0.6.9. .. change:: :tags: schema :tickets: 2206 Fixed bug whereby adaptation of old append_ddl_listener() function was passing unexpected \**kw through to the Table event. Table gets no kws, the MetaData event in 0.6 would get "tables=somecollection", this behavior is preserved. .. change:: :tags: schema :tickets: Fixed bug where "autoincrement" detection on Table would fail if the type had no "affinity" value, in particular this would occur when using the UUID example on the site that uses TypeEngine as the "impl". .. change:: :tags: schema :tickets: 2209 Added an improved repr() to TypeEngine objects that will only display constructor args which are positional or kwargs that deviate from the default. .. change:: :tags: engine :tickets: Context manager provided by Connection.begin() will issue rollback() if the commit() fails, not just if an exception occurs. .. change:: :tags: engine :tickets: 1682 Use urllib.parse_qsl() in Python 2.6 and above, no deprecation warning about cgi.parse_qsl() .. change:: :tags: engine :tickets: Added mixin class sqlalchemy.ext.DontWrapMixin. 
User-defined exceptions of this type are never wrapped in StatementException when they occur in the context of a statement execution. .. change:: :tags: engine :tickets: StatementException wrapping will display the original exception class in the message. .. change:: :tags: engine :tickets: 2201 Failures on connect which raise dbapi.Error will forward the error to dialect.is_disconnect() and set the "connection_invalidated" flag if the dialect knows this to be a potentially "retryable" condition. Only Oracle ORA-01033 implemented for now. .. change:: :tags: sqlite :tickets: 2189 SQLite dialect no longer strips quotes off of reflected default value, allowing a round trip CREATE TABLE to work. This is consistent with other dialects that also maintain the exact form of the default. .. change:: :tags: postgresql :tickets: 2198 Added new "postgresql_ops" argument to Index, allows specification of PostgreSQL operator classes for indexed columns. Courtesy Filip Zyzniewski. .. change:: :tags: mysql :tickets: 2186 Fixed OurSQL dialect to use ansi-neutral quote symbol "'" for XA commands instead of '"'. . Also in 0.6.9. .. change:: :tags: mssql :tickets: Adjusted the pyodbc dialect such that bound values are passed as bytes and not unicode if the "Easysoft" unix drivers are detected. This is the same behavior as occurs with FreeTDS. Easysoft appears to segfault if Python unicodes are passed under certain circumstances. .. change:: :tags: oracle :tickets: 2200 Added ORA-00028 to disconnect codes, use cx_oracle _Error.code to get at the code,. Also in 0.6.9. .. change:: :tags: oracle :tickets: 2201 Added ORA-01033 to disconnect codes, which can be caught during a connection event. .. change:: :tags: oracle :tickets: 2220 repaired the oracle.RAW type which did not generate the correct DDL. Also in 0.6.9. .. change:: :tags: oracle :tickets: 2212 added CURRENT to reserved word list. Also in 0.6.9. .. change:: :tags: oracle :tickets: Fixed bug in the mutable extension whereby if the same type were used twice in one mapping, the attributes beyond the first would not get instrumented. .. change:: :tags: oracle :tickets: Fixed bug in the mutable extension whereby if None or a non-corresponding type were set, an error would be raised. None is now accepted which assigns None to all attributes, illegal values raise ValueError. .. change:: :tags: examples :tickets: Repaired the examples/versioning test runner to not rely upon SQLAlchemy test libs, nosetests must be run from within examples/versioning to get around setup.cfg breaking it. .. change:: :tags: examples :tickets: Tweak to examples/versioning to pick the correct foreign key in a multi-level inheritance situation. .. change:: :tags: examples :tickets: Fixed the attribute shard example to check for bind param callable correctly in 0.7 style. .. changelog:: :version: 0.7.1 :released: Sun Jun 05 2011 .. change:: :tags: general :tickets: 2184 Added a workaround for Python bug 7511 where failure of C extension build does not raise an appropriate exception on Windows 64 bit + VC express .. change:: :tags: orm :tickets: 1912 "delete-orphan" cascade is now allowed on self-referential relationships - this since SQLA 0.7 no longer enforces "parent with no child" at the ORM level; this check is left up to foreign key nullability. Related to .. change:: :tags: orm :tickets: 2180 Repaired new "mutable" extension to propagate events to subclasses correctly; don't create multiple event listeners for subclasses either. .. 
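Stepping back to the with_variant() method noted in the 0.7.2 schema section above, a minimal sketch; the choice of the MySQL VARCHAR type here is illustrative::

    from sqlalchemy import String
    from sqlalchemy.dialects.mysql import VARCHAR

    # renders String(30) on all backends, except on MySQL where the
    # dialect-specific VARCHAR (with its charset argument) is used
    string_type = String(30).with_variant(
        VARCHAR(30, charset='utf8'), 'mysql')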
change:: :tags: orm :tickets: 2170 Modify the text of the message which occurs when the "identity" key isn't detected on flush, to include the common cause that the Column isn't set up to detect auto-increment correctly. Also in 0.6.8. .. change:: :tags: orm :tickets: 2182 Fixed bug where transaction-level "deleted" collection wouldn't be cleared of expunged states, raising an error if they later became transient. Also in 0.6.8. .. change:: :tags: sql :tickets: Fixed bug whereby metadata.reflect(bind) would close a Connection passed as a bind argument. Regression from 0.6. .. change:: :tags: sql :tickets: Streamlined the process by which a Select determines what's in its '.c' collection. Behaves identically, except that a raw ClauseList() passed to select([]) (which is not a documented case anyway) will now be expanded into its individual column elements instead of being ignored. .. change:: :tags: engine :tickets: Deprecate schema/SQL-oriented methods on Connection/Engine that were never well known and are redundant: reflecttable(), create(), drop(), text(), engine.func. .. change:: :tags: engine :tickets: 2178 Adjusted the __contains__() method of a RowProxy result row such that no exception throw is generated internally; NoSuchColumnError() also will generate its message regardless of whether or not the column construct can be coerced to a string. Also in 0.6.8. .. change:: :tags: sqlite :tickets: 2173 Accept None from cursor.fetchone() when "PRAGMA read_uncommitted" is called to determine current isolation mode at connect time and default to SERIALIZABLE; this is to support SQLite versions pre-3.3.0 that did not have this feature. .. change:: :tags: postgresql :tickets: 2175 Some unit test fixes regarding numeric arrays, MATCH operator. A potential floating-point inaccuracy issue was fixed, and certain tests of the MATCH operator only execute within an EN-oriented locale for now. Also in 0.6.8. .. change:: :tags: mysql :tickets: Unit tests pass 100% on MySQL installed on Windows. .. change:: :tags: mysql :tickets: 2181 Removed the "adjust casing" step that would fail when reflecting a table on MySQL on Windows with a mixed case name. After some experimenting with a Windows MySQL server, it's been determined that this step wasn't really helping the situation much; MySQL does not return FK names with proper casing on non-Windows platforms either, and removing the step at least allows the reflection to act more like it does on other OSes. A warning here has been considered, but it's difficult to determine under what conditions such a warning can be raised, so punted on that for now - added some docs instead. .. change:: :tags: mysql :tickets: supports_sane_rowcount will be set to False if using MySQLdb and the DBAPI doesn't provide the constants.CLIENT module. .. changelog:: :version: 0.7.0 :released: Fri May 20 2011 .. change:: :tags: :tickets: This section documents those changes from 0.7b4 to 0.7.0. For an overview of what's new in SQLAlchemy 0.7, see http://www.sqlalchemy.org/trac/wiki/07Migration .. change:: :tags: orm :tickets: 2069 Fixed regression introduced in 0.7b4 (!) whereby query.options(someoption("nonexistent name")) would fail to raise an error. Also added additional error catching for cases where the option would try to build off a column-based element, and further fixed up some of the related error messages. .. change:: :tags: orm :tickets: 2162 query.count() emits "count(*)" instead of "count(1)". ..
change:: :tags: orm :tickets: 2155 Fine tuning of Query clause adaptation when from_self(), union(), or other "select from myself" operation, such that plain SQL expression elements added to filter(), order_by() etc. which are present in the nested "from myself" query *will* be adapted in the same way an ORM expression element will, since these elements are otherwise not easily accessible. .. change:: :tags: orm :tickets: 2149 Fixed bug where determination of "self referential" relationship would fail with no workaround for joined-inh subclass related to itself, or joined-inh subclass related to a subclass of that with no cols in the sub-sub class in the join condition. Also in 0.6.8. .. change:: :tags: orm :tickets: 2153 mapper() will ignore non-configured foreign keys to unrelated tables when determining inherit condition between parent and child class, but will raise as usual for unresolved columns and table names regarding the inherited table. This is an enhanced generalization of behavior that was already applied to declarative previously. 0.6.8 has a more conservative version of this which doesn't fundamentally alter how join conditions are determined. .. change:: :tags: orm :tickets: 2144 It is an error to call query.get() when the given entity is not a single, full class entity or mapper (i.e. a column). This is a deprecation warning in 0.6.8. .. change:: :tags: orm :tickets: 2148 Fixed a potential KeyError which under some circumstances could occur with the identity map, part of .. change:: :tags: orm :tickets: added Query.with_session() method, switches Query to use a different session. .. change:: :tags: orm :tickets: 2131 horizontal shard query should use execution options per connection as per .. change:: :tags: orm :tickets: 2151 a non_primary mapper will inherit the _identity_class of the primary mapper. This so that a non_primary established against a class that's normally in an inheritance mapping will produce results that are identity-map compatible with that of the primary mapper (also in 0.6.8) .. change:: :tags: orm :tickets: 2163 Fixed the error message emitted for "can't execute syncrule for destination column 'q'; mapper 'X' does not map this column" to reference the correct mapper. . Also in 0.6.8. .. change:: :tags: orm :tickets: 1502 polymorphic_union() gets a "cast_nulls" option, disables the usage of CAST when it renders the labeled NULL columns. .. change:: :tags: orm :tickets: polymorphic_union() renders the columns in their original table order, as according to the first table/selectable in the list of polymorphic unions in which they appear. (which is itself an unordered mapping unless you pass an OrderedDict). .. change:: :tags: orm :tickets: 2171 Fixed bug whereby mapper mapped to an anonymous alias would fail if logging were used, due to unescaped % sign in the alias name. Also in 0.6.8. .. change:: :tags: sql :tickets: 2167 Fixed bug whereby nesting a label of a select() with another label in it would produce incorrect exported columns. Among other things this would break an ORM column_property() mapping against another column_property(). . Also in 0.6.8 .. change:: :tags: sql :tickets: Changed the handling in determination of join conditions such that foreign key errors are only considered between the two given tables. That is, t1.join(t2) will report FK errors that involve 't1' or 't2', but anything involving 't3' will be skipped. This affects join(), as well as ORM relationship and inherit condition logic. .. 
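A condensed sketch of the Query.with_session() method noted above; the model and the two sessions are hypothetical::

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    s1, s2 = Session(engine), Session(engine)

    q = s1.query(User).filter(User.name == 'ed')
    # the same criteria, executed against the second session
    rows = q.with_session(s2).all()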
change:: :tags: sql :tickets: Some improvements to error handling inside of the execute procedure to ensure auto-close connections are really closed when very unusual DBAPI errors occur. .. change:: :tags: sql :tickets: metadata.reflect() and reflection.Inspector() had some reliance on GC to close connections which were internally procured, fixed this. .. change:: :tags: sql :tickets: 2140 Added explicit check for when Column .name is assigned as blank string .. change:: :tags: sql :tickets: 2147 Fixed bug whereby if FetchedValue was passed to column server_onupdate, it would not have its parent "column" assigned, added test coverage for all column default assignment patterns. also in 0.6.8 .. change:: :tags: postgresql :tickets: Fixed the psycopg2_version parsing in the psycopg2 dialect. .. change:: :tags: postgresql :tickets: 2141 Fixed bug affecting PG 9 whereby index reflection would fail if against a column whose name had changed. . Also in 0.6.8. .. change:: :tags: mssql :tickets: 2169 Fixed bug in MSSQL dialect whereby the aliasing applied to a schema-qualified table would leak into enclosing select statements. Also in 0.6.8. .. change:: :tags: documentation :tickets: 2152 Removed the usage of the "collections.MutableMapping" abc from the ext.mutable docs as it was being used incorrectly and makes the example more difficult to understand in any case. .. change:: :tags: examples :tickets: removed the ancient "polymorphic association" examples and replaced with an updated set of examples that use declarative mixins, "generic_associations". Each presents an alternative table layout. .. change:: :tags: ext :tickets: 2143 Fixed bugs in sqlalchemy.ext.mutable extension where `None` was not appropriately handled, replacement events were not appropriately handled. .. changelog:: :version: 0.7.0b4 :released: Sun Apr 17 2011 .. change:: :tags: general :tickets: Changes to the format of CHANGES, this file. The format changes have been applied to the 0.7 releases. .. change:: :tags: general :tickets: The "-declarative" changes will now be listed directly under the "-orm" section, as these are closely related. .. change:: :tags: general :tickets: The 0.5 series changes have been moved to the file CHANGES_PRE_06 which replaces CHANGES_PRE_05. .. change:: :tags: general :tickets: The changelog for 0.6.7 and subsequent within the 0.6 series is now listed only in the CHANGES file within the 0.6 branch. In the 0.7 CHANGES file (i.e. this file), all the 0.6 changes are listed inline within the 0.7 section in which they were also applied (since all 0.6 changes are in 0.7 as well). Changes that apply to an 0.6 version here are noted as are if any differences in implementation/behavior are present. .. change:: :tags: orm :tickets: 2122 Some fixes to "evaluate" and "fetch" evaluation when query.update(), query.delete() are called. The retrieval of records is done after autoflush in all cases, and before update/delete is emitted, guarding against unflushed data present as well as expired objects failing during the evaluation. .. change:: :tags: orm :tickets: 2063 Reworded the exception raised when a flush is attempted of a subclass that is not polymorphic against the supertype. .. change:: :tags: orm :tickets: Still more wording adjustments when a query option can't find the target entity. Explain that the path must be from one of the root entities. .. 
change:: :tags: orm :tickets: 2123 Some fixes to the state handling regarding backrefs, typically when autoflush=False, where the back-referenced collection wouldn't properly handle add/removes with no net change. Thanks to Richard Murri for the test case + patch. (also in 0.6.7). .. change:: :tags: orm :tickets: 2127 Added checks inside the UOW to detect the unusual condition of being asked to UPDATE or DELETE on a primary key value that contains NULL in it. .. change:: :tags: orm :tickets: 2127 Some refinements to attribute history. More changes are pending possibly in 0.8, but for now history has been modified such that scalar history doesn't have a "side effect" of populating None for a non-present value. This allows a slightly better ability to distinguish between a None set and no actual change, affects as well. .. change:: :tags: orm :tickets: 2130 a "having" clause would be copied from the inside to the outside query if from_self() were used; in particular this would break an 0.7 style count() query. (also in 0.6.7) .. change:: :tags: orm :tickets: 2131 the Query.execution_options() method now passes those options to the Connection rather than the SELECT statement, so that all available options including isolation level and compiled cache may be used. .. change:: :tags: sql :tickets: 2131 The "compiled_cache" execution option now raises an error when passed to a SELECT statement rather than a Connection. Previously it was being ignored entirely. We may look into having this option work on a per-statement level at some point. .. change:: :tags: sql :tickets: Restored the "catchall" constructor on the base TypeEngine class, with a deprecation warning. This so that code which does something like Integer(11) still succeeds. .. change:: :tags: sql :tickets: 2104 Fixed regression whereby MetaData() coming back from unpickling did not keep track of new things it keeps track of now, i.e. collection of Sequence objects, list of schema names. .. change:: :tags: sql :tickets: 2116 The limit/offset keywords to select() as well as the value passed to select.limit()/offset() will be coerced to integer. (also in 0.6.7) .. change:: :tags: sql :tickets: fixed bug where "from" clause gathering from an over() clause would be an itertools.chain() and not a list, causing "can only concatenate list" TypeError when combined with other clauses. .. change:: :tags: sql :tickets: 2134 Fixed incorrect usage of "," in over() clause being placed between the "partition" and "order by" clauses. .. change:: :tags: sql :tickets: 2105 Before/after attach events for PrimaryKeyConstraint now function, tests added for before/after events on all constraint types. .. change:: :tags: sql :tickets: 2117 Added explicit true()/false() constructs to expression lib - coercion rules will intercept "False"/"True" into these constructs. In 0.6, the constructs were typically converted straight to string, which was no longer accepted in 0.7. .. change:: :tags: engine :tickets: 2129 The C extension is now enabled by default on CPython 2.x with a fallback to pure python if it fails to compile. .. change:: :tags: schema :tickets: 2109 The 'useexisting' flag on Table has been superseded by a new pair of flags 'keep_existing' and 'extend_existing'. 'extend_existing' is equivalent to 'useexisting' - the existing Table is returned, and additional constructor elements are added. 
With 'keep_existing', the existing Table is returned, but additional constructor elements are not added - these elements are only applied when the Table is newly created. .. change:: :tags: types :tickets: 2081 REAL has been added to the core types. Supported by Postgresql, SQL Server, MySQL, SQLite. Note that the SQL Server and MySQL versions, which add extra arguments, are also still available from those dialects. .. change:: :tags: types :tickets: 2106 Added the @event.listens_for() decorator; given a target + event name, it applies the decorated function as a listener. .. change:: :tags: pool :tickets: 2103 AssertionPool now stores the traceback indicating where the currently checked out connection was acquired; this traceback is reported within the assertion raised upon a second concurrent checkout; courtesy Gunnlaugur Briem. .. change:: :tags: pool :tickets: The "pool.manage" feature no longer uses pickle to hash the arguments for each pool. .. change:: :tags: sqlite :tickets: 2115 Fixed bug where reflection of a foreign key created as "REFERENCES <tablename>" without a col name would fail. (also in 0.6.7) .. change:: :tags: postgresql :tickets: Psycopg2 for Python 3 is now supported. .. change:: :tags: postgresql :tickets: 2132 Fixed support for precision numerics when using pg8000. .. change:: :tags: oracle :tickets: 2100 Column names that would require quotes for the column itself or for a name-generated bind parameter, such as names with special characters, underscores, non-ascii characters, now properly translate bind parameter keys when talking to cx_oracle. (Also in 0.6.7) .. change:: :tags: oracle :tickets: 2116 Oracle dialect adds a use_binds_for_limits=False create_engine() flag, which will render the LIMIT/OFFSET values inline instead of as binds; reported to modify the execution plan used by Oracle. (Also in 0.6.7) .. change:: :tags: documentation :tickets: 2029 Documented SQLite DATE/TIME/DATETIME types. (also in 0.6.7) .. change:: :tags: documentation :tickets: 2118 Fixed mutable extension docs to show the correct type-association methods. .. changelog:: :version: 0.7.0b3 :released: Sun Mar 20 2011 .. change:: :tags: general :tickets: Lots of fixes to unit tests when run under Pypy (courtesy Alex Gaynor). .. change:: :tags: orm :tickets: 2093 Changed the underlying approach to query.count(). query.count() is now in all cases exactly: query.from_self(func.count(literal_column('1'))).scalar(). That is, "select count(1) from (<the full query>)". This produces a subquery in all cases, but vastly simplifies all the guessing count() tried to do previously, which would still fail in many scenarios particularly when joined table inheritance and other joins were involved. If the subquery produced for an otherwise very simple count is really an issue, use query(func.count()) as an optimization. .. change:: :tags: orm :tickets: 2087 Some changes to the identity map regarding rare weakref callbacks during iterations. The mutex has been removed as it apparently can cause a reentrant (i.e. in one thread) deadlock, perhaps when gc collects objects at the point of iteration in order to gain more memory. It is hoped that "dictionary changed during iteration" will be exceedingly rare as iteration methods internally acquire the full list of objects in a single values() call. Note 0.6.7 has a more conservative fix here which still keeps the mutex in place.
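To illustrate the new count() behavior (a sketch, not part of the original changelog; a configured ``session`` and a mapped ``User`` class are assumed)::

    from sqlalchemy import func, literal_column

    # what query.count() now emits internally:
    # SELECT count(1) FROM (<the full SELECT for User>)
    total = session.query(User).from_self(
        func.count(literal_column('1'))).scalar()

    # equivalent shorthand:
    total = session.query(User).count()

    # optimization for the trivial case, avoiding the subquery:
    total = session.query(func.count(User.id)).scalar()

..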
change:: :tags: orm :tickets: 2082 A tweak to the unit of work causes it to order the flush along relationship() dependencies even if the given objects don't have any inter-attribute references in memory, which was the behavior in 0.5 and earlier, so a flush of Parent/Child with only foreign key/primary key set will succeed. This is while still maintaining 0.6 and above's behavior of not generating a ton of useless internal dependency structures within the flush that don't correspond to state actually within the current flush. .. change:: :tags: orm :tickets: 2069 Improvements to the error messages emitted when querying against column-only entities in conjunction with (typically incorrectly) using loader options, where the parent entity is not fully present. .. change:: :tags: orm :tickets: 2098 Fixed bug in query.options() whereby a path applied to a lazyload using string keys could overlap a same-named attribute on the wrong entity. Note 0.6.7 has a more conservative fix to this. .. change:: :tags: declarative :tickets: 2091 Arguments in __mapper_args__ that aren't "hashable" aren't mistaken for always-hashable, possibly-column arguments. (also in 0.6.7) .. change:: :tags: sql :tickets: Added a fully descriptive error message for the case where Column is subclassed and _make_proxy() fails to make a copy due to TypeError on the constructor. The method _constructor should be implemented in this case. .. change:: :tags: sql :tickets: 2095 Added new event "column_reflect" for Table objects. Receives the info dictionary about a Column before the object is generated within reflection, and allows modification to the dictionary for control over most aspects of the resulting Column including key, name, type, info dictionary. .. change:: :tags: sql :tickets: To help with the "column_reflect" event being used with specific Table objects instead of all instances of Table, listeners can be added to a Table object inline with its construction using a new argument "listeners", a list of tuples of the form (<eventname>, <fn>), which are applied to the Table before the reflection process begins. .. change:: :tags: sql :tickets: 2085 Added new generic function "next_value()", which accepts a Sequence object as its argument and renders the appropriate "next value" generation string on the target platform, if supported. Also provides a ".next_value()" method on Sequence itself. .. change:: :tags: sql :tickets: 2084 func.next_value() or another SQL expression can be embedded directly into an insert() construct, and if implicit or explicit "returning" is used in conjunction with a primary key column, the newly generated value will be present in result.inserted_primary_key. .. change:: :tags: sql :tickets: 2089 Added "returns_rows" and "is_insert" accessors to ResultProxy. (also in 0.6.7) .. change:: :tags: engine :tickets: 2097 Fixed AssertionPool regression bug. .. change:: :tags: engine :tickets: 2060 Changed the exception raised to ArgumentError when an invalid dialect is specified. .. change:: :tags: postgresql :tickets: 2092 Added RESERVED_WORDS for postgresql dialect. (also in 0.6.7) .. change:: :tags: postgresql :tickets: 2073 Fixed the BIT type to allow a "length" parameter and a "varying" parameter. Reflection also fixed. (also in 0.6.7) .. change:: :tags: mssql :tickets: 2071 Rewrote the query used to get the definition of a view, typically when using the Inspector interface, to use sys.sql_modules instead of the information schema, thereby allowing view definitions longer than 4000 characters to be fully returned. (also in 0.6.7)
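A minimal sketch of the "column_reflect" event used with the inline "listeners" argument (the engine URL and the pre-existing ``some_table`` are assumptions for the example)::

    from sqlalchemy import MetaData, Table, create_engine

    def lowercase_key(inspector, table, column_info):
        # mutate the dictionary before the Column object is constructed;
        # here the attribute key is forced to lower case
        column_info['key'] = column_info['name'].lower()

    engine = create_engine('sqlite:///some.db')  # placeholder URL
    metadata = MetaData()

    t = Table('some_table', metadata,
              autoload=True, autoload_with=engine,
              listeners=[('column_reflect', lowercase_key)])

..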
change:: :tags: firebird :tickets: 2083 The "implicit_returning" flag on create_engine() is honored if set to False. (also in 0.6.7) .. change:: :tags: informix :tickets: 2092 Added RESERVED_WORDS to the informix dialect. (also in 0.6.7) .. change:: :tags: ext :tickets: 2090 The horizontal_shard ShardedSession class accepts the common Session argument "query_cls" as a constructor argument, to enable further subclassing of ShardedQuery. (also in 0.6.7) .. change:: :tags: examples :tickets: Updated the association and association proxy examples to use declarative; added a new example, dict_of_sets_with_default.py, a "pushing the envelope" example of association proxy. .. change:: :tags: examples :tickets: 2090 The Beaker caching example allows a "query_cls" argument to the query_callable() function. (also in 0.6.7) .. changelog:: :version: 0.7.0b2 :released: Sat Feb 19 2011 .. change:: :tags: orm :tickets: 2053 Fixed bug whereby Session.merge() would call the load() event with one too few arguments. .. change:: :tags: orm :tickets: 2052 Added logic which prevents a MapperExtension or SessionExtension from generating do-nothing events for all the methods not overridden. .. change:: :tags: declarative :tickets: 2058 Fixed regression whereby composite() with Column objects placed inline would fail to initialize. The Column objects can now be inline with the composite() or external and pulled in via name or object ref. .. change:: :tags: declarative :tickets: 2061 Fixed error message referencing the old @classproperty name to reference @declared_attr. (also in 0.6.7) .. change:: :tags: declarative :tickets: 1468 The dictionary at the end of the __table_args__ tuple is now optional. .. change:: :tags: sql :tickets: 2059 Renamed the EngineEvents event class to ConnectionEvents. As these classes are never accessed directly by end-user code, this is strictly a documentation change for end users. Also simplified how events get linked to engines and connections internally. .. change:: :tags: sql :tickets: 2055 The Sequence() construct, when passed a MetaData() object via its 'metadata' argument, will be included in CREATE/DROP statements within metadata.create_all() and metadata.drop_all(), including "checkfirst" logic. .. change:: :tags: sql :tickets: 2064 The Column.references() method now returns True if it has a foreign key referencing the given column exactly, not just its parent table. .. change:: :tags: postgresql :tickets: 2065 Fixed regression from 0.6 where SMALLINT and BIGINT types would both generate SERIAL on an integer PK column, instead of SMALLINT and BIGSERIAL. .. change:: :tags: ext :tickets: 2054 Association proxy now has correct behavior for any(), has(), and contains() when proxying a many-to-one scalar attribute to a one-to-many collection (i.e. the reverse of the 'typical' association proxy use case). .. change:: :tags: examples :tickets: Beaker example now takes into account 'limit' and 'offset', as well as bind params within embedded FROM clauses (like when you use union() or from_self()), when generating a cache key. .. changelog:: :version: 0.7.0b1 :released: Sat Feb 12 2011 .. change:: :tags: :tickets: Detailed descriptions of each change below are available at: http://www.sqlalchemy.org/trac/wiki/07Migration .. change:: :tags: general :tickets: 1902 New event system, supersedes all extensions, listeners, etc. .. change:: :tags: general :tickets: 1926 Logging enhancements .. change:: :tags: general :tickets: 1949 Setup no longer installs a Nose plugin
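A sketch of the now-optional trailing dictionary in ``__table_args__`` noted above (the ``Widget`` class is invented for the example)::

    from sqlalchemy import Column, Integer, String, UniqueConstraint
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Widget(Base):
        __tablename__ = 'widget'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

        # previously a dict of table keyword options had to terminate
        # the tuple; a plain tuple of constraints is now accepted
        __table_args__ = (UniqueConstraint('name'),)

..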
change:: :tags: general :tickets: The "sqlalchemy.exceptions" alias in sys.modules has been removed. Base SQLA exceptions are available via "from sqlalchemy import exc". The "exceptions" alias for "exc" remains in "sqlalchemy" for now; it's just not patched into sys.modules. .. change:: :tags: orm :tickets: 1923 More succinct form of query.join(target, onclause) .. change:: :tags: orm :tickets: 1903 Hybrid Attributes, implements/supersedes synonym() .. change:: :tags: orm :tickets: 2008 Rewrite of composites .. change:: :tags: orm :tickets: Mutation Event Extension, supersedes "mutable=True" .. seealso:: :ref:`07_migration_mutation_extension` .. change:: :tags: orm :tickets: 1980 PickleType and ARRAY mutability turned off by default .. change:: :tags: orm :tickets: 1895 Simplified polymorphic_on assignment .. change:: :tags: orm :tickets: 1912 Flushing of orphans that have no parent is allowed .. change:: :tags: orm :tickets: 2041 Adjusted the flush accounting step to occur before the commit in the case of autocommit=True. This allows autocommit=True to work appropriately with expire_on_commit=True, and also allows post-flush session hooks to operate in the same transactional context as when autocommit=False. .. change:: :tags: orm :tickets: 1973 Warnings are generated when collection members or scalar referents are not part of the flush .. change:: :tags: orm :tickets: 1876 Non-`Table`-derived constructs can be mapped .. change:: :tags: orm :tickets: 1942 Tuple label names in Query improved .. change:: :tags: orm :tickets: 1892 Mapped column attributes reference the most specific column first .. change:: :tags: orm :tickets: 1896 Mapping to joins with two or more same-named columns requires explicit declaration .. change:: :tags: orm :tickets: 1875 Mapper requires that the polymorphic_on column be present in the mapped selectable .. change:: :tags: orm :tickets: 1966 compile_mappers() renamed configure_mappers(), simplified configuration internals .. change:: :tags: orm :tickets: 2018 The aliased() function, if passed a SQL FromClause element (i.e. not a mapped class), will return element.alias() instead of raising an error on AliasedClass. .. change:: :tags: orm :tickets: 2027 Session.merge() will check the version id of the incoming state against that of the database, assuming the mapping uses version ids and incoming state has a version_id assigned, and raise StaleDataError if they don't match. .. change:: :tags: orm :tickets: 1996 Session.connection(), Session.execute() accept 'bind', to allow execute/connection operations to participate in the open transaction of an engine explicitly. .. change:: :tags: orm :tickets: Query.join(), Query.outerjoin(), eagerload(), eagerload_all(), and others no longer allow lists of attributes as arguments (i.e. option([x, y, z]) form, deprecated since 0.5) .. change:: :tags: orm :tickets: ScopedSession.mapper is removed (deprecated since 0.5). .. change:: :tags: orm :tickets: 2031 Horizontal shard query places 'shard_id' in context.attributes where it's accessible by the "load()" event. .. change:: :tags: orm :tickets: 2032 A single contains_eager() call across multiple entities will indicate all collections along that path should load, instead of requiring distinct contains_eager() calls for each endpoint (which was never correctly documented). .. change:: :tags: orm :tickets: The "name" field used in orm.aliased() now renders in the resulting SQL statement. .. change:: :tags: orm :tickets: 1473 Session weak_instance_dict=False is deprecated.
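As a sketch of the hybrid attribute feature noted above (the ``Interval`` mapping is an assumed, documentation-style example)::

    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.ext.hybrid import hybrid_property

    Base = declarative_base()

    class Interval(Base):
        __tablename__ = 'interval'
        id = Column(Integer, primary_key=True)
        start = Column(Integer, nullable=False)
        end = Column(Integer, nullable=False)

        @hybrid_property
        def length(self):
            # evaluates in Python on an instance, and as a SQL
            # expression when accessed on the class
            return self.end - self.start

    # instance level:  some_interval.length
    # class level:     session.query(Interval).filter(Interval.length > 10)

..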
change:: :tags: orm :tickets: 2046 An exception is raised in the unusual case that an append or similar event on a collection occurs after the parent object has been dereferenced, which prevents the parent from being marked as "dirty" in the session. Was a warning in 0.6.6. .. change:: :tags: orm :tickets: 1069 Query.distinct() now accepts column expressions as \*args, interpreted by the Postgresql dialect as DISTINCT ON (<expr>). .. change:: :tags: orm :tickets: 2049 Additional tuning to "many-to-one" relationship loads during a flush(). A change in version 0.6.6 ([ticket:2002]) meant that more "unnecessary" m2o loads during a flush could occur. Extra loading modes have been added so that the SQL emitted in this specific use case is trimmed back, while still retrieving the information the flush needs in order to not miss anything. .. change:: :tags: orm :tickets: The value of "passive" as passed to attributes.get_history() should be one of the constants defined in the attributes package. Sending True or False is deprecated. .. change:: :tags: orm :tickets: 2030 Added a `name` argument to `Query.subquery()`, to allow a fixed name to be assigned to the alias object. (also in 0.6.7) .. change:: :tags: orm :tickets: 2019 A warning is emitted when a joined-table inheriting mapper has no primary keys on the locally mapped table (but has pks on the superclass table). (also in 0.6.7) .. change:: :tags: orm :tickets: 2038 Fixed bug where a "middle" class in a polymorphic hierarchy would have no 'polymorphic_on' column if it didn't also specify a 'polymorphic_identity', leading to strange errors upon refresh, or the wrong class being loaded when querying from that target. Also emits the correct WHERE criterion when using single table inheritance. (also in 0.6.7) .. change:: :tags: orm :tickets: 1995 Fixed bug where a column with a SQL or server side default that was excluded from a mapping with include_properties or exclude_properties would result in UnmappedColumnError. (also in 0.6.7) .. change:: :tags: orm :tickets: 2046 A warning is emitted in the unusual case that an append or similar event on a collection occurs after the parent object has been dereferenced, which prevents the parent from being marked as "dirty" in the session. This will be an exception in 0.7. (also in 0.6.7) .. change:: :tags: declarative :tickets: 2050 Added an explicit check for the case that the name 'metadata' is used for a column attribute on a declarative class. (also in 0.6.7) .. change:: :tags: sql :tickets: 1844 Added the over() function/method to FunctionElement classes; produces the _Over() construct which in turn generates "window functions", i.e. "<function> OVER (PARTITION BY <criteria>, ORDER BY <criteria>)". .. change:: :tags: sql :tickets: 805 LIMIT/OFFSET clauses now use bind parameters .. change:: :tags: sql :tickets: 1069 select.distinct() now accepts column expressions as \*args, interpreted by the Postgresql dialect as DISTINCT ON (<expr>). Note this was already available via passing a list to the `distinct` keyword argument to select(). .. change:: :tags: sql :tickets: select.prefix_with() accepts multiple expressions (i.e. \*expr); the 'prefix' keyword argument to select() accepts a list or tuple. .. change:: :tags: sql :tickets: Passing a string to the `distinct` keyword argument of `select()` for the purpose of emitting special MySQL keywords (DISTINCTROW etc.) is deprecated - use `prefix_with()` for this. .. change:: :tags: sql :tickets: 2006, 2005 TypeDecorator works with primary key columns
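A sketch of the over() construct described above (the ``employee`` table is an assumption for the example)::

    from sqlalchemy import (Column, Integer, MetaData, String, Table,
                            func, select)

    metadata = MetaData()
    employee = Table('employee', metadata,
                     Column('id', Integer, primary_key=True),
                     Column('dept', String(30)),
                     Column('salary', Integer))

    # renders: row_number() OVER (PARTITION BY employee.dept
    #                             ORDER BY employee.salary)
    stmt = select([
        employee.c.id,
        func.row_number().over(partition_by=employee.c.dept,
                               order_by=employee.c.salary),
    ])

..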
change:: :tags: sql :tickets: 1897 DDL() constructs now escape percent signs .. change:: :tags: sql :tickets: 1917, 1893 Table.c / MetaData.tables refined a bit, don't allow direct mutation .. change:: :tags: sql :tickets: 1950 Callables passed to `bindparam()` don't get evaluated .. change:: :tags: sql :tickets: 1870 types.type_map is now private, types._type_map .. change:: :tags: sql :tickets: 1982 Non-public Pool methods underscored .. change:: :tags: sql :tickets: 723 Added NULLS FIRST and NULLS LAST support. It's implemented as an extension to the asc() and desc() operators, called nullsfirst() and nullslast(). .. change:: :tags: sql :tickets: The Index() construct can be created inline with a Table definition, using strings as column names, as an alternative to the creation of the index outside of the Table. .. change:: :tags: sql :tickets: 2001 execution_options() on Connection accepts "isolation_level" argument, sets transaction isolation level for that connection only until returned to the connection pool, for those backends which support it (SQLite, Postgresql) .. change:: :tags: sql :tickets: 2005 A TypeDecorator of Integer can be used with a primary key column, and the "autoincrement" feature of various dialects as well as the "sqlite_autoincrement" flag will honor the underlying database type as being Integer-based. .. change:: :tags: sql :tickets: 2020, 2021 Established consistency when server_default is present on an Integer PK column. SQLA doesn't pre-fetch these, nor do they come back in cursor.lastrowid (DBAPI). Ensured all backends consistently return None in result.inserted_primary_key for these. Regarding reflection for this case, reflection of an int PK col with a server_default sets the "autoincrement" flag to False, except in the case of a PG SERIAL col where we detected a sequence default. .. change:: :tags: sql :tickets: 2006 Result-row processors are applied to pre-executed SQL defaults, as well as cursor.lastrowid, when determining the contents of result.inserted_primary_key. .. change:: :tags: sql :tickets: Bind parameters present in the "columns clause" of a select are now auto-labeled like other "anonymous" clauses, which among other things allows their "type" to be meaningful when the row is fetched, as in result row processors. .. change:: :tags: sql :tickets: TypeDecorator is present in the "sqlalchemy" import space. .. change:: :tags: sql :tickets: 2015 Non-DBAPI errors which occur in the scope of an `execute()` call are now wrapped in sqlalchemy.exc.StatementError, and the text of the SQL statement and repr() of params is included. This makes it easier to identify statement executions which fail before the DBAPI becomes involved. .. change:: :tags: sql :tickets: 2048 The concept of associating a ".bind" directly with a ClauseElement has been explicitly moved to Executable, i.e. the mixin that describes ClauseElements which represent engine-executable constructs. This change is an improvement to internal organization and is unlikely to affect any real-world usage. .. change:: :tags: sql :tickets: 2028 Column.copy(), as used in table.tometadata(), copies the 'doc' attribute. (also in 0.6.7) .. change:: :tags: sql :tickets: 2023 Added some defs to the resultproxy.c extension so that the extension compiles and runs on Python 2.4. (also in 0.6.7) .. 
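A sketch of the nullsfirst()/nullslast() operators added above (the table is invented for the example)::

    from sqlalchemy import (Column, DateTime, Integer, MetaData, Table,
                            desc, nullslast, select)

    metadata = MetaData()
    t = Table('t', metadata,
              Column('id', Integer, primary_key=True),
              Column('updated', DateTime))

    # renders: ORDER BY t.updated DESC NULLS LAST
    stmt = select([t]).order_by(nullslast(desc(t.c.updated)))

..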
change:: :tags: sql :tickets: 2042 The compiler extension now supports overriding the default compilation of expression._BindParamClause, including that the auto-generated binds within the VALUES/SET clause of an insert()/update() statement will also use the new compilation rules. (also in 0.6.7) .. change:: :tags: sql :tickets: 1921 SQLite dialect now uses `NullPool` for file-based databases .. change:: :tags: sql :tickets: 2036 The path given as the location of a sqlite database is now normalized via os.path.abspath(), so that directory changes within the process don't affect the ultimate location of a relative file path. .. change:: :tags: postgresql :tickets: 1083 When explicit sequence execution derives the name of the auto-generated sequence of a SERIAL column, which currently only occurs if implicit_returning=False, it now accommodates the case where the table + column name is greater than 63 characters, using the same logic Postgresql uses. (also in 0.6.7) .. change:: :tags: postgresql :tickets: 2044 Added an additional libpq message to the list of "disconnect" exceptions, "could not receive data from server". (also in 0.6.7) .. change:: :tags: mssql :tickets: 1833 The String/Unicode types, and their counterparts VARCHAR/NVARCHAR, emit "max" as the length when no length is specified, so that the default length, normally '1' as per SQL server documentation, is instead 'unbounded'. This also occurs for the VARBINARY type. This behavior makes these types more closely compatible with Postgresql's VARCHAR type, which is similarly unbounded when no length is specified. .. change:: :tags: mysql :tickets: 1991 New DBAPI support for pymysql, a pure Python port of MySQL-python. .. change:: :tags: mysql :tickets: 2047 oursql dialect accepts the same "ssl" arguments in create_engine() as that of MySQLdb. (also in 0.6.7) .. change:: :tags: firebird :tickets: 1885 Some adjustments so that Interbase is supported as well. FB/Interbase version idents are parsed into a structure such as (8, 1, 1, 'interbase') or (2, 1, 588, 'firebird') so they can be distinguished. SQLAlchemy-1.0.11/doc/build/changelog/changelog_10.rst0000664000175000017500000033764112636375610023425 0ustar classicclassic00000000000000 ============== 1.0 Changelog ============== .. changelog_imports:: .. include:: changelog_09.rst :start-line: 5 .. include:: changelog_08.rst :start-line: 5 .. include:: changelog_07.rst :start-line: 5 .. changelog:: :version: 1.0.11 :released: December 22, 2015 .. change:: :tags: bug, mysql :versions: 1.1.0b1 :tickets: 3613 An adjustment to the regular expression used to parse MySQL views, such that we no longer assume the "ALGORITHM" keyword is present in the reflected view source, as some users have reported this not being present in some Amazon RDS environments. .. change:: :tags: bug, mysql :versions: 1.1.0b1 :pullreq: github:222 Added new reserved words for MySQL 5.7 to the MySQL dialect, including 'generated', 'optimizer_costs', 'stored', 'virtual'. Pull request courtesy Hanno Schlichting. .. change:: :tags: bug, ext :tickets: 3605 :versions: 1.1.0b1 Further fixes to :ticket:`3605`, the pop method on :class:`.MutableDict`, where the "default" argument was not included. .. change:: :tags: bug, ext :tickets: 3612 :versions: 1.1.0b1 Fixed bug in baked loader system where the systemwide monkeypatch for setting up baked lazy loaders would interfere with other loader strategies that rely on lazy loading as a fallback, e.g.
joined and subquery eager loaders, leading to ``IndexError`` exceptions at mapper configuration time. .. change:: :tags: bug, orm :tickets: 3611 :versions: 1.1.0b1 Fixed regression caused in 1.0.10 by the fix for :ticket:`3593` where the check added for a polymorphic joinedload from a poly_subclass->class->poly_baseclass connection would fail for the scenario of class->poly_subclass->class. .. change:: :tags: bug, orm :tickets: 3610 :versions: 1.1.0b1 Fixed bug where :meth:`.Session.bulk_update_mappings` and related would not bump a version id counter when in use. The experience here is still a little rough as the original version id is required in the given dictionaries and there's no clean error reporting on that yet. .. change:: :tags: bug, sql :tickets: 3609 :versions: 1.1.0b1 Fixed bug in :meth:`.Update.return_defaults` which would cause all insert-default holding columns not otherwise included in the SET clause (such as primary key cols) to get rendered into the RETURNING even though this is an UPDATE. .. change:: :tags: bug, orm :tickets: 3609 :versions: 1.1.0b1 Major fixes to the :paramref:`.Mapper.eager_defaults` flag; this flag would not be honored correctly in the case that multiple UPDATE statements were to be emitted, either as part of a flush or a bulk update operation. Additionally, RETURNING would be emitted unnecessarily within update statements. .. change:: :tags: bug, orm :tickets: 3606 :versions: 1.1.0b1 Fixed bug where use of the :meth:`.Query.select_from` method would cause a subsequent call to the :meth:`.Query.with_parent` method to fail. .. changelog:: :version: 1.0.10 :released: December 11, 2015 .. change:: :tags: bug, ext :tickets: 3605 :versions: 1.1.0b1 Added support for the ``dict.pop()`` and ``dict.popitem()`` methods to the :class:`.mutable.MutableDict` class. .. change:: :tags: change, tests :versions: 1.1.0b1 The ORM and Core tutorials, which have always been in doctest format, are now exercised within the normal unit test suite in both Python 2 and Python 3. .. change:: :tags: bug, sql :tickets: 3603 :versions: 1.1.0b1 Fixed issue within the :meth:`.Insert.from_select` construct whereby the :class:`.Select` construct would have its ``._raw_columns`` collection mutated in-place when compiling the :class:`.Insert` construct, when the target :class:`.Table` has Python-side defaults. The :class:`.Select` construct would compile standalone with the erroneous column present subsequent to compilation of the :class:`.Insert`, and the :class:`.Insert` statement itself would fail on a second compile attempt due to duplicate bound parameters. .. change:: :tags: bug, mysql :tickets: 3602 :versions: 1.1.0b1 Fixed bug in MySQL reflection where the "fractional seconds portion" of the :class:`.mysql.DATETIME`, :class:`.mysql.TIMESTAMP` and :class:`.mysql.TIME` types would be incorrectly placed into the ``timezone`` attribute, which is unused by MySQL, instead of the ``fsp`` attribute. .. change:: :tags: bug, orm :tickets: 3599 :versions: 1.1.0b1 Fixed issue where post_update on a many-to-one relationship would fail to emit an UPDATE in the case where the attribute was set to None and not previously loaded. .. change:: :tags: bug, sql, postgresql :tickets: 3598 :versions: 1.1.0b1 Fixed bug where CREATE TABLE with a no-column table, but a constraint such as a CHECK constraint, would render an erroneous comma in the definition; this scenario can occur such as with a Postgresql INHERITS table that has no columns of its own.
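To illustrate the ``dict.pop()`` support on :class:`.mutable.MutableDict` (a sketch; the ``Document`` mapping is invented for the example)::

    from sqlalchemy import Column, Integer, PickleType
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.ext.mutable import MutableDict

    Base = declarative_base()

    class Document(Base):
        __tablename__ = 'document'
        id = Column(Integer, primary_key=True)
        data = Column(MutableDict.as_mutable(PickleType), default=dict)

    # both forms now participate in change tracking, including the
    # two-argument form with a default:
    #   doc.data.pop('key')
    #   doc.data.pop('missing-key', None)

..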
change:: :tags: bug, mssql :tickets: 3585 :versions: 1.1.0b1 Added the error "20006: Write to the server failed" to the list of disconnect errors for the pymssql driver, as this has been observed to render a connection unusable. .. change:: :tags: bug, postgresql :pullreq: github:216 :tickets: 3573 :versions: 1.1.0b1 Fixed issue where the "FOR UPDATE OF" Postgresql-specific SELECT modifier would fail if the referred table had a schema qualifier; PG needs the schema name to be omitted. Pull request courtesy Diana Clarke. .. change:: :tags: bug, postgresql :pullreq: github:215 :versions: 1.1.0b1 Fixed bug where some varieties of SQL expression passed to the "where" clause of :class:`.postgresql.ExcludeConstraint` would fail to be accepted correctly. Pull request courtesy aisch. .. change:: :tags: bug, orm, declarative :pullreq: github:212 :versions: 1.1.0b1 Fixed bug where in Py2K a unicode literal would not be accepted as the string name of a class or other argument within declarative using :func:`.backref` on :func:`.relationship`. Pull request courtesy Nils Philippsen. .. change:: :tags: bug, mssql :versions: 1.1.0b1 :pullreq: github:206 A descriptive ValueError is now raised in the event that SQL server returns an invalid date or time format from a DATE or TIME column, rather than failing with a NoneType error. Pull request courtesy Ed Avis. .. change:: :tags: bug, py3k :versions: 1.1.0b1 :pullreq: github:210, github:218, github:211 Updates to internal getargspec() calls, some py36-related fixture updates, and alterations to two iterators to "return" instead of raising StopIteration, to allow tests to pass without errors or warnings on Py3.5 and Py3.6; pull requests courtesy Jacob MacDonald, Luri de Silvio, and Phil Jones. .. change:: :tags: bug, ext :versions: 1.1.0b1 :tickets: 3597 Fixed an issue in baked queries where the .get() method, used either directly or within lazy loads, didn't consider the mapper's "get clause" as part of the cache key, causing bound parameter mismatches if the clause got re-generated. This clause is cached by mappers on the fly but in highly concurrent scenarios may be generated more than once when first accessed. .. change:: :tags: feature, sql :versions: 1.1.0b1 :pullreq: github:200 Added support for parameter-ordered SET clauses in an UPDATE statement. This feature is available by passing the :paramref:`~.sqlalchemy.sql.expression.update.preserve_parameter_order` flag either to the core :class:`.Update` construct or alternatively adding it to the :paramref:`.Query.update.update_args` dictionary at the ORM-level, also passing the parameters themselves as a list of 2-tuples. Thanks to Gorka Eguileor for implementation and tests. .. seealso:: :ref:`updates_order_parameters` .. change:: :tags: bug, orm :versions: 1.1.0b1 :tickets: 3593 Fixed bug which is actually a regression that occurred between versions 0.8.0 and 0.8.1, due to :ticket:`2714`. The case where joined eager loading needs to join out over a subclass-bound relationship when "with_polymorphic" was also used would fail to join from the correct entity. .. change:: :tags: bug, orm :versions: 1.1.0b1 :tickets: 3592 Fixed joinedload bug which would occur when a. the query includes limit/offset criteria that forces a subquery b. the relationship uses "secondary" c. the primaryjoin of the relationship refers to a column that is either not part of the primary key, or is a PK col in a joined-inheritance subclass table that is under a different attribute name than the parent table's primary key column d.
the query defers the columns that are present in the primaryjoin, typically via not being included in load_only(); the necessary column(s) would not be present in the subquery and produce invalid SQL. .. change:: :tags: bug, orm :versions: 1.1.0b1 :tickets: 2696 A rare case which occurs when a :meth:`.Session.rollback` fails in the scope of a :meth:`.Session.flush` operation that's raising an exception, as has been observed in some MySQL SAVEPOINT cases, prevents the original database exception from being observed when it was emitted during flush, but only on Py2K because Py2K does not support exception chaining; on Py3K the originating exception is chained. As a workaround, a warning is emitted in this specific case showing at least the string message of the original database error before we proceed to raise the rollback-originating exception. .. change:: :tags: bug, postgresql :versions: 1.1.0b1 :tickets: 3571 Fixed the ``.python_type`` attribute of :class:`.postgresql.INTERVAL` to return ``datetime.timedelta`` in the same way as that of :obj:`.types.Interval.python_type`, rather than raising ``NotImplementedError``. .. change:: :tags: bug, mssql :pullreq: github:213 :versions: 1.1.0b1 Fixed issue where DDL generated for the MSSQL types DATETIME2, TIME and DATETIMEOFFSET with a precision of "zero" would not generate the precision field. Pull request courtesy Jacobo de Vera. .. changelog:: :version: 1.0.9 :released: October 20, 2015 .. change:: :tags: bug, orm, postgresql :versions: 1.1.0b1 :tickets: 3556 Fixed regression in 1.0 where new feature of using "executemany" for UPDATE statements in the ORM (e.g. :ref:`feature_updatemany`) would break on Postgresql and other RETURNING backends when using server-side version generation schemes, as the server side value is retrieved via RETURNING which is not supported with executemany. .. change:: :tags: feature, ext :versions: 1.1.0b1 :tickets: 3551 Added the :paramref:`.AssociationProxy.info` parameter to the :class:`.AssociationProxy` constructor, to suit the :attr:`.AssociationProxy.info` accessor that was added in :ticket:`2971`. This is possible because :class:`.AssociationProxy` is constructed explicitly, unlike a hybrid which is constructed implicitly via the decorator syntax. .. change:: :tags: bug, oracle :versions: 1.1.0b1 :tickets: 3548 Fixed bug in Oracle dialect where reflection of tables and other symbols with names quoted to force all-lower-case would not be identified properly in reflection queries. The :class:`.quoted_name` construct is now applied to incoming symbol names that detect as forced into all-lower-case within the "name normalize" process. .. change:: :tags: feature, orm :versions: 1.1.0b1 :pullreq: github:201 Added new method :meth:`.Query.one_or_none`; same as :meth:`.Query.one` but returns None if no row found. Pull request courtesy esiegerman. .. change:: :tags: bug, orm :versions: 1.1.0b1 :tickets: 3539 Fixed rare TypeError which could occur when stringifying certain kinds of internal column loader options within internal logging. .. change:: :tags: bug, orm :versions: 1.1.0b1 :tickets: 3525 Fixed bug in :meth:`.Session.bulk_save_objects` where a mapped column that had some kind of "fetch on update" value and was not locally present in the given object would cause an AttributeError within the operation. .. 
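A sketch of the parameter-ordered SET clause feature described above (the table and values are invented for illustration)::

    from sqlalchemy import Column, Integer, MetaData, Table

    metadata = MetaData()
    t = Table('t', metadata,
              Column('x', Integer),
              Column('y', Integer))

    # with preserve_parameter_order, values() accepts a list of
    # 2-tuples and the SET clause renders in exactly that order,
    # roughly:  UPDATE t SET y=:y, x=t.y + :y_1
    stmt = t.update(preserve_parameter_order=True).values(
        [('y', 20), ('x', t.c.y + 10)])

..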
change:: :tags: bug, sql :versions: 1.1.0b1 :tickets: 3520 Fixed regression in 1.0-released default-processor for multi-VALUES insert statement, :ticket:`3288`, where the column type for the default-holding column would not be propagated to the compiled statement in the case where the default was being used, leading to bind-level type handlers not being invoked. .. change:: :tags: bug, examples :versions: 1.1.0b1 Fixed two issues in the "history_meta" example where history tracking could encounter empty history, and where a column keyed to an alternate attribute name would fail to track properly. Fixes courtesy Alex Fraser. .. change:: :tags: bug, orm :tickets: 3510 :versions: 1.1.0b1 Fixed 1.0 regression where the "noload" loader strategy would fail to function for a many-to-one relationship. The loader used an API to place "None" into the dictionary which no longer actually writes a value; this is a side effect of :ticket:`3061`. .. change:: :tags: bug, sybase :tickets: 3508, 3509 :versions: 1.1.0b1 Fixed two issues regarding Sybase reflection, allowing tables without primary keys to be reflected as well as ensuring that a SQL statement involved in foreign key detection is pre-fetched up front to avoid driver issues upon nested queries. Fixes here courtesy Eugene Zapolsky; note that we cannot currently test Sybase to locally verify these changes. .. change:: :tags: bug, postgresql :pullreq: github:190 :versions: 1.1.0b1 An adjustment to the new Postgresql feature of reflecting storage options and USING of :ticket:`3455` released in 1.0.6, to disable the feature for Postgresql versions < 8.2 where the ``reloptions`` column is not provided; this allows Amazon Redshift to again work as it is based on an 8.0.x version of Postgresql. Fix courtesy Pete Hollobon. .. changelog:: :version: 1.0.8 :released: July 22, 2015 .. change:: :tags: bug, misc :tickets: 3494 Fixed an issue where a particular base class within utils didn't implement ``__slots__``, which meant all subclasses of that class didn't either, negating the rationale for ``__slots__`` to be in use. This didn't cause any issue except on IronPython, which apparently does not implement ``__slots__`` behavior compatibly with cPython. .. changelog:: :version: 1.0.7 :released: July 20, 2015 .. change:: :tags: feature, sql :tickets: 3459 :pullreq: bitbucket:56 Added a :meth:`.ColumnElement.cast` method which serves the same purpose as the standalone :func:`.cast` function. Pull request courtesy Sebastian Bank. .. change:: :tags: bug, engine :tickets: 3481 Fixed regression where new methods on :class:`.ResultProxy` used by the ORM :class:`.Query` object (part of the performance enhancements of :ticket:`3175`) would not raise the "this result does not return rows" exception in the case where the driver (typically MySQL) fails to generate cursor.description correctly; an AttributeError against NoneType would be raised instead. .. change:: :tags: bug, engine :tickets: 3483 Fixed regression where :meth:`.ResultProxy.keys` would return un-adjusted internal symbol names for "anonymous" labels, which are the "foo_1" types of labels we see generated for SQL functions without labels and similar. This was a side effect of the performance enhancements implemented as part of :ticket:`918`. .. change:: :tags: bug, sql :tickets: 3490 Fixed bug where coercion of literal ``True`` or ``False`` constant in conjunction with :func:`.and_` or :func:`.or_` would fail with an AttributeError.
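A sketch of the new :meth:`.ColumnElement.cast` method alongside the standalone function (the table is assumed for the example)::

    from sqlalchemy import (Column, Integer, MetaData, Numeric, Table,
                            cast, select)

    metadata = MetaData()
    t = Table('t', metadata, Column('amount', Numeric(10, 2)))

    # method form, new in 1.0.7...
    stmt = select([t.c.amount.cast(Integer)])

    # ...equivalent to the long-standing standalone function
    stmt = select([cast(t.c.amount, Integer)])

..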
change:: :tags: bug, sql :tickets: 3485 Fixed potential issue where a custom subclass of :class:`.FunctionElement` or other column element that incorrectly states 'None' or any other invalid object as the ``.type`` attribute will report this exception instead of a recursion overflow. .. change:: :tags: bug, sql :pullreq: github:188 Fixed bug where the modulus SQL operator wouldn't work in reverse due to a missing ``__rmod__`` method. Pull request courtesy dan-gittik. .. change:: :tags: feature, schema :pullreq: github:186 Added support for the MINVALUE, MAXVALUE, NO MINVALUE, NO MAXVALUE, and CYCLE arguments for CREATE SEQUENCE as supported by Postgresql and Oracle. Pull request courtesy jakeogh. .. change:: :tags: bug, orm, declarative :tickets: 3480 Fixed bug in :class:`.AbstractConcreteBase` extension where a column set up on the ABC base which had a different attribute name vs. column name would not be correctly mapped on the final base class. The failure on 0.9 would be silent whereas on 1.0 it raised an ArgumentError, so it may not have been noticed prior to 1.0. .. change:: :tags: bug, orm :tickets: 3469 Fixed 1.0 regression where value objects that override ``__eq__()`` to return a non-boolean-capable object, such as some geoalchemy types as well as numpy types, were being tested for ``bool()`` during a unit of work update operation, where in 0.9 the return value of ``__eq__()`` was tested against "is True" to guard against this. .. change:: :tags: bug, orm :tickets: 3468 Fixed 1.0 regression where a "deferred" attribute would not populate correctly if it were loaded within the "optimized inheritance load", which is a special SELECT emitted in the case of joined table inheritance used to populate expired or unloaded attributes against a joined table without loading the base table. This is related to the fact that SQLA 1.0 no longer guesses about loading deferred columns and must be directed explicitly. .. change:: :tags: bug, orm :tickets: 3466 Fixed 1.0 regression where the "parent entity" of a synonym-mapped attribute on top of an :func:`.aliased` object would resolve to the original mapper, not the :func:`.aliased` version of it, thereby causing problems for a :class:`.Query` that relies on this attribute (e.g. it's the only representative attribute given in the constructor) to figure out the correct FROM clause for the query. .. changelog:: :version: 1.0.6 :released: June 25, 2015 .. change:: :tags: bug, orm :tickets: 3465 Fixed a major regression in the 1.0 series where the version_id_counter feature would cause an object's version counter to be incremented when there was no net change to the object's row, but instead an object related to it via relationship (e.g. typically many-to-one) was associated or de-associated with it, resulting in an UPDATE statement that updates the object's version counter and nothing else. In the use case where the relatively recent "server side" and/or "programmatic/conditional" version counter feature were used (e.g. setting version_id_generator to False), the bug could cause an UPDATE without a valid SET clause to be emitted. .. change:: :tags: bug, mssql :tickets: 3464 Fixed issue when using :class:`.VARBINARY` type in conjunction with an INSERT of NULL + pyodbc; pyodbc requires a special object be passed in order to persist NULL. As the :class:`.VARBINARY` type is now usually the default for :class:`.LargeBinary` due to :ticket:`3039`, this issue is partially a regression in 1.0. The pymssql driver appears to be unaffected.
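A sketch of the new CREATE SEQUENCE arguments noted above (the sequence name is invented; rendering applies to backends such as Postgresql and Oracle that support these options)::

    from sqlalchemy import MetaData, Sequence

    metadata = MetaData()

    # renders roughly:
    #   CREATE SEQUENCE counter_seq MINVALUE 1 MAXVALUE 1000 CYCLE
    seq = Sequence('counter_seq', minvalue=1, maxvalue=1000, cycle=True,
                   metadata=metadata)

..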
change:: :tags: bug, postgresql, pypy :tickets: 3439 Re-fixed this issue, first released in 1.0.5, to fix psycopg2cffi JSONB support once again, as they suddenly switched on unconditional decoding of JSONB types in version 2.7.1. Version detection now specifies 2.7.1 as where we should expect the DBAPI to do json encoding for us. .. change:: :tags: feature, postgresql :tickets: 3455 :pullreq: github:179 Added support for storage parameters under CREATE INDEX, using a new keyword argument ``postgresql_with``. Also added support for reflection to support both the ``postgresql_with`` flag as well as the ``postgresql_using`` flag, which will now be set on :class:`.Index` objects that are reflected, as well as present in a new "dialect_options" dictionary in the result of :meth:`.Inspector.get_indexes`. Pull request courtesy Pete Hollobon. .. seealso:: :ref:`postgresql_index_storage` .. change:: :tags: bug, orm :tickets: 3462 Fixed 1.0 regression where the enhanced behavior of single-inheritance joins of :ticket:`3222` takes place inappropriately for a JOIN along explicit join criteria with a single-inheritance subclass that does not make use of any discriminator, resulting in an additional "AND NULL" clause. .. change:: :tags: bug, postgresql :tickets: 3454 Repaired the :class:`.ExcludeConstraint` construct to support common features that other objects like :class:`.Index` now do, namely that the column expression may be specified as an arbitrary SQL expression such as :obj:`.cast` or :obj:`.text`. .. change:: :tags: feature, postgresql :pullreq: github:182 Added new execution option ``max_row_buffer`` which is interpreted by the psycopg2 dialect when the ``stream_results`` option is used; it sets a limit on the size of the row buffer that may be allocated. This value is also provided based on the integer value sent to :meth:`.Query.yield_per`. Pull request courtesy mcclurem. .. change:: :tags: bug, orm :tickets: 3451 :pullreq: github:181 Fixed bug in new :meth:`.Session.bulk_update_mappings` feature where the primary key columns used in the WHERE clause to locate the row would also be included in the SET clause, setting their value to themselves unnecessarily. Pull request courtesy Patrick Hayes. .. change:: :tags: bug, orm :tickets: 3448 Fixed an unexpected-use regression whereby custom :class:`.Comparator` objects that made use of the ``__clause_element__()`` method and returned an object that was an ORM-mapped :class:`.InstrumentedAttribute` and not explicitly a :class:`.ColumnElement` would fail to be correctly handled when passed as an expression to :meth:`.Session.query`. The logic in 0.9 happened to succeed on this, so this use case is now supported. .. change:: :tags: bug, sql :tickets: 3445 Fixed a bug where clause adaption as applied to a :class:`.Label` object would fail to accommodate the labeled SQL expression in all cases, such that any SQL operation that made use of :meth:`.Label.self_group` would use the original unadapted expression. One effect of this would be that an ORM :func:`.aliased` construct would not fully accommodate attributes mapped by :obj:`.column_property`, such that the un-aliased table could leak out when the property was used in some kinds of SQL comparisons. .. change:: :tags: bug, documentation :tickets: 2077 Fixed an internal "memoization" routine for method types such that a Python descriptor is no longer used; this repairs inspectability of these methods including support for Sphinx documentation. .. changelog:: :version: 1.0.5 :released: June 7, 2015 ..
change:: :tags: feature, engine Added new engine event :meth:`.ConnectionEvents.engine_disposed`. Called after the :meth:`.Engine.dispose` method is called. .. change:: :tags: bug, postgresql, pypy :tickets: 3439 Repaired some typing and test issues related to the pypy psycopg2cffi dialect, in particular that the current 2.7.0 version does not have native support for the JSONB type. The version detection for psycopg2 features has been tuned into a specific sub-version for psycopg2cffi. Additionally, test coverage has been enabled for the full series of psycopg2 features under psycopg2cffi. .. change:: :tags: feature, ext :pullreq: bitbucket:54 Added support for ``*args`` to be passed to the baked query initial callable, in the same way that ``*args`` are supported for the :meth:`.BakedQuery.add_criteria` and :meth:`.BakedQuery.with_criteria` methods. Initial PR courtesy Naoki INADA. .. change:: :tags: bug, engine :tickets: 3435 Fixed bug where known boolean values used by :func:`.engine_from_config` were not being parsed correctly; these included ``pool_threadlocal`` and the psycopg2 argument ``use_native_unicode``. .. change:: :tags: bug, mssql :tickets: 3424, 3430 Added a new dialect flag to the MSSQL dialect ``legacy_schema_aliasing`` which when set to False will disable a very old and obsolete behavior, that of the compiler's attempt to turn all schema-qualified table names into alias names, to work around old and no longer locatable issues where SQL server could not parse a multi-part identifier name in all circumstances. The behavior prevented more sophisticated statements from working correctly, including those which use hints, as well as CRUD statements that embed correlated SELECT statements. Rather than continue to repair the feature to work with more complex statements, it's better to just disable it as it should no longer be needed for any modern SQL server version. The flag defaults to True for the 1.0.x series, leaving current behavior unchanged for this version series. In the 1.1 series, it will default to False. For the 1.0 series, when not set to either value explicitly, a warning is emitted when a schema-qualified table is first used in a statement, which suggests that the flag be set to False for all modern SQL Server versions. .. seealso:: :ref:`legacy_schema_rendering` .. change:: :tags: feature, engine :tickets: 3379 Adjustments to the engine plugin hook, such that the :meth:`.URL.get_dialect` method will continue to return the ultimate :class:`.Dialect` object when a dialect plugin is used, without the need for the caller to be aware of the :meth:`.Dialect.get_dialect_cls` method. .. change:: :tags: bug, ext :tickets: 3427 Fixed regression in the :mod:`sqlalchemy.ext.mutable` extension as a result of the bugfix for :ticket:`3167`, where attribute and validation events are no longer called within the flush process. The mutable extension was relying upon this behavior in the case where a column level Python-side default were responsible for generating the new value on INSERT or UPDATE, or when a value were fetched from the RETURNING clause for "eager defaults" mode. The new value would not be subject to any event when populated and the mutable extension could not establish proper coercion or history listening. A new event :meth:`.InstanceEvents.refresh_flush` is added which the mutable extension now makes use of for this use case. .. 
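A sketch of disabling the legacy MSSQL schema aliasing behavior described above (the connection URL is a placeholder)::

    from sqlalchemy import create_engine

    # recommended for modern SQL Server versions; in the 1.0.x series
    # the flag defaults to True, becoming False in 1.1
    engine = create_engine(
        'mssql+pymssql://user:pass@host/dbname',
        legacy_schema_aliasing=False,
    )

..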
change:: :tags: feature, orm :tickets: 3427 Added new event :meth:`.InstanceEvents.refresh_flush`, invoked when an INSERT or UPDATE level default value, fetched via RETURNING or a Python-side default, is applied within the flush process. This is to provide a hook that is no longer present as a result of :ticket:`3167`, where attribute and validation events are no longer called within the flush process. .. change:: :tags: feature, ext :tickets: 3427 Added a new semi-public method to :class:`.MutableBase`, :meth:`.MutableBase._get_listen_keys`. Overriding this method is needed in the case where a :class:`.MutableBase` subclass needs events to propagate for attribute keys other than the key with which the mutable type is associated, when intercepting the :meth:`.InstanceEvents.refresh` or :meth:`.InstanceEvents.refresh_flush` events. The current example of this is composites using :class:`.MutableComposite`. .. change:: :tags: bug, engine :tickets: 3421 Added support for the case of the misbehaving DBAPI that has pep-249 exception names linked to exception classes of an entirely different name, preventing SQLAlchemy's own exception wrapping from wrapping the error appropriately. The SQLAlchemy dialect in use needs to implement a new accessor :attr:`.DefaultDialect.dbapi_exception_translation_map` to support this feature; this is implemented now for the py-postgresql dialect. .. change:: :tags: bug, orm :tickets: 3420 The "lightweight named tuple" used when a :class:`.Query` returns rows failed to implement ``__slots__`` correctly such that it still had a ``__dict__``. This is resolved, but in the extremely unlikely case someone was assigning values to the returned tuples, that will no longer work. .. change:: :tags: bug, engine :tickets: 3419 Fixed bug involving the case where pool checkout event handlers are used and connection attempts made in the handler itself fail; the owning connection record would not be freed until the stack trace of the connect error itself was freed. For the case where a test pool of only a single connection was used, this means the pool would be fully checked out until that stack trace was freed. This mostly impacts very specific debugging scenarios and is unlikely to have been noticeable in any production application. The fix applies an explicit checkin of the record before re-raising the caught exception. .. changelog:: :version: 1.0.4 :released: May 7, 2015 .. change:: :tags: bug, orm :tickets: 3416 Fixed unexpected-use regression where in the odd case that the primaryjoin of a relationship involved comparison to an unhashable type such as an HSTORE, lazy loads would fail due to a hash-oriented check on the statement parameters, modified in 1.0 as a result of :ticket:`3061` to use hashing and modified in :ticket:`3368` to occur in cases more common than "load on pending". The values are now checked for the ``__hash__`` attribute beforehand. .. change:: :tags: bug, orm :tickets: 3412, 3347 Liberalized an assertion that was added as part of :ticket:`3347` to protect against unknown conditions when splicing inner joins together within joined eager loads with ``innerjoin=True``; if some of the joins use a "secondary" table, the assertion needs to unwrap further joins in order to pass.
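A sketch of hooking the new :meth:`.InstanceEvents.refresh_flush` event (the ``MyThing`` mapped class is assumed)::

    from sqlalchemy import event

    @event.listens_for(MyThing, 'refresh_flush')
    def receive_refresh_flush(target, flush_context, attrs):
        # invoked within flush when INSERT/UPDATE-level defaults are
        # fetched, e.g. via RETURNING or Python-side default functions
        print("attributes refreshed during flush:", attrs)

..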
change:: :tags: bug, schema :tickets: 3411 Fixed bug in enhanced constraint-attachment logic introduced in :ticket:`3341` where in the unusual case of a constraint that refers to a mixture of :class:`.Column` objects and string column names at the same time, the auto-attach-on-column-attach logic will be skipped; for the constraint to be auto-attached in this case, all columns must be assembled on the target table up front. Added a new section to the migration document regarding the original feature as well as this change. .. seealso:: :ref:`change_3341` .. change:: :tags: bug, orm :tickets: 3409, 3320 Repaired / added to tests yet more expressions that were reported as failing with the new 'entity' key value added to :attr:`.Query.column_descriptions`; the logic to discover the "from" clause is again reworked to accommodate columns from aliased classes, as well as to report the correct value for the "aliased" flag in these cases. .. changelog:: :version: 1.0.3 :released: April 30, 2015 .. change:: :tags: bug, orm, pypy :tickets: 3405 Fixed regression from 0.9.10 prior to release due to :ticket:`3349` where the check for query state on :meth:`.Query.update` or :meth:`.Query.delete` compared the empty tuple to itself using ``is``, which fails on Pypy to produce ``True`` in this case; this would erroneously emit a warning in 0.9 and raise an exception in 1.0. .. change:: :tags: feature, engine :tickets: 3379 New features added to support engine/pool plugins with advanced functionality. Added a new "soft invalidate" feature to the connection pool at the level of the checked out connection wrapper as well as the :class:`._ConnectionRecord`. This works similarly to a modern pool invalidation in that connections aren't actively closed, but are recycled only on next checkout; this is essentially a per-connection version of that feature. A new event :class:`.PoolEvents.soft_invalidate` is added to complement it. Also added new flag :attr:`.ExceptionContext.invalidate_pool_on_disconnect`. Allows an error handler within :meth:`.ConnectionEvents.handle_error` to maintain a "disconnect" condition, but to handle calling invalidate on individual connections in a specific manner within the event. .. change:: :tags: feature, engine :tickets: 3355 Added new event :class:`.DialectEvents.do_connect`, which allows interception/replacement of the point at which the :meth:`.Dialect.connect` hook is called to create a DBAPI connection. Also added dialect plugin hooks :meth:`.Dialect.get_dialect_cls` and :meth:`.Dialect.engine_created` which allow external plugins to add events to existing dialects using entry points. .. change:: :tags: bug, orm :tickets: 3403, 3320 Fixed regression from 0.9.10 prior to release where the new addition of ``entity`` to the :attr:`.Query.column_descriptions` accessor would fail if the target entity was produced from a core selectable such as a :class:`.Table` or :class:`.CTE` object. .. change:: :tags: feature, sql Added a placeholder method :meth:`.TypeEngine.compare_against_backend` which is now consumed by Alembic migrations as of 0.7.6. User-defined types can implement this method to assist in the comparison of a type against one reflected from the database.
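A sketch of the new :meth:`.DialectEvents.do_connect` hook (the connection URL and the mutated parameter are placeholders for illustration)::

    from sqlalchemy import create_engine, event

    engine = create_engine('postgresql://scott:tiger@localhost/test')

    @event.listens_for(engine, 'do_connect')
    def receive_do_connect(dialect, conn_rec, cargs, cparams):
        # mutate the args the dialect will pass to the DBAPI connect();
        # returning a DBAPI connection here instead would replace the
        # connection process entirely
        cparams['connect_timeout'] = 10

..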
change:: :tags: bug, orm :tickets: 3402 Fixed regression within the flush process when an attribute was set to a SQL expression for an UPDATE and the SQL expression, when compared to the previous value of the attribute, would produce a SQL comparison other than ``==`` or ``!=``; the exception "Boolean value of this clause is not defined" would be raised. The fix ensures that the unit of work will not interpret the SQL expression in this way. .. change:: :tags: bug, ext :tickets: 3397 Fixed bug in association proxy where an any()/has() on a relationship->scalar non-object attribute comparison would fail, e.g. ``filter(Parent.some_collection_to_attribute.any(Child.attr == 'foo'))``. .. change:: :tags: bug, sql :tickets: 3396 Fixed bug where the truncation of long labels in SQL could produce a label that overlapped another label that is not truncated; this is because the length threshold for truncation was greater than the portion of the label that remains after truncation. These two values have now been made the same: label_length - 6. The effect here is that shorter column labels will be "truncated" where they would not have been truncated before. .. change:: :tags: bug, orm :tickets: 3392 Fixed unexpected use regression due to :ticket:`2992` where textual elements placed into the :meth:`.Query.order_by` clause in conjunction with joined eager loading would be added to the columns clause of the inner query in such a way that they were assumed to be table-bound column names, in the case where the joined eager load needs to wrap the query in a subquery to accommodate for a limit/offset. Originally, the behavior here was intentional, in that a query such as ``query(User).order_by('name').limit(1)`` would order by ``user.name`` even if the query was modified by joined eager loading to be within a subquery, as ``'name'`` would be interpreted as a symbol to be located within the FROM clauses, in this case ``User.name``, which would then be copied into the columns clause to ensure it was present for ORDER BY. However, the feature fails to anticipate the case where ``order_by("name")`` refers to a specific label name present in the local columns clause already and not a name bound to a selectable in the FROM clause. Beyond that, the feature also fails for deprecated cases such as ``order_by("name desc")``, which, while it emits a warning that :func:`.text` should be used here (note that the issue does not impact cases where :func:`.text` is used explicitly), still produces a different query than previously where the "name desc" expression is copied into the columns clause inappropriately. The resolution is such that the "joined eager loading" aspect of the feature will skip over these so-called "label reference" expressions when augmenting the inner columns clause, as though they were :func:`.text` constructs already. .. change:: :tags: bug, sql :tickets: 3391 Fixed regression due to :ticket:`3282` where the ``tables`` collection passed as a keyword argument to the :meth:`.DDLEvents.before_create`, :meth:`.DDLEvents.after_create`, :meth:`.DDLEvents.before_drop`, and :meth:`.DDLEvents.after_drop` events would no longer be a list of tables, but instead a list of tuples which contained a second entry with foreign keys to be added or dropped. As the ``tables`` collection, while documented as not necessarily stable, has come to be relied upon, this change is considered a regression.
.. change:: :tags: bug, sql :tickets: 3391 Fixed regression due to :ticket:`3282` where the ``tables`` collection passed as a keyword argument to the :meth:`.DDLEvents.before_create`, :meth:`.DDLEvents.after_create`, :meth:`.DDLEvents.before_drop`, and :meth:`.DDLEvents.after_drop` events would no longer be a list of tables, but instead a list of tuples which contained a second entry with foreign keys to be added or dropped. As the ``tables`` collection, while documented as not necessarily stable, has come to be relied upon, this change is considered a regression. Additionally, in some cases for "drop", this collection would be an iterator that would cause the operation to fail if prematurely iterated. The collection is now a list of table objects in all cases, and test coverage for the format of this collection is now added.
.. change:: :tags: bug, orm :tickets: 3388 Fixed a regression regarding the :meth:`.MapperEvents.instrument_class` event where its invocation was moved to be after the class manager's instrumentation of the class, which is the opposite of what the documentation for the event explicitly states. The rationale for the switch was due to Declarative taking the step of setting up the full "instrumentation manager" for a class before it was mapped, for the purpose of the new ``@declared_attr`` features described in :ref:`feature_3150`, but the change was also made against the classical use of :func:`.mapper` for consistency. However, SQLSoup relies upon the instrumentation event happening before any instrumentation under classical mapping. The behavior is reverted in the case of classical and declarative mapping, the latter implemented by using a simple memoization without using the class manager.
.. change:: :tags: bug, orm :tickets: 3387 Fixed issue in the new :meth:`.QueryEvents.before_compile` event where changes made to the :class:`.Query` object's collection of entities to load within the event would render in the SQL, but would not be reflected during the loading process.
.. changelog:: :version: 1.0.2 :released: April 24, 2015
.. change:: :tags: bug, sql :tickets: 3338, 3385 Fixed a regression that was incorrectly fixed in 1.0.0b4 (hence becoming two regressions); reports that SELECT statements would GROUP BY a label name and fail were misconstrued to mean that certain backends such as SQL Server should not be emitting ORDER BY or GROUP BY on a simple label name at all; in fact, we had forgotten that 0.9 was already emitting ORDER BY on a simple label name for all backends, as described in :ref:`migration_1068`, even though 1.0 includes a rewrite of this logic as part of :ticket:`2992`. As far as emitting GROUP BY against a simple label, even Postgresql has cases where it will raise an error even though the label to group on should be apparent, so it is clear that GROUP BY should never be rendered in this way automatically. In 1.0.2, SQL Server, Firebird and others will again emit ORDER BY on a simple label name when passed a :class:`.Label` construct that is also present in the columns clause. Additionally, no backend will emit GROUP BY against the simple label name alone when passed a :class:`.Label` construct.
.. change:: :tags: bug, orm, declarative :tickets: 3383 Fixed unexpected use regression regarding the declarative ``__declare_first__`` and ``__declare_last__`` accessors where these would no longer be called on the superclass of the declarative base.
.. changelog:: :version: 1.0.1 :released: April 23, 2015
.. change:: :tags: bug, firebird :tickets: 3380 :pullreq: github:168 Fixed a regression due to :ticket:`3034` where limit/offset clauses were not properly interpreted by the Firebird dialect. Pull request courtesy effem-git.
.. change:: :tags: bug, firebird :tickets: 3381 Fixed support for "literal_binds" mode when using limit/offset with Firebird, so that the values are again rendered inline when this is selected. Related to :ticket:`3034`.
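"Literal binds" mode is typically requested at compile time; a generic sketch (the table here is illustrative, and the same pattern applies to the Firebird dialect's limit/offset rendering)::

    from sqlalchemy import column, select, table

    t = table("t", column("x"))
    stmt = select([t.c.x]).limit(5).offset(10)

    # render bound values, including limit/offset, inline as literals
    print(stmt.compile(compile_kwargs={"literal_binds": True}))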
.. change:: :tags: bug, sqlite :tickets: 3378 Fixed a regression due to :ticket:`3282`, where, because we attempt to assume the availability of ALTER when creating/dropping schemas, in the case of SQLite we simply said not to worry about foreign keys at all when creating and dropping tables, since ALTER is not available. This meant that the sorting of tables was basically skipped in the case of SQLite, and for the vast majority of SQLite use cases, this is not an issue. However, users who were doing DROPs on SQLite with tables that contained data and with referential integrity turned on would then experience errors, as the dependency sorting *does* matter in the case of DROP with enforced constraints, when those tables have data (SQLite will still happily let you create foreign keys to nonexistent tables and drop tables referring to existing ones with constraints enabled, as long as there's no data being referenced). In order to maintain the new feature of :ticket:`3282` while still allowing a SQLite DROP operation to maintain ordering, we now do the sort with full FKs taken under consideration, and if we encounter an unresolvable cycle, only *then* do we forego attempting to sort the tables; we instead emit a warning and go with the unsorted list. If an environment needs both ordered DROPs *and* has foreign key cycles, then the warning notes they will need to restore the ``use_alter`` flag to their :class:`.ForeignKey` and :class:`.ForeignKeyConstraint` objects so that just those objects will be omitted from the dependency sort.
.. seealso:: :ref:`feature_3282` - contains an updated note about SQLite.
.. change:: :tags: bug, sql :tickets: 3372 Fixed issue where a straight SELECT EXISTS query would fail to assign the proper result type of Boolean to the result mapping, and instead would leak column types from within the query into the result map. This issue exists in 0.9 and earlier as well, however it has less of an impact in those versions. In 1.0, due to :ticket:`918`, this becomes a regression in that we now rely upon the result mapping to be very accurate, else we can assign result-type processors to the wrong column. In all versions, this issue also has the effect that a simple EXISTS will not apply the Boolean type handler, leading to plain 1/0 values for backends without native boolean instead of True/False. The fix includes that an EXISTS columns argument will be anon-labeled like other column expressions; a similar fix is implemented for pure-boolean expressions like ``not_(True())``.
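With the fix, a plain EXISTS query such as the following (the ``users`` table is illustrative) returns ``True``/``False`` even on backends without a native boolean type::

    from sqlalchemy import exists, select

    stmt = select([exists().where(users.c.id == 5)])
    is_present = conn.execute(stmt).scalar()  # True or False, not 1 or 0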
.. change:: :tags: bug, orm :tickets: 3374 Fixed issue where a query of the form ``query(B).filter(B.a != A(id=7))`` would render the ``NEVER_SET`` symbol when given a transient object. For a persistent object, it would always use the persisted database value and not the currently set value. Assuming autoflush is turned on, this usually would not be apparent for persistent values, as any pending changes would be flushed first in any case. However, this is inconsistent vs. the logic used for the non-negated comparison, ``query(B).filter(B.a == A(id=7))``, which does use the current value and additionally allows comparisons to transient objects. The comparison now uses the current value and not the database-persisted value. Unlike the other ``NEVER_SET`` issues that are repaired as regressions caused by :ticket:`3061` in this release, this particular issue is present at least as far back as 0.8 and possibly earlier; however, it was discovered as a result of repairing the related ``NEVER_SET`` issues.
.. seealso:: :ref:`bug_3374`
.. change:: :tags: bug, orm :tickets: 3371 Fixed unexpected use regression caused by :ticket:`3061` where the NEVER_SET symbol could leak into relationship-oriented queries, including ``filter()`` and ``with_parent()`` queries. The ``None`` symbol is returned in all cases; however, many of these queries have never been correctly supported in any case, and produce comparisons to NULL without using the IS operator. For this reason, a warning is also added to that subset of relationship queries that don't currently provide for ``IS NULL``.
.. seealso:: :ref:`bug_3371`
.. change:: :tags: bug, orm :tickets: 3368 Fixed a regression caused by :ticket:`3061` where the NEVER_SET symbol could leak into a lazyload query, subsequent to the flush of a pending object. This would occur typically for a many-to-one relationship that does not use a simple "get" strategy. The good news is that the fix improves efficiency vs. 0.9, because we can now skip the SELECT statement entirely when we detect NEVER_SET symbols present in the parameters; prior to :ticket:`3061`, we couldn't discern if the None here was set or not.
.. changelog:: :version: 1.0.0 :released: April 16, 2015
.. change:: :tags: bug, orm :tickets: 3367 Identified an inconsistency when handling :meth:`.Query.join` to the same target more than once; it implicitly dedupes only in the case of a relationship join, and due to :ticket:`3233`, in 1.0 a join to the same table twice behaves differently than in 0.9, in that it no longer erroneously aliases. To help document this change, the verbiage regarding :ticket:`3233` in the migration notes has been generalized, and a warning has been added when :meth:`.Query.join` is called against the same target relationship more than once.
.. change:: :tags: bug, orm :tickets: 3364 Made a small improvement to the heuristics of relationship when determining remote side with semi-self-referential (e.g. two joined-inheritance subclasses referring to each other), non-simple join conditions, such that the parententity is taken into account and can reduce the need for using the ``remote()`` annotation; this can restore some cases that might have worked without the annotation prior to 0.9.4 via :ticket:`2948`.
.. change:: :tags: bug, mssql :tickets: 3360 Fixed a regression where the "last inserted id" mechanics would fail to store the correct value for MSSQL on an INSERT where the primary key value was present in the insert params before execution, as well as in the case where an INSERT from SELECT would state the target columns as column objects, instead of string keys.
.. change:: :tags: bug, mssql :pullreq: github:166 Using the ``Binary`` constructor now present in pymssql rather than patching one in. Pull request courtesy Ramiro Morales.
.. change:: :tags: bug, tests :tickets: 3356 Fixed the pathing used when tests run; for sqla_nose.py and py.test, the "./lib" prefix is again inserted at the head of sys.path, but only if sys.flags.no_user_site isn't set; this makes it act just like the way Python puts "." in the current path by default. For tox, we are setting the PYTHONNOUSERSITE flag now.
.. change:: :tags: feature, sql :tickets: 3084 :pullreq: bitbucket:47 The topological sorting used to sort :class:`.Table` objects and available via the :attr:`.MetaData.sorted_tables` collection will now produce a **deterministic** ordering; that is, the same ordering each time given a set of tables with particular names and dependencies. This is to help with comparison of DDL scripts and other use cases. The tables are sent to the topological sort sorted by name, and the topological sort itself will process the incoming data in an ordered fashion. Pull request courtesy Sebastian Bank.
.. seealso:: :ref:`feature_3084`
.. change:: :tags: feature, orm :pullreq: github:164 Added new argument :paramref:`.Query.update.update_args` which allows kw arguments such as ``mysql_limit`` to be passed to the underlying :class:`.Update` construct. Pull request courtesy Amir Sadoughi.
.. changelog:: :version: 1.0.0b5 :released: April 3, 2015
.. change:: :tags: bug, orm :tickets: 3349 :class:`.Query` doesn't support joins, subselects, or special FROM clauses when using the :meth:`.Query.update` or :meth:`.Query.delete` methods; instead of silently ignoring these fields if methods like :meth:`.Query.join` or :meth:`.Query.select_from` have been called, an error is raised. In 0.9.10 this only emits a warning.
.. change:: :tags: bug, orm Added a list() call around a weak dictionary used within the commit phase of the session, without which a "dictionary changed size during iteration" error could occur if garbage collection interacted within the process. Change was introduced by #3139.
.. change:: :tags: bug, postgresql :tickets: 3343 Fixed bug where updated PG index reflection as a result of :ticket:`3184` would cause index operations to fail on Postgresql versions 8.4 and earlier. The enhancements are now disabled when using an older version of Postgresql.
.. change:: :tags: bug, sql :tickets: 3346 The warning emitted by the unicode type for a non-unicode type has been liberalized to warn for values that aren't even string values, such as integers; previously, the updated warning system of 1.0 made use of string formatting operations which would raise an internal TypeError. While these cases should ideally raise outright, some backends like SQLite and MySQL do accept them and are potentially in use by legacy code, not to mention that they will always pass through if unicode conversion is turned off for the target backend.
.. change:: :tags: bug, orm :tickets: 3347 Fixed a bug related to "nested" inner join eager loading, which exists in 0.9 as well but is more of a regression in 1.0 due to :ticket:`3008`, which turns on "nested" by default, such that a joined eager load that travels across sibling paths from a common ancestor using innerjoin=True will correctly splice each "innerjoin" sibling into the appropriate part of the join, when a series of inner/outer joins are mixed together.
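For instance, a pair of joined eager loads along sibling paths such as the following (the ``A``/``B`` mappings are illustrative) now splices each inner join into the correct part of the overall join::

    from sqlalchemy.orm import joinedload

    q = session.query(A).options(
        joinedload(A.bs).joinedload(B.c, innerjoin=True),
        joinedload(A.d, innerjoin=True),
    )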
.. changelog:: :version: 1.0.0b4 :released: March 29, 2015
.. change:: :tags: bug, mssql, oracle, firebird, sybase :tickets: 3338 Turned off the "simple order by" flag on the MSSQL and Oracle dialects; this is the flag that, per :ticket:`2992`, causes an ORDER BY or GROUP BY of an expression that's also in the columns clause to be copied by label, even if referenced as the expression object. The behavior for MSSQL is now the old behavior that copies the whole expression in by default, as MSSQL can be picky about these, particularly in GROUP BY expressions. The flag is also turned off defensively for the Firebird and Sybase dialects.
.. note:: this resolution was incorrect; please see version 1.0.2 for a rework of this resolution.
.. change:: :tags: feature, schema :tickets: 3341 The "auto-attach" feature of constraints such as :class:`.UniqueConstraint` and :class:`.CheckConstraint` has been further enhanced such that when the constraint is associated with non-table-bound :class:`.Column` objects, the constraint will set up event listeners with the columns themselves such that the constraint auto-attaches at the same time the columns are associated with the table. This in particular helps in some edge cases in declarative but is also of general use.
.. seealso:: :ref:`change_3341`
.. change:: :tags: bug, sql :tickets: 3340 Fixed bug in the new "label resolution" feature of :ticket:`2992` where a label that was anonymous, then labeled again with a name, would fail to be locatable via a textual label. This situation occurs naturally when a mapped :func:`.column_property` is given an explicit label in a query.
.. change:: :tags: bug, sql :tickets: 3335 Fixed bug in the new "label resolution" feature of :ticket:`2992` where the string label placed in the order_by() or group_by() of a statement would place higher priority on the name as found inside the FROM clause instead of a more locally available name inside the columns clause.
.. changelog:: :version: 1.0.0b3 :released: March 20, 2015
.. change:: :tags: bug, mysql :tickets: 2771 Repaired the commit for issue #2771 which was inadvertently commented out.
.. changelog:: :version: 1.0.0b2 :released: March 20, 2015
.. change:: :tags: bug, mysql :tickets: 2771 :pullreq: bitbucket:49 Fixes to fully support using the ``'utf8mb4'`` MySQL-specific charset with MySQL dialects, in particular MySQL-Python and PyMySQL. In addition, MySQL databases that report more unusual charsets such as 'koi8u' or 'eucjpms' will also work correctly. Pull request courtesy Thomas Grainger.
.. change:: :tags: change, orm, declarative :tickets: 3331 Loosened some restrictions that were added to ``@declared_attr`` objects, such that they were prevented from being called outside of the declarative process; this is related to the enhancements of #3150 which allow ``@declared_attr`` to return a value that is cached based on the current class as it's being configured. The exception is no longer raised, and the behavior is changed so that outside of the declarative process, the function decorated by ``@declared_attr`` is called every time just like a regular ``@property``, without using any caching, as none is available at this stage.
.. change:: :tags: bug, engine :tickets: 3330, 3329 The "auto close" for :class:`.ResultProxy` is now a "soft" close. That is, after exhausting all rows using the fetch methods, the DBAPI cursor is released as before and the object may be safely discarded, but the fetch methods may continue to be called, for which they will return an end-of-result object (None for fetchone, empty list for fetchmany and fetchall). Only if :meth:`.ResultProxy.close` is called explicitly will these methods raise the "result is closed" error.
.. seealso:: :ref:`change_3330`
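A short sketch of the new "soft close" behavior (``conn`` and ``my_table`` assumed to exist)::

    result = conn.execute(my_table.select())
    rows = result.fetchall()

    # cursor is released; further fetches return end-of-result values
    assert result.fetchone() is None
    assert result.fetchmany(5) == []

    result.close()
    result.fetchone()  # only now does this raise the "result is closed" error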
.. change:: :tags: bug, orm :tickets: 3327 :pullreq: github:160 Fixed unexpected use regression from pullreq github:137 where Py2K unicode literals (e.g. ``u""``) would not be accepted by the :paramref:`.relationship.cascade` option. Pull request courtesy Julien Castets.
.. changelog:: :version: 1.0.0b1 :released: March 13, 2015 Version 1.0.0b1 is the first release of the 1.0 series. Many changes described here are also present in the 0.9 and sometimes the 0.8 series as well. For changes that are specific to 1.0 with an emphasis on compatibility concerns, see :doc:`/changelog/migration_10`.
.. change:: :tags: feature, ext :tickets: 3054 Added a new extension suite :mod:`sqlalchemy.ext.baked`. This simple but unusual system allows for a dramatic savings in Python overhead for the construction and processing of ORM :class:`.Query` objects, from query construction up through rendering of a string SQL statement.
.. seealso:: :ref:`baked_toplevel`
.. change:: :tags: bug, postgresql :tickets: 3319 The Postgresql :class:`.postgresql.ENUM` type will emit a DROP TYPE instruction when a plain ``table.drop()`` is called, assuming the object is not associated directly with a :class:`.MetaData` object. In order to accommodate the use case of an enumerated type shared between multiple tables, the type should be associated directly with the :class:`.MetaData` object; in this case the type will only be created at the metadata level, or if created directly. The rules for create/drop of Postgresql enumerated types have been highly reworked in general.
.. seealso:: :ref:`change_3319`
.. change:: :tags: feature, orm :tickets: 3317 Added a new event suite :class:`.QueryEvents`. The :meth:`.QueryEvents.before_compile` event allows the creation of functions which may place additional modifications to :class:`.Query` objects before the construction of the SELECT statement. It is hoped that this event will be made much more useful via the advent of a new inspection system that will allow for detailed modifications to be made against :class:`.Query` objects in an automated fashion.
.. seealso:: :class:`.QueryEvents`
.. change:: :tags: feature, orm :tickets: 3249 The subquery wrapping which occurs when joined eager loading is used with a one-to-many query that also features LIMIT, OFFSET, or DISTINCT has been disabled in the case of a one-to-one relationship, that is, a one-to-many with :paramref:`.relationship.uselist` set to False. This will produce more efficient queries in these cases.
.. seealso:: :ref:`change_3249`
.. change:: :tags: bug, orm :tickets: 3301 Fixed bug where the session attachment error "object is already attached to session X" would fail to prevent the object from also being attached to the new session, in the case that execution continued after the error raise occurred.
.. change:: :tags: bug, orm, declarative :tickets: 3219, 3240 Fixed bug where using an ``__abstract__`` mixin in the middle of a declarative inheritance hierarchy would prevent attributes and configuration being correctly propagated from the base class to the inheriting class.
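A sketch of the kind of hierarchy involved; the names are illustrative, and a single-table inheritance setup is assumed::

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Document(Base):
        __tablename__ = "document"
        id = Column(Integer, primary_key=True)
        type = Column(String(50))
        __mapper_args__ = {"polymorphic_on": type}

    class MidLevel(Document):
        __abstract__ = True  # unmapped step in the middle of the hierarchy

    class Report(MidLevel):
        # base class attributes/configuration now propagate through MidLevel
        __mapper_args__ = {"polymorphic_identity": "report"}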
.. change:: :tags: feature, sql :tickets: 918 The SQL compiler now generates the mapping of expected columns such that they are matched to the received result set positionally, rather than by name. Originally, this was seen as a way to handle cases where we had columns returned with difficult-to-predict names, though in modern use that issue has been overcome by anonymous labeling. In this version, the approach basically reduces function call count per-result by a few dozen calls, or more for larger sets of result columns. The approach still degrades into a modern version of the old approach if any discrepancy in size exists between the compiled set of columns versus what was received, so there's no issue for partially or fully textual compilation scenarios where these lists might not line up.
.. change:: :tags: feature, postgresql :pullreq: github:132 The PG8000 dialect now supports the :paramref:`.create_engine.encoding` parameter, by setting up the client encoding on the connection which is then intercepted by pg8000. Pull request courtesy Tony Locke.
.. change:: :tags: feature, postgresql :pullreq: github:132 Added support for PG8000's native JSONB feature. Pull request courtesy Tony Locke.
.. change:: :tags: change, orm Mapped attributes marked as deferred without explicit undeferral will now remain "deferred" even if their column is otherwise present in the result set in some way. This is a performance enhancement in that an ORM load no longer spends time searching for each deferred column when the result set is obtained. However, for an application that has been relying upon this, an explicit :func:`.undefer` or similar option should now be used.
.. change:: :tags: feature, orm :tickets: 3307 Mapped state internals have been reworked to allow for a 50% reduction in callcounts specific to the "expiration" of objects, as in the "auto expire" feature of :meth:`.Session.commit` and for :meth:`.Session.expire_all`, as well as in the "cleanup" step which occurs when object states are garbage collected.
.. change:: :tags: bug, mysql The MySQL dialect now supports CAST on types that are constructed as :class:`.TypeDecorator` objects.
.. change:: :tags: bug, mysql :tickets: 3237 A warning is emitted when :func:`.cast` is used with the MySQL dialect on a type where MySQL does not support CAST; MySQL only supports CAST on a subset of datatypes. SQLAlchemy has for a long time just omitted the CAST for unsupported types in the case of MySQL. While we don't want to change this now, we emit a warning to show that it's taken place. A warning is also emitted when a CAST is used with an older MySQL version (< 4) that doesn't support CAST at all; it's skipped in this case as well.
.. change:: :tags: feature, sql :tickets: 3087 Literal values within a :class:`.DefaultClause`, which is invoked when using the :paramref:`.Column.server_default` parameter, will now be rendered using the "inline" compiler, so that they are rendered as-is, rather than as bound parameters.
.. seealso:: :ref:`change_3087`
.. change:: :tags: feature, oracle :pullreq: github:152 Added support for cx_oracle connections to a specific service name, as opposed to a tns name, by passing ``?service_name=`` to the URL. Pull request courtesy Sławomir Ehlert.
.. change:: :tags: feature, mysql :tickets: 3155 The MySQL dialect now renders TIMESTAMP with NULL / NOT NULL in all cases, so that MySQL 5.6.6 with the ``explicit_defaults_for_timestamp`` flag enabled will allow TIMESTAMP to continue to work as expected when ``nullable=False``. Existing applications are unaffected as SQLAlchemy has always emitted NULL for a TIMESTAMP column that is ``nullable=True``.
.. seealso:: :ref:`change_3155` :ref:`mysql_timestamp_null`
.. change:: :tags: bug, schema :tickets: 3299, 3067 The :class:`.CheckConstraint` construct now supports naming conventions that include the token ``%(column_0_name)s``; the constraint expression is scanned for columns. Additionally, naming conventions for check constraints that don't include the ``%(constraint_name)s`` token will now work for :class:`.SchemaType`-generated constraints, such as those of :class:`.Boolean` and :class:`.Enum`; this stopped working in 0.9.7 due to :ticket:`3067`.
.. seealso:: :ref:`naming_check_constraints` :ref:`naming_schematypes`
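For example, a convention making use of the new token (table and column names illustrative)::

    from sqlalchemy import CheckConstraint, Column, Integer, MetaData, Table

    metadata = MetaData(naming_convention={
        "ck": "ck_%(table_name)s_%(column_0_name)s",
    })

    t = Table(
        "foo", metadata,
        Column("value", Integer),
        CheckConstraint("value > 5"),  # receives the name ck_foo_value
    )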
.. change:: :tags: feature, postgresql, pypy :tickets: 3052 :pullreq: bitbucket:34 Added support for the psycopg2cffi DBAPI on pypy. Pull request courtesy shauns.
.. seealso:: :mod:`sqlalchemy.dialects.postgresql.psycopg2cffi`
.. change:: :tags: feature, orm :tickets: 3262 :pullreq: bitbucket:38 A warning is emitted when the same polymorphic identity is assigned to two different mappers in the same hierarchy. This is typically a user error and means that the two different mapping types cannot be correctly distinguished at load time. Pull request courtesy Sebastian Bank.
.. change:: :tags: feature, sql :pullreq: github:150 The type of expression is reported when an object passed to a SQL expression unit can't be interpreted as a SQL fragment; pull request courtesy Ryan P. Kelly.
.. change:: :tags: bug, orm :tickets: 3227, 3242, 1326 The primary :class:`.Mapper` of a :class:`.Query` is now passed to the :meth:`.Session.get_bind` method when calling upon :meth:`.Query.count`, :meth:`.Query.update`, :meth:`.Query.delete`, as well as queries against mapped columns, :obj:`.column_property` objects, and SQL functions and expressions derived from mapped columns. This allows sessions that rely upon either customized :meth:`.Session.get_bind` schemes or "bound" metadata to work in all relevant cases.
.. seealso:: :ref:`bug_3227`
.. change:: :tags: enhancement, sql :tickets: 3074 Custom dialects that implement :class:`.GenericTypeCompiler` can now be constructed such that the visit methods receive an indication of the owning expression object, if any. Any visit method that accepts keyword arguments (e.g. ``**kw``) will in most cases receive a keyword argument ``type_expression``, referring to the expression object that the type is contained within. For columns in DDL, the dialect's compiler class may need to alter its ``get_column_specification()`` method to support this as well. The ``UserDefinedType.get_col_spec()`` method will also receive ``type_expression`` if it provides ``**kw`` in its argument signature.
.. change:: :tags: bug, sql :tickets: 3288 The multi-values version of :meth:`.Insert.values` has been repaired to work more usefully with tables that have Python-side default values and/or functions, as well as server-side defaults. The feature will now work with a dialect that uses "positional" parameters; a Python callable will also be invoked individually for each row just as is the case with an "executemany" style invocation; a server-side default column will no longer implicitly receive the value explicitly specified for the first row, instead refusing to invoke without an explicit value.
.. seealso:: :ref:`bug_3288`
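For example, a multi-values INSERT against a table with a Python-side default (``mytable`` and its ``data`` column are illustrative) now evaluates the default separately for each row::

    stmt = mytable.insert().values([
        {"data": "row1"},
        {"data": "row2"},  # defaults are invoked per row, as in executemany
    ])
    conn.execute(stmt)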
.. change:: :tags: feature, general Structural memory use has been improved via much more significant use of ``__slots__`` for many internal objects. This optimization is particularly geared towards the base memory size of large applications that have lots of tables and columns, and greatly reduces memory size for a variety of high-volume objects including event listening internals, comparator objects and parts of the ORM attribute and loader strategy system.
.. seealso:: :ref:`feature_slots`
.. change:: :tags: bug, mysql :tickets: 3283 The :class:`.mysql.SET` type has been overhauled to no longer assume that the empty string, or a set with a single empty string value, is in fact a set with a single empty string; instead, this is by default treated as the empty set. In order to handle persistence of a :class:`.mysql.SET` that actually wants to include the blank value ``''`` as a legitimate value, a new bitwise operational mode is added which is enabled by the :paramref:`.mysql.SET.retrieve_as_bitwise` flag, which will persist and retrieve values unambiguously using their bitflag positioning. Storage and retrieval of unicode values for driver configurations that aren't converting unicode natively is also repaired.
.. seealso:: :ref:`change_3283`
.. change:: :tags: feature, schema :tickets: 3282 The DDL generation system of :meth:`.MetaData.create_all` and :meth:`.MetaData.drop_all` has been enhanced to, in most cases, automatically handle the case of mutually dependent foreign key constraints; the need for the :paramref:`.ForeignKeyConstraint.use_alter` flag is greatly reduced. The system also works for constraints which aren't given a name up front; only in the case of DROP is a name required for at least one of the constraints involved in the cycle.
.. seealso:: :ref:`feature_3282`
.. change:: :tags: feature, schema Added a new accessor :attr:`.Table.foreign_key_constraints` to complement the :attr:`.Table.foreign_keys` collection, as well as :attr:`.ForeignKeyConstraint.referred_table`.
.. change:: :tags: bug, sqlite :tickets: 3244, 3261 UNIQUE and FOREIGN KEY constraints are now fully reflected on SQLite both with and without names. Previously, foreign key names were ignored and unnamed unique constraints were skipped. Thanks to Jon Nelson for assistance with this.
.. change:: :tags: feature, examples A new suite of examples dedicated to providing a detailed study into performance of SQLAlchemy ORM and Core, as well as the DBAPI, from multiple perspectives. The suite runs within a container that provides built-in profiling displays, both through console output as well as graphically via the RunSnake tool.
.. seealso:: :ref:`examples_performance`
.. change:: :tags: feature, orm :tickets: 3100 A new series of :class:`.Session` methods which provide hooks directly into the unit of work's facility for emitting INSERT and UPDATE statements has been created. When used correctly, this expert-oriented system can allow ORM-mappings to be used to generate bulk insert and update statements batched into executemany groups, allowing the statements to proceed at speeds that rival direct use of the Core.
.. seealso:: :ref:`bulk_operations`
.. change:: :tags: feature, mssql :tickets: 3039 SQL Server 2012 now recommends VARCHAR(max), NVARCHAR(max), VARBINARY(max) for large text/binary types. The MSSQL dialect will now respect this based on version detection, as well as the new ``deprecate_large_types`` flag.
.. seealso:: :ref:`mssql_large_type_deprecation`
.. change:: :tags: bug, sqlite :tickets: 3257 The SQLite dialect, when using the :class:`.sqlite.DATE`, :class:`.sqlite.TIME`, or :class:`.sqlite.DATETIME` types, and given a ``storage_format`` that only renders numbers, will render the types in DDL as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``, so that despite the lack of alpha characters in the values, the column will still deliver the "text affinity". Normally this is not needed, as the textual values within the default storage formats already imply text.
.. seealso:: :ref:`sqlite_datetime`
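For example, an integer-only storage format along these lines (the format and regexp here follow the typical pattern; exact values are up to the application) now renders its DDL as ``DATETIME_CHAR``::

    from sqlalchemy.dialects.sqlite import DATETIME

    dt = DATETIME(
        storage_format="%(year)04d%(month)02d%(day)02d"
                       "%(hour)02d%(minute)02d%(second)02d",
        regexp=r"(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})",
    )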
.. change:: :tags: bug, engine :tickets: 3266 The engine-level error handling and wrapping routines will now take effect in all engine connection use cases, including when user-custom connect routines are used via the :paramref:`.create_engine.creator` parameter, as well as when the :class:`.Connection` encounters a connection error on revalidation.
.. seealso:: :ref:`change_3266`
.. change:: :tags: feature, oracle New Oracle DDL features for tables and indexes: COMPRESS, BITMAP. Patch courtesy Gabor Gombas.
.. change:: :tags: bug, oracle An alias name will be properly quoted when referred to using the ``%(name)s`` token inside the :meth:`.Select.with_hint` method. Previously, the Oracle backend hadn't implemented this quoting.
.. change:: :tags: feature, oracle :tickets: 3220 Added support for CTEs under Oracle. This includes some tweaks to the aliasing syntax, as well as a new CTE feature :meth:`.CTE.suffix_with`, which is useful for adding in special Oracle-specific directives to the CTE.
.. seealso:: :ref:`change_3220`
.. change:: :tags: feature, mysql :tickets: 3121 Updated the "supports_unicode_statements" flag to True for MySQLdb and Pymysql under Python 2. This refers to the SQL statements themselves, not the parameters, and affects issues such as table and column names using non-ASCII characters. These drivers both appear to support Python 2 Unicode objects without issue in modern versions.
.. change:: :tags: bug, mysql :tickets: 3263 The :meth:`.ColumnOperators.match` operator is now handled such that the return type is not strictly assumed to be boolean; it now returns a :class:`.Boolean` subclass called :class:`.MatchType`. The type will still produce boolean behavior when used in Python expressions, however the dialect can override its behavior at result time. In the case of MySQL, while the MATCH operator is typically used in a boolean context within an expression, if one actually queries for the value of a match expression, a floating point value is returned; this value is not compatible with SQLAlchemy's C-based boolean processor, so MySQL's result-set behavior now follows that of the :class:`.Float` type. A new operator object ``notmatch_op`` is also added to better allow dialects to define the negation of a match operation.
.. seealso:: :ref:`change_3263`
.. change:: :tags: bug, postgresql :tickets: 3264 The :meth:`.PGDialect.has_table` method will now query against ``pg_catalog.pg_table_is_visible(c.oid)``, rather than testing for an exact schema match, when the schema name is None; this is so that the method will also show that temporary tables are present. Note that this is a behavioral change, as Postgresql allows a non-temporary table to silently overwrite an existing temporary table of the same name, so this changes the behavior of ``checkfirst`` in that unusual scenario.
.. seealso:: :ref:`change_3264`
.. change:: :tags: bug, sql :tickets: 3260 Fixed bug in the :meth:`.Table.tometadata` method where the :class:`.CheckConstraint` associated with a :class:`.Boolean` or :class:`.Enum` type object would be doubled in the target table. The copy process now tracks the production of this constraint object as local to a type object.
.. change:: :tags: feature, orm :tickets: 3217 Added a parameter :paramref:`.Query.join.isouter` which is synonymous with calling :meth:`.Query.outerjoin`; this flag is to provide a more consistent interface compared to Core :meth:`.FromClause.join`. Pull request courtesy Jonathan Vanasco.
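E.g., the following two queries are now equivalent (mapping names illustrative)::

    q1 = session.query(User).outerjoin(User.addresses)
    q2 = session.query(User).join(User.addresses, isouter=True)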
.. change:: :tags: bug, sql :tickets: 3243 The behavioral contract of the :attr:`.ForeignKeyConstraint.columns` collection has been made consistent; this attribute is now a :class:`.ColumnCollection` like that of all other constraints and is initialized at the point when the constraint is associated with a :class:`.Table`.
.. seealso:: :ref:`change_3243`
.. change:: :tags: bug, orm :tickets: 3256 The :meth:`.PropComparator.of_type` modifier has been improved in conjunction with loader directives such as :func:`.joinedload` and :func:`.contains_eager` such that if two :meth:`.PropComparator.of_type` modifiers of the same base type/path are encountered, they will be joined together into a single "polymorphic" entity, rather than replacing the entity of type A with the one of type B. E.g. a joinedload of ``A.b.of_type(BSub1)->BSub1.c`` combined with joinedload of ``A.b.of_type(BSub2)->BSub2.c`` will create a single joinedload of ``A.b.of_type((BSub1, BSub2)) -> BSub1.c, BSub2.c``, without the need for the ``with_polymorphic`` to be explicit in the query.
.. seealso:: :ref:`eagerloading_polymorphic_subtypes` - contains an updated example illustrating the new format.
.. change:: :tags: bug, sql :tickets: 3245 The :attr:`.Column.key` attribute is now used as the source of anonymous bound parameter names within expressions, to match the existing use of this value as the key when rendered in an INSERT or UPDATE statement. This allows :attr:`.Column.key` to be used as a "substitute" string to work around a difficult column name that doesn't translate well into a bound parameter name. Note that the paramstyle is configurable on :func:`.create_engine` in any case, and most DBAPIs today support a named and positional style.
.. change:: :tags: bug, sql :pullreq: github:146 Fixed the name of the :paramref:`.PoolEvents.reset.dbapi_connection` parameter as passed to this event; in particular this affects usage of the "named" argument style for this event. Pull request courtesy Jason Goldberger.
.. change:: :tags: feature, sql :pullreq: github:139 Added a new parameter :paramref:`.Table.tometadata.name` to the :meth:`.Table.tometadata` method. Similar to :paramref:`.Table.tometadata.schema`, this argument causes the newly copied :class:`.Table` to take on the new name instead of the existing one. An interesting capability this adds is that of copying a :class:`.Table` object to the *same* :class:`.MetaData` target with a new name. Pull request courtesy n.d. parker.
.. change:: :tags: bug, orm :pullreq: github:137 Repaired support of the ``copy.deepcopy()`` call when used by the :class:`.orm.util.CascadeOptions` argument, which occurs if ``copy.deepcopy()`` is being used with :func:`.relationship` (not an officially supported use case). Pull request courtesy duesenfranz.
.. change:: :tags: bug, sql :tickets: 3170 Reversing a change that was made in 0.9, the "singleton" nature of the "constants" :func:`.null`, :func:`.true`, and :func:`.false` has been reverted. These functions returning a "singleton" object had the effect that different instances would be treated as the same regardless of lexical use, which in particular would impact the rendering of the columns clause of a SELECT statement.
.. seealso:: :ref:`bug_3170`
.. change:: :tags: bug, orm :tickets: 3139 Fixed bug where :meth:`.Session.expunge` would not fully detach the given object if the object had been subject to a delete operation that was flushed, but not committed. This would also affect related operations like :func:`.make_transient`.
.. seealso:: :ref:`bug_3139`
.. change:: :tags: bug, orm :tickets: 3230 A warning is emitted in the case of multiple relationships that ultimately will populate a foreign key column in conflict with another, where the relationships are attempting to copy values from different source columns. This occurs in the case where composite foreign keys with overlapping columns are mapped to relationships that each refer to a different referenced column. A new documentation section illustrates the example as well as how to overcome the issue by specifying "foreign" columns specifically on a per-relationship basis.
.. seealso:: :ref:`relationship_overlapping_foreignkeys`
.. change:: :tags: feature, sql :tickets: 3172 Exception messages have been spiffed up a bit. The SQL statement and parameters are not displayed if None, reducing confusion for error messages that weren't related to a statement. The full module and classname for the DBAPI-level exception is displayed, making it clear that this is a wrapped DBAPI exception. The statement and parameters themselves are bounded within bracketed sections to better isolate them from the error message and from each other.
.. change:: :tags: bug, orm :tickets: 3228 The :meth:`.Query.update` method will now convert string key names in the given dictionary of values into mapped attribute names against the mapped class being updated. Previously, string names were taken in directly and passed to the core update statement without any means to resolve against the mapped entity. Synonyms and hybrid attributes are also supported as the subject attributes of :meth:`.Query.update`.
.. seealso:: :ref:`bug_3228`
.. change:: :tags: bug, orm :tickets: 3035 Improvements to the mechanism used by :class:`.Session` to locate "binds" (e.g. engines to use), such that engines can be associated with mixin classes, concrete subclasses, as well as a wider variety of table metadata such as joined inheritance tables.
.. seealso:: :ref:`bug_3035`
.. change:: :tags: bug, general :tickets: 3218 The ``__module__`` attribute is now set for all those SQL and ORM functions that are derived as "public factory" symbols, which should assist with documentation tools being able to report on the target module.
.. change:: :tags: feature, sql :meth:`.Insert.from_select` now includes Python and SQL-expression defaults if otherwise unspecified; the limitation where non-server column defaults aren't included in an INSERT FROM SELECT is now lifted and these expressions are rendered as constants into the SELECT statement.
.. seealso:: :ref:`feature_insert_from_select_defaults`
.. change:: :tags: bug, orm :tickets: 3233 Fixed bug in single table inheritance where a chain of joins that included the same single-inheritance entity more than once (normally this should raise an error) could, in some cases depending on what was being joined "from", implicitly alias the second case of the single-inheritance entity, producing a query that "worked". But as this implicit aliasing is not intended in the case of single table inheritance, it didn't really "work" fully and was very misleading, since it wouldn't always appear.
.. seealso:: :ref:`bug_3233`
.. change:: :tags: bug, orm :tickets: 3222 The ON clause rendered when using :meth:`.Query.join`, :meth:`.Query.outerjoin`, or the standalone :func:`.orm.join` / :func:`.orm.outerjoin` functions to a single-inheritance subclass will now include the "single table criteria" in the ON clause even if the ON clause is otherwise hand-rolled; it is now added to the criteria using AND, the same way as if joining to a single-table target using relationship or similar. This is sort of in-between a feature and a bug.
.. seealso:: :ref:`migration_3222`
.. change:: :tags: feature, sql :tickets: 3184 :pullreq: bitbucket:30 The :class:`.UniqueConstraint` construct is now included when reflecting a :class:`.Table` object, for databases where this is applicable. In order to achieve this with sufficient accuracy, MySQL and Postgresql now contain features that correct for the duplication of indexes and unique constraints when reflecting tables, indexes, and constraints. In the case of MySQL, there is not actually a "unique constraint" concept independent of a "unique index", so for this backend :class:`.UniqueConstraint` continues to remain non-present for a reflected :class:`.Table`. For Postgresql, the query used to detect indexes against ``pg_index`` has been improved to check for the same construct in ``pg_constraint``, and the implicitly constructed unique index is not included with a reflected :class:`.Table`. In both cases, the :meth:`.Inspector.get_indexes` and the :meth:`.Inspector.get_unique_constraints` methods return both constructs individually, but include a new token ``duplicates_constraint`` in the case of Postgresql or ``duplicates_index`` in the case of MySQL to indicate when this condition is detected. Pull request courtesy Johannes Erdfelt.
.. seealso:: :ref:`feature_3184`
.. change:: :tags: feature, postgresql :pullreq: github:134 Added support for the FILTER keyword as applied to aggregate functions, supported by Postgresql 9.4. Pull request courtesy Ilja Everilä.
.. seealso:: :ref:`feature_gh134`
.. change:: :tags: bug, sql, engine :tickets: 3215 Fixed bug where a "branched" connection, that is the kind you get when you call :meth:`.Connection.connect`, would not share invalidation status with the parent. The architecture of branching has been tweaked a bit so that the branched connection defers to the parent for all invalidation status and operations.
.. change:: :tags: bug, sql, engine :tickets: 3190 Fixed bug where a "branched" connection, that is the kind you get when you call :meth:`.Connection.connect`, would not share transaction status with the parent. The architecture of branching has been tweaked a bit so that the branched connection defers to the parent for all transactional status and operations.
.. change:: :tags: bug, orm, declarative :tickets: 2670 A relationship set up with :class:`.declared_attr` on a :class:`.AbstractConcreteBase` base class will now be configured on the abstract base mapping automatically, in addition to being set up on descendant concrete classes as usual.
.. seealso:: :ref:`feature_3150`
.. change:: :tags: feature, orm, declarative :tickets: 3150 The :class:`.declared_attr` construct has newly improved behaviors and features in conjunction with declarative. The decorated function will now have access to the final column copies present on the local mixin when invoked, and will also be invoked exactly once for each mapped class, the returned result being memoized. A new modifier :attr:`.declared_attr.cascading` is added as well.
.. seealso:: :ref:`feature_3150`
.. change:: :tags: feature, ext :tickets: 3210 The :mod:`sqlalchemy.ext.automap` extension will now set ``cascade="all, delete-orphan"`` automatically on a one-to-many relationship/backref where the foreign key is detected as containing one or more non-nullable columns. This argument is present in the keywords passed to :func:`.automap.generate_relationship` in this case and can still be overridden. Additionally, if the :class:`.ForeignKeyConstraint` specifies ``ondelete="CASCADE"`` for a non-nullable or ``ondelete="SET NULL"`` for a nullable set of columns, the argument ``passive_deletes=True`` is also added to the relationship. Note that not all backends support reflection of ondelete, but backends that do include Postgresql and MySQL.
.. change:: :tags: feature, sql :tickets: 3206 Added new method :meth:`.Select.with_statement_hint` and ORM method :meth:`.Query.with_statement_hint` to support statement-level hints that are not specific to a table.
.. change:: :tags: bug, sqlite :tickets: 3203 :pullreq: bitbucket:31 SQLite now supports reflection of unique constraints from temp tables; previously, this would fail with a TypeError. Pull request courtesy Johannes Erdfelt.
.. seealso:: :ref:`change_3204` - changes regarding SQLite temporary table and view reflection.
.. change:: :tags: bug, sqlite :tickets: 3204 Added :meth:`.Inspector.get_temp_table_names` and :meth:`.Inspector.get_temp_view_names`; currently, only the SQLite and Oracle dialects support these methods. The return of temporary table and view names has been **removed** from SQLite and Oracle's version of :meth:`.Inspector.get_table_names` and :meth:`.Inspector.get_view_names`; other database backends (such as MySQL) cannot support this information, and the scope of operation is different in that the tables can be local to a session and typically aren't supported in remote schemas.
.. seealso:: :ref:`change_3204`
.. change:: :tags: feature, postgresql :tickets: 2891 :pullreq: github:128 Support has been added for reflection of materialized views and foreign tables, as well as support for materialized views within :meth:`.Inspector.get_view_names`, and a new method :meth:`.PGInspector.get_foreign_table_names` available on the Postgresql version of :class:`.Inspector`. Pull request courtesy Rodrigo Menezes.
.. seealso:: :ref:`feature_2891`
.. change:: :tags: feature, orm Added new event handlers :meth:`.AttributeEvents.init_collection` and :meth:`.AttributeEvents.dispose_collection`, which track when a collection is first associated with an instance and when it is replaced. These handlers supersede the :meth:`.collection.linker` annotation. The old hook remains supported through an event adapter.
.. change:: :tags: bug, orm :tickets: 3148, 3188 A major rework to the behavior of expression labels, most specifically when used with ColumnProperty constructs with custom SQL expressions and in conjunction with the "order by labels" logic first introduced in 0.9. Fixes include that an ``order_by(Entity.some_col_prop)`` will now make use of "order by label" rules even if Entity has been subject to aliasing, either via inheritance rendering or via the use of the ``aliased()`` construct; rendering of the same column property multiple times with aliasing (e.g. ``query(Entity.some_prop, entity_alias.some_prop)``) will label each occurrence of the entity with a distinct label, and additionally "order by label" rules will work for both (e.g. ``order_by(Entity.some_prop, entity_alias.some_prop)``).
Additional issues that could prevent the "order by label" logic from working in 0.9, most notably that the state of a Label could change such that "order by label" would stop working depending on how things were called, have been fixed.
.. seealso:: :ref:`bug_3188`
.. change:: :tags: bug, mysql :tickets: 3186 MySQL boolean symbols "true", "false" work again. 0.9's change in :ticket:`2682` disallowed the MySQL dialect from making use of the "true" and "false" symbols in the context of "IS" / "IS NOT", but MySQL supports this syntax even though it has no boolean type. MySQL remains "non native boolean", but the :func:`.true` and :func:`.false` symbols again produce the keywords "true" and "false", so that an expression like ``column.is_(true())`` again works on MySQL.
.. seealso:: :ref:`bug_3186`
.. change:: :tags: changed, mssql :tickets: 3182 The hostname-based connection format for SQL Server when using pyodbc will no longer specify a default "driver name", and a warning is emitted if this is missing. The optimal driver name for SQL Server changes frequently and is per-platform, so hostname-based connections need to specify this. DSN-based connections are preferred.
.. seealso:: :ref:`change_3182`
.. change:: :tags: changed, sql The :func:`~.expression.column` and :func:`~.expression.table` constructs are now importable from the "from sqlalchemy" namespace, just like every other Core construct.
.. change:: :tags: changed, sql :tickets: 2992 The implicit conversion of strings to :func:`.text` constructs when passed to most builder methods of :func:`.select` as well as :class:`.Query` now emits a warning with just the plain string sent. The textual conversion still proceeds normally, however. The only methods that accept a string without a warning are the "label reference" methods like order_by() and group_by(); these functions will now, at compile time, attempt to resolve a single string argument to a column or label expression present in the selectable; if none is located, the expression still renders, but you get the warning again. The rationale here is that the implicit conversion from string to text is more unexpected than not these days, and it is better that the user send more direction to the Core / ORM when passing a raw string as to what direction should be taken. Core/ORM tutorials have been updated to go more in depth as to how text is handled.
.. seealso:: :ref:`migration_2992`
.. change:: :tags: feature, engine :tickets: 3178 A new style of warning can be emitted which will "filter" up to N occurrences of a parameterized string. This allows parameterized warnings that can refer to their arguments to be delivered a fixed number of times until allowing Python warning filters to squelch them, and prevents memory from growing unbounded within Python's warning registries.
.. seealso:: :ref:`feature_3178`
.. change:: :tags: feature, orm The :class:`.Query` will raise an exception when :meth:`.Query.yield_per` is used with mappings or options where either subquery eager loading, or joined eager loading with collections, would take place. These loading strategies are not currently compatible with yield_per, so by raising this error, the method is safer to use. Eager loads can be disabled with the ``lazyload('*')`` option or :meth:`.Query.enable_eagerloads`.
.. seealso:: :ref:`migration_yield_per_eager_loading`
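E.g., disabling eager loads up front so that :meth:`.Query.yield_per` may proceed (the ``User`` mapping is illustrative)::

    from sqlalchemy.orm import lazyload

    q = session.query(User).options(lazyload("*")).yield_per(100)
    for user in q:
        pass  # collections load lazily, only if accessed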
.. change:: :tags: bug, orm :tickets: 3177 Changed the approach by which the "single inheritance criterion" is applied when using :meth:`.Query.from_self`, or its common user :meth:`.Query.count`. The criteria to limit rows to those with a certain type is now indicated on the inside subquery, not the outside one, so that even if the "type" column is not available in the columns clause, we can filter on it on the "inner" query.
.. seealso:: :ref:`migration_3177`
.. change:: :tags: changed, orm The ``proc()`` callable passed to the ``create_row_processor()`` method of custom :class:`.Bundle` classes now accepts only a single "row" argument.
.. seealso:: :ref:`bundle_api_change`
.. change:: :tags: changed, orm Deprecated event hooks removed: ``populate_instance``, ``create_instance``, ``translate_row``, ``append_result``.
.. seealso:: :ref:`migration_deprecated_orm_events`
.. change:: :tags: bug, orm :tickets: 3145 Made a small adjustment to the mechanics of lazy loading, such that it has less chance of interfering with a joinedload() in the very rare circumstance that an object points to itself; in this scenario, the object refers to itself while loading its attributes, which can cause a mixup between loaders. The use case of "object points to itself" is not fully supported, but the fix also removes some overhead, so for now it is part of testing.
.. change:: :tags: feature, orm :tickets: 3176 A new implementation for :class:`.KeyedTuple` used by the :class:`.Query` object offers dramatic speed improvements when fetching large numbers of column-oriented rows.
.. seealso:: :ref:`feature_3176`
.. change:: :tags: feature, orm :tickets: 3008 The behavior of :paramref:`.joinedload.innerjoin` as well as :paramref:`.relationship.innerjoin` is now to use "nested" inner joins, that is, right-nested, as the default behavior when an inner join joined eager load is chained to an outer join eager load.
.. seealso:: :ref:`migration_3008`
.. change:: :tags: bug, orm :tickets: 3171 The "resurrect" ORM event has been removed. This event hook had no purpose since the old "mutable attribute" system was removed in 0.8.
.. change:: :tags: bug, sql :tickets: 3169 Using :meth:`.Insert.from_select` now implies ``inline=True`` on :func:`.insert`. This helps to fix a bug where an INSERT...FROM SELECT construct would inadvertently be compiled as "implicit returning" on supporting backends, which would cause breakage in the case of an INSERT that inserts zero rows (as implicit returning expects a row), as well as arbitrary return data in the case of an INSERT that inserts multiple rows (e.g. only the first row of many). A similar change is also applied to an INSERT..VALUES with multiple parameter sets; implicit RETURNING will no longer emit for this statement either. As both of these constructs deal with variable numbers of rows, the :attr:`.ResultProxy.inserted_primary_key` accessor does not apply. Previously, there was a documentation note that one may prefer ``inline=True`` with INSERT..FROM SELECT as some databases don't support returning and therefore can't do "implicit" returning, but there's no reason an INSERT...FROM SELECT needs implicit returning in any case. Regular explicit :meth:`.Insert.returning` should be used to return variable numbers of result rows if inserted data is needed.
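A sketch of the construct in question (the ``users`` and ``archive`` tables are illustrative)::

    from sqlalchemy import select

    sel = select([users.c.id, users.c.name]).where(users.c.name.like("%ed%"))
    stmt = archive.insert().from_select(["id", "name"], sel)
    conn.execute(stmt)  # compiles with inline=True; no implicit RETURNING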
.. change:: :tags: bug, orm :tickets: 3167 Fixed bug where attribute "set" events or columns with ``@validates`` would have events triggered within the flush process, when those columns were the targets of a "fetch and populate" operation, such as an autoincremented primary key, a Python-side default, or a server-side default "eagerly" fetched via RETURNING.
.. change:: :tags: feature, oracle Added support for the Oracle table option ON COMMIT.
.. change:: :tags: feature, postgresql :tickets: 2051 Added support for PG table options TABLESPACE, ON COMMIT, WITH(OUT) OIDS, and INHERITS, when rendering DDL via the :class:`.Table` construct. Pull request courtesy malikdiarra.
.. seealso:: :ref:`postgresql_table_options`
.. change:: :tags: bug, orm, py3k The :class:`.IdentityMap` exposed from :attr:`.Session.identity_map` now returns lists for ``items()`` and ``values()`` in Py3K. Early porting to Py3K here had these returning iterators, when they technically should be "iterable views"; for now, lists are OK.
.. change:: :tags: orm, feature UPDATE statements can now be batched within an ORM flush into a more performant executemany() call, similarly to how INSERT statements can be batched; this will be invoked within flush to the degree that subsequent UPDATE statements for the same mapping and table involve the identical columns within the VALUES clause, that no SET-level SQL expressions are embedded, and that the versioning requirements for the mapping are compatible with the backend dialect's ability to return a correct rowcount for an executemany operation.
.. change:: :tags: engine, bug :tickets: 3163 Removing (or adding) an event listener at the same time that the event is being run itself, either from inside the listener or from a concurrent thread, now raises a RuntimeError, as the collection used is now an instance of ``collections.deque()`` and does not support changes while being iterated. Previously, a plain Python list was used where removal from inside the event itself would produce silent failures.
.. change:: :tags: orm, feature :tickets: 2963 The ``info`` parameter has been added to the constructor for :class:`.SynonymProperty` and :class:`.ComparableProperty`.
.. change:: :tags: sql, feature :tickets: 2963 The ``info`` parameter has been added as a constructor argument to all schema constructs including :class:`.MetaData`, :class:`.Index`, :class:`.ForeignKey`, :class:`.ForeignKeyConstraint`, :class:`.UniqueConstraint`, :class:`.PrimaryKeyConstraint`, :class:`.CheckConstraint`.
.. change:: :tags: orm, feature :tickets: 2971 The :attr:`.InspectionAttr.info` collection is now moved down to :class:`.InspectionAttr`, where in addition to being available on all :class:`.MapperProperty` objects, it is also now available on hybrid properties and association proxies, when accessed via :attr:`.Mapper.all_orm_descriptors`.
.. change:: :tags: sql, feature :tickets: 3027 :pullreq: bitbucket:29 The :paramref:`.Table.autoload_with` flag now implies that :paramref:`.Table.autoload` should be ``True``. Pull request courtesy Malik Diarra.
.. change:: :tags: postgresql, feature :pullreq: github:126 Added new method :meth:`.PGInspector.get_enums`, which, when using the inspector with Postgresql, will provide a list of ENUM types. Pull request courtesy Ilya Pekelny.
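E.g. (the connection URL is illustrative)::

    from sqlalchemy import create_engine, inspect

    engine = create_engine("postgresql://scott:tiger@localhost/test")
    insp = inspect(engine)
    print(insp.get_enums())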
This is achieved using an execution option ``skip_user_error_events`` that disables the handle error event for the scope of that execution. In this way, user code that rewrites exceptions doesn't need to worry about the MySQL dialect or other dialects that occasionally need to catch SQLAlchemy specific exceptions. .. change:: :tags: mysql, bug :tickets: 2515 Changed the default value of "raise_on_warnings" to False for MySQLconnector. This was set at True for some reason. The "buffered" flag unfortunately must stay at True as MySQLconnector does not allow a cursor to be closed unless all results are fully fetched. .. change:: :tags: bug, orm :tickets: 3117 The "evaluator" for query.update()/delete() won't work with multi-table updates, and needs to be set to ``synchronize_session=False`` or ``synchronize_session='fetch'``; this now raises an exception, with a message to change the synchronize setting. This is upgraded from a warning emitted as of 0.9.7. .. change:: :tags: removed The Drizzle dialect has been removed from the Core; it is now available as `sqlalchemy-drizzle `_, an independent, third party dialect. The dialect is still based almost entirely off of the MySQL dialect present in SQLAlchemy. .. seealso:: :ref:`change_2984` .. change:: :tags: enhancement, orm :tickets: 3061 Adjustment to attribute mechanics concerning when a value is implicitly initialized to None via first access; this action, which has always resulted in a population of the attribute, no longer does so; the None value is returned but the underlying attribute receives no set event. This is consistent with how collections work and allows attribute mechanics to behave more consistently; in particular, getting an attribute with no value does not squash the event that should proceed if the value is actually set to None. .. seealso:: :ref:`migration_3061` .. change:: :tags: feature, sql :tickets: 3034 The :meth:`.Select.limit` and :meth:`.Select.offset` methods now accept any SQL expression, in addition to integer values, as arguments. Typically this is used to allow a bound parameter to be passed, which can be substituted with a value later, thus allowing Python-side caching of the SQL query. The implementation here is fully backwards compatible with existing third party dialects, however those dialects which implement special LIMIT/OFFSET systems will need modification in order to take advantage of the new capabilities. Limit and offset also support "literal_binds" mode, where bound parameters are rendered inline as strings based on a compile-time option. Work on this feature is courtesy of Dobes Vandermeer. .. seealso:: :ref:`feature_3034`. SQLAlchemy-1.0.11/doc/build/changelog/changelog_09.rst

==============
0.9 Changelog
==============

.. changelog_imports:: .. include:: changelog_08.rst :start-line: 5 .. include:: changelog_07.rst :start-line: 5 .. changelog:: :version: 0.9.11 .. change:: :tags: bug, oracle, py3k :tickets: 3491 :versions: 1.1.0b1, 1.0.9 Fixed support for cx_Oracle version 5.2, which was tripping up SQLAlchemy's version detection under Python 3 and inadvertently not using the correct unicode mode for Python 3. This would cause issues such as bound variables mis-interpreted as NULL and rows silently not being returned. ..
change:: :tags: bug, engine :tickets: 3497 :versions: 1.0.8 Fixed critical issue whereby the pool "checkout" event handler may be called against a stale connection without the "connect" event handler having been called, in the case where the pool attempted to reconnect after being invalidated and failed; the stale connection would remain present and would be used on a subsequent attempt. This issue has a greater impact in the 1.0 series subsequent to 1.0.2, as it also delivers a blanked-out ``.info`` dictionary to the event handler; prior to 1.0.2 the ``.info`` dictionary is still the previous one. .. changelog:: :version: 0.9.10 :released: July 22, 2015 .. change:: :tags: bug, sqlite :tickets: 3495 :versions: 1.0.8 Fixed bug in SQLite dialect where UNIQUE constraints that included non-alphabetic characters in their names, like dots or spaces, would not be reflected with their name. .. change:: :tags: feature, sql :tickets: 3418 :versions: 1.0.5 Added official support for a CTE used by the SELECT present inside of :meth:`.Insert.from_select`. This behavior worked accidentally up until 0.9.9, when it no longer worked due to unrelated changes as part of :ticket:`3248`. Note that this is the rendering of the WITH clause after the INSERT, before the SELECT; the full functionality of CTEs rendered at the top level of INSERT, UPDATE, DELETE is a new feature targeted for a later release. .. change:: :tags: bug, ext :tickets: 3408 :versions: 1.0.4 Fixed bug where, when using the extended attribute instrumentation system, the correct exception would not be raised when :func:`.class_mapper` were called with an invalid input that also happened to not be weakly referenceable, such as an integer. .. change:: :tags: bug, tests, pypy :tickets: 3406 :versions: 1.0.4 Fixed an import that prevented "pypy setup.py test" from working correctly. .. change:: :tags: bug, engine :tickets: 3375 :versions: 1.0.1 Added the string value ``"none"`` to those accepted by the :paramref:`.Pool.reset_on_return` parameter as a synonym for ``None``, so that string values can be used for all settings, allowing utilities like :func:`.engine_from_config` to be usable without issue. .. change:: :tags: bug, sql :tickets: 3362 :versions: 1.0.0 Fixed issue where a :class:`.MetaData` object that used a naming convention would not properly work with pickle. The attribute was skipped leading to inconsistencies and failures if the unpickled :class:`.MetaData` object were used to base additional tables from. .. change:: :tags: bug, postgresql :tickets: 3354 :versions: 1.0.0 Fixed a long-standing bug where the :class:`.Enum` type as used with the psycopg2 dialect in conjunction with non-ascii values and ``native_enum=False`` would fail to decode return results properly. This stemmed from when the PG :class:`.postgresql.ENUM` type used to be a standalone type without a "non native" option. .. change:: :tags: bug, orm :tickets: 3349 :class:`.Query` doesn't support joins, subselects, or special FROM clauses when using the :meth:`.Query.update` or :meth:`.Query.delete` methods; instead of silently ignoring these fields if methods like :meth:`.Query.join` or :meth:`.Query.select_from` have been called, a warning is emitted. As of 1.0.0b5 this will raise an error.
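As a sketch of the :meth:`.Query.update` / :meth:`.Query.delete` warning described above, assuming hypothetical mapped classes ``User`` and ``Address``::

    # the join() is silently ignored by the DELETE; this now emits
    # a warning, and as of 1.0.0b5 raises an error
    session.query(User).\
        join(User.addresses).\
        filter(Address.email == 'x@example.com').\
        delete()

..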
change:: :tags: bug, orm :tickets: 3352 :versions: 1.0.0b5 Fixed bug where the state tracking within multiple, nested :meth:`.Session.begin_nested` operations would fail to propagate the "dirty" flag for an object that had been updated within the inner savepoint, such that if the enclosing savepoint were rolled back, the object would not be part of the state that was expired and therefore reverted to its database state. .. change:: :tags: bug, mysql, pymysql :tickets: 3337 :versions: 1.0.0b4 Fixed unicode support for PyMySQL when using an "executemany" operation with unicode parameters. SQLAlchemy now passes both the statement as well as the bound parameters as unicode objects, as PyMySQL generally uses string interpolation internally to produce the final statement, and in the case of executemany does the "encode" step only on the final statement. .. change:: :tags: bug, py3k, mysql :tickets: 3333 :pullreq: github:158 :versions: 1.0.0b2 Fixed the :class:`.mysql.BIT` type on Py3K which was not using the ``ord()`` function correctly. Pull request courtesy David Marin. .. change:: :tags: bug, ext :tickets: 3324 Fixed regression from 0.9.9 where the :func:`.as_declarative` symbol was removed from the ``sqlalchemy.ext.declarative`` namespace. .. change:: :tags: feature, orm :tickets: 3320 :versions: 1.0.0b1 Added a new entry ``"entity"`` to the dictionaries returned by :attr:`.Query.column_descriptions`. This refers to the primary ORM mapped class or aliased class that is referred to by the expression. Compared to the existing entry for ``"type"``, it will always be a mapped entity, even if extracted from a column expression, or None if the given expression is a pure core expression. See also :ticket:`3403` which repaired a regression in this feature which was unreleased in 0.9.10 but was released in the 1.0 version. .. changelog:: :version: 0.9.9 :released: March 10, 2015 .. change:: :tags: feature, postgresql :pullreq: bitbucket:45 :versions: 1.0.0b1 Added support for the ``CONCURRENTLY`` keyword with Postgresql indexes, established using ``postgresql_concurrently``. Pull request courtesy Iuri de Silvio. .. seealso:: :ref:`postgresql_index_concurrently` .. change:: :tags: bug, ext, py3k :pullreq: github:154 :versions: 1.0.0b1 Fixed bug where the association proxy list class would not interpret slices correctly under Py3K. Pull request courtesy Gilles Dartiguelongue. .. change:: :tags: feature, sqlite :pullreq: bitbucket:42 :versions: 1.0.0b1 Added support for partial indexes (e.g. with a WHERE clause) on SQLite. Pull request courtesy Kai Groner. .. seealso:: :ref:`sqlite_partial_index` .. change:: :tags: bug, orm :tickets: 3310 :versions: 1.0.0b1 Fixed bugs in ORM object comparisons where comparison of many-to-one ``!= None`` would fail if the source were an aliased class, or if the query needed to apply special aliasing to the expression due to aliased joins or polymorphic querying; also fixed bug in the case where comparing a many-to-one to an object state would fail if the query needed to apply special aliasing due to aliased joins or polymorphic querying. .. change:: :tags: bug, orm :tickets: 3309 :versions: 1.0.0b1 Fixed bug where internal assertion would fail in the case where an ``after_rollback()`` handler for a :class:`.Session` incorrectly adds state to that :class:`.Session` within the handler, and the task to warn and remove this state (established by :ticket:`2389`) attempts to proceed. .. 
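The many-to-one comparison fix for aliased classes described above covers expressions such as the following sketch, assuming a hypothetical mapped class ``Address`` with a many-to-one ``Address.user``::

    from sqlalchemy.orm import aliased

    some_alias = aliased(Address)

    # comparing a many-to-one from an aliased class to None now
    # renders the criteria against the correct alias
    q = session.query(some_alias).filter(some_alias.user != None)

..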
change:: :tags: bug, orm :pullreq: github:147 :versions: 1.0.0b1 Fixed bug where TypeError raised when :meth:`.Query.join` called with unknown kw arguments would raise its own TypeError due to broken formatting. Pull request courtesy Malthe Borch. .. change:: :tags: bug, engine :tickets: 3302 :versions: 1.0.0b1 Fixed bug in :class:`.Connection` and pool where the :meth:`.Connection.invalidate` method, or an invalidation due to a database disconnect, would fail if the ``isolation_level`` parameter had been used with :meth:`.Connection.execution_options`; the "finalizer" that resets the isolation level would be called on the no longer opened connection. .. change:: :tags: feature, orm :tickets: 3296 :versions: 1.0.0b1 Added new parameter :paramref:`.Session.connection.execution_options` which may be used to set up execution options on a :class:`.Connection` when it is first checked out, before the transaction has begun. This is used to set up options such as isolation level on the connection before the transaction starts. .. seealso:: :ref:`session_transaction_isolation` - new documentation section detailing best practices for setting transaction isolation with sessions. .. change:: :tags: bug, engine :tickets: 3296 :versions: 1.0.0b1 A warning is emitted if the ``isolation_level`` parameter is used with :meth:`.Connection.execution_options` when a :class:`.Transaction` is in play; DBAPIs and/or SQLAlchemy dialects such as psycopg2, MySQLdb may implicitly rollback or commit the transaction, or not change the setting til next transaction, so this is never safe. .. change:: :tags: bug, orm :tickets: 3300 :versions: 1.0.0b1 Fixed bug in lazy loading SQL construction whereby a complex primaryjoin that referred to the same "local" column multiple times in the "column that points to itself" style of self-referential join would not be substituted in all cases. The logic to determine substitutions here has been reworked to be more open-ended. .. change:: :tags: bug, postgresql :tickets: 2940 :versions: 1.0.0b1 Repaired support for Postgresql UUID types in conjunction with the ARRAY type when using psycopg2. The psycopg2 dialect now employs use of the psycopg2.extras.register_uuid() hook so that UUID values are always passed to/from the DBAPI as UUID() objects. The :paramref:`.UUID.as_uuid` flag is still honored, except with psycopg2 we need to convert returned UUID objects back into strings when this is disabled. .. change:: :tags: bug, postgresql :pullreq: github:145 :versions: 1.0.0b1 Added support for the :class:`postgresql.JSONB` datatype when using psycopg2 2.5.4 or greater, which features native conversion of JSONB data so that SQLAlchemy's converters must be disabled; additionally, the newly added psycopg2 extension ``extras.register_default_jsonb`` is used to establish a JSON deserializer passed to the dialect via the ``json_deserializer`` argument. Also repaired the Postgresql integration tests which weren't actually round-tripping the JSONB type as opposed to the JSON type. Pull request courtesy Mateusz Susik. .. change:: :tags: bug, postgresql :versions: 1.0.0b1 Repaired the use of the "array_oid" flag when registering the HSTORE type with older psycopg2 versions < 2.4.3, which does not support this flag, as well as use of the native json serializer hook "register_default_json" with user-defined ``json_deserializer`` on psycopg2 versions < 2.5, which does not include native json. .. 
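The ``json_deserializer`` argument mentioned above is passed to :func:`.create_engine`; a minimal sketch, with a hypothetical database URL::

    import json
    from sqlalchemy import create_engine

    # a custom deserializer handed to the psycopg2 dialect
    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        json_deserializer=json.loads
    )

..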
change:: :tags: bug, schema :tickets: 3298, 1765 Fixed bug in 0.9's foreign key setup system, such that the logic used to link a :class:`.ForeignKey` to its parent could fail when the foreign key used "link_to_name=True" in conjunction with a target :class:`.Table` that would not receive its parent column until later, such as within a reflection + "useexisting" scenario, if the target column in fact had a key value different from its name, as would occur in reflection if column reflect events were used to alter the .key of reflected :class:`.Column` objects so that the link_to_name becomes significant. Also repaired support for column type via FK transmission in a similar way when target columns had a different key and were referenced using link_to_name. .. change:: :tags: feature, engine :versions: 1.0.0b1 Added new user-space accessors for viewing transaction isolation levels; :meth:`.Connection.get_isolation_level`, :attr:`.Connection.default_isolation_level`. .. change:: :tags: bug, postgresql :versions: 1.0.0b1 :tickets: 3174 Fixed bug where Postgresql dialect would fail to render an expression in an :class:`.Index` that did not correspond directly to a table-bound column; typically when a :func:`.text` construct was one of the expressions within the index; or could misinterpret the list of expressions if one or more of them were such an expression. .. change:: :tags: bug, orm :versions: 1.0.0b1 :tickets: 3287 The "wildcard" loader options, in particular the one set up by the :func:`.orm.load_only` option to cover all attributes not explicitly mentioned, now takes into account the superclasses of a given entity, if that entity is mapped with inheritance mapping, so that attribute names within the superclasses are also omitted from the load. Additionally, the polymorphic discriminator column is unconditionally included in the list, just in the same way that primary key columns are, so that even with load_only() set up, polymorphic loading of subtypes continues to function correctly. .. change:: :tags: bug, sql :versions: 1.0.0b1 :pullreq: bitbucket:41 Added the ``native_enum`` flag to the ``__repr__()`` output of :class:`.Enum`, which is mostly important when using it with Alembic autogenerate. Pull request courtesy Dimitris Theodorou. .. change:: :tags: bug, orm, pypy :versions: 1.0.0b1 :tickets: 3285 Fixed bug where if an exception were thrown at the start of a :class:`.Query` before it fetched results, particularly when row processors can't be formed, the cursor would stay open with results pending and not actually be closed. This is typically only an issue on an interpreter like Pypy where the cursor isn't immediately GC'ed, and can in some circumstances lead to transactions/ locks being open longer than is desirable. .. change:: :tags: change, mysql :versions: 1.0.0b1 :tickets: 3275 The ``gaerdbms`` dialect is no longer necessary, and emits a deprecation warning. Google now recommends using the MySQLdb dialect directly. .. change:: :tags: bug, sql :versions: 1.0.0b1 :tickets: 3278 Fixed bug where using a :class:`.TypeDecorator` that implemented a type that was also a :class:`.TypeDecorator` would fail with Python's "Cannot create a consistent method resolution order (MRO)" error, when any kind of SQL comparison expression were used against an object using this type. .. change:: :tags: bug, mysql :versions: 1.0.0b1 :tickets: 3274 Added a version check to the MySQLdb dialect surrounding the check for 'utf8_bin' collation, as this fails on MySQL server < 5.0. .. 
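A minimal sketch of the :class:`.TypeDecorator` scenario repaired above; the class names are hypothetical::

    from sqlalchemy import Integer
    from sqlalchemy.types import TypeDecorator

    class CoercingInt(TypeDecorator):
        impl = Integer

    class ValidatingInt(TypeDecorator):
        # impl is itself a TypeDecorator; SQL comparison expressions
        # against this type no longer raise the MRO error
        impl = CoercingInt

..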
change:: :tags: feature, orm :versions: 1.0.0b1 Added new method :meth:`.Session.invalidate`, which functions similarly to :meth:`.Session.close`, except that it also calls :meth:`.Connection.invalidate` on all connections, guaranteeing that they will not be returned to the connection pool. This is useful in situations e.g. dealing with gevent timeouts when it is not safe to use the connection further, even for rollbacks. .. change:: :tags: bug, examples :versions: 1.0.0b1 Updated the :ref:`examples_versioned_history` example such that mapped columns are re-mapped to match column names as well as grouping of columns; in particular, this allows columns that are explicitly grouped in a same-column-named joined inheritance scenario to be mapped in the same way in the history mappings, avoiding warnings added in the 0.9 series regarding this pattern and allowing the same view of attribute keys. .. change:: :tags: bug, examples :versions: 1.0.0b1 Fixed a bug in the examples/generic_associations/discriminator_on_association.py example, where the subclasses of AddressAssociation were not being mapped as "single table inheritance", leading to problems when trying to use the mappings further. .. change:: :tags: bug, orm :versions: 1.0.0b1 :tickets: 3251 Fixed a leak which would occur in the unsupported and highly non-recommended use case of replacing a relationship on a fixed mapped class many times, referring to an arbitrarily growing number of target mappers. A warning is emitted when the old relationship is replaced, however if the mapping were already used for querying, the old relationship would still be referenced within some registries. .. change:: :tags: bug, sql :versions: 1.0.0b1 :tickets: 3248 Fixed issue where the columns from a SELECT embedded in an INSERT, either through the values clause or as a "from select", would pollute the column types used in the result set produced by the RETURNING clause when columns from both statements shared the same name, leading to potential errors or mis-adaptation when retrieving the returning rows. .. change:: :tags: bug, orm, sqlite :versions: 1.0.0b1 :tickets: 3241 Fixed bug regarding expression mutations which could express themselves as a "Could not locate column" error when using :class:`.Query` to select from multiple, anonymous column entities when querying against SQLite, as a side effect of the "join rewriting" feature used by the SQLite dialect. .. change:: :tags: feature, sqlite :versions: 1.0.0b1 Added a new SQLite backend for SQLCipher. This backend provides for encrypted SQLite databases using the pysqlcipher Python driver, which is very similar to the pysqlite driver. .. seealso:: :mod:`~sqlalchemy.dialects.sqlite.pysqlcipher` .. change:: :tags: bug, orm :tickets: 3232 :versions: 1.0.0b1 Fixed bug where the ON clause for :meth:`.Query.join`, and :meth:`.Query.outerjoin` to a single-inheritance subclass using ``of_type()`` would not render the "single table criteria" in the ON clause if the ``from_joinpoint=True`` flag were set. .. changelog:: :version: 0.9.8 :released: October 13, 2014 .. change:: :tags: bug, mysql, mysqlconnector :versions: 1.0.0b1 Mysqlconnector as of version 2.0, probably as a side effect of the python 3 merge, now does not expect percent signs (e.g. as used as the modulus operator and others) to be doubled, even when using the "pyformat" bound parameter format (this change is not documented by Mysqlconnector).
The dialect now checks for py2k and for mysqlconnector less than version 2.0 when detecting if the modulus operator should be rendered as ``%%`` or ``%``. .. change:: :tags: bug, mysql, mysqlconnector :versions: 1.0.0b1 Unicode SQL is now passed for MySQLconnector version 2.0 and above; for Py2k and MySQL < 2.0, strings are encoded. .. change:: :tags: bug, oracle :versions: 1.0.0b1 :tickets: 2138 Fixed long-standing bug in Oracle dialect where bound parameter names that started with numbers would not be quoted, as Oracle doesn't like numerics in bound parameter names. .. change:: :tags: bug, sql :versions: 1.0.0b1 :tickets: 3195 Fixed bug where a fair number of SQL elements within the sql package would fail to ``__repr__()`` successfully, due to a missing ``description`` attribute that would then invoke a recursion overflow when an internal AttributeError would then re-invoke ``__repr__()``. .. change:: :tags: bug, declarative, orm :versions: 1.0.0b1 :tickets: 3185 Fixed "'NoneType' object has no attribute 'concrete'" error when using :class:`.AbstractConcreteBase` in conjunction with a subclass that declares ``__abstract__``. .. change:: :tags: bug, engine :versions: 1.0.0b1 :tickets: 3200 The execution options passed to an :class:`.Engine` either via :paramref:`.create_engine.execution_options` or :meth:`.Engine.update_execution_options` are not passed to the special :class:`.Connection` used to initialize the dialect within the "first connect" event; dialects will usually perform their own queries in this phase, and none of the current available options should be applied here. In particular, the "autocommit" option was causing an attempt to autocommit within this initial connect which would fail with an AttributeError due to the non-standard state of the :class:`.Connection`. .. change:: :tags: bug, sqlite :versions: 1.0.0b1 :tickets: 3211 When selecting from a UNION using an attached database file, the pysqlite driver reports column names in cursor.description as 'dbname.tablename.colname', instead of 'tablename.colname' as it normally does for a UNION (note that it's supposed to just be 'colname' for both, but we work around it). The column translation logic here has been adjusted to retrieve the rightmost token, rather than the second token, so it works in both cases. Workaround courtesy Tony Roberts. .. change:: :tags: bug, postgresql :versions: 1.0.0b1 :tickets: 3021 A revisit to this issue first patched in 0.9.5, apparently psycopg2's ``.closed`` accessor is not as reliable as we assumed, so we have added an explicit check for the exception messages "SSL SYSCALL error: Bad file descriptor" and "SSL SYSCALL error: EOF detected" when detecting an is-disconnect scenario. We will continue to consult psycopg2's connection.closed as a first check. .. change:: :tags: bug, orm, engine :versions: 1.0.0b1 :tickets: 3197 Fixed bug that affected generally the same classes of event as that of :ticket:`3199`, when the ``named=True`` parameter would be used. Some events would fail to register, and others would not invoke the event arguments correctly, generally in the case of when an event was "wrapped" for adaption in some other way. The "named" mechanics have been rearranged to not interfere with the argument signature expected by internal wrapper functions. .. 
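The ``named=True`` mechanics referred to above may be sketched as follows, assuming a hypothetical mapped class ``MyClass``::

    from sqlalchemy import event

    # with named=True, all event arguments arrive as keywords
    @event.listens_for(MyClass, "before_insert", named=True)
    def on_before_insert(**kw):
        mapper, connection, target = (
            kw["mapper"], kw["connection"], kw["target"])

..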
change:: :tags: bug, declarative :versions: 1.0.0b1 :tickets: 3208 Fixed an unlikely race condition observed in some exotic end-user setups, where the attempt to check for "duplicate class name" in declarative would hit upon a not-totally-cleaned-up weak reference related to some other class being removed; the check here now ensures the weakref still references an object before calling upon it further. .. change:: :tags: bug, orm :versions: 1.0.0b1 :tickets: 3199 Fixed bug that affected many classes of event, particularly ORM events but also engine events, where the usual logic of "de duplicating" a redundant call to :func:`.event.listen` with the same arguments would fail, for those events where the listener function is wrapped. An assertion would be hit within registry.py. This assertion has now been integrated into the deduplication check, with the added bonus of a simpler means of checking deduplication across the board. .. change:: :tags: bug, mssql :versions: 1.0.0b1 :tickets: 3151 Fixed the version string detection in the pymssql dialect to work with Microsoft SQL Azure, which changes the word "SQL Server" to "SQL Azure". .. change:: :tags: bug, orm :versions: 1.0.0b1 :tickets: 3194 Fixed warning that would emit when a complex self-referential primaryjoin contained functions, while at the same time remote_side was specified; the warning would suggest setting "remote side". It now only emits if remote_side isn't present. .. change:: :tags: bug, ext :versions: 1.0.0b1 :tickets: 3191 Fixed bug in ordering list where the order of items would be thrown off during a collection replace event, if the reorder_on_append flag were set to True. The fix ensures that the ordering list only impacts the list that is explicitly associated with the object. .. change:: :tags: bug, sql :versions: 1.0.0b1 :tickets: 3180 An adjustment to table/index reflection such that if an index reports a column that isn't found to be present in the table, a warning is emitted and the column is skipped. This can occur for some special system column situations as has been observed with Oracle. .. change:: :tags: bug, ext :versions: 1.0.0b1 :pullrequest: bitbucket:28 Fixed bug where :class:`.ext.mutable.MutableDict` failed to implement the ``update()`` dictionary method, thus not catching changes. Pull request courtesy Matt Chisholm. .. change:: :tags: bug, ext :versions: 1.0.0b1 :pullrequest: bitbucket:27 Fixed bug where a custom subclass of :class:`.ext.mutable.MutableDict` would not show up in a "coerce" operation, and would instead return a plain :class:`.ext.mutable.MutableDict`. Pull request courtesy Matt Chisholm. .. change:: :tags: bug, pool :versions: 1.0.0b1 :tickets: 3168 Fixed bug in connection pool logging where the "connection checked out" debug logging message would not emit if the logging were set up using ``logging.setLevel()``, rather than using the ``echo_pool`` flag. Tests to assert this logging have been added. This is a regression that was introduced in 0.9.0. .. change:: :tags: feature, postgresql, pg8000 :versions: 1.0.0b1 :pullreq: github:125 Support is added for "sane multi row count" with the pg8000 driver, which applies mostly to when using versioning with the ORM. The feature is version-detected based on pg8000 1.9.14 or greater in use. Pull request courtesy Tony Locke. .. 
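A minimal sketch of the ``logging.setLevel()`` style of pool logging referred to above::

    import logging

    logging.basicConfig()

    # debug-level pool logging now emits the "connection checked out"
    # message even without the echo_pool flag
    logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG)

..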
change:: :tags: bug, engine :versions: 1.0.0b1 :tickets: 3165 The string keys that are used to determine the columns impacted for an INSERT or UPDATE are now sorted when they contribute towards the "compiled cache" cache key. These keys were previously not deterministically ordered, meaning the same statement could be cached multiple times on equivalent keys, costing both in terms of memory as well as performance. .. change:: :tags: bug, postgresql :versions: 1.0.0b1 :tickets: 3159 Fixed bug where Postgresql JSON type was not able to persist or otherwise render a SQL NULL column value, rather than a JSON-encoded ``'null'``. To support this case, changes are as follows:

* The value :func:`.null` can now be specified, which will always result in a NULL value in the statement.

* A new parameter :paramref:`.JSON.none_as_null` is added, which when True indicates that the Python ``None`` value should be persisted as SQL NULL, rather than JSON-encoded ``'null'``.

Retrieval of NULL as None is also repaired for DBAPIs other than psycopg2, namely pg8000. .. change:: :tags: bug, sql :versions: 1.0.0b1 :tickets: 3154 Fixed bug in CTE where ``literal_binds`` compiler argument would not always be correctly propagated when one CTE referred to another aliased CTE in a statement. .. change:: :tags: bug, postgresql :versions: 1.0.0b1 :tickets: 3075 The exception wrapping system for DBAPI errors can now accommodate non-standard DBAPI exceptions, such as the psycopg2 TransactionRollbackError. These exceptions will now be raised using the closest available subclass in ``sqlalchemy.exc``, in the case of TransactionRollbackError, ``sqlalchemy.exc.OperationalError``. .. change:: :tags: bug, sql :versions: 1.0.0b1 :tickets: 3144, 3067 Fixed 0.9.7 regression caused by :ticket:`3067` in conjunction with a mis-named unit test such that so-called "schema" types like :class:`.Boolean` and :class:`.Enum` could no longer be pickled. .. change:: :tags: bug, postgresql :versions: 1.0.0b1 :tickets: 3141 :pullreq: github:124 Fixed bug in :class:`.postgresql.array` object where comparison to a plain Python list would fail to use the correct array constructor. Pull request courtesy Andrew. .. change:: :tags: bug, postgresql :versions: 1.0.0b1 :tickets: 3137 Added a supported :meth:`.FunctionElement.alias` method to functions, e.g. the ``func`` construct. Previously, behavior for this method was undefined. The current behavior mimics that of pre-0.9.4, which is that the function is turned into a single-column FROM clause with the given alias name, where the column itself is anonymously named. .. changelog:: :version: 0.9.7 :released: July 22, 2014 .. change:: :tags: bug, postgresql, pg8000 :tickets: 3134 :versions: 1.0.0b1 Fixed bug introduced in 0.9.5 by new pg8000 isolation level feature where engine-level isolation level parameter would raise an error on connect. .. change:: :tags: bug, oracle, tests :tickets: 3128 :versions: 1.0.0b1 Fixed bug in oracle dialect test suite where in one test, 'username' was assumed to be in the database URL, even though this might not be the case.
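The engine-level isolation level parameter referred to above is sketched below, with a hypothetical database URL::

    from sqlalchemy import create_engine

    # engine-wide isolation level with the pg8000 DBAPI
    engine = create_engine(
        "postgresql+pg8000://scott:tiger@localhost/test",
        isolation_level="SERIALIZABLE"
    )

..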
change:: :tags: bug, orm, eagerloading :tickets: 3131 :versions: 1.0.0b1 Fixed a regression caused by :ticket:`2976` released in 0.9.4 where the "outer join" propagation along a chain of joined eager loads would incorrectly convert an "inner join" along a sibling join path into an outer join as well, when only descendant paths should be receiving the "outer join" propagation; additionally, fixed related issue where "nested" join propagation would take place inappropriately between two sibling join paths. .. change:: :tags: bug, sqlite :tickets: 3130 :versions: 1.0.0b1 Fixed a SQLite join rewriting issue where a subquery that is embedded as a scalar subquery such as within an IN would receive inappropriate substitutions from the enclosing query, if the same table were present inside the subquery as were in the enclosing query such as in a joined inheritance scenario. .. change:: :tags: bug, sql :tickets: 3067 :versions: 1.0.0b1 Fixed bug in naming convention feature where using a check constraint convention that includes ``constraint_name`` would then force all :class:`.Boolean` and :class:`.Enum` types to require names as well, as these implicitly create a constraint, even if the ultimate target backend were one that does not require generation of the constraint such as Postgresql. The mechanics of naming conventions for these particular constraints have been reorganized such that the naming determination is done at DDL compile time, rather than at constraint/table construction time. .. change:: :tags: bug, mssql :tickets: 3025 Fixed a regression from 0.9.5 caused by :ticket:`3025` where the query used to determine "default schema" is invalid in SQL Server 2000. For SQL Server 2000 we go back to defaulting to the "schema name" parameter of the dialect, which is configurable but defaults to 'dbo'. .. change:: :tags: bug, orm :tickets: 3083, 2736 :versions: 1.0.0b1 Fixed a regression from 0.9.0 due to :ticket:`2736` where the :meth:`.Query.select_from` method no longer set up the "from entity" of the :class:`.Query` object correctly, so that subsequent :meth:`.Query.filter_by` or :meth:`.Query.join` calls would fail to check the appropriate "from" entity when searching for attributes by string name. .. change:: :tags: bug, sql :tickets: 3090 :versions: 1.0.0b1 Fixed bug in common table expressions whereby positional bound parameters could be expressed in the wrong final order when CTEs were nested in certain ways. .. change:: :tags: bug, sql :tickets: 3069 :versions: 1.0.0b1 Fixed bug where multi-valued :class:`.Insert` construct would fail to check subsequent values entries beyond the first one given for literal SQL expressions. .. change:: :tags: bug, sql :tickets: 3123 :versions: 1.0.0b1 Added a "str()" step to the dialect_kwargs iteration for Python version < 2.6.5, working around the "no unicode keyword arg" bug as these args are passed along as keyword args within some reflection processes. .. change:: :tags: bug, sql :tickets: 3122 :versions: 1.0.0b1 The :meth:`.TypeEngine.with_variant` method will now accept a type class as an argument which is internally converted to an instance, using the same convention long established by other constructs such as :class:`.Column`. .. change:: :tags: bug, orm :tickets: 3117 The "evaluator" for query.update()/delete() won't work with multi-table updates, and needs to be set to ``synchronize_session=False`` or ``synchronize_session='fetch'``; a warning is now emitted. In 1.0 this will be promoted to a full exception.
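A sketch of the multi-table update scenario described above, assuming hypothetical mapped classes ``User`` and ``Address``::

    # criteria against another table means the default "evaluate"
    # strategy can't proceed; synchronize_session must be stated
    session.query(User).\
        filter(User.id == Address.user_id).\
        filter(Address.email == 'x@example.com').\
        update({User.name: 'updated'}, synchronize_session=False)

..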
change:: :tags: bug, tests :versions: 1.0.0b1 Fixed bug where "python setup.py test" wasn't calling into distutils appropriately, and errors would be emitted at the end of the test suite. .. change:: :tags: feature, postgresql :versions: 1.0.0b1 :pullreq: bitbucket:22 :tickets: 3078 Added kw argument ``postgresql_regconfig`` to the :meth:`.ColumnOperators.match` operator, which allows the "reg config" argument to be specified to the ``to_tsquery()`` function emitted. Pull request courtesy Jonathan Vanasco. .. change:: :tags: feature, postgresql :versions: 1.0.0b1 :pullreq: github:101 Added support for Postgresql JSONB via :class:`.JSONB`. Pull request courtesy Damian Dimmich. .. change:: :tags: feature, mssql :pullreq: github:98 :versions: 1.0.0b1 Enabled "multivalues insert" for SQL Server 2008. Pull request courtesy Albert Cervin. Also expanded the checks for "IDENTITY INSERT" mode to include when the identity key is present in the VALUEs clause of the statement. .. change:: :tags: feature, engine :tickets: 3076 :versions: 1.0.0b1 Added new event :meth:`.ConnectionEvents.handle_error`, a more fully featured and comprehensive replacement for :meth:`.ConnectionEvents.dbapi_error`. .. change:: :tags: bug, orm :tickets: 3108 :versions: 1.0.0b1 Fixed bug where items that were persisted, deleted, or had a primary key change within a savepoint block would not participate in being restored to their former state (not in session, in session, previous PK) after the outer transaction were rolled back. .. change:: :tags: bug, orm :tickets: 3106 :versions: 1.0.0b1 Fixed bug in subquery eager loading in conjunction with :func:`.with_polymorphic`; the targeting of entities and columns in the subquery load has been made more accurate with respect to this type of entity and others. .. change:: :tags: bug, orm :tickets: 3099 Fixed bug involving dynamic attributes, that was again a regression of :ticket:`3060` from version 0.9.5. A self-referential relationship with lazy='dynamic' would raise a TypeError within a flush operation. .. change:: :tags: bug, declarative :tickets: 3097 :versions: 1.0.0b1 Fixed bug when the declarative ``__abstract__`` flag was not being distinguished for when it was actually the value ``False``. The ``__abstract__`` flag needs to actually evaluate to a True value at the level being tested. .. changelog:: :version: 0.9.6 :released: June 23, 2014 .. change:: :tags: bug, orm :tickets: 3060 Reverted the change for :ticket:`3060` - this is a unit of work fix that is updated more comprehensively in 1.0 via :ticket:`3061`. The fix in :ticket:`3060` unfortunately produces a new issue whereby an eager load of a many-to-one attribute can produce an event that is interpreted into an attribute change. .. changelog:: :version: 0.9.5 :released: June 23, 2014 .. change:: :tags: bug, orm :tickets: 3042 :versions: 1.0.0b1 Additional checks have been added for the case where an inheriting mapper is implicitly combining one of its column-based attributes with that of the parent, where those columns normally don't necessarily share the same value. This is an extension of an existing check that was added via :ticket:`1892`; however this new check emits only a warning, instead of an exception, to allow for applications that may be relying upon the existing behavior. .. seealso:: :ref:`faq_combining_columns`
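The implicit column combination warned about above arises from mappings along the lines of the following sketch; the class names are hypothetical::

    from sqlalchemy import Column, ForeignKey, Integer
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class A(Base):
        __tablename__ = 'a'
        id = Column(Integer, primary_key=True)
        foo = Column(Integer)

    class B(A):
        __tablename__ = 'b'
        id = Column(Integer, ForeignKey('a.id'), primary_key=True)
        foo = Column(Integer)  # implicitly combined with A.foo; now warns

..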
change:: :tags: bug, sql :tickets: 3023 :versions: 1.0.0b1 The :paramref:`.Column.nullable` flag is implicitly set to ``False`` when that :class:`.Column` is referred to in an explicit :class:`.PrimaryKeyConstraint` for that table. This behavior now matches that of when the :class:`.Column` itself has the :paramref:`.Column.primary_key` flag set to ``True``, which is intended to be an exactly equivalent case. .. change:: :tags: enhancement, postgresql :tickets: 3002 :versions: 1.0.0b1 Added a new type :class:`.postgresql.OID` to the Postgresql dialect. While "oid" is generally a private type within PG that is not exposed in modern versions, there are some PG use cases such as large object support where these types might be exposed, as well as within some user-reported schema reflection use cases. .. change:: :tags: bug, orm :tickets: 3080 :versions: 1.0.0b1 Modified the behavior of :func:`.orm.load_only` such that primary key columns are always added to the list of columns to be "undeferred"; otherwise, the ORM can't load the row's identity. Apparently, one can defer the mapped primary keys and the ORM will fail; that hasn't been changed. But as load_only is essentially saying "defer all but X", it's more critical that PK cols not be part of this deferral. .. change:: :tags: feature, examples :pullreq: bitbucket:21 :versions: 1.0.0b1 Added a new example illustrating materialized paths, using the latest relationship features. Example courtesy Jack Zhou. .. change:: :tags: bug, testsuite :pullreq: github:95 :versions: 1.0.0b1 In the public test suite, changed to use of ``String(40)`` from less-supported ``Text`` in ``StringTest.test_literal_backslashes``. Pullreq courtesy Jan. .. change:: :tags: bug, engine :versions: 1.0.0b1 :tickets: 3063 Fixed bug which would occur if a DBAPI exception occurs when the engine first connects and does its initial checks, and the exception is not a disconnect exception, yet the cursor raises an error when we try to close it. In this case the real exception would be quashed as we tried to log the cursor close exception via the connection pool and failed, as we were trying to access the pool's logger in a way that is inappropriate in this very specific scenario. .. change:: :tags: feature, postgresql :versions: 1.0.0b1 :pullreq: github:88 Added support for AUTOCOMMIT isolation level when using the pg8000 DBAPI. Pull request courtesy Tony Locke. .. change:: :tags: bug, postgresql :tickets: 3021 :versions: 1.0.0b1 :pullreq: github:87 The psycopg2 ``.closed`` accessor is now consulted when determining if an exception is a "disconnect" error; ideally, this should remove the need for any other inspection of the exception message to detect disconnect, however we will leave those existing messages in place as a fallback. This should be able to handle newer cases like "SSL EOF" conditions. Pull request courtesy Dirk Mueller. .. change:: :tags: bug, orm :tickets: 3060 :versions: 1.0.0b1 Fixed a few edge cases which arise in the so-called "row switch" scenario, where an INSERT/DELETE can be turned into an UPDATE. In this situation, a many-to-one relationship set to None, or in some cases a scalar attribute set to None, may not be detected as a net change in value, and therefore the UPDATE would not reset what was on the previous row. This is due to some as-yet unresolved side effects of the way attribute history works in terms of implicitly assuming None isn't really a "change" for a previously un-set attribute. See also :ticket:`3061`. ..
note:: This change has been **REVERTED** in 0.9.6. The full fix will be in version 1.0 of SQLAlchemy. .. change:: :tags: bug, orm :versions: 1.0.0b1 Related to :ticket:`3060`, an adjustment has been made to the unit of work such that loading for related many-to-one objects is slightly more aggressive, in the case of a graph of self-referential objects that are to be deleted; the load of related objects is to help determine the correct order for deletion if passive_deletes is not set. .. change:: :tags: bug, orm :tickets: 3057 :versions: 1.0.0b1 Fixed bug in SQLite join rewriting where anonymized column names due to repeats would not correctly be rewritten in subqueries. This would affect SELECT queries with any kind of subquery + join. .. change:: :tags: bug, sql :tickets: 3012 :versions: 1.0.0b1 Fixed bug where the :meth:`.Operators.__and__`, :meth:`.Operators.__or__` and :meth:`.Operators.__invert__` operator overload methods could not be overridden within a custom :class:`.TypeEngine.Comparator` implementation. .. change:: :tags: feature, postgresql :tickets: 2785 :pullreq: bitbucket:18 :versions: 1.0.0b1 Added a new flag :paramref:`.ARRAY.zero_indexes` to the Postgresql :class:`.ARRAY` type. When set to ``True``, a value of one will be added to all array index values before passing to the database, allowing better interoperability between Python style zero-based indexes and Postgresql one-based indexes. Pull request courtesy Alexey Terentev. .. change:: :tags: bug, engine :tickets: 3043 :versions: 1.0.0b1 Fixed some "double invalidate" situations that were detected, where a connection invalidation could occur within an already critical section like a connection.close(); ultimately, these conditions are caused by the change in :ticket:`2907`, in that the "reset on return" feature calls out to the Connection/Transaction in order to handle it, where "disconnect detection" might be caught. However, it's possible that the more recent change in :ticket:`2985` made it more likely for this to be seen, as the "connection invalidate" operation is much quicker; the issue is more reproducible on 0.9.4 than 0.9.3. Checks are now added within any section that an invalidate might occur to halt further disallowed operations on the invalidated connection. This includes two fixes both at the engine level and at the pool level. While the issue was observed with highly concurrent gevent cases, it could in theory occur in any kind of scenario where a disconnect occurs within the connection close operation. .. change:: :tags: feature, orm :tickets: 3029 :versions: 1.0.0b1 The "primaryjoin" model has been stretched a bit further to allow a join condition that is strictly from a single column to itself, translated through some kind of SQL function or expression. This is kind of experimental, but the first proof of concept is a "materialized path" join condition where a path string is compared to itself using "like". The :meth:`.ColumnOperators.like` operator has also been added to the list of valid operators to use in a primaryjoin condition. .. change:: :tags: feature, sql :tickets: 3028 :versions: 1.0.0b1 Liberalized the contract for :class:`.Index` a bit in that you can specify a :func:`.text` expression as the target; the index no longer needs to have a table-bound column present if the index is to be manually added to the table, either via inline declaration or via :meth:`.Table.append_constraint`.
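A minimal sketch of the liberalized :class:`.Index` contract described above; ``my_table`` is a hypothetical :class:`.Table`::

    from sqlalchemy import Index, text

    # an index against an expression, with no table-bound column
    idx = Index('my_idx', text('lower(name)'))

    # associate it with a table explicitly
    my_table.append_constraint(idx)

..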
change:: :tags: bug, firebird :tickets: 3038 Fixed bug where the combination of "limit" rendering as "SELECT FIRST n ROWS" using a bound parameter (only firebird has both), combined with column-level subqueries which also feature "limit" as well as "positional" bound parameters (e.g. qmark style) would erroneously assign the subquery-level positions before that of the enclosing SELECT, thus returning parameters which are out of order. .. change:: :tags: bug, mssql :tickets: 3025 :versions: 1.0.0b1 Revised the query used to determine the current default schema name to use the ``database_principal_id()`` function in conjunction with the ``sys.database_principals`` view so that we can determine the default schema independently of the type of login in progress (e.g., SQL Server, Windows, etc). .. change:: :tags: bug, sql :tickets: 3024 :versions: 1.0.0b1 Fixed bug in new :meth:`.DialectKWArgs.argument_for` method where adding an argument for a construct not previously included for any special arguments would fail. .. change:: :tags: bug, py3k, tests :tickets: 2830 :pullreq: bitbucket:2830 :versions: 1.0.0b1 Corrected for some deprecation warnings involving the ``imp`` module and Python 3.3 or greater, when running tests. Pull request courtesy Matt Chisholm. .. change:: :tags: bug, sql :tickets: 3020, 1068 :versions: 1.0.0b1 Fixed regression introduced in 0.9 where the new "ORDER BY <label>" feature from :ticket:`1068` would not apply quoting rules to the label name as rendered in the ORDER BY. .. change:: :tags: feature, orm :tickets: 3017 :versions: 1.0.0b1 Added new utility function :func:`.make_transient_to_detached` which can be used to manufacture objects that behave as though they were loaded from a session, then detached. Attributes that aren't present are marked as expired, and the object can be added to a Session where it will act like a persistent one. .. change:: :tags: bug, sql :versions: 1.0.0b1 Restored the import for :class:`.Function` to the ``sqlalchemy.sql.expression`` import namespace, which was removed at the beginning of 0.9. .. change:: :tags: bug, orm, sql :tickets: 3013 :versions: 1.0.0b1 Fixes to the newly enhanced boolean coercion in :ticket:`2804` where the new rules for "where" and "having" wouldn't take effect for the "whereclause" and "having" kw arguments of the :func:`.select` construct, which is also what :class:`.Query` uses so wasn't working in the ORM either. .. change:: :tags: feature, sql :tickets: 2990 :versions: 1.0.0b1 Added new flag :paramref:`.expression.between.symmetric`, when set to True renders "BETWEEN SYMMETRIC". Also added a new negation operator "notbetween_op", which now allows an expression like ``~col.between(x, y)`` to render as "col NOT BETWEEN x AND y", rather than a parenthesized NOT string. .. changelog:: :version: 0.9.4 :released: March 28, 2014 .. change:: :tags: feature, orm :tickets: 3007 Added new parameter :paramref:`.orm.mapper.confirm_deleted_rows`. Defaults to True, indicates that a series of DELETE statements should confirm that the cursor rowcount matches the number of primary keys that should have matched; this behavior had been taken off in most cases (except when version_id is used) to support the unusual edge case of self-referential ON DELETE CASCADE; to accommodate this, the message is now just a warning, not an exception, and the flag can be used to indicate a mapping that expects self-referential cascaded deletes of this nature. See also :ticket:`2403` for background on the original change.
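The :paramref:`.orm.mapper.confirm_deleted_rows` flag described above may be sketched as follows; the class and table names are hypothetical::

    from sqlalchemy.orm import mapper

    # a mapping that expects self-referential cascaded deletes;
    # disable the DELETE rowcount check
    mapper(MyClass, my_table, confirm_deleted_rows=False)

..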
change:: :tags: bug, ext, automap :tickets: 3004 Added support to automap for the case where a relationship should not be created between two classes that are in a joined inheritance relationship, for those foreign keys that link the subclass back to the superclass. .. change:: :tags: bug, orm :tickets: 2948 Fixed a very old behavior where the lazy load emitted for a one-to-many could inappropriately pull in the parent table, and also return results inconsistent based on what's in the parent table, when the primaryjoin includes some kind of discriminator against the parent table, such as ``and_(parent.id == child.parent_id, parent.deleted == False)``. While this primaryjoin doesn't make that much sense for a one-to-many, it is slightly more common when applied to the many-to-one side, and the one-to-many comes as a result of a backref. Loading rows from ``child`` in this case would keep ``parent.deleted == False`` as is within the query, thereby yanking it into the FROM clause and doing a cartesian product. The new behavior will now substitute the value of the local "parent.deleted" for that parameter as is appropriate. Though typically, a real-world app probably wants to use a different primaryjoin for the o2m side in any case. .. change:: :tags: bug, orm :tickets: 2965 Improved the check for "how to join from A to B" such that when a table has multiple, composite foreign keys targeting a parent table, the :paramref:`.relationship.foreign_keys` argument will be properly interpreted in order to resolve the ambiguity; previously this condition would raise that there were multiple FK paths when in fact the foreign_keys argument should be establishing which one is expected. .. change:: :tags: bug, mysql Tweaked the settings for mysql-connector-python; in Py2K, the "supports unicode statements" flag is now False, so that SQLAlchemy will encode the *SQL string* (note: *not* the parameters) to bytes before sending to the database. This seems to allow all unicode-related tests to pass for mysql-connector, including those that use non-ascii table/column names, as well as some tests for the TEXT type using unicode under cursor.executemany(). .. change:: :tags: feature, engine Added some new event mechanics for dialect-level events; the initial implementation allows an event handler to redefine the specific mechanics by which an arbitrary dialect invokes execute() or executemany() on a DBAPI cursor. The new events, at this point semi-public and experimental, are in support of some upcoming transaction-related extensions. .. change:: :tags: feature, engine :tickets: 2978 An event listener can now be associated with a :class:`.Engine`, after one or more :class:`.Connection` objects have been created (such as by an orm :class:`.Session` or via explicit connect) and the listener will pick up events from those connections. Previously, performance concerns pushed the event transfer from :class:`.Engine` to :class:`.Connection` at init-time only, but we've inlined a bunch of conditional checks to make this possible without any additional function calls. .. change:: :tags: bug, tests :tickets: 2980 Fixed a few errant ``u''`` strings that would prevent tests from passing in Py3.2. Patch courtesy Arfrever Frehtes Taifersar Arahesis. .. 
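A sketch of the late-established engine listener described above, using a hypothetical in-memory engine::

    from sqlalchemy import create_engine, event

    engine = create_engine("sqlite://")
    conn = engine.connect()  # connection created before the listener

    @event.listens_for(engine, "before_cursor_execute")
    def log_statement(conn, cursor, statement,
                      parameters, context, executemany):
        print(statement)

    # the listener now takes effect for the pre-existing connection
    conn.execute("select 1")

..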
change:: :tags: bug, engine :tickets: 2985 A major improvement made to the mechanics by which the :class:`.Engine` recycles the connection pool when a "disconnect" condition is detected; instead of discarding the pool and explicitly closing out connections, the pool is retained and a "generational" timestamp is updated to reflect the current time, thereby causing all existing connections to be recycled when they are next checked out. This greatly simplifies the recycle process, removes the need for "waking up" connect attempts waiting on the old pool and eliminates the race condition that many immediately-discarded "pool" objects could be created during the recycle operation. .. change:: :tags: bug, oracle :tickets: 2987 Added new datatype :class:`.oracle.DATE`, which is a subclass of :class:`.DateTime`. As Oracle has no "datetime" type per se, having instead only ``DATE``, it is appropriate here that the ``DATE`` type as present in the Oracle dialect be an instance of :class:`.DateTime`. This issue doesn't change anything as far as the behavior of the type, as data conversion is handled by the DBAPI in any case, however the improved subclass layout will help the use cases of inspecting types for cross-database compatibility. Also removed uppercase ``DATETIME`` from the Oracle dialect as this type isn't functional in that context. .. change:: :tags: bug, sql :tickets: 2988 :pullreq: github:78 Fixed an 0.9 regression where a :class:`.Table` that failed to reflect correctly wouldn't be removed from the parent :class:`.MetaData`, even though in an invalid state. Pullreq courtesy Roman Podoliaka. .. change:: :tags: bug, engine The :meth:`.ConnectionEvents.after_cursor_execute` event is now emitted for the "_cursor_execute()" method of :class:`.Connection`; this is the "quick" executor that is used for things like when a sequence is executed ahead of an INSERT statement, as well as for dialect startup checks like unicode returns, charset, etc.; the :meth:`.ConnectionEvents.before_cursor_execute` event was already invoked here. The "executemany" flag is now always set to False here, as this event always corresponds to a single execution. Previously the flag could be True if we were acting on behalf of an executemany INSERT statement. .. change:: :tags: bug, orm Added support for the not-quite-yet-documented ``insert=True`` flag for :func:`.event.listen` to work with mapper / instance events. .. change:: :tags: feature, sql Added support for literal rendering of boolean values, e.g. "true" / "false" or "1" / "0". .. change:: :tags: feature, sql Added a new feature :func:`.schema.conv`, the purpose of which is to mark a constraint name as already having had a naming convention applied. This token will be used by Alembic migrations as of Alembic 0.6.4 in order to render constraints in migration scripts with names marked as already having been subject to a naming convention. .. change:: :tags: bug, sql The :paramref:`.MetaData.naming_convention` feature will now also apply to :class:`.CheckConstraint` objects that are associated directly with a :class:`.Column` instead of just on the :class:`.Table`. .. change:: :tags: bug, sql :tickets: 2991 Fixed bug in new :paramref:`.MetaData.naming_convention` feature where the name of a check constraint making use of the ``"%(constraint_name)s"`` token would get doubled up for the constraint generated by a boolean or enum type, and overall duplicate events would cause the ``"%(constraint_name)s"`` token to keep compounding itself.
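A minimal sketch of the ``"%(constraint_name)s"`` naming convention scenario repaired above; the names here are hypothetical::

    from sqlalchemy import Boolean, Column, MetaData, Table

    meta = MetaData(naming_convention={
        "ck": "ck_%(table_name)s_%(constraint_name)s"})

    # the CHECK constraint implied by Boolean receives the convention
    # exactly once, based on the name given to the type
    t = Table('t', meta, Column('flag', Boolean(name='flag_bool')))

..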
change:: :tags: feature, orm A warning is emitted if the :meth:`.MapperEvents.before_configured` or :meth:`.MapperEvents.after_configured` events are applied to a specific mapper or mapped class, as the events are only invoked for the :class:`.Mapper` target at the general level. .. change:: :tags: feature, orm Added a new keyword argument ``once=True`` to :func:`.event.listen` and :func:`.event.listens_for`. This is a convenience feature which will wrap the given listener such that it is only invoked once. .. change:: :tags: feature, oracle :tickets: 2911 :pullreq: github:74 Added a new engine option ``coerce_to_unicode=True`` to the cx_Oracle dialect, which restores the cx_Oracle outputtypehandler approach to Python unicode conversion under Python 2, which was removed in 0.9.2 as a result of :ticket:`2911`. Some use cases would prefer that unicode coercion is unconditional for all string values, despite performance concerns. Pull request courtesy Christoph Zwerschke. .. change:: :tags: bug, pool Fixed small issue in :class:`.SingletonThreadPool` where the current connection to be returned might get inadvertently cleaned out during the "cleanup" process. Patch courtesy jd23. .. change:: :tags: bug, ext, py3k Fixed bug in association proxy where assigning an empty slice (e.g. ``x[:] = [...]``) would fail on Py3k. .. change:: :tags: bug, general :tickets: 2979 Fixed some test/feature failures occurring in Python 3.4, in particular the logic used to wrap "column default" callables wouldn't work properly for Python built-ins. .. change:: :tags: feature, general Support has been added for pytest to run tests. This runner is currently being supported in addition to nose, and will likely be preferred to nose going forward. The nose plugin system used by SQLAlchemy has been split out so that it works under pytest as well. There are no plans to drop support for nose at the moment and we hope that the test suite itself can continue to remain as agnostic of testing platform as possible. See the file README.unittests.rst for updated information on running tests with pytest. The test plugin system has also been enhanced to support running tests against multiple database URLs at once, by specifying the ``--db`` and/or ``--dburi`` flags multiple times. This does not run the entire test suite for each database, but instead allows test cases that are specific to certain backends to make use of that backend as the test is run. When using pytest as the test runner, the system will also run specific test suites multiple times, once for each database, particularly those tests within the "dialect suite". The plan is that the enhanced system will also be used by Alembic, and allow Alembic to run migration operation tests against multiple backends in one run, including third-party backends not included within Alembic itself. Third party dialects and extensions are also encouraged to standardize on SQLAlchemy's test suite as a basis; see the file README.dialects.rst for background on building out from SQLAlchemy's test platform. .. change:: :tags: feature, orm :tickets: 2976 Added a new option to :paramref:`.relationship.innerjoin` which is to specify the string ``"nested"``. When set to ``"nested"`` as opposed to ``True``, the "chaining" of joins will parenthesize the inner join on the right side of an existing outer join, instead of chaining as a string of outer joins.
This possibly should have been the default behavior when 0.9 was released, as we introduced the feature of right-nested joins in the ORM; however, we are keeping it as a non-default for now to avoid further surprises. A short sketch follows below. .. seealso:: :ref:`feature_2976` .. change:: :tags: bug, ext :tickets: 2810 Fixed a regression in association proxy caused by :ticket:`2810` which caused a user-provided "getter" to no longer receive values of ``None`` when fetching scalar values from a target that is non-present. The check for ``None`` introduced by this change is now moved into the default getter, so a user-provided getter will also again receive values of ``None``. .. change:: :tags: bug, sql :tickets: 2974 Adjusted the logic which applies names to the ``.c`` collection when a no-name :class:`.BindParameter` is received, e.g. via :func:`.sql.literal` or similar; the "key" of the bind param is used as the key within ``.c``, rather than the rendered name. Since these binds have "anonymous" names in any case, this allows individual bound parameters to have their own name within a selectable if they are otherwise unlabeled. .. change:: :tags: bug, sql :tickets: 2974 Some changes to how the :attr:`.FromClause.c` collection behaves when presented with duplicate columns. The behavior of emitting a warning and replacing the old column with the same name still remains to some degree; the replacement in particular is to maintain backwards compatibility. However, the replaced column still remains associated with the ``c`` collection now in a collection ``._all_columns``, which is used by constructs such as aliases and unions to deal with the set of columns in ``c`` based more on what is actually in the list of columns, rather than the unique set of key names. This helps with situations where SELECT statements with same-named columns are used in unions and such, so that the union can match the columns up positionally, and also there's some chance of :meth:`.FromClause.corresponding_column` still being usable here (it can now return a column that is only in ``selectable.c._all_columns`` and not otherwise named). The new collection is underscored as we still need to decide where this list might end up. Theoretically it would become the result of ``iter(selectable.c)``; however, this would mean that the length of the iteration would no longer match the length of ``keys()``, and that behavior needs to be checked out. .. change:: :tags: bug, sql Fixed issue in the new :meth:`.TextClause.columns` method where the ordering of columns given positionally would not be preserved. This could have potential impact in positional situations such as applying the resulting :class:`.TextAsFrom` object to a union. .. change:: :tags: feature, sql :tickets: 2962, 2866 The new dialect-level keyword argument system for schema-level constructs has been enhanced in order to assist with existing schemes that rely upon addition of ad-hoc keyword arguments to constructs. E.g., a construct such as :class:`.Index` will again accept ad-hoc keyword arguments within the :attr:`.Index.kwargs` collection, after construction::

    idx = Index('a', 'b')
    idx.kwargs['mysql_someargument'] = True

To suit the use case of allowing custom arguments at construction time, the :meth:`.DialectKWArgs.argument_for` method now allows this registration::

    Index.argument_for('mysql', 'someargument', False)
    idx = Index('a', 'b', mysql_someargument=True)

.. seealso:: :meth:`.DialectKWArgs.argument_for`
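Returning to the :paramref:`.relationship.innerjoin` entry above, a minimal sketch of the ``"nested"`` setting; the mapping names here are hypothetical, and a typical declarative ``Base`` is assumed::

    from sqlalchemy.orm import relationship

    class User(Base):
        # ... columns elided ...
        # with lazy="joined", the Order join is parenthesized on the
        # right side of an enclosing outer join, rather than chained
        # as another outer join
        orders = relationship("Order", lazy="joined", innerjoin="nested")

..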
change:: :tags: bug, orm, engine :tickets: 2973 Fixed bug where events set to listen at the class level (e.g. on the :class:`.Mapper` or :class:`.ClassManager` level, as opposed to on an individual mapped class, and also on :class:`.Connection`) that also made use of internal argument conversion (which applies to most events within those categories) would fail to be removable. .. change:: :tags: bug, orm Fixed regression from 0.8 where using an option like :func:`.orm.lazyload` with the "wildcard" expression, e.g. ``"*"``, would raise an assertion error in the case where the query didn't contain any actual entities. This assertion is meant for other cases and was catching this one inadvertently. .. change:: :tags: bug, examples Fixed bug in the versioned_history example where column-level INSERT defaults would prevent history values of NULL from being written. .. change:: :tags: orm, bug, sqlite :tickets: 2969 More fixes to SQLite "join rewriting"; the fix from :ticket:`2967` implemented right before the release of 0.9.3 affected the case where a UNION contained nested joins in it. "Join rewriting" is a feature with a wide range of possibilities and is the first intricate "SQL rewriting" feature we've introduced in years, so we're going through a lot of iterations with it (not unlike eager loading back in the 0.2/0.3 series, or polymorphic loading in 0.4/0.5). We should be there soon, so thanks for bearing with us :). .. changelog:: :version: 0.9.3 :released: February 19, 2014 .. change:: :tags: orm, bug, sqlite :tickets: 2967 Fixed bug in SQLite "join rewriting" where usage of an exists() construct would fail to be rewritten properly, such as when the exists is mapped to a column_property in an intricate nested-join scenario. Also fixed a somewhat related issue where join rewriting would fail on the columns clause of the SELECT statement if the targets were aliased tables, as opposed to individual aliased columns. .. change:: :tags: sqlite, bug The SQLite dialect will now skip unsupported arguments when reflecting types; for example, if it encounters a string like ``INTEGER(5)``, the :class:`.INTEGER` type will be instantiated without the "5" being included, based on detecting a ``TypeError`` on the first attempt. .. change:: :tags: sqlite, bug :pullreq: github:65 Support has been added to SQLite type reflection to fully support the "type affinity" contract specified at http://www.sqlite.org/datatype3.html. In this scheme, keywords like ``INT``, ``CHAR``, ``BLOB`` or ``REAL`` located in the type name generically associate the type with one of five affinities. Pull request courtesy Erich Blume. .. seealso:: :ref:`sqlite_type_reflection` .. change:: :tags: postgresql, feature :pullreq: github:64 Added the :attr:`.TypeEngine.python_type` convenience accessor onto the :class:`.postgresql.ARRAY` type; a short sketch follows below. Pull request courtesy Alexey Terentev. .. change:: :tags: examples, feature :pullreq: github:41 Added optional "changed" column to the versioned rows example, as well as support for when the versioned :class:`.Table` has an explicit :paramref:`~.Table.schema` argument. Pull request courtesy jplaverdure. .. change:: :tags: bug, postgresql :tickets: 2946 Added server version detection to the newly added dialect startup query for "show standard_conforming_strings"; as this variable was added as of PG 8.2, we skip the query for PG versions that report a version string earlier than that.
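As referenced in the :attr:`.TypeEngine.python_type` entry above, a minimal sketch of the accessor; this assumes the ARRAY accessor mirrors the generic one by reporting ``list``::

    from sqlalchemy import Integer
    from sqlalchemy.dialects import postgresql

    # the Python type an ARRAY column returns is a list
    assert postgresql.ARRAY(Integer).python_type is list

..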
change:: :tags: bug, orm, declarative :tickets: 2950 Fixed bug where :class:`.AbstractConcreteBase` would fail to be fully usable within declarative relationship configuration, as its string classname would not be available in the registry of classnames at mapper configuration time. The class now explicitly adds itself to the class registry, and additionally both :class:`.AbstractConcreteBase` as well as :class:`.ConcreteBase` set themselves up *before* mappers are configured within the :func:`.configure_mappers` setup, using the new :meth:`.MapperEvents.before_configured` event. .. change:: :tags: feature, orm Added new :meth:`.MapperEvents.before_configured` event which allows an event at the start of :func:`.configure_mappers`, as well as ``__declare_first__()`` hook within declarative to complement ``__declare_last__()``. .. change:: :tags: bug, mysql, cymysql :tickets: 2934 :pullreq: github:69 Fixed bug in cymysql dialect where a version string such as ``'33a-MariaDB'`` would fail to parse properly. Pull request courtesy Matt Schmidt. .. change:: :tags: bug, orm :tickets: 2949 Fixed an 0.9 regression where ORM instance or mapper events applied to a base class such as a declarative base with the propagate=True flag would fail, due to an assertion, to apply to existing mapped classes which also used inheritance. Additionally, repaired an attribute error which could occur during removal of such an event, depending on how it was first assigned. .. change:: :tags: bug, ext Fixed bug where the :class:`.AutomapBase` class of the new automap extension would fail if classes were pre-arranged in single or potentially joined inheritance patterns. The repaired joined inheritance issue could also potentially apply when using :class:`.DeferredReflection` as well. .. change:: :tags: bug, sql :pullreq: github:67 Fixed regression in the new "naming convention" feature where conventions would fail if the referred table in a foreign key contained a schema name. Pull request courtesy Thomas Farvour. .. change:: :tags: bug, sql Fixed bug where so-called "literal render" of :func:`.bindparam` constructs would fail if the bind were constructed with a callable, rather than a direct value. This prevented ORM expressions from being rendered with the "literal_binds" compiler flag. .. change:: :tags: bug, orm :tickets: 2935 Improved the initialization logic of composite attributes such that calling ``MyClass.attribute`` will not require that the configure mappers step has occurred, e.g. it will just work without throwing any error. .. change:: :tags: bug, orm :tickets: 2932 More issues with [ticket:2932] first resolved in 0.9.2 where using a column key of the form ``_`` matching that of an aliased column in the text would still not match at the ORM level, which is ultimately due to a core column-matching issue. Additional rules have been added so that the column ``_label`` is taken into account when working with a :class:`.TextAsFrom` construct or with literal columns. .. changelog:: :version: 0.9.2 :released: February 2, 2014 .. change:: :tags: bug, examples Added a tweak to the "history_meta" example where the check for "history" on a relationship-bound attribute will now no longer emit any SQL if the relationship is unloaded. .. change:: :tags: feature, sql Added :paramref:`.MetaData.reflect.**dialect_kwargs` to support dialect-level reflection options for all :class:`.Table` objects reflected.
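A minimal sketch of passing a dialect-level reflection option through :meth:`.MetaData.reflect`; the URL is hypothetical and ``oracle_resolve_synonyms`` is used purely as an illustrative dialect option::

    from sqlalchemy import create_engine, MetaData

    engine = create_engine("oracle://scott:tiger@dsn")  # hypothetical URL
    metadata = MetaData()

    # the dialect-level kwarg is passed along to each Table reflected
    metadata.reflect(engine, oracle_resolve_synonyms=True)

..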
change:: :tags: feature, postgresql :tickets: 2922 Added a new dialect-level argument ``postgresql_ignore_search_path``; this argument is accepted by both the :class:`.Table` constructor as well as by the :meth:`.MetaData.reflect` method. When in use against Postgresql, a foreign-key referenced table which specifies a remote schema name will retain that schema name even if the name is present in the ``search_path``; the default behavior since 0.7.3 has been that schemas present in ``search_path`` would not be copied to reflected :class:`.ForeignKey` objects. The documentation has been updated to describe in detail the behavior of the ``pg_get_constraintdef()`` function and how the ``postgresql_ignore_search_path`` feature essentially determines if we will honor the schema qualification reported by this function or not. .. seealso:: :ref:`postgresql_schema_reflection` .. change:: :tags: bug, sql :tickets: 2913 The behavior of :meth:`.Table.tometadata` has been adjusted such that the schema target of a :class:`.ForeignKey` will not be changed unless that schema matches that of the parent table. That is, if a table "schema_a.user" has a foreign key to "schema_b.order.id", the "schema_b" target will be maintained whether or not the "schema" argument is passed to :meth:`.Table.tometadata`. However if a table "schema_a.user" refers to "schema_a.order.id", the presence of "schema_a" will be updated on both the parent and referred tables. This is a behavioral change, hence isn't likely to be backported to 0.8; it is assumed that the previous behavior was pretty buggy, however, and that it's unlikely anyone was relying upon it. Additionally, a new parameter has been added, :paramref:`.Table.tometadata.referred_schema_fn`. This refers to a callable function which will be used to determine the new referred schema for any :class:`.ForeignKeyConstraint` encountered in the tometadata operation. This callable can be used to revert to the previous behavior or to customize how referred schemas are treated on a per-constraint basis. .. change:: :tags: bug, orm :tickets: 2932 Fixed bug in the new :class:`.TextAsFrom` construct where :class:`.Column`-oriented row lookups were not matching up to the ad-hoc :class:`.ColumnClause` objects that :class:`.TextAsFrom` generates, thereby making it not usable as a target in :meth:`.Query.from_statement`. Also fixed :meth:`.Query.from_statement` mechanics to not mistake a :class:`.TextAsFrom` for a :class:`.Select` construct. This bug is also an 0.9 regression as the :meth:`.Text.columns` method is called to accommodate the :paramref:`.text.typemap` argument. .. change:: :tags: feature, sql :tickets: 2923 Added a new feature which allows automated naming conventions to be applied to :class:`.Constraint` and :class:`.Index` objects. Based on a recipe in the wiki, the new feature uses schema-events to set up names as various schema objects are associated with each other. The events then expose a configuration system through a new argument :paramref:`.MetaData.naming_convention`. This system allows production of both simple and custom naming schemes for constraints and indexes on a per-:class:`.MetaData` basis. .. seealso:: :ref:`constraint_naming_conventions` .. change:: :tags: bug, orm :tickets: 2921 Added a new directive used within the scope of an attribute "set" operation to disable autoflush, in the case that the attribute needs to lazy-load the "old" value, as in when replacing one-to-one values or some kinds of many-to-one.
A flush at this point would otherwise occur while the attribute is None, and could cause NULL violations. .. change:: :tags: feature, orm Added a new parameter :paramref:`.Operators.op.is_comparison`. This flag allows a custom op from :meth:`.Operators.op` to be considered as a "comparison" operator, thus usable for custom :paramref:`.relationship.primaryjoin` conditions. .. seealso:: :ref:`relationship_custom_operator` .. change:: :tags: bug, sqlite Fixed bug whereby the SQLite compiler failed to propagate compiler arguments such as "literal binds" into a CAST expression. .. change:: :tags: bug, sql Fixed bug whereby the binary type would fail in some cases if used with a "test" dialect, such as a DefaultDialect or other dialect with no DBAPI. .. change:: :tags: bug, sql, py3k Fixed bug where "literal binds" wouldn't work with a bound parameter that's a binary type. A similar, but different, issue is fixed in 0.8. .. change:: :tags: bug, sql :tickets: 2927 Fixed regression whereby the "annotation" system used by the ORM was leaking into the names used by standard functions in :mod:`sqlalchemy.sql.functions`, such as ``func.coalesce()`` and ``func.max()``. Using these functions in ORM attributes and thus producing annotated versions of them could corrupt the actual function name rendered in the SQL. .. change:: :tags: bug, sql :tickets: 2924, 2848 Fixed 0.9 regression where the new sortable support for :class:`.RowProxy` would lead to ``TypeError`` when compared to non-tuple types, as it attempted to apply ``tuple()`` to the "other" object unconditionally. The full range of Python comparison operators have now been implemented on :class:`.RowProxy`, using an approach that guarantees a comparison system that is equivalent to that of a tuple, and the "other" object is only coerced if it's an instance of RowProxy. .. change:: :tags: bug, orm :tickets: 2918 Fixed an 0.9 regression where the automatic aliasing applied by :class:`.Query` and in other situations where selects or joins were aliased (such as joined table inheritance) could fail if a user-defined :class:`.Column` subclass were used in the expression. In this case, the subclass would fail to propagate along the ORM-specific "annotations" needed by the adaptation. The "expression annotations" system has been corrected to account for this case. .. change:: :tags: feature, orm Support is improved for supplying a :func:`.join` construct as the target of :paramref:`.relationship.secondary` for the purposes of creating very complex :func:`.relationship` join conditions. The change includes adjustments to query joining, joined eager loading to not render a SELECT subquery, changes to lazy loading such that the "secondary" target is properly included in the SELECT, and changes to declarative to better support specification of a join() object with classes as targets. The new use case is somewhat experimental, but a new documentation section has been added. .. seealso:: :ref:`composite_secondary_join` .. change:: :tags: bug, mysql, sql :tickets: 2917 Added new test coverage for so-called "down adaptations" of SQL types, where a more specific type is adapted to a more generic one - this use case is needed by some third party tools such as ``sqlacodegen``. The specific cases that needed repair within this test suite were that of :class:`.mysql.ENUM` being downcast into a :class:`.types.Enum`, and that of SQLite date types being cast into generic date types.
The ``adapt()`` method needed to become more specific here to account for the removal of the "catch all" ``**kwargs`` collection from the base :class:`.TypeEngine` class in 0.9. .. change:: :tags: feature, sql :tickets: 2910 Options can now be specified on a :class:`.PrimaryKeyConstraint` object independently of the specification of columns in the table with the ``primary_key=True`` flag; use a :class:`.PrimaryKeyConstraint` object with no columns in it to achieve this result. Previously, an explicit :class:`.PrimaryKeyConstraint` would have the effect of those columns marked as ``primary_key=True`` being ignored; since this is no longer the case, the :class:`.PrimaryKeyConstraint` will now assert that either one style or the other is used to specify the columns, or if both are present, that the column lists match exactly. If an inconsistent set of columns in the :class:`.PrimaryKeyConstraint` and within the :class:`.Table` marked as ``primary_key=True`` is present, a warning is emitted, and the list of columns is taken only from the :class:`.PrimaryKeyConstraint` alone as was the case in previous releases. .. seealso:: :class:`.PrimaryKeyConstraint` .. change:: :tags: feature, sql :tickets: 2866 The system by which schema constructs and certain SQL constructs accept dialect-specific keyword arguments has been enhanced. This system commonly includes the :class:`.Table` and :class:`.Index` constructs, which accept a wide variety of dialect-specific arguments such as ``mysql_engine`` and ``postgresql_where``, as well as the constructs :class:`.PrimaryKeyConstraint`, :class:`.UniqueConstraint`, :class:`.Update`, :class:`.Insert` and :class:`.Delete`, and also newly added kwarg capability to :class:`.ForeignKeyConstraint` and :class:`.ForeignKey`. The change is that participating dialects can now specify acceptable argument lists for these constructs, allowing an argument error to be raised if an invalid keyword is specified for a particular dialect. If the dialect portion of the keyword is unrecognized, a warning is emitted only; while the system will actually make use of setuptools entrypoints in order to locate non-local dialects, the use case where certain dialect-specific arguments are used in an environment where that third-party dialect is uninstalled remains supported. Dialects also have to explicitly opt in to this system, so that external dialects which aren't making use of this system will remain unaffected. .. change:: :tags: bug, sql :pullreq: bitbucket:11 A :class:`.UniqueConstraint` created inline with a :class:`.Table` that has no columns within it will be skipped. Pullreq courtesy Derek Harland. .. change:: :tags: feature, mssql :pullreq: bitbucket:11 Added an option ``mssql_clustered`` to the :class:`.UniqueConstraint` and :class:`.PrimaryKeyConstraint` constructs; on SQL Server, this adds the ``CLUSTERED`` keyword to the constraint construct within DDL. Pullreq courtesy Derek Harland. .. change:: :tags: bug, sql, orm :tickets: 2912 Fixed the multiple-table "UPDATE..FROM" construct, only usable on MySQL, to correctly render the SET clause among multiple columns with the same name across tables. This also changes the name used for the bound parameter in the SET clause to "_" for the non-primary table only; as this parameter is typically specified using the :class:`.Column` object directly this should not have an impact on applications. The fix takes effect for both :meth:`.Table.update` as well as :meth:`.Query.update` in the ORM.
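A minimal sketch of the multiple-table UPDATE form this fix addresses; the table and column names are hypothetical::

    from sqlalchemy import MetaData, Table, Column, Integer, String

    m = MetaData()
    users = Table('users', m, Column('id', Integer, primary_key=True),
                  Column('name', String(50)))
    addresses = Table('addresses', m, Column('user_id', Integer),
                      Column('name', String(50)))

    # on MySQL this renders "UPDATE users, addresses SET ..." with
    # distinct bound parameter names for the two same-named columns
    stmt = users.update().\
        values({users.c.name: 'ed', addresses.c.name: 'ed'}).\
        where(users.c.id == addresses.c.user_id)

..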
change:: :tags: bug, oracle :tickets: 2911 It's been observed that the usage of a cx_Oracle "outputtypehandler" in Python 2.x in order to coerce string values to Unicode is inordinately expensive; even though cx_Oracle is written in C, when you pass the Python ``unicode`` primitive to cursor.var() and associate with an output handler, the library counts every conversion as a Python function call with all the requisite overhead being recorded; this *despite* the fact that when running in Python 3, all strings are also unconditionally coerced to unicode but it does *not* incur this overhead, meaning that cx_Oracle is failing to use performant techniques in Py2K. As SQLAlchemy cannot easily select for this style of type handler on a per-column basis, the handler was assembled unconditionally, thereby adding the overhead to all string access. So this logic has been replaced with SQLAlchemy's own unicode conversion system, which now only takes effect in Py2K for columns that are requested as unicode. When C extensions are used, SQLAlchemy's system appears to be 2-3x faster than cx_Oracle's. Additionally, SQLAlchemy's unicode conversion has been enhanced such that when the "conditional" converter is required (now needed for the Oracle backend), the check for "already unicode" is now performed in C and no longer introduces significant overhead. This change has two impacts on the cx_Oracle backend. One is that string values in Py2K which aren't specifically requested with the Unicode type or ``convert_unicode=True`` will now come back as ``str``, not ``unicode`` - this behavior is similar to a backend such as MySQL. Additionally, when unicode values are requested with the cx_Oracle backend, if the C extensions are *not* used, there is now an additional overhead of an ``isinstance()`` check per column. This tradeoff has been made as it can be worked around and no longer places a performance burden on the likely majority of Oracle result columns that are non-unicode strings. .. change:: :tags: bug, orm :tickets: 2908 Fixed a bug involving the new flattened JOIN structures which are used with :func:`.joinedload()` (thereby causing a regression in joined eager loading) as well as :func:`.aliased` in conjunction with the ``flat=True`` flag and joined-table inheritance; basically multiple joins across a "parent JOIN sub" entity using different paths to get to a target class wouldn't form the correct ON conditions. An adjustment / simplification made in the mechanics of figuring out the "left side" of the join in the case of an aliased, joined-inh class repairs the issue. .. change:: :tags: bug, mysql The MySQL CAST compilation now takes into account aspects of a string type such as "charset" and "collation". While MySQL wants all character-based CAST calls to use the CHAR type, we now create a real CHAR object at CAST time and copy over all the parameters it has, so that an expression like ``cast(x, mysql.TEXT(charset='utf8'))`` will render ``CAST(t.col AS CHAR CHARACTER SET utf8)``. .. change:: :tags: bug, mysql :tickets: 2906 Added new "unicode returns" detection to the MySQL dialect and to the default dialect system overall, such that any dialect can add extra "tests" to the on-first-connect "does this DBAPI return unicode directly?" detection. In this case, we are adding a check specifically against the "utf8" encoding with an explicit "utf8_bin" collation type (after checking that this collation is available) to test for some buggy unicode behavior observed with MySQLdb version 1.2.3.
While MySQLdb has resolved this issue as of 1.2.4, the check here should guard against regressions. The change also allows the "unicode" checks to log in the engine logs, which was not previously the case. .. change:: :tags: bug, mysql, pool, engine :tickets: 2907 :class:`.Connection` now associates a new :class:`.RootTransaction` or :class:`.TwoPhaseTransaction` with its immediate :class:`._ConnectionFairy` as a "reset handler" for the span of that transaction, which takes over the task of calling commit() or rollback() for the "reset on return" behavior of :class:`.Pool` if the transaction was not otherwise completed. This resolves the issue that a picky transaction like that of MySQL two-phase will be properly closed out when the connection is closed without an explicit rollback or commit (e.g. no longer raises "XAER_RMFAIL" in this case - note this only shows up in logging as the exception is not propagated within pool reset). This issue would arise e.g. when using an ORM :class:`.Session` with ``twophase`` set, and then :meth:`.Session.close` is called without an explicit rollback or commit. The change also has the effect that you will now see an explicit "ROLLBACK" in the logs when using a :class:`.Session` object in non-autocommit mode regardless of how that session was discarded. Thanks to Jeff Dairiki and Laurence Rowe for isolating the issue here. .. change:: :tags: feature, pool, engine Added a new pool event :meth:`.PoolEvents.invalidate`. Called when a DBAPI connection is to be marked as "invalidated" and discarded from the pool. .. change:: :tags: bug, pool The argument names for the :meth:`.PoolEvents.reset` event have been renamed to ``dbapi_connection`` and ``connection_record`` in order to maintain consistency with all the other pool events. It is expected that any existing listeners for this relatively new and seldom-used event are using positional style to receive arguments in any case. .. change:: :tags: bug, py3k, cextensions :pullreq: github:55 Fixed an issue where the C extensions in Py3K are using the wrong API to specify the top-level module function, which breaks in Python 3.4b2. Py3.4b2 changes PyMODINIT_FUNC to return "void" instead of ``PyObject *``, so we now make sure to use "PyMODINIT_FUNC" instead of ``PyObject *`` directly. Pull request courtesy cgohlke. .. change:: :tags: bug, schema :pullreq: github:57 Restored :class:`sqlalchemy.schema.SchemaVisitor` to the ``.schema`` module. Pullreq courtesy Sean Dague. .. changelog:: :version: 0.9.1 :released: January 5, 2014 .. change:: :tags: bug, orm, events :tickets: 2905 Fixed regression where using a ``functools.partial()`` with the event system would cause a recursion overflow due to usage of ``inspect.getargspec()`` on it in order to detect a legacy calling signature for certain events, and apparently there's no way to do this with a partial object. Instead we skip the legacy check and assume the modern style; the check itself now only occurs for the SessionEvents.after_bulk_update and SessionEvents.after_bulk_delete events. Those two events will require the new signature style if assigned to a "partial" event listener. .. change:: :tags: feature, orm, extensions A new, **experimental** extension :mod:`sqlalchemy.ext.automap` is added. This extension expands upon the functionality of Declarative as well as the :class:`.DeferredReflection` class to produce a base class which automatically generates mapped classes *and relationships* based on table metadata.
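A minimal sketch of the automap extension in use; the database URL and table name are hypothetical::

    from sqlalchemy import create_engine
    from sqlalchemy.ext.automap import automap_base

    Base = automap_base()
    engine = create_engine("sqlite:///mydatabase.db")  # hypothetical URL

    # reflect the tables and generate mapped classes and relationships
    Base.prepare(engine, reflect=True)

    User = Base.classes.user  # assumes a table named "user" exists

..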
seealso:: :ref:`feature_automap` :ref:`automap_toplevel` .. change:: :tags: feature, sql Conjunctions like :func:`.and_` and :func:`.or_` can now accept Python generators as a single argument, e.g.::

    and_(x == y for x, y in tuples)

The logic here looks for a single-element ``*args`` whose first element is an instance of ``types.GeneratorType``. .. change:: :tags: feature, schema The :paramref:`.Table.extend_existing` and :paramref:`.Table.autoload_replace` parameters are now available on the :meth:`.MetaData.reflect` method. .. change:: :tags: bug, orm, declarative Fixed an extremely unlikely memory issue whereby, when using :class:`.DeferredReflection` to define classes pending for reflection, if some subset of those classes were discarded before the :meth:`.DeferredReflection.prepare` method was called to reflect and map the class, a strong reference to the class would remain held within the declarative internals. This internal collection of "classes to map" now uses weak references against the classes themselves. .. change:: :tags: bug, orm :pullreq: bitbucket:9 Fixed bug where using the new :attr:`.Session.info` attribute would fail if the ``.info`` argument were only passed to the :class:`.sessionmaker` creation call but not to the object itself. Courtesy Robin Schoonover. .. change:: :tags: bug, orm :tickets: 2901 Fixed regression where we don't check the given name against the correct string class when setting up a backref based on a name, therefore causing the error "too many values to unpack". This was related to the Py3k conversion. .. change:: :tags: bug, orm, declarative :tickets: 2900 Fixed a quasi-regression where apparently in 0.8 you can set a class-level attribute on declarative to simply refer directly to an :class:`.InstrumentedAttribute` on a superclass or on the class itself, and it acts more or less like a synonym; in 0.9, this fails to set up enough bookkeeping to keep up with the more liberalized backref logic from :ticket:`2789`. Even though this use case was never directly considered, it is now detected by declarative at the "setattr()" level as well as when setting up a subclass, and the mirrored/renamed attribute is now set up as a :func:`.synonym` instead. .. change:: :tags: bug, orm :tickets: 2903 Fixed regression where we apparently still create an implicit alias when saying ``query(B).join(B.cs)``, where "C" is a joined-inh class; however, this implicit alias was created only considering the immediate left side, and not a longer chain of joins along different joined-inh subclasses of the same base. As long as we're still implicitly aliasing in this case, the behavior is dialed back a bit so that it will alias the right side in a wider variety of cases. .. changelog:: :version: 0.9.0 :released: December 30, 2013 .. change:: :tags: bug, orm, declarative :tickets: 2828 Declarative does an extra check to detect if the same :class:`.Column` is mapped multiple times under different properties (which typically should be a :func:`.synonym` instead) or if two or more :class:`.Column` objects are given the same name, raising a warning if this condition is detected. .. change:: :tags: bug, firebird :tickets: 2898 Changed the queries used by Firebird to list table and view names to query from the ``rdb$relations`` view instead of the ``rdb$relation_fields`` and ``rdb$view_relations`` views.
Variants of both the old and new queries are mentioned in many FAQs and blogs; however, the new queries are taken straight from the "Firebird FAQ" which appears to be the most official source of info. .. change:: :tags: bug, mysql :tickets: 2893 Improvements to the system by which SQL types generate within ``__repr__()``, particularly with regards to the MySQL integer/numeric/character types which feature a wide variety of keyword arguments. The ``__repr__()`` is important for use with Alembic autogenerate when Python code is rendered in a migration script. .. change:: :tags: feature, postgresql :tickets: 2581 :pullreq: github:50 Support for Postgresql JSON has been added, using the new :class:`.JSON` type. Huge thanks to Nathan Rice for implementing and testing this. .. change:: :tags: bug, sql The :func:`.cast` function, when given a plain literal value, will now apply the given type to the literal value on the bind parameter side, according to the type given to the cast, in the same manner as that of the :func:`.type_coerce` function. However unlike :func:`.type_coerce`, this only takes effect if a non-clauseelement value is passed to :func:`.cast`; an existing typed construct will retain its type. .. change:: :tags: bug, postgresql Now using the psycopg2 UNICODEARRAY extension for handling unicode arrays with psycopg2 + normal "native unicode" mode, in the same way the UNICODE extension is used. .. change:: :tags: bug, sql :tickets: 2883 The :class:`.ForeignKey` class more aggressively checks the given column argument. If not a string, it checks that the object is at least a :class:`.ColumnClause`, or an object that resolves to one, and that the ``.table`` attribute, if present, refers to a :class:`.TableClause` or subclass, and not something like an :class:`.Alias`. Otherwise, an :class:`.ArgumentError` is raised. .. change:: :tags: feature, orm The :class:`.exc.StatementError` or DBAPI-related subclass can now accommodate additional information about the "reason" for the exception; the :class:`.Session` now adds some detail to it when the exception occurs within an autoflush. This approach is taken as opposed to combining :class:`.FlushError` with a Python 3 style "chained exception" approach, so as to maintain compatibility both with Py2K code as well as code that already catches ``IntegrityError`` or similar. .. change:: :tags: feature, postgresql :pullreq: bitbucket:8 Added support for Postgresql TSVECTOR via the :class:`.postgresql.TSVECTOR` type. Pull request courtesy Noufal Ibrahim. .. change:: :tags: feature, engine :tickets: 2875 The :func:`.engine_from_config` function has been improved so that we will be able to parse dialect-specific arguments from string configuration dictionaries. Dialect classes can now provide their own list of parameter types and string-conversion routines. The feature is not yet used by the built-in dialects, however. .. change:: :tags: bug, sql :tickets: 2879 The precedence rules for the :meth:`.ColumnOperators.collate` operator have been modified, such that the COLLATE operator is now of lower precedence than the comparison operators. This has the effect that a COLLATE applied to a comparison will not render parenthesis around the comparison, which is not parsed by backends such as MSSQL. The change is backwards incompatible for those setups that were working around the issue by applying :meth:`.Operators.collate` to an individual element of the comparison expression, rather than the comparison expression as a whole.
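A brief illustration of the new COLLATE precedence; a sketch, using an arbitrary collation name::

    from sqlalchemy import column

    # now renders "x = :x_1 COLLATE en_EN"
    # rather than "(x = :x_1) COLLATE en_EN"
    print((column('x') == 'some value').collate('en_EN'))

..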
seealso:: :ref:`migration_2879` .. change:: :tags: bug, orm, declarative :tickets: 2865 The :class:`.DeferredReflection` class has been enhanced to provide automatic reflection support for the "secondary" table referred to by a :func:`.relationship`. "secondary", when specified either as a string table name, or as a :class:`.Table` object with only a name and :class:`.MetaData` object, will also be included in the reflection process when :meth:`.DeferredReflection.prepare` is called. .. change:: :tags: feature, orm, backrefs :tickets: 1535 Added new argument ``include_backrefs=True`` to the :func:`.validates` function; when set to False, a validation event will not be triggered if the event was initiated as a backref to an attribute operation from the other side. .. seealso:: :ref:`feature_1535` .. change:: :tags: bug, orm, collections, py3k :pullreq: github:40 Added support for the Python 3 method ``list.clear()`` within the ORM collection instrumentation system; pull request courtesy Eduardo Schettino. .. change:: :tags: bug, postgresql :tickets: 2878 Fixed bug where values within an ENUM weren't escaped for single quotes. Note that this is backwards-incompatible for existing workarounds that manually escape the single quotes. .. seealso:: :ref:`migration_2878` .. change:: :tags: bug, orm, declarative Fixed bug where in Py2K a unicode literal would not be accepted as the string name of a class or other argument within declarative using :func:`.relationship`. .. change:: :tags: feature, sql :tickets: 2877, 2882 New improvements to the :func:`.text` construct, including more flexible ways to set up bound parameters and return types; in particular, a :func:`.text` can now be turned into a full FROM-object, embeddable in other statements as an alias or CTE using the new method :meth:`.TextClause.columns`. The :func:`.text` construct can also render "inline" bound parameters when the construct is compiled in a "literal bound" context. .. seealso:: :ref:`feature_2877` .. change:: :tags: feature, sql :pullreq: github:42 A new API for specifying the ``FOR UPDATE`` clause of a ``SELECT`` is added with the new :meth:`.GenerativeSelect.with_for_update` method. This method supports a more straightforward system of setting dialect-specific options compared to the ``for_update`` keyword argument of :func:`.select`, and also includes support for the SQL standard ``FOR UPDATE OF`` clause. The ORM also includes a new corresponding method :meth:`.Query.with_for_update`. Pull request courtesy Mario Lassnig. .. seealso:: :ref:`feature_github_42` .. change:: :tags: feature, orm :pullreq: github:42 A new API for specifying the ``FOR UPDATE`` clause of a ``SELECT`` is added with the new :meth:`.Query.with_for_update` method, to complement the new :meth:`.GenerativeSelect.with_for_update` method. Pull request courtesy Mario Lassnig. .. seealso:: :ref:`feature_github_42` .. change:: :tags: bug, engine :tickets: 2873 The :func:`.create_engine` routine and the related :func:`.make_url` function no longer consider the ``+`` sign to be a space within the password field. The parsing has been adjusted to match RFC 1738 exactly, in that both ``username`` and ``password`` expect only ``:``, ``@``, and ``/`` to be encoded. .. seealso:: :ref:`migration_2873` .. change:: :tags: bug, orm :tickets: 2872 Some refinements to the :class:`.AliasedClass` construct with regard to descriptors, like hybrids, synonyms, composites, user-defined descriptors, etc.
The attribute adaptation which goes on has been made more robust, such that if a descriptor returns another instrumented attribute, rather than a compound SQL expression element, the operation will still proceed. Additionally, the "adapted" operator will retain its class; previously, a change in class from ``InstrumentedAttribute`` to ``QueryableAttribute`` (a superclass) would interact with Python's operator system such that an expression like ``aliased(MyClass.x) > MyClass.x`` would reverse itself to read ``myclass.x < myclass_1.x``. The adapted attribute will also refer to the new :class:`.AliasedClass` as its parent, which was not always the case before. .. change:: :tags: feature, sql :tickets: 2867 The precision used when coercing a returned floating point value to Python ``Decimal`` via string is now configurable. The flag ``decimal_return_scale`` is now supported by all :class:`.Numeric` and :class:`.Float` types, which will ensure this many digits are taken from the native floating point value when it is converted to string. If not present, the type will make use of the value of ``.scale``, if the type supports this setting and it is non-None. Otherwise the original default length of 10 is used. .. seealso:: :ref:`feature_2867` .. change:: :tags: bug, schema :tickets: 2868 Fixed a regression caused by :ticket:`2812` where the ``repr()`` for table and column names would fail if the name contained non-ascii characters. .. change:: :tags: bug, engine :tickets: 2848 The :class:`.RowProxy` object is now sortable in Python as a regular tuple is; this is accomplished via ensuring ``tuple()`` conversion on both sides within the ``__eq__()`` method as well as the addition of a ``__lt__()`` method. .. seealso:: :ref:`migration_2848` .. change:: :tags: bug, orm :tickets: 2833 The ``viewonly`` flag on :func:`.relationship` will now prevent attribute history from being written on behalf of the target attribute. This has the effect of the object not being written to the Session.dirty list if it is mutated. Previously, the object would be present in Session.dirty, but no change would take place on behalf of the modified attribute during flush. The attribute still emits events such as backref events and user-defined events and will still receive mutations from backrefs. .. seealso:: :ref:`migration_2833` .. change:: :tags: bug, orm Added support for the new :attr:`.Session.info` attribute to :class:`.scoped_session`. .. change:: :tags: removed The "informix" and "informixdb" dialects have been removed; the code is now available as a separate repository on Bitbucket. The IBM-DB project has provided production-level Informix support since the informixdb dialect was first added. .. change:: :tags: bug, orm Fixed bug where usage of the new :class:`.Bundle` object would cause the :attr:`.Query.column_descriptions` attribute to fail. .. change:: :tags: bug, examples Fixed bug which prevented the history_meta recipe from working with joined inheritance schemes more than one level deep. .. change:: :tags: bug, orm, sql, sqlite :tickets: 2858 Fixed a regression introduced by the join rewriting feature of :ticket:`2369` and :ticket:`2587` where a nested join with one side already an aliased select would fail to translate the ON clause on the outside correctly; in the ORM this could be seen when using a SELECT statement as a "secondary" table. .. changelog:: :version: 0.9.0b1 :released: October 26, 2013 ..
change:: :tags: feature, orm :tickets: 2810 The association proxy now returns ``None`` when fetching a scalar attribute off of a scalar relationship, where the scalar relationship itself points to ``None``, instead of raising an ``AttributeError``. .. seealso:: :ref:`migration_2810` .. change:: :tags: feature, sql, postgresql, mysql :tickets: 2183 The Postgresql and MySQL dialects now support reflection/inspection of foreign key options, including ON UPDATE, ON DELETE. Postgresql also reflects MATCH, DEFERRABLE, and INITIALLY. Courtesy ijl. .. change:: :tags: bug, mysql :tickets: 2839 Fix and test parsing of MySQL foreign key options within reflection; this complements the work in :ticket:`2183` where we begin to support reflection of foreign key options such as ON UPDATE/ON DELETE cascade. .. change:: :tags: bug, orm :tickets: 2787 :func:`.attributes.get_history()` when used with a scalar column-mapped attribute will now honor the "passive" flag passed to it; as this defaults to ``PASSIVE_OFF``, the function will by default query the database if the value is not present. This is a behavioral change vs. 0.8. .. seealso:: :ref:`change_2787` .. change:: :tags: feature, orm :tickets: 2787 Added new method :meth:`.AttributeState.load_history`, which works like :attr:`.AttributeState.history` but also fires loader callables. .. seealso:: :ref:`change_2787` .. change:: :tags: feature, sql :tickets: 2850 A :func:`.bindparam` construct with a "null" type (e.g. no type specified) is now copied when used in a typed expression, and the new copy is assigned the actual type of the compared column. Previously, this logic would occur on the given :func:`.bindparam` in place. Additionally, a similar process now occurs for :func:`.bindparam` constructs passed to :meth:`.ValuesBase.values` for an :class:`.Insert` or :class:`.Update` construct, within the compilation phase of the construct. These are both subtle behavioral changes which may impact some usages. .. seealso:: :ref:`migration_2850` .. change:: :tags: feature, sql :tickets: 2804, 2823, 2734 An overhaul of expression handling for special symbols, particularly with conjunctions, e.g. ``None``, :func:`.expression.null`, :func:`.expression.true`, :func:`.expression.false`, including consistency in rendering NULL in conjunctions, "short-circuiting" of :func:`.and_` and :func:`.or_` expressions which contain boolean constants, and rendering of boolean constants and expressions as compared to "1" or "0" for backends that don't feature ``true``/``false`` constants. .. seealso:: :ref:`migration_2804` .. change:: :tags: feature, sql :tickets: 2838 The typing system now handles the task of rendering "literal bind" values, e.g. values that are normally bound parameters but due to context must be rendered as strings, typically within DDL constructs such as CHECK constraints and indexes (note that "literal bind" values became used by DDL as of :ticket:`2742`). A new method :meth:`.TypeEngine.literal_processor` serves as the base, and :meth:`.TypeDecorator.process_literal_param` is added to allow wrapping of a native literal rendering method. .. seealso:: :ref:`change_2838` .. change:: :tags: feature, sql :tickets: 2716 The :meth:`.Table.tometadata` method now produces copies of all :attr:`.SchemaItem.info` dictionaries from all :class:`.SchemaItem` objects within the structure including columns, constraints, foreign keys, etc. As these dictionaries are copies, they are independent of the original dictionary.
Previously, only the ``.info`` dictionary of :class:`.Column` was transferred within this operation, and it was only linked in place, not copied. .. change:: :tags: feature, postgresql :tickets: 2840 Added support for rendering ``SMALLSERIAL`` when a :class:`.SmallInteger` type is used on a primary key autoincrement column, based on server version detection of Postgresql version 9.2 or greater. .. change:: :tags: feature, mysql :tickets: 2817 The MySQL :class:`.mysql.SET` type now features the same auto-quoting behavior as that of :class:`.mysql.ENUM`. Quotes are not required when setting up the value, but quotes that are present will be auto-detected along with a warning. This also helps with Alembic where the SET type doesn't render with quotes. .. change:: :tags: feature, sql The ``default`` argument of :class:`.Column` now accepts a class or object method as an argument, in addition to a standalone function; it will properly detect whether the "context" argument is accepted or not. .. change:: :tags: bug, sql :tickets: 2835 The "name" attribute is set on :class:`.Index` before the "attach" events are called, so that attachment events can be used to dynamically generate a name for the index based on the parent table and/or columns. .. change:: :tags: bug, engine :tickets: 2748 The method signature of :meth:`.Dialect.reflecttable`, which in all known cases is provided by :class:`.DefaultDialect`, has been tightened to expect ``include_columns`` and ``exclude_columns`` arguments without any kw option, reducing ambiguity - previously ``exclude_columns`` was missing. .. change:: :tags: bug, sql :tickets: 2831 The erroneous kw arg "schema" has been removed from the :class:`.ForeignKey` object. This was an accidental commit that did nothing; a warning is raised in 0.8.3 when this kw arg is used. .. change:: :tags: feature, orm :tickets: 1418 Added a new load option :func:`.orm.load_only`. This allows a series of column names to be specified as loading "only" those attributes, deferring the rest. .. change:: :tags: feature, orm :tickets: 1418 The system of loader options has been entirely rearchitected to build upon a much more comprehensive base, the :class:`.Load` object. This base allows any common loader option like :func:`.joinedload`, :func:`.defer`, etc. to be used in a "chained" style for the purpose of specifying options down a path, such as ``joinedload("foo").subqueryload("bar")``. The new system supersedes the usage of dot-separated path names, multiple attributes within options, and the usage of ``_all()`` options. .. seealso:: :ref:`feature_1418` .. change:: :tags: feature, orm :tickets: 2824 The :func:`.composite` construct now maintains the return object when used in a column-oriented :class:`.Query`, rather than expanding out into individual columns. This makes use of the new :class:`.Bundle` feature internally. This behavior is backwards incompatible; to select from a composite column which will expand out, use ``MyClass.some_composite.clauses``. .. seealso:: :ref:`migration_2824` .. change:: :tags: feature, orm :tickets: 2824 A new construct :class:`.Bundle` is added, which allows for specification of groups of column expressions to a :class:`.Query` construct. The group of columns is returned as a single tuple by default. The behavior of :class:`.Bundle` can be overridden, however, to provide any sort of result processing to the returned row. The behavior of :class:`.Bundle` is also embedded into composite attributes now when they are used in a column-oriented :class:`.Query`.
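A minimal sketch of :class:`.Bundle` in use; the mapped class and session here are hypothetical::

    from sqlalchemy.orm import Bundle

    # "Data" is a hypothetical mapped class with d1 / d2 columns
    bn = Bundle('mybundle', Data.d1, Data.d2)
    for row in session.query(bn).filter(bn.c.d1 == 'd1'):
        print(row.mybundle.d1, row.mybundle.d2)

..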
seealso:: :ref:`change_2824` :ref:`migration_2824` .. change:: :tags: bug, sql :tickets: 2812 A rework to the way that "quoted" identifiers are handled, in that instead of relying upon various ``quote=True`` flags being passed around, these flags are converted into rich string objects with quoting information included at the point at which they are passed to common schema constructs like :class:`.Table`, :class:`.Column`, etc. This solves the issue of various methods that don't correctly honor the "quote" flag such as :meth:`.Engine.has_table` and related methods. The :class:`.quoted_name` object is a string subclass that can also be used explicitly if needed; the object will hold onto the quoting preferences passed and will also bypass the "name normalization" performed by dialects that standardize on uppercase symbols, such as Oracle, Firebird and DB2. The upshot is that the "uppercase" backends can now work with force-quoted names, such as lowercase-quoted names and new reserved words. .. seealso:: :ref:`change_2812` .. change:: :tags: feature, orm :tickets: 2793 The ``version_id_generator`` parameter of ``Mapper`` can now be specified to rely upon server generated version identifiers, using triggers or other database-provided versioning features, or via an optional programmatic value, by setting ``version_id_generator=False``. When using a server-generated version identifier, the ORM will use RETURNING when available to immediately load the new version value, else it will emit a second SELECT. .. change:: :tags: feature, orm :tickets: 2793 The ``eager_defaults`` flag of :class:`.Mapper` will now allow the newly generated default values to be fetched using an inline RETURNING clause, rather than a second SELECT statement, for backends that support RETURNING. .. change:: :tags: feature, core :tickets: 2793 Added a new variant to :meth:`.UpdateBase.returning` called :meth:`.ValuesBase.return_defaults`; this allows arbitrary columns to be added to the RETURNING clause of the statement without interfering with the compiler's usual "implicit returning" feature, which is used to efficiently fetch newly generated primary key values. For supporting backends, a dictionary of all fetched values is present at :attr:`.ResultProxy.returned_defaults`. .. change:: :tags: bug, mysql Improved support for the cymysql driver, supporting version 0.6.5, courtesy Hajime Nakagami. .. change:: :tags: general A large refactoring of packages has reorganized the import structure of many Core modules as well as some aspects of the ORM modules. In particular ``sqlalchemy.sql`` has been broken out into several more modules than before so that the very large size of ``sqlalchemy.sql.expression`` is now pared down. The effort has focused on a large reduction in import cycles. Additionally, the system of API functions in ``sqlalchemy.sql.expression`` and ``sqlalchemy.orm`` has been reorganized to eliminate redundancy in documentation between the functions vs. the objects they produce. .. change:: :tags: feature, orm Added a new attribute :attr:`.Session.info` to :class:`.Session`; this is a dictionary where applications can store arbitrary data local to a :class:`.Session`. The contents of :attr:`.Session.info` can also be initialized using the ``info`` argument of :class:`.Session` or :class:`.sessionmaker`; a short sketch follows below. .. change:: :tags: feature, general, py3k :tickets: 2161 The C extensions are ported to Python 3 and will build under any supported CPython 2 or 3 environment.
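Returning to the :attr:`.Session.info` entry above, a minimal sketch; the dictionary key used here is purely illustrative::

    from sqlalchemy.orm import sessionmaker

    # seed the info dictionary at sessionmaker time
    Session = sessionmaker(info={'request_id': None})

    session = Session()
    session.info['request_id'] = 'abc123'

..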
change:: :tags: feature, orm :tickets: 2268 Removal of event listeners is now implemented. The feature is provided via the :func:`.event.remove` function. .. seealso:: :ref:`feature_2268` .. change:: :tags: feature, orm :tickets: 2789 The mechanism by which attribute events pass along an :class:`.AttributeImpl` as an "initiator" token has been changed; the object is now an event-specific object called :class:`.attributes.Event`. Additionally, the attribute system no longer halts events based on a matching "initiator" token; this logic has been moved to be specific to ORM backref event handlers, which are the typical source of the re-propagation of an attribute event onto subsequent append/set/remove operations. End user code which emulates the behavior of backrefs must now ensure that recursive event propagation schemes are halted, if the scheme does not use the backref handlers. Using this new system, backref handlers can now perform a "two-hop" operation when an object is appended to a collection, associated with a new many-to-one, de-associated with the previous many-to-one, and then removed from a previous collection. Before this change, the last step of removal from the previous collection would not occur. .. seealso:: :ref:`migration_2789` .. change:: :tags: feature, sql :tickets: 722 Added new method to the :func:`.insert` construct :meth:`.Insert.from_select`. Given a list of columns and a selectable, renders ``INSERT INTO (table) (columns) SELECT ..``. While this feature is highlighted as part of 0.9, it is also backported to 0.8.3. A short sketch follows below. .. seealso:: :ref:`feature_722` .. change:: :tags: feature, engine :tickets: 2770 New events added to :class:`.ConnectionEvents`:

* :meth:`.ConnectionEvents.engine_connect`
* :meth:`.ConnectionEvents.set_connection_execution_options`
* :meth:`.ConnectionEvents.set_engine_execution_options`

.. change:: :tags: bug, sql :tickets: 1765 The resolution of :class:`.ForeignKey` objects to their target :class:`.Column` has been reworked to be as immediate as possible, based on the moment that the target :class:`.Column` is associated with the same :class:`.MetaData` as this :class:`.ForeignKey`, rather than waiting for the first time a join is constructed, or similar. This along with other improvements allows earlier detection of some foreign key configuration issues. Also included here is a rework of the type-propagation system, so that it should now be reliable to set the type as ``None`` on any :class:`.Column` that refers to another via :class:`.ForeignKey` - the type will be copied from the target column as soon as that other column is associated, and now works for composite foreign keys as well. .. seealso:: :ref:`migration_1765` .. change:: :tags: feature, sql :tickets: 2744, 2734 Provided a new attribute for :class:`.TypeDecorator` called :attr:`.TypeDecorator.coerce_to_is_types`, to make it easier to control how comparisons using ``==`` or ``!=`` to ``None`` and boolean types go about producing an ``IS`` expression, or a plain equality expression with a bound parameter. .. change:: :tags: feature, pool :tickets: 2752 Added pool logging for "rollback-on-return" and the less used "commit-on-return". This is enabled with the rest of pool "debug" logging.
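Referring back to the :meth:`.Insert.from_select` entry above, a minimal sketch; the tables are hypothetical::

    from sqlalchemy import MetaData, Table, Column, Integer, select

    m = MetaData()
    src = Table('src', m, Column('a', Integer), Column('b', Integer),
                Column('c', Integer))
    dest = Table('dest', m, Column('a', Integer), Column('b', Integer))

    sel = select([src.c.a, src.c.b]).where(src.c.c > 5)
    # renders: INSERT INTO dest (a, b) SELECT src.a, src.b
    #          FROM src WHERE src.c > :c_1
    ins = dest.insert().from_select(['a', 'b'], sel)

..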
change:: :tags: bug, orm, associationproxy :tickets: 2751 Added an additional criterion to the ==, != comparators, used with scalar values, for comparisons to None to also take into account the association record itself being non-present, in addition to the existing test for the scalar endpoint on the association record being NULL. Previously, comparing ``Cls.scalar == None`` would return records for which ``Cls.associated`` was present and ``Cls.associated.scalar`` is None, but not rows for which ``Cls.associated`` is non-present. More significantly, the inverse operation ``Cls.scalar != None`` *would* return ``Cls`` rows for which ``Cls.associated`` was non-present. The case for ``Cls.scalar != 'somevalue'`` is also modified to act more like a direct SQL comparison; only rows for which ``Cls.associated`` is present and ``Associated.scalar`` is non-NULL and not equal to ``'somevalue'`` are returned. Previously, this would be a simple ``NOT EXISTS``. Also added a special use case where you can call ``Cls.scalar.has()`` with no arguments, when ``Cls.scalar`` is a column-based value - this returns whether or not ``Cls.associated`` has any rows present, regardless of whether or not ``Cls.associated.scalar`` is NULL or not. .. seealso:: :ref:`migration_2751` .. change:: :tags: feature, orm :tickets: 2587 A major change regarding how the ORM constructs joins where the right side is itself a join or left outer join. The ORM is now configured to allow simple nesting of joins of the form ``a JOIN (b JOIN c ON b.id=c.id) ON a.id=b.id``, rather than forcing the right side into a ``SELECT`` subquery. This should allow significant performance improvements on most backends, most particularly MySQL. The one database backend that has for many years held back this change, SQLite, is now addressed by moving the production of the ``SELECT`` subquery from the ORM to the SQL compiler, so that a right-nested join on SQLite will still ultimately render with a ``SELECT``, while all other backends are no longer impacted by this workaround. As part of this change, a new argument ``flat=True`` has been added to the :func:`.orm.aliased`, :meth:`.Join.alias`, and :func:`.orm.with_polymorphic` functions, which allows an "alias" of a JOIN to be produced which applies an anonymous alias to each component table within the join, rather than producing a subquery. .. seealso:: :ref:`feature_joins_09` .. change:: :tags: bug, orm :tickets: 2369 Fixed an obscure bug where the wrong results would be fetched when joining/joinedloading across a many-to-many relationship to a single-table-inheriting subclass with a specific discriminator value, due to "secondary" rows that would come back. The "secondary" and right-side tables are now inner joined inside of parenthesis for all ORM joins on many-to-many relationships so that the left->right join can be accurately filtered. This change was made possible by finally addressing the issue with right-nested joins outlined in :ticket:`2587`. .. seealso:: :ref:`feature_joins_09` .. change:: :tags: bug, mssql, pyodbc :tickets: 2355 Fixes to MSSQL with Python 3 + pyodbc, including that statements are passed correctly. .. change:: :tags: feature, sql :tickets: 1068 A :func:`~sqlalchemy.sql.expression.label` construct will now render as its name alone in an ``ORDER BY`` clause, if that label is also referred to in the columns clause of the select, instead of rewriting the full expression.
This gives the database a better chance to optimize the evaluation of the same expression in two different contexts. .. seealso:: :ref:`migration_1068` .. change:: :tags: feature, firebird :tickets: 2504 The ``fdb`` dialect is now the default dialect when specified without a dialect qualifier, i.e. ``firebird://``, per the Firebird project publishing ``fdb`` as their official Python driver. .. change:: :tags: feature, general, py3k :tickets: 2671 The codebase is now "in-place" for Python 2 and 3, and the need to run 2to3 has been removed. Compatibility now targets Python 2.6 and onward. .. change:: :tags: feature, oracle, py3k The Oracle unit tests with cx_oracle now pass fully under Python 3. .. change:: :tags: bug, orm :tickets: 2736 The "auto-aliasing" behavior of the :meth:`.Query.select_from` method has been turned off. The specific behavior is now available via a new method, :meth:`.Query.select_entity_from`. The auto-aliasing behavior here was never well documented and is generally not what's desired, as :meth:`.Query.select_from` has become more oriented towards controlling how a JOIN is rendered. :meth:`.Query.select_entity_from` will also be made available in 0.8 so that applications which rely on the auto-aliasing can shift over to this method. .. seealso:: :ref:`migration_2736` SQLAlchemy-1.0.11/doc/build/changelog/changelog_05.rst0000664000175000017500000033064412636375552023432 0ustar classicclassic00000000000000 ============== 0.5 Changelog ============== .. changelog:: :version: 0.5.9 :released: .. change:: :tags: sql :tickets: 1661 Fixed erroneous self_group() call in expression package. .. changelog:: :version: 0.5.8 :released: Sat Jan 16 2010 .. change:: :tags: sql :tickets: The copy() method on Column now supports uninitialized, unnamed Column objects. This allows easy creation of declarative helpers which place common columns on multiple subclasses. .. change:: :tags: sql :tickets: Default generators like Sequence() translate correctly across a copy() operation. .. change:: :tags: sql :tickets: Sequence() and other DefaultGenerator objects are accepted as the value for the "default" and "onupdate" keyword arguments of Column, in addition to being accepted positionally. .. change:: :tags: sql :tickets: 1568, 1617 Fixed a column arithmetic bug that affected column correspondence for cloned selectables which contain free-standing column expressions. This bug is generally only noticeable when exercising newer ORM behavior only available in 0.6 via, but is more correct at the SQL expression level as well. .. change:: :tags: postgresql :tickets: 1647 The extract() function, which was slightly improved in 0.5.7, needed a lot more work to generate the correct typecast (the typecasts appear to be necessary in PG's EXTRACT quite a lot of the time). The typecast is now generated using a rule dictionary based on PG's documentation for date/time/interval arithmetic. It also accepts text() constructs again, which was broken in 0.5.7. .. change:: :tags: firebird :tickets: 1646 Recognize more errors as disconnections. .. changelog:: :version: 0.5.7 :released: Sat Dec 26 2009 .. change:: :tags: orm :tickets: 1543 contains_eager() now works with the automatically generated subquery that results when you say "query(Parent).join(Parent.somejoinedsubclass)", i.e. when Parent joins to a joined-table-inheritance subclass. Previously contains_eager() would erroneously add the subclass table to the query separately, producing a cartesian product.
An example is in the ticket description. .. change:: :tags: orm :tickets: 1553 query.options() now only propagate to loaded objects for potential further sub-loads only for options where such behavior is relevant, keeping various unserializable options like those generated by contains_eager() out of individual instance states. .. change:: :tags: orm :tickets: 1054 Session.execute() now locates table- and mapper-specific binds based on a passed in expression which is an insert()/update()/delete() construct. .. change:: :tags: orm :tickets: Session.merge() now properly overwrites a many-to-one or uselist=False attribute to None if the attribute is also None in the given object to be merged. .. change:: :tags: orm :tickets: 1618 Fixed a needless select which would occur when merging transient objects that contained a null primary key identifier. .. change:: :tags: orm :tickets: 1585 Mutable collection passed to the "extension" attribute of relation(), column_property() etc. will not be mutated or shared among multiple instrumentation calls, preventing duplicate extensions, such as backref populators, from being inserted into the list. .. change:: :tags: orm :tickets: 1504 Fixed the call to get_committed_value() on CompositeProperty. .. change:: :tags: orm :tickets: 1602 Fixed bug where Query would crash if a join() with no clear "left" side were called when a non-mapped column entity appeared in the columns list. .. change:: :tags: orm :tickets: 1616, 1480 Fixed bug whereby composite columns wouldn't load properly when configured on a joined-table subclass, introduced in version 0.5.6 as a result of the fix for. thx to Scott Torborg. .. change:: :tags: orm :tickets: 1556 The "use get" behavior of many-to-one relations, i.e. that a lazy load will fallback to the possibly cached query.get() value, now works across join conditions where the two compared types are not exactly the same class, but share the same "affinity" - i.e. Integer and SmallInteger. Also allows combinations of reflected and non-reflected types to work with 0.5 style type reflection, such as PGText/Text (note 0.6 reflects types as their generic versions). .. change:: :tags: orm :tickets: 1436 Fixed bug in query.update() when passing Cls.attribute as keys in the value dict and using synchronize_session='expire' ('fetch' in 0.6). .. change:: :tags: sql :tickets: 1603 Fixed bug in two-phase transaction whereby commit() method didn't set the full state which allows subsequent close() call to succeed. .. change:: :tags: sql :tickets: Fixed the "numeric" paramstyle, which apparently is the default paramstyle used by Informixdb. .. change:: :tags: sql :tickets: 1574 Repeat expressions in the columns clause of a select are deduped based on the identity of each clause element, not the actual string. This allows positional elements to render correctly even if they all render identically, such as "qmark" style bind parameters. .. change:: :tags: sql :tickets: 1632 The cursor associated with connection pool connections (i.e. _CursorFairy) now proxies `__iter__()` to the underlying cursor correctly. .. change:: :tags: sql :tickets: 1556 types now support an "affinity comparison" operation, i.e. that an Integer/SmallInteger are "compatible", or a Text/String, PickleType/Binary, etc. Part of. .. change:: :tags: sql :tickets: 1641 Fixed bug preventing alias() of an alias() from being cloned or adapted (occurs frequently in ORM operations). .. 
change:: :tags: sqlite :tickets: 1439 sqlite dialect properly generates CREATE INDEX for a table that is in an alternate schema. .. change:: :tags: postgresql :tickets: 1085 Added support for reflecting the DOUBLE PRECISION type, via a new postgres.PGDoublePrecision object. This is postgresql.DOUBLE_PRECISION in 0.6. .. change:: :tags: postgresql :tickets: 460 Added support for reflecting the INTERVAL YEAR TO MONTH and INTERVAL DAY TO SECOND syntaxes of the INTERVAL type. .. change:: :tags: postgresql :tickets: 1576 Corrected the "has_sequence" query to take current schema, or explicit sequence-stated schema, into account. .. change:: :tags: postgresql :tickets: 1611 Fixed the behavior of extract() to apply operator precedence rules to the "::" operator when applying the "timestamp" cast - ensures proper parenthesization. .. change:: :tags: mssql :tickets: 1561 Changed the name of TrustedConnection to Trusted_Connection when constructing pyodbc connect arguments .. change:: :tags: oracle :tickets: 1637 The "table_names" dialect function, used by MetaData .reflect(), omits "index overflow tables", a system table generated by Oracle when "index only tables" with overflow are used. These tables aren't accessible via SQL and can't be reflected. .. change:: :tags: ext :tickets: 1570, 1523 A column can be added to a joined-table declarative superclass after the class has been constructed (i.e. via class-level attribute assignment), and the column will be propagated down to subclasses. This is the reverse situation as that of, fixed in 0.5.6. .. change:: :tags: ext :tickets: 1491 Fixed a slight inaccuracy in the sharding example. Comparing equivalence of columns in the ORM is best accomplished using col1.shares_lineage(col2). .. change:: :tags: ext :tickets: 1606 Removed unused `load()` method from ShardedQuery. .. changelog:: :version: 0.5.6 :released: Sat Sep 12 2009 .. change:: :tags: orm :tickets: 1300 Fixed bug whereby inheritance discriminator part of a composite primary key would fail on updates. Continuation of. .. change:: :tags: orm :tickets: 1507 Fixed bug which disallowed one side of a many-to-many bidirectional reference to declare itself as "viewonly" .. change:: :tags: orm :tickets: 1526 Added an assertion that prevents a @validates function or other AttributeExtension from loading an unloaded collection such that internal state may be corrupted. .. change:: :tags: orm :tickets: 1519 Fixed bug which prevented two entities from mutually replacing each other's primary key values within a single flush() for some orderings of operations. .. change:: :tags: orm :tickets: 1485 Fixed an obscure issue whereby a joined-table subclass with a self-referential eager load on the base class would populate the related object's "subclass" table with data from the "subclass" table of the parent. .. change:: :tags: orm :tickets: 1477 relations() now have greater ability to be "overridden", meaning a subclass that explicitly specifies a relation() overriding that of the parent class will be honored during a flush. This is currently to support many-to-many relations from concrete inheritance setups. Outside of that use case, YMMV. .. change:: :tags: orm :tickets: 1483 Squeezed a few more unnecessary "lazy loads" out of relation(). When a collection is mutated, many-to-one backrefs on the other side will not fire off to load the "old" value, unless "single_parent=True" is set. 
A direct assignment of a many-to-one still loads the "old" value in order to update backref collections on that value, which may be present in the session already, thus maintaining the 0.5 behavioral contract. .. change:: :tags: orm :tickets: 1480 Fixed bug whereby a load/refresh of joined table inheritance attributes which were based on column_property() or similar would fail to evaluate. .. change:: :tags: orm :tickets: 1488 Improved support for MapperProperty objects overriding that of an inherited mapper for non-concrete inheritance setups - attribute extensions won't randomly collide with each other. .. change:: :tags: orm :tickets: 1487 UPDATE and DELETE do not support ORDER BY, LIMIT, OFFSET, etc. in standard SQL. Query.update() and Query.delete() now raise an exception if any of limit(), offset(), order_by(), group_by(), or distinct() have been called. .. change:: :tags: orm :tickets: Added AttributeExtension to sqlalchemy.orm.__all__ .. change:: :tags: orm :tickets: 1476 Improved error message when query() is called with a non-SQL /entity expression. .. change:: :tags: orm :tickets: 1440 Using False or 0 as a polymorphic discriminator now works on the base class as well as a subclass. .. change:: :tags: orm :tickets: 1424 Added enable_assertions(False) to Query which disables the usual assertions for expected state - used by Query subclasses to engineer custom state.. See http://www.sqlalchemy.org/trac/wiki/UsageRecipes/PreFilteredQuery for an example. .. change:: :tags: orm :tickets: 1501 Fixed recursion issue which occurred if a mapped object's `__len__()` or `__nonzero__()` method resulted in state changes. .. change:: :tags: orm :tickets: 1506 Fixed incorrect exception raise in Weak/StrongIdentityMap.add() .. change:: :tags: orm :tickets: 1522 Fixed the error message for "could not find a FROM clause" in query.join() which would fail to issue correctly if the query was against a pure SQL construct. .. change:: :tags: orm :tickets: 1486 Fixed a somewhat hypothetical issue which would result in the wrong primary key being calculated for a mapper using the old polymorphic_union function - but this is old stuff. .. change:: :tags: sql :tickets: 1373 Fixed column.copy() to copy defaults and onupdates. .. change:: :tags: sql :tickets: Fixed a bug in extract() introduced in 0.5.4 whereby the string "field" argument was getting treated as a ClauseElement, causing various errors within more complex SQL transformations. .. change:: :tags: sql :tickets: 1420 Unary expressions such as DISTINCT propagate their type handling to result sets, allowing conversions like unicode and such to take place. .. change:: :tags: sql :tickets: 1482 Fixed bug in Table and Column whereby passing empty dict for "info" argument would raise an exception. .. change:: :tags: oracle :tickets: 1309 Backported 0.6 fix for Oracle alias names not getting truncated. .. change:: :tags: ext :tickets: 1446 The collection proxies produced by associationproxy are now pickleable. A user-defined proxy_factory however is still not pickleable unless it defines __getstate__ and __setstate__. .. change:: :tags: ext :tickets: 1468 Declarative will raise an informative exception if __table_args__ is passed as a tuple with no dict argument. Improved documentation. .. change:: :tags: ext :tickets: 1527 Table objects declared in the MetaData can now be used in string expressions sent to primaryjoin/secondaryjoin/ secondary - the name is pulled from the MetaData of the declarative base. .. 
change:: :tags: ext :tickets: 1523 A column can be added to a joined-table subclass after the class has been constructed (i.e. via class-level attribute assignment). The column is added to the underlying Table as always, but now the mapper will rebuild its "join" to include the new column, instead of raising an error about "no such column, use column_property() instead". .. change:: :tags: test :tickets: Added examples into the test suite so they get exercised regularly and cleaned up a couple of deprecation warnings. .. changelog:: :version: 0.5.5 :released: Mon Jul 13 2009 .. change:: :tags: general :tickets: 970 unit tests have been migrated from unittest to nose. See README.unittests for information on how to run the tests. .. change:: :tags: orm :tickets: The "foreign_keys" argument of relation() will now propagate automatically to the backref in the same way that primaryjoin and secondaryjoin do. For the extremely rare use case where the backref of a relation() has intentionally different "foreign_keys" configured, both sides now need to be configured explicitly (if they do in fact require this setting, see the next note...). .. change:: :tags: orm :tickets: ...the only known (and really, really rare) use case where a different foreign_keys setting was used on the forwards/backwards side, a composite foreign key that partially points to its own columns, has been enhanced such that the fk->itself aspect of the relation won't be used to determine relation direction. .. change:: :tags: orm :tickets: Session.mapper is now *deprecated*. Call session.add() if you'd like a free-standing object to be part of your session. Otherwise, a DIY version of Session.mapper is now documented at http://www.sqlalchemy.org/trac/wiki/UsageRecipes/SessionAwareMapper The method will remain deprecated throughout 0.6. .. change:: :tags: orm :tickets: 1431 Fixed Query being able to join() from individual columns of a joined-table subclass entity, i.e. query(SubClass.foo, SubClass.bar).join(). In most cases, an error "Could not find a FROM clause to join from" would be raised. In a few others, the result would be returned in terms of the base class rather than the subclass - so applications which relied on this erroneous result need to be adjusted. .. change:: :tags: orm :tickets: 1461 Fixed a bug involving contains_eager(), which would apply itself to a secondary (i.e. lazy) load in a particular rare case, producing cartesian products. Improved the targeting of query.options() on secondary loads overall. .. change:: :tags: orm :tickets: Fixed bug introduced in 0.5.4 whereby Composite types fail when default-holding columns are flushed. .. change:: :tags: orm :tickets: 1426 Fixed another 0.5.4 bug whereby mutable attributes (i.e. PickleType) wouldn't be deserialized correctly when the whole object was serialized. .. change:: :tags: orm :tickets: Fixed bug whereby session.is_modified() would raise an exception if any synonyms were in use. .. change:: :tags: orm :tickets: Fixed potential memory leak whereby previously pickled objects placed back in a session would not be fully garbage collected unless the Session were explicitly closed out. .. change:: :tags: orm :tickets: Fixed bug whereby list-based attributes, like pickletype and PGArray, failed to be merged() properly. .. change:: :tags: orm :tickets: Repaired non-working attributes.set_committed_value function. .. change:: :tags: orm :tickets: Trimmed the pickle format for InstanceState which should further reduce the memory footprint of pickled instances.
The format should be backwards compatible with that of 0.5.4 and previous. .. change:: :tags: orm :tickets: 1463 sqlalchemy.orm.join and sqlalchemy.orm.outerjoin are now added to __all__ in sqlalchemy.orm.*. .. change:: :tags: orm :tickets: 1458 Fixed bug where the exception raised by Query would fail when a too-short composite primary key value was passed to get(). .. change:: :tags: sql :tickets: Removed an obscure feature of execute() (including connection, engine, Session) whereby a bindparam() construct can be sent as a key to the params dictionary. This usage is undocumented and is at the core of an issue whereby the bindparam() object created implicitly by a text() construct may have the same hash value as a string placed in the params dictionary and may result in an inappropriate match when computing the final bind parameters. Internal checks for this condition would add significant latency to the critical task of parameter rendering, so the behavior is removed. This is a backwards incompatible change for any application that may have been using this feature; however, the feature has never been documented. .. change:: :tags: engine/pool :tickets: Implemented recreate() for StaticPool. .. changelog:: :version: 0.5.4p2 :released: Tue May 26 2009 .. change:: :tags: sql :tickets: Repaired the printing of SQL exceptions which are not based on parameters or are not executemany() style. .. change:: :tags: postgresql :tickets: Deprecated the hardcoded TIMESTAMP function, which when used as func.TIMESTAMP(value) would render "TIMESTAMP value". This breaks on some platforms as PostgreSQL doesn't allow bind parameters to be used in this context. The hard-coded uppercase is also inappropriate and there's lots of other PG casts that we'd need to support. So instead, use text constructs i.e. select(["timestamp '12/05/09'"]). .. changelog:: :version: 0.5.4p1 :released: Mon May 18 2009 .. change:: :tags: orm :tickets: Fixed an attribute error introduced in 0.5.4 which would occur when merge() was used with an incomplete object. .. changelog:: :version: 0.5.4 :released: Sun May 17 2009 .. change:: :tags: orm :tickets: 1398 Significant performance enhancements regarding Sessions/flush() in conjunction with large mapper graphs, large numbers of objects: - Removed all* O(N) scanning behavior from the flush() process, i.e. operations that were scanning the full session, including an extremely expensive one that was erroneously assuming primary key values were changing when this was not the case. * one edge case remains which may invoke a full scan, if an existing primary key attribute is modified to a new value. - The Session's "weak referencing" behavior is now *full* - no strong references whatsoever are made to a mapped object or related items/collections in its __dict__. Backrefs and other cycles in objects no longer affect the Session's ability to lose all references to unmodified objects. Objects with pending changes still are maintained strongly until flush. The implementation also improves performance by moving the "resurrection" process of garbage collected items to only be relevant for mappings that map "mutable" attributes (i.e. PickleType, composite attrs). This removes overhead from the gc process and simplifies internal behavior. If a "mutable" attribute change is the sole change on an object which is then dereferenced, the mapper will not have access to other attribute state when the UPDATE is issued. This may present itself differently to some MapperExtensions.
The change also affects the internal attribute API, but not the AttributeExtension interface nor any of the publicly documented attribute functions. - The unit of work no longer generates a graph of "dependency" processors for the full graph of mappers during flush(), instead creating such processors only for those mappers which represent objects with pending changes. This saves a tremendous number of method calls in the context of a large interconnected graph of mappers. - Cached a wasteful "table sort" operation that previously occurred multiple times per flush, also removing significant method call count from flush(). - Other redundant behaviors have been simplified in mapper._save_obj(). .. change:: :tags: orm :tickets: Modified query_cls on DynamicAttributeImpl to accept a full mixin version of the AppenderQuery, which allows subclassing the AppenderMixin. .. change:: :tags: orm :tickets: 1300 The "polymorphic discriminator" column may be part of a primary key, and it will be populated with the correct discriminator value. .. change:: :tags: orm :tickets: Fixed the evaluator not being able to evaluate IS NULL clauses. .. change:: :tags: orm :tickets: 1352 Fixed the "set collection" function on "dynamic" relations to initiate events correctly. Previously a collection could only be assigned to a pending parent instance, otherwise modified events would not be fired correctly. Set collection is now compatible with merge(), fixes. .. change:: :tags: orm :tickets: Allowed pickling of PropertyOption objects constructed with instrumented descriptors; previously, pickle errors would occur when pickling an object which was loaded with a descriptor-based option, such as query.options(eagerload(MyClass.foo)). .. change:: :tags: orm :tickets: 1357 Lazy loader will not use get() if the "lazy load" SQL clause matches the clause used by get(), but contains some parameters hardcoded. Previously the lazy strategy would fail with the get(). Ideally get() would be used with the hardcoded parameters but this would require further development. .. change:: :tags: orm :tickets: 1391 MapperOptions and other state associated with query.options() is no longer bundled within callables associated with each lazy/deferred-loading attribute during a load. The options are now associated with the instance's state object just once when it's populated. This removes the need in most cases for per-instance/attribute loader objects, improving load speed and memory overhead for individual instances. .. change:: :tags: orm :tickets: 1360 Fixed another location where autoflush was interfering with session.merge(). Autoflush is disabled completely for the duration of merge() now. .. change:: :tags: orm :tickets: 1406 Fixed bug which prevented "mutable primary key" dependency logic from functioning properly on a one-to-one relation(). .. change:: :tags: orm :tickets: Fixed bug in relation(), introduced in 0.5.3, whereby a self referential relation from a base class to a joined-table subclass would not configure correctly. .. change:: :tags: orm :tickets: Fixed obscure mapper compilation issue when inheriting mappers are used which would result in un-initialized attributes. .. change:: :tags: orm :tickets: Fixed documentation for session weak_identity_map - the default value is True, indicating a weak referencing map in use. ..
change:: :tags: orm :tickets: 1376 Fixed a unit of work issue whereby the foreign key attribute on an item contained within a collection owned by an object being deleted would not be set to None if the relation() was self-referential. .. change:: :tags: orm :tickets: 1378 Fixed Query.update() and Query.delete() failures with eagerloaded relations. .. change:: :tags: orm :tickets: It is now an error to specify both columns of a binary primaryjoin condition in the foreign_keys or remote_side collection. Previously this was just nonsensical, but would succeed in a non-deterministic way. .. change:: :tags: schema :tickets: 594, 1341 Added a quote_schema() method to the IdentifierPreparer class so that dialects can override how schemas get handled. This enables the MSSQL dialect to treat schemas as multipart identifiers, such as 'database.owner'. .. change:: :tags: sql :tickets: Back-ported the "compiler" extension from SQLA 0.6. This is a standardized interface which allows the creation of custom ClauseElement subclasses and compilers. In particular it's handy as an alternative to text() when you'd like to build a construct that has database-specific compilations. See the extension docs for details. .. change:: :tags: sql :tickets: 1413 Exception messages are truncated when the list of bound parameters is larger than 10, preventing enormous multi-page exceptions from filling up screens and logfiles for large executemany() statements. .. change:: :tags: sql :tickets: ``sqlalchemy.extract()`` is now dialect sensitive and can extract components of timestamps idiomatically across the supported databases, including SQLite. .. change:: :tags: sql :tickets: 1353 Fixed __repr__() and other _get_colspec() methods on ForeignKey constructed from __clause_element__() style construct (i.e. declarative columns). .. change:: :tags: mysql :tickets: 1405 Reflecting a FOREIGN KEY construct will take into account a dotted schema.tablename combination, if the foreign key references a table in a remote schema. .. change:: :tags: mssql :tickets: Modified how savepoint logic works to prevent it from stepping on non-savepoint oriented routines. Savepoint support is still very experimental. .. change:: :tags: mssql :tickets: 1310 Added in reserved words for MSSQL that cover version 2008 and all prior versions. .. change:: :tags: mssql :tickets: 1343 Corrected problem with information schema not working with a binary collation based database. Cleaned up information schema since it is only used by mssql now. .. change:: :tags: sqlite :tickets: 1402 Corrected the SLBoolean type so that it properly treats only 1 as True. .. change:: :tags: sqlite :tickets: 1273 Corrected the float type so that it correctly maps to a SLFloat type when being reflected. .. change:: :tags: extensions :tickets: 1379 Fixed adding of deferred or other column properties to a declarative class. .. changelog:: :version: 0.5.3 :released: Tue Mar 24 2009 .. change:: :tags: orm :tickets: 1315 The "objects" argument to session.flush() is deprecated. State which represents the linkage between a parent and child object does not support "flushed" status on one side of the link and not the other, so supporting this operation leads to misleading results. .. change:: :tags: orm :tickets: Query now implements __clause_element__() which produces its selectable, which means a Query instance can be accepted in many SQL expressions, including col.in_(query), union(query1, query2), select([foo]).select_from(query), etc.
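A minimal sketch of the ``__clause_element__()`` behavior noted just above; the ``session``, ``User`` and ``Address`` mappings are assumed to exist::

    # a Query used directly inside in_(), via __clause_element__()
    subq = session.query(Address.user_id).filter(
        Address.email.like('%@example.com'))
    rows = session.query(User).filter(User.id.in_(subq)).all()

..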
change:: :tags: orm :tickets: 1337 Query.join() can now construct multiple FROM clauses, if needed. Such as, query(A, B).join(A.x).join(B.y) might say SELECT A.*, B.* FROM A JOIN X, B JOIN Y. Eager loading can also tack its joins onto those multiple FROM clauses. .. change:: :tags: orm :tickets: 1347 Fixed bug in dynamic_loader() where append/remove events after construction time were not being propagated to the UOW to pick up on flush(). .. change:: :tags: orm :tickets: Fixed bug where column_prefix wasn't being checked before not mapping an attribute that already had class-level name present. .. change:: :tags: orm :tickets: 1315 a session.expire() on a particular collection attribute will clear any pending backref additions as well, so that the next access correctly returns only what was present in the database. Presents some degree of a workaround for, although we are considering removing the flush([objects]) feature altogether. .. change:: :tags: orm :tickets: Session.scalar() now converts raw SQL strings to text() the same way Session.execute() does and accepts same alternative \**kw args. .. change:: :tags: orm :tickets: improvements to the "determine direction" logic of relation() such that the direction of tricky situations like mapper(A.join(B)) -> relation-> mapper(B) can be determined. .. change:: :tags: orm :tickets: 1306 When flushing partial sets of objects using session.flush([somelist]), pending objects which remain pending after the operation won't inadvertently be added as persistent. .. change:: :tags: orm :tickets: 1314 Added "post_configure_attribute" method to InstrumentationManager, so that the "listen_for_events.py" example works again. .. change:: :tags: orm :tickets: a forward and complementing backwards reference which are both of the same direction, i.e. ONETOMANY or MANYTOONE, is now detected, and an error message is raised. Saves crazy CircularDependencyErrors later on. .. change:: :tags: orm :tickets: Fixed bugs in Query regarding simultaneous selection of multiple joined-table inheritance entities with common base classes: - previously the adaption applied to "B" on "A JOIN B" would be erroneously partially applied to "A". - comparisons on relations (i.e. A.related==someb) were not getting adapted when they should. - Other filterings, like query(A).join(A.bs).filter(B.foo=='bar'), were erroneously adapting "B.foo" as though it were an "A". .. change:: :tags: orm :tickets: 1325 Fixed adaptation of EXISTS clauses via any(), has(), etc. in conjunction with an aliased object on the left and of_type() on the right. .. change:: :tags: orm :tickets: Added an attribute helper method ``set_committed_value`` in sqlalchemy.orm.attributes. Given an object, attribute name, and value, will set the value on the object as part of its "committed" state, i.e. state that is understood to have been loaded from the database. Helps with the creation of homegrown collection loaders and such. .. change:: :tags: orm :tickets: Query won't fail with weakref error when a non-mapper/class instrumented descriptor is passed, raises "Invalid column expression". .. change:: :tags: orm :tickets: Query.group_by() properly takes into account aliasing applied to the FROM clause, such as with select_from(), using with_polymorphic(), or using from_self(). .. change:: :tags: sql :tickets: An alias() of a select() will convert to a "scalar subquery" when used in an unambiguously scalar context, i.e. it's used in a comparison operation. This applies to the ORM when using query.subquery() as well.
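A small sketch of the scalar-subquery coercion described just above, assuming a ``users`` table exists; per the entry, the alias is coerced because it appears in a comparison::

    from sqlalchemy import select, func

    max_id = select([func.max(users.c.id)]).alias()
    # used in a comparison, the alias renders as
    # (SELECT max(users.id) FROM users) rather than a FROM entry
    stmt = select([users]).where(users.c.id == max_id)

..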
change:: :tags: sql :tickets: 1302 Fixed missing _label attribute on Function object, others when used in a select() with use_labels (such as when used in an ORM column_property()). .. change:: :tags: sql :tickets: 1309 anonymous alias names now truncate down to the max length allowed by the dialect. More significant on DBs like Oracle with very small character limits. .. change:: :tags: sql :tickets: the __selectable__() interface has been replaced entirely by __clause_element__(). .. change:: :tags: sql :tickets: 1299 The per-dialect cache used by TypeEngine to cache dialect-specific types is now a WeakKeyDictionary. This is to prevent dialect objects from being referenced forever for an application that creates an arbitrarily large number of engines or dialects. There is a small performance penalty which will be resolved in 0.6. .. change:: :tags: sqlite :tickets: Fixed SQLite reflection methods so that non-present cursor.description, which triggers an auto-cursor close, will be detected so that no results doesn't fail on recent versions of pysqlite which raise an error when fetchone() called with no rows present. .. change:: :tags: postgresql :tickets: Index reflection won't fail when an index with multiple expressions is encountered. .. change:: :tags: postgresql :tickets: 1327 Added PGUuid and PGBit types to sqlalchemy.databases.postgres. .. change:: :tags: postgresql :tickets: 1327 Reflection of unknown PG types won't crash when those types are specified within a domain. .. change:: :tags: mssql :tickets: Preliminary support for pymssql 1.0.1 .. change:: :tags: mssql :tickets: Corrected issue on mssql where max_identifier_length was not being respected. .. change:: :tags: extensions :tickets: Fixed a recursive pickling issue in serializer, triggered by an EXISTS or other embedded FROM construct. .. change:: :tags: extensions :tickets: Declarative locates the "inherits" class using a search through __bases__, to skip over mixins that are local to subclasses. .. change:: :tags: extensions :tickets: Declarative figures out joined-table inheritance primary join condition even if "inherits" mapper argument is given explicitly. .. change:: :tags: extensions :tickets: Declarative will properly interpret the "foreign_keys" argument on a backref() if it's a string. .. change:: :tags: extensions :tickets: Declarative will accept a table-bound column as a property when used in conjunction with __table__, if the column is already present in __table__. The column will be remapped to the given key the same way as when added to the mapper() properties dict. .. changelog:: :version: 0.5.2 :released: Sat Jan 24 2009 .. change:: :tags: orm :tickets: Further refined 0.5.1's warning about delete-orphan cascade placed on a many-to-many relation. First, the bad news: the warning will apply to both many-to-many as well as many-to-one relations. This is necessary since in both cases, SQLA does not scan the full set of potential parents when determining "orphan" status - for a persistent object it only detects an in-python de-association event to establish the object as an "orphan". Next, the good news: to support one-to-one via a foreign key or association table, or to support one-to-many via an association table, a new flag single_parent=True may be set which indicates objects linked to the relation are only meant to have a single parent. The relation will raise an error if multiple parent-association events occur within Python.
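A brief sketch of the ``single_parent=True`` flag described just above, using hypothetical classic-mapping names (``Item``, ``Keyword`` and their tables are invented for the example)::

    mapper(Keyword, keywords_table)
    mapper(Item, items_table, properties={
        # one-to-many via an association table; each Keyword is
        # meant to have only one parent Item, which permits the
        # delete-orphan cascade here
        'keywords': relation(Keyword, secondary=item_keywords,
                             cascade='all, delete-orphan',
                             single_parent=True)
    })

..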
change:: :tags: orm :tickets: 1292 Adjusted the attribute instrumentation change from 0.5.1 to fully establish instrumentation for subclasses where the mapper was created after the superclass had already been fully instrumented. .. change:: :tags: orm :tickets: Fixed bug in delete-orphan cascade whereby two one-to-one relations from two different parent classes to the same target class would prematurely expunge the instance. .. change:: :tags: orm :tickets: Fixed an eager loading bug whereby self-referential eager loading would prevent other eager loads, self referential or not, from joining to the parent JOIN properly. Thanks to Alex K for creating a great test case. .. change:: :tags: orm :tickets: session.expire() and related methods will not expire() unloaded deferred attributes. This prevents them from being needlessly loaded when the instance is refreshed. .. change:: :tags: orm :tickets: 1293 query.join()/outerjoin() will now properly join an aliased() construct to the existing left side, even if query.from_self() or query.select_from(someselectable) has been called. .. change:: :tags: sql :tickets: 1284 Further fixes to the "percent signs and spaces in column/table names" functionality. .. change:: :tags: mssql :tickets: 1291 Restored convert_unicode handling. Results were being passed on through without conversion. .. change:: :tags: mssql :tickets: 1282 Really fixing the decimal handling this time.. .. change:: :tags: mssql :tickets: 1289 Modified table reflection code to use only kwargs when constructing tables. .. changelog:: :version: 0.5.1 :released: Sat Jan 17 2009 .. change:: :tags: orm :tickets: Removed an internal join cache which could potentially leak memory when issuing query.join() repeatedly to ad-hoc selectables. .. change:: :tags: orm :tickets: The "clear()", "save()", "update()", "save_or_update()" Session methods have been deprecated, replaced by "expunge_all()" and "add()". "expunge_all()" has also been added to ScopedSession. .. change:: :tags: orm :tickets: Modernized the "no mapped table" exception and added a more explicit __table__/__tablename__ exception to declarative. .. change:: :tags: orm :tickets: 1237 Concrete inheriting mappers now instrument attributes which are inherited from the superclass, but are not defined for the concrete mapper itself, with an InstrumentedAttribute that issues a descriptive error when accessed. .. change:: :tags: orm :tickets: 1237, 781 Added a new `relation()` keyword `back_populates`. This allows configuration of backreferences using explicit relations. This is required when creating bidirectional relations between a hierarchy of concrete mappers and another class. .. change:: :tags: orm :tickets: 1237 Test coverage added for `relation()` objects specified on concrete mappers. .. change:: :tags: orm :tickets: 1276 Query.from_self() as well as query.subquery() both disable the rendering of eager joins inside the subquery produced. The "disable all eager joins" feature is available publicly via a new query.enable_eagerloads() generative. .. change:: :tags: orm :tickets: Added a rudimentary series of set operations to Query that receive Query objects as arguments, including union(), union_all(), intersect(), except_(), intersect_all(), except_all(). See the API documentation for Query.union() for examples. .. change:: :tags: orm :tickets: Fixed bug that prevented Query.join() and eagerloads from attaching to a query that selected from a union or aliased union.
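As an illustration of the Query set operations noted above, a minimal sketch; ``session`` and the ``User`` mapping are hypothetical::

    q1 = session.query(User).filter(User.name.like('a%'))
    q2 = session.query(User).filter(User.name.like('b%'))
    # SELECT ... FROM (SELECT ... UNION SELECT ...) AS anon
    results = q1.union(q2).all()

..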
change:: :tags: orm :tickets: 1237 A short documentation example added for bidirectional relations specified on concrete mappers. .. change:: :tags: orm :tickets: 1269 Mappers now instrument class attributes upon construction with the final InstrumentedAttribute object which remains persistent. The `_CompileOnAttr`/`__getattribute__()` methodology has been removed. The net effect is that Column-based mapped class attributes can now be used fully at the class level without invoking a mapper compilation operation, greatly simplifying typical usage patterns within declarative. .. change:: :tags: orm :tickets: ColumnProperty (and front-end helpers such as ``deferred``) no longer ignores unknown \**keyword arguments. .. change:: :tags: orm :tickets: Fixed a bug with the unitofwork's "row switch" mechanism, i.e. the conversion of INSERT/DELETE into an UPDATE, when combined with joined-table inheritance and an object which contained no defined values for the child table where an UPDATE with no SET clause would be rendered. .. change:: :tags: orm :tickets: 1281 Using delete-orphan on a many-to-many relation is deprecated. This produces misleading or erroneous results since SQLA does not retrieve the full list of "parents" for m2m. To get delete-orphan behavior with an m2m table, use an explicit association class so that the individual association row is treated as a parent. .. change:: :tags: orm :tickets: 1281 delete-orphan cascade always requires delete cascade. Specifying delete-orphan without delete now raises a deprecation warning. .. change:: :tags: sql :tickets: 1256 Improved the methodology for handling percent signs in column names from. Added more tests. MySQL and PostgreSQL dialects still do not issue correct CREATE TABLE statements for identifiers with percent signs in them. .. change:: :tags: schema :tickets: 1214 Index now accepts column-oriented InstrumentedAttributes (i.e. column-based mapped class attributes) as column arguments. .. change:: :tags: schema :tickets: Column with no name (as in declarative) won't raise a NoneType error when its string output is requested (such as in a stack trace). .. change:: :tags: schema :tickets: 1278 Fixed bug when overriding a Column with a ForeignKey on a reflected table, where derived columns (i.e. the "virtual" columns of a select, etc.) would inadvertently call upon schema-level cleanup logic intended only for the original column. .. change:: :tags: declarative :tickets: Can now specify Column objects on subclasses which have no table of their own (i.e. use single table inheritance). The columns will be appended to the base table, but only mapped by the subclass; a short sketch follows this group of entries. .. change:: :tags: declarative :tickets: For both joined and single inheriting subclasses, the subclass will only map those columns which are already mapped on the superclass and those explicit on the subclass. Other columns that are present on the `Table` will be excluded from the mapping by default, which can be disabled by passing a blank `exclude_properties` collection to the `__mapper_args__`. This is so that single-inheriting classes which define their own columns are the only classes to map those columns. The effect is actually a more organized mapping than you'd normally get with explicit `mapper()` calls unless you set up the `exclude_properties` arguments explicitly. .. change:: :tags: declarative :tickets: It's an error to add new Column objects to a declarative class that specified an existing table using __table__.
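A minimal sketch of the declarative single-table-inheritance entry above, with hypothetical class names and the usual ``declarative_base()`` setup assumed::

    class Person(Base):
        __tablename__ = 'people'
        id = Column(Integer, primary_key=True)
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Engineer(Person):
        # no __tablename__ here: single table inheritance; the column
        # is appended to the 'people' table but mapped only on Engineer
        __mapper_args__ = {'polymorphic_identity': 'engineer'}
        primary_language = Column(String(50))

..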
change:: :tags: mysql :tickets: Added the missing keywords from MySQL 4.1 so they get escaped properly. .. change:: :tags: mssql :tickets: 1280 Corrected handling of large decimal values with more robust tests. Removed string manipulation on floats. .. change:: :tags: mssql :tickets: Modified the do_begin handling in mssql to use the Cursor not the Connection so it is DBAPI compatible. .. change:: :tags: mssql :tickets: Corrected SAVEPOINT support on adodbapi by changing the handling of savepoint_release, which is unsupported on mssql. .. changelog:: :version: 0.5.0 :released: Tue Jan 06 2009 .. change:: :tags: general :tickets: Documentation has been converted to Sphinx. In particular, the generated API documentation has been constructed into a full blown "API Reference" section which organizes editorial documentation combined with generated docstrings. Cross linking between sections and API docs is vastly improved, a javascript-powered search feature is provided, and a full index of all classes, functions and members is provided. .. change:: :tags: general :tickets: setup.py now imports setuptools only optionally. If not present, distutils is used. The new "pip" installer is recommended over easy_install as it installs in a more simplified way. .. change:: :tags: general :tickets: added an extremely basic illustration of a PostGIS integration to the examples folder. .. change:: :tags: orm :tickets: Query.with_polymorphic() now accepts a third argument "discriminator" which will replace the value of mapper.polymorphic_on for that query. Mappers themselves no longer require polymorphic_on to be set, even if the mapper has a polymorphic_identity. When not set, the mapper will load non-polymorphically by default. Together, these two features allow a non-polymorphic concrete inheritance setup to use polymorphic loading on a per-query basis, since concrete setups are prone to many issues when used polymorphically in all cases. .. change:: :tags: orm :tickets: dynamic_loader accepts a query_class= to customize the Query classes used for both the dynamic collection and the queries built from it. .. change:: :tags: orm :tickets: 1079 query.order_by() accepts None which will remove any pending order_by state from the query, as well as cancel out any mapper/relation configured ordering. This is primarily useful for overriding the ordering specified on a dynamic_loader(); a short sketch follows this group of entries. .. change:: :tags: sql :tickets: 935 RowProxy objects can be used in place of dictionary arguments sent to connection.execute() and friends. .. change:: :tags: dialect :tickets: Added a new description_encoding attribute on the dialect that is used for encoding the column name when processing the metadata. This usually defaults to utf-8. .. change:: :tags: mssql :tickets: Added in a new MSGenericBinary type. This maps to the Binary type so it can implement the specialized behavior of treating length specified types as fixed-width Binary types and non-length types as an unbound variable length Binary type. .. change:: :tags: mssql :tickets: 1249 Added in new types: MSVarBinary and MSImage. .. change:: :tags: mssql :tickets: Added in the MSReal, MSNText, MSSmallDateTime, MSTime, MSDateTimeOffset, and MSDateTime2 types. .. change:: :tags: sqlite :tickets: 1266 Table reflection now stores the actual DefaultClause value for the column. .. change:: :tags: sqlite :tickets: bugfixes, behavioral changes
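Returning to the ``query.order_by(None)`` entry above, a quick sketch assuming a configured ``session`` and ``User`` mapping::

    q = session.query(User).order_by(User.name)
    # removes the pending ORDER BY as well as any
    # mapper/relation configured ordering
    q = q.order_by(None)

..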
change:: :tags: orm :tickets: Exceptions raised during compile_mappers() are now preserved to provide "sticky behavior" - if a hasattr() call on a pre-compiled mapped attribute triggers a failing compile and suppresses the exception, subsequent compilation is blocked and the exception will be reiterated on the next compile() call. This issue occurs frequently when using declarative. .. change:: :tags: orm :tickets: property.of_type() is now recognized on a single-table inheriting target, when used in the context of prop.of_type(..).any()/has(), as well as query.join(prop.of_type(...)). .. change:: :tags: orm :tickets: query.join() raises an error when the target of the join doesn't match the property-based attribute - while it's unlikely anyone is doing this, the SQLAlchemy author was guilty of this particular loosey-goosey behavior. .. change:: :tags: orm :tickets: 1272 Fixed bug when using weak_instance_map=False where modified events would not be intercepted for a flush(). .. change:: :tags: orm :tickets: 1268 Fixed some deep "column correspondence" issues which could impact a Query made against a selectable containing multiple versions of the same table, as well as unions and similar which contained the same table columns in different column positions at different levels. .. change:: :tags: orm :tickets: Custom comparator classes used in conjunction with column_property(), relation() etc. can define new comparison methods on the Comparator, which will become available via __getattr__() on the InstrumentedAttribute. In the case of synonym() or comparable_property(), attributes are resolved first on the user-defined descriptor, then on the user-defined comparator. .. change:: :tags: orm :tickets: 976 Added ScopedSession.is_active accessor. .. change:: :tags: orm :tickets: 1262 Can pass mapped attributes and column objects as keys to query.update({}). .. change:: :tags: orm :tickets: Mapped attributes passed to the values() of an expression level insert() or update() will use the keys of the mapped columns, not that of the mapped attribute. .. change:: :tags: orm :tickets: 1242 Corrected problem with Query.delete() and Query.update() not working properly with bind parameters. .. change:: :tags: orm :tickets: Query.select_from(), from_statement() ensure that the given argument is a FromClause, or Text/Select/Union, respectively. .. change:: :tags: orm :tickets: 1253 Query() can be passed a "composite" attribute as a column expression and it will be expanded. Somewhat related to. .. change:: :tags: orm :tickets: Query() is a little more robust when passed various column expressions such as strings, clauselists, text() constructs (which may mean it just raises an error more nicely). .. change:: :tags: orm :tickets: first() works as expected with Query.from_statement(). .. change:: :tags: orm :tickets: Fixed bug introduced in 0.5rc4 involving eager loading not functioning for properties which were added to a mapper post-compile using add_property() or equivalent. .. change:: :tags: orm :tickets: Fixed bug where many-to-many relation() with viewonly=True would not correctly reference the link between secondary->remote. .. change:: :tags: orm :tickets: 1232 Duplicate items in a list-based collection will be maintained when issuing INSERTs to a "secondary" table in a many-to-many relation. Assuming the m2m table has a unique or primary key constraint on it, this will raise the expected constraint violation instead of silently dropping the duplicate entries. 
Note that the old behavior remains for a one-to-many relation since collection entries in that case don't result in INSERT statements and SQLA doesn't manually police collections. .. change:: :tags: orm :tickets: Query.add_column() can accept FromClause objects in the same manner as session.query() can. .. change:: :tags: orm :tickets: Comparison of many-to-one relation to NULL is properly converted to IS NOT NULL based on not_(). .. change:: :tags: orm :tickets: 1087 Extra checks added to ensure explicit primaryjoin/secondaryjoin are ClauseElement instances, to prevent more confusing errors later on. .. change:: :tags: orm :tickets: 1236 Improved mapper() check for non-class classes. .. change:: :tags: orm :tickets: 5051 comparator_factory argument is now documented and supported by all MapperProperty types, including column_property(), relation(), backref(), and synonym(). .. change:: :tags: orm :tickets: Changed the name of PropertyLoader to RelationProperty, to be consistent with all the other names. PropertyLoader is still present as a synonym. .. change:: :tags: orm :tickets: 1099, 1228 fixed "double iter()" call causing bus errors in shard API, removed errant result.close() left over from the 0.4 version. .. change:: :tags: orm :tickets: made Session.merge cascades not trigger autoflush. Fixes merged instances getting prematurely inserted with missing values. .. change:: :tags: orm :tickets: Two fixes to help prevent out-of-band columns from being rendered in polymorphic_union inheritance scenarios (which then causes extra tables to be rendered in the FROM clause causing cartesian products): - improvements to "column adaption" for a->b->c inheritance situations to better locate columns that are related to one another via multiple levels of indirection, rather than rendering the non-adapted column. - the "polymorphic discriminator" column is only rendered for the actual mapper being queried against. The column won't be "pulled in" from a subclass or superclass mapper since it's not needed. .. change:: :tags: orm :tickets: 1072 Fixed shard_id argument on ShardedSession.execute(). .. change:: :tags: sql :tickets: 1256 Columns can again contain percent signs within their names. .. change:: :tags: sql :tickets: sqlalchemy.sql.expression.Function is now a public class. It can be subclassed to provide user-defined SQL functions in an imperative style, including with pre-established behaviors. The postgis.py example illustrates one usage of this. .. change:: :tags: sql :tickets: PickleType now favors == comparison by default, if the incoming object (such as a dict) implements __eq__(). If the object does not implement __eq__() and mutable=True, a deprecation warning is raised. .. change:: :tags: sql :tickets: 1215 Fixed the import weirdness in sqlalchemy.sql to not export __names__. .. change:: :tags: sql :tickets: 1238 Using the same ForeignKey object repeatedly raises an error instead of silently failing later. .. change:: :tags: sql :tickets: Added NotImplementedError for params() method on Insert/Update/Delete constructs. These items currently don't support this functionality, which also would be a little misleading compared to values(). .. change:: :tags: sql :tickets: 650 Reflected foreign keys will properly locate their referenced column, even if the column was given a "key" attribute different from the reflected name. 
This is achieved via a new flag on ForeignKey/ForeignKeyConstraint called "link_to_name"; if True, it means the given name is the referred-to column's name, not its assigned key. A short sketch follows this group of entries. .. change:: :tags: sql :tickets: 1253 select() can accept a ClauseList as a column in the same way as a Table or other selectable and the interior expressions will be used as column elements. .. change:: :tags: sql :tickets: the "passive" flag on session.is_modified() is correctly propagated to the attribute manager. .. change:: :tags: sql :tickets: union() and union_all() will not whack any order_by() that has been applied to the select()s inside. If you union() a select() with order_by() (presumably to support LIMIT/OFFSET), you should also call self_group() on it to apply parentheses. .. change:: :tags: engine/pool :tickets: 1246 Connection.invalidate() checks for closed status to avoid attribute errors. .. change:: :tags: engine/pool :tickets: 1094 NullPool supports reconnect on failure behavior. .. change:: :tags: engine/pool :tickets: 799 Added a mutex for the initial pool creation when using pool.manage(dbapi). This prevents a minor case of "dogpile" behavior which would otherwise occur upon a heavy load startup. .. change:: :tags: engine/pool :tickets: _execute_clauseelement() goes back to being a private method. Subclassing Connection is not needed now that ConnectionProxy is available. .. change:: :tags: documentation :tickets: 1149, 1200 Tickets. .. change:: :tags: documentation :tickets: Added note about create_session() defaults. .. change:: :tags: documentation :tickets: Added section about metadata.reflect(). .. change:: :tags: documentation :tickets: Updated `TypeDecorator` section. .. change:: :tags: documentation :tickets: Rewrote the "threadlocal" strategy section of the docs due to recent confusion over this feature. .. change:: :tags: documentation :tickets: Removed badly out of date 'polymorphic_fetch' and 'select_table' docs from inheritance, reworked the second half of "joined table inheritance". .. change:: :tags: documentation :tickets: Documented `comparator_factory` kwarg, added new doc section "Custom Comparators". .. change:: :tags: mssql :tickets: 1254 Refactored the Date/Time types. The ``smalldatetime`` data type no longer truncates to a date only, and will now be mapped to the MSSmallDateTime type. .. change:: :tags: mssql :tickets: Corrected an issue with Numerics to accept an int. .. change:: :tags: mssql :tickets: Mapped ``char_length`` to the ``LEN()`` function. .. change:: :tags: mssql :tickets: If an ``INSERT`` includes a subselect the ``INSERT`` is converted from an ``INSERT INTO VALUES`` construct to an ``INSERT INTO SELECT`` construct. .. change:: :tags: mssql :tickets: If the column is part of a ``primary_key`` it will be ``NOT NULL`` since MSSQL doesn't allow ``NULL`` in primary_key columns. .. change:: :tags: mssql :tickets: 1249 ``MSBinary`` now returns a ``BINARY`` instead of an ``IMAGE``. This is a backwards incompatible change in that ``BINARY`` is a fixed length data type whereas ``IMAGE`` is a variable length data type. .. change:: :tags: mssql :tickets: 1258 ``get_default_schema_name`` is now reflected from the database based on the user's default schema. This only works with MSSQL 2005 and later. .. change:: :tags: mssql :tickets: 1248 Added collation support through the use of a new collation argument. This is supported on the following types: char, nchar, varchar, nvarchar, text, ntext.
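As an illustration of the ``link_to_name`` flag described at the top of this group of entries, a hypothetical sketch (``metadata`` and the usual Table/Column/ForeignKey imports are assumed)::

    users = Table('users', metadata,
                  # the column's key 'id' differs from its name 'uid'
                  Column('uid', Integer, key='id', primary_key=True))
    addresses = Table('addresses', metadata,
                      Column('id', Integer, primary_key=True),
                      # 'users.uid' refers to the column *name*,
                      # not the assigned key, via link_to_name=True
                      Column('user_id', Integer,
                             ForeignKey('users.uid', link_to_name=True)))

..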
change:: :tags: mssql :tickets: Changes to the connection string parameters favor DSN as the default specification for pyodbc. See the mssql.py docstring for detailed usage instructions. .. change:: :tags: mssql :tickets: Added experimental support for savepoints. It currently does not work fully with sessions. .. change:: :tags: mssql :tickets: 1243 Support for three levels of column nullability: NULL, NOT NULL, and the database's configured default. The default Column configuration (nullable=True) will now generate NULL in the DDL. Previously no specification was emitted and the database default would take effect (usually NULL, but not always). To explicitly request the database default, configure columns with nullable=None and no specification will be emitted in DDL. This is backwards incompatible behavior. .. change:: :tags: postgres :tickets: 1267 "%" signs in text() constructs are automatically escaped to "%%". Because of the backwards incompatible nature of this change, a warning is emitted if '%%' is detected in the string. .. change:: :tags: postgres :tickets: Calling alias.execute() in conjunction with server_side_cursors won't raise AttributeError. .. change:: :tags: postgres :tickets: 714 Added Index reflection support to PostgreSQL, using a great patch we long neglected, submitted by Ken Kuhlman. .. change:: :tags: oracle :tickets: Adjusted the format of create_xid() to repair two-phase commit. We now have field reports of Oracle two-phase commit working properly with this change. .. change:: :tags: oracle :tickets: 1233 Added OracleNVarchar type, produces NVARCHAR2, and also subclasses Unicode so that convert_unicode=True by default. NVARCHAR2 reflects into this type automatically so these columns pass unicode on a reflected table with no explicit convert_unicode=True flags. .. change:: :tags: oracle :tickets: 1265 Fixed bug which was preventing out params of certain types from being received; thanks a ton to huddlej at wwu.edu! .. change:: :tags: mysql :tickets: "%" signs in text() constructs are automatically escaped to "%%". Because of the backwards incompatible nature of this change, a warning is emitted if '%%' is detected in the string. .. change:: :tags: mysql :tickets: 1241 Fixed bug in exception raise when FK columns not present during reflection. .. change:: :tags: mysql :tickets: Fixed bug involving reflection of a remote-schema table with a foreign key ref to another table in that schema. .. change:: :tags: associationproxy :tickets: The association proxy properties now make themselves available at the class level, e.g. MyClass.aproxy. Previously this evaluated to None. .. change:: :tags: declarative :tickets: The full list of arguments accepted as string by backref() includes 'primaryjoin', 'secondaryjoin', 'secondary', 'foreign_keys', 'remote_side', 'order_by'. .. changelog:: :version: 0.5.0rc4 :released: Fri Nov 14 2008 .. change:: :tags: orm :tickets: Query.count() has been enhanced to do the "right thing" in a wider variety of cases. It can now count multiple-entity queries, as well as column-based queries. Note that this means if you say query(A, B).count() without any joining criterion, it's going to count the cartesian product of A*B. Any query which is against column-based entities will automatically issue "SELECT count(1) FROM (SELECT...)" so that the real rowcount is returned, meaning a query such as query(func.count(A.name)).count() will return a value of one, since that query would return one row.
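A quick sketch of the enhanced ``Query.count()`` behavior described just above, with hypothetical ``A`` and ``B`` mappings and a configured ``session``::

    from sqlalchemy import func

    session.query(A, B).count()  # cartesian product if no join criterion
    # column-based counts are wrapped as SELECT count(1) FROM (SELECT ...),
    # so this returns 1: the inner query itself returns one row
    session.query(func.count(A.name)).count()

..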
change:: :tags: orm :tickets: Lots of performance tuning. A rough guesstimate over various ORM operations places it 10% faster over 0.5.0rc3, 25-30% over 0.4.8. .. change:: :tags: orm :tickets: bugfixes and behavioral changes .. change:: :tags: general :tickets: global "propigate"->"propagate" change. .. change:: :tags: orm :tickets: Adjustments to the enhanced garbage collection on InstanceState to better guard against errors due to lost state. .. change:: :tags: orm :tickets: 1220 Query.get() returns a more informative error message when executed against multiple entities. .. change:: :tags: orm :tickets: 1140, 1221 Restored NotImplementedError on Cls.relation.in_() .. change:: :tags: orm :tickets: 1226 Fixed PendingDeprecationWarning involving order_by parameter on relation(). .. change:: :tags: sql :tickets: Removed the 'properties' attribute of the Connection object, Connection.info should be used. .. change:: :tags: sql :tickets: Restored "active rowcount" fetch before ResultProxy autocloses the cursor. This was removed in 0.5rc3. .. change:: :tags: sql :tickets: Rearranged the `load_dialect_impl()` method in `TypeDecorator` such that it will take effect even if the user-defined `TypeDecorator` uses another `TypeDecorator` as its impl. .. change:: :tags: access :tickets: Added support for Currency type. .. change:: :tags: access :tickets: 1017 Functions were not returning their results. .. change:: :tags: access :tickets: 1017 Corrected a problem with joins. Access only supports LEFT OUTER or INNER joins, not a bare JOIN. .. change:: :tags: mssql :tickets: Lots of cleanup and fixes to correct problems with limit and offset. .. change:: :tags: mssql :tickets: Corrected a situation where subqueries as part of a binary expression need to be translated to use the IN and NOT IN syntax. .. change:: :tags: mssql :tickets: 1216 Fixed E Notation issue that prevented the ability to insert decimal values less than 1E-6. .. change:: :tags: mssql :tickets: 1217 Corrected problems with reflection when dealing with schemas, particularly when those schemas are the default schema. .. change:: :tags: mssql :tickets: Corrected problem with casting a zero length item to a varchar. It now correctly adjusts the CAST. .. change:: :tags: ext :tickets: Can now use a custom "inherit_condition" in __mapper_args__ when using declarative. .. change:: :tags: ext :tickets: Fixed string-based "remote_side", "order_by" and others not propagating correctly when used in backref(). .. changelog:: :version: 0.5.0rc3 :released: Fri Nov 07 2008 .. change:: :tags: orm :tickets: Added two new hooks to SessionExtension: after_bulk_delete() and after_bulk_update(). after_bulk_delete() is called after a bulk delete() operation on a query. after_bulk_update() is called after a bulk update() operation on a query. .. change:: :tags: sql :tickets: SQL compiler optimizations and complexity reduction. The call count for compiling a typical select() construct is 20% less versus 0.5.0rc2. .. change:: :tags: sql :tickets: 1211 Dialects can now generate label names of adjustable length. Pass in the argument "label_length=" to create_engine() to adjust how many characters max will be present in dynamically generated column labels, i.e. "somecolumn AS somelabel". Any value less than 6 will result in a label of minimal size, consisting of an underscore and a numeric counter. The compiler uses the value of dialect.max_identifier_length as a default. .. change:: :tags: ext :tickets: Added a new extension sqlalchemy.ext.serializer.
Provides Serializer/Deserializer "classes" which mirror Pickle/Unpickle, as well as dumps() and loads(). This serializer implements an "external object" pickler which keeps key context-sensitive objects, including engines, sessions, metadata, Tables/Columns, and mappers, outside of the pickle stream, and can later restore the pickle using any engine/metadata/session provider. This is used not for pickling regular object instances, which are pickleable without any special logic, but for pickling expression objects and full Query objects, such that all mapper/engine/session dependencies can be restored at unpickle time. .. change:: :tags: oracle :tickets: Wrote a docstring for Oracle dialect. Apparently that Ohloh "few source code comments" label is starting to sting :). .. change:: :tags: oracle :tickets: 536 Removed FIRST_ROWS() optimize flag when using LIMIT/OFFSET, can be reenabled with optimize_limits=True create_engine() flag. .. change:: :tags: oracle :tickets: bugfixes and behavioral changes .. change:: :tags: orm :tickets: "not equals" comparisons of simple many-to-one relation to an instance will not drop into an EXISTS clause and will compare foreign key columns instead. .. change:: :tags: orm :tickets: Removed not-really-working use cases of comparing a collection to an iterable. Use contains() to test for collection membership. .. change:: :tags: orm :tickets: 1171 Improved the behavior of aliased() objects such that they more accurately adapt the expressions generated, which helps particularly with self-referential comparisons. .. change:: :tags: orm :tickets: Fixed bug involving primaryjoin/secondaryjoin conditions constructed from class-bound attributes (as often occurs when using declarative), which later would be inappropriately aliased by Query, particularly with the various EXISTS based comparators. .. change:: :tags: orm :tickets: Fixed bug when using multiple query.join() with an aliased-bound descriptor which would lose the left alias. .. change:: :tags: orm :tickets: Improved weakref identity map memory management to no longer require mutexing, resurrects garbage collected instance on a lazy basis for an InstanceState with pending changes. .. change:: :tags: orm :tickets: InstanceState object now removes circular references to itself upon disposal to keep it outside of cyclic garbage collection. .. change:: :tags: orm :tickets: relation() won't hide unrelated ForeignKey errors inside of the "please specify primaryjoin" message when determining join condition. .. change:: :tags: orm :tickets: 1218 Fixed bug in Query involving order_by() in conjunction with multiple aliases of the same class (will add tests in) .. change:: :tags: orm :tickets: When using Query.join() with an explicit clause for the ON clause, the clause will be aliased in terms of the left side of the join, allowing scenarios like query(Source). from_self().join((Dest, Source.id==Dest.source_id)) to work properly. .. change:: :tags: orm :tickets: polymorphic_union() function respects the "key" of each Column if they differ from the column's name. .. change:: :tags: orm :tickets: 1183 Repaired support for "passive-deletes" on a many-to-one relation() with "delete" cascade. .. change:: :tags: orm :tickets: 1213 Fixed bug in composite types which prevented a primary-key composite type from being mutated. .. 
change:: :tags: orm :tickets: 1202 Added more granularity to internal attribute access, such that cascade and flush operations will not initialize unloaded attributes and collections, leaving them intact for a lazy-load later on. Backref events still initialize attributes and collections for pending instances. .. change:: :tags: sql :tickets: 1212 Simplified the check for ResultProxy "autoclose without results" to be based solely on presence of cursor.description. All the regexp-based guessing about statements returning rows has been removed. .. change:: :tags: sql :tickets: 1194 Direct execution of a union() construct will properly set up result-row processing. .. change:: :tags: sql :tickets: The internal notion of an "OID" or "ROWID" column has been removed. It's basically not used by any dialect, and the possibility of its usage with psycopg2's cursor.lastrowid is basically gone now that INSERT..RETURNING is available. .. change:: :tags: sql :tickets: Removed "default_order_by()" method on all FromClause objects. .. change:: :tags: sql :tickets: Repaired the table.tometadata() method so that a passed-in schema argument is propagated to ForeignKey constructs. .. change:: :tags: sql :tickets: Slightly changed behavior of IN operator for comparing to empty collections. Now results in inequality comparison against self. More portable, but breaks with stored procedures that aren't pure functions. .. change:: :tags: oracle :tickets: Setting auto_convert_lobs to False on create_engine() will also instruct the OracleBinary type to return the cx_oracle LOB object unchanged. .. change:: :tags: mysql :tickets: Fixed foreign key reflection in the edge case where a Table's explicit schema= is the same as the schema (database) the connection is attached to. .. change:: :tags: mysql :tickets: No longer expects include_columns in table reflection to be lower case. .. change:: :tags: ext :tickets: 1174 Fixed bug preventing declarative-bound "column" objects from being used in column_mapped_collection(). .. change:: :tags: misc :tickets: 1077 util.flatten_iterator() func doesn't interpret strings with __iter__() methods as iterators, such as in pypy. .. changelog:: :version: 0.5.0rc2 :released: Sun Oct 12 2008 .. change:: :tags: orm :tickets: Fixed bug involving read/write relation()s that contain literal or other non-column expressions within their primaryjoin condition equated to a foreign key column. .. change:: :tags: orm :tickets: "non-batch" mode in mapper(), a feature which allows mapper extension methods to be called as each instance is updated/inserted, now honors the insert order of the objects given. .. change:: :tags: orm :tickets: Fixed RLock-related bug in mapper which could deadlock upon reentrant mapper compile() calls, something that occurs when using declarative constructs inside of ForeignKey objects. .. change:: :tags: orm :tickets: ScopedSession.query_property now accepts a query_cls factory, overriding the session's configured query_cls. .. change:: :tags: orm :tickets: Fixed shared state bug interfering with ScopedSession.mapper's ability to apply default __init__ implementations on object subclasses. .. change:: :tags: orm :tickets: 1177 Fixed up slices on Query (i.e. query[x:y]) to work properly for zero length slices, slices with None on either end. .. change:: :tags: orm :tickets: Added an example illustrating Celko's "nested sets" as a SQLA mapping.
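To illustrate the empty-collection IN change noted above, a minimal sketch (the table is hypothetical; the rendered SQL follows the behavior described in the entry):

::

    from sqlalchemy import MetaData, Table, Column, Integer, String

    metadata = MetaData()
    users = Table('users', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(50)))

    # comparing IN against an empty collection now renders an
    # always-false inequality of the column against itself
    print users.c.name.in_([])

..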
change:: :tags: orm :tickets: contains_eager() with an alias argument works even when the alias is embedded in a SELECT, as when sent to the Query via query.select_from(). .. change:: :tags: orm :tickets: 1180 contains_eager() usage is now compatible with a Query that also contains a regular eager load and limit/offset, in that the columns are added to the Query-generated subquery. .. change:: :tags: orm :tickets: session.execute() will execute a Sequence object passed to it (regression from 0.4). .. change:: :tags: orm :tickets: Removed the "raiseerror" keyword argument from object_mapper() and class_mapper(). These functions raise in all cases if the given class/instance is not mapped. .. change:: :tags: orm :tickets: Fixed session.transaction.commit() on a autocommit=False session not starting a new transaction. .. change:: :tags: orm :tickets: Some adjustments to Session.identity_map's weak referencing behavior to reduce asynchronous GC side effects. .. change:: :tags: orm :tickets: 1182 Adjustment to Session's post-flush accounting of newly "clean" objects to better protect against operating on objects as they're asynchronously gc'ed. .. change:: :tags: sql :tickets: 1074 column.in_(someselect) can now be used as a columns-clause expression without the subquery bleeding into the FROM clause .. change:: :tags: sqlite :tickets: 968 Overhauled SQLite date/time bind/result processing to use regular expressions and format strings, rather than strptime/strftime, to generically support pre-1900 dates, dates with microseconds. .. change:: :tags: sqlite :tickets: String's (and Unicode's, UnicodeText's, etc.) convert_unicode logic disabled in the sqlite dialect, to adjust for pysqlite 2.5.0's new requirement that only Python unicode objects are accepted; http://itsystementwicklung.de/pipermail/list-pysqlite/2008-March/000018.html .. change:: :tags: mysql :tickets: Temporary tables are now reflectable. .. change:: :tags: oracle :tickets: 1187 Oracle will detect string-based statements which contain comments at the front before a SELECT as SELECT statements. .. changelog:: :version: 0.5.0rc1 :released: Thu Sep 11 2008 .. change:: :tags: orm :tickets: Query now has delete() and update(values) methods. This allows to perform bulk deletes/updates with the Query object. .. change:: :tags: orm :tickets: The RowTuple object returned by Query(\*cols) now features keynames which prefer mapped attribute names over column keys, column keys over column names, i.e. Query(Class.foo, Class.bar) will have names "foo" and "bar" even if those are not the names of the underlying Column objects. Direct Column objects such as Query(table.c.col) will return the "key" attribute of the Column. .. change:: :tags: orm :tickets: Added scalar() and value() methods to Query, each return a single scalar value. scalar() takes no arguments and is roughly equivalent to first()[0], value() takes a single column expression and is roughly equivalent to values(expr).next()[0]. .. change:: :tags: orm :tickets: Improved the determination of the FROM clause when placing SQL expressions in the query() list of entities. In particular scalar subqueries should not "leak" their inner FROM objects out into the enclosing query. .. change:: :tags: orm :tickets: Joins along a relation() from a mapped class to a mapped subclass, where the mapped subclass is configured with single table inheritance, will include an IN clause which limits the subtypes of the joined class to those requested, within the ON clause of the join. 
This takes effect for eager load joins as well as query.join(). Note that in some scenarios the IN clause will appear in the WHERE clause of the query as well since this discrimination has multiple trigger points. .. change:: :tags: orm :tickets: AttributeExtension has been refined such that the event is fired before the mutation actually occurs. Additionally, the append() and set() methods must now return the given value, which is used as the value to be used in the mutation operation. This allows creation of validating AttributeListeners which raise before the action actually occurs, and which can change the given value into something else before its used. .. change:: :tags: orm :tickets: column_property(), composite_property(), and relation() now accept a single or list of AttributeExtensions using the "extension" keyword argument. .. change:: :tags: orm :tickets: query.order_by().get() silently drops the "ORDER BY" from the query issued by GET but does not raise an exception. .. change:: :tags: orm :tickets: Added a Validator AttributeExtension, as well as a @validates decorator which is used in a similar fashion as @reconstructor, and marks a method as validating one or more mapped attributes. .. change:: :tags: orm :tickets: 1140 class.someprop.in_() raises NotImplementedError pending the implementation of "in\_" for relation .. change:: :tags: orm :tickets: 1127 Fixed primary key update for many-to-many collections where the collection had not been loaded yet .. change:: :tags: orm :tickets: Fixed bug whereby deferred() columns with a group in conjunction with an otherwise unrelated synonym() would produce an AttributeError during deferred load. .. change:: :tags: orm :tickets: 1128 The before_flush() hook on SessionExtension takes place before the list of new/dirty/deleted is calculated for the final time, allowing routines within before_flush() to further change the state of the Session before the flush proceeds. .. change:: :tags: orm :tickets: The "extension" argument to Session and others can now optionally be a list, supporting events sent to multiple SessionExtension instances. Session places SessionExtensions in Session.extensions. .. change:: :tags: orm :tickets: Reentrant calls to flush() raise an error. This also serves as a rudimentary, but not foolproof, check against concurrent calls to Session.flush(). .. change:: :tags: orm :tickets: Improved the behavior of query.join() when joining to joined-table inheritance subclasses, using explicit join criteria (i.e. not on a relation). .. change:: :tags: orm :tickets: @orm.attributes.reconstitute and MapperExtension.reconstitute have been renamed to @orm.reconstructor and MapperExtension.reconstruct_instance .. change:: :tags: orm :tickets: 1129 Fixed @reconstructor hook for subclasses which inherit from a base class. .. change:: :tags: orm :tickets: 1132 The composite() property type now supports a __set_composite_values__() method on the composite class which is required if the class represents state using attribute names other than the column's keynames; default-generated values now get populated properly upon flush. Also, composites with attributes set to None compare correctly. .. change:: :tags: orm :tickets: The 3-tuple of iterables returned by attributes.get_history() may now be a mix of lists and tuples. (Previously members were always lists.) .. 
change:: :tags: orm :tickets: 1151 Fixed bug whereby changing a primary key attribute on an entity where the attribute's previous value had been expired would produce an error upon flush(). .. change:: :tags: orm :tickets: Fixed custom instrumentation bug whereby get_instance_dict() was not called for newly constructed instances not loaded by the ORM. .. change:: :tags: orm :tickets: 1150 Session.delete() adds the given object to the session if not already present. This was a regression bug from 0.4. .. change:: :tags: orm :tickets: The `echo_uow` flag on `Session` is deprecated, and unit-of-work logging is now application-level only, not per-session level. .. change:: :tags: orm :tickets: 1153 Removed conflicting `contains()` operator from `InstrumentedAttribute` which didn't accept `escape` kwarg. .. change:: :tags: declarative :tickets: 1161 Fixed bug whereby mapper couldn't initialize if a composite primary key referenced another table that was not defined yet. .. change:: :tags: declarative :tickets: Fixed exception throw which would occur when string-based primaryjoin condition was used in conjunction with backref. .. change:: :tags: schema :tickets: 1033 Added "sorted_tables" accessor to MetaData, which returns Table objects sorted in order of dependency as a list. This deprecates the MetaData.table_iterator() method. The "reverse=False" keyword argument has also been removed from util.sort_tables(); use the Python 'reversed' function to reverse the results. .. change:: :tags: schema :tickets: The 'length' argument to all Numeric types has been renamed to 'scale'. 'length' is deprecated and is still accepted with a warning. .. change:: :tags: schema :tickets: Dropped 0.3-compatibility for user defined types (convert_result_value, convert_bind_param). .. change:: :tags: sql :tickets: 1068 Temporarily rolled back the "ORDER BY" enhancement. This feature is on hold pending further development. .. change:: :tags: sql :tickets: The exists() construct won't "export" its contained list of elements as FROM clauses, allowing them to be used more effectively in the columns clause of a SELECT. .. change:: :tags: sql :tickets: 798 and_() and or_() now generate a ColumnElement, allowing boolean expressions as result columns, i.e. select([and_(1, 0)]). .. change:: :tags: sql :tickets: Bind params now subclass ColumnElement which allows them to be selectable by orm.query (they already had most ColumnElement semantics). .. change:: :tags: sql :tickets: Added select_from() method to exists() construct, which becomes more and more compatible with a regular select(). .. change:: :tags: sql :tickets: 1160 Added func.min(), func.max(), func.sum() as "generic functions", which basically allows for their return type to be determined automatically. Helps with dates on SQLite, decimal types, others. .. change:: :tags: sql :tickets: Added decimal.Decimal as an "auto-detect" type; bind parameters and generic functions will set their type to Numeric when a Decimal is used. .. change:: :tags: mysql :tickets: The 'length' argument to MSInteger, MSBigInteger, MSTinyInteger, MSSmallInteger and MSYear has been renamed to 'display_width'. .. change:: :tags: mysql :tickets: 1146 Added MSMediumInteger type. .. change:: :tags: mysql :tickets: the function func.utc_timestamp() compiles to UTC_TIMESTAMP, without the parentheses, which seem to get in the way when using in conjunction with executemany().
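A small sketch of the new MetaData.sorted_tables accessor mentioned above (table names are hypothetical):

::

    from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey

    metadata = MetaData()
    parent = Table('parent', metadata,
                   Column('id', Integer, primary_key=True))
    child = Table('child', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('parent_id', Integer, ForeignKey('parent.id')))

    # tables in dependency order; replaces the deprecated table_iterator()
    print [t.name for t in metadata.sorted_tables]   # ['parent', 'child']

..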
change:: :tags: oracle :tickets: 536 limit/offset no longer uses ROW NUMBER OVER to limit rows, and instead uses subqueries in conjunction with a special Oracle optimization comment. Allows LIMIT/OFFSET to work in conjunction with DISTINCT. .. change:: :tags: oracle :tickets: 1155 has_sequence() now takes the current "schema" argument into account .. change:: :tags: oracle :tickets: 1121 added BFILE to reflected type names .. changelog:: :version: 0.5.0beta3 :released: Mon Aug 04 2008 .. change:: :tags: orm :tickets: The "entity_name" feature of SQLAlchemy mappers has been removed. For rationale, see http://tinyurl.com/6nm2ne .. change:: :tags: orm :tickets: the "autoexpire" flag on Session, sessionmaker(), and scoped_session() has been renamed to "expire_on_commit". It does not affect the expiration behavior of rollback(). .. change:: :tags: orm :tickets: fixed endless loop bug which could occur within a mapper's deferred load of inherited attributes. .. change:: :tags: orm :tickets: a legacy-support flag "_enable_transaction_accounting" flag added to Session which when False, disables all transaction-level object accounting, including expire on rollback, expire on commit, new/deleted list maintenance, and autoflush on begin. .. change:: :tags: orm :tickets: The 'cascade' parameter to relation() accepts None as a value, which is equivalent to no cascades. .. change:: :tags: orm :tickets: A critical fix to dynamic relations allows the "modified" history to be properly cleared after a flush(). .. change:: :tags: orm :tickets: user-defined @properties on a class are detected and left in place during mapper initialization. This means that a table-bound column of the same name will not be mapped at all if a @property is in the way (and the column is not remapped to a different name), nor will an instrumented attribute from an inherited class be applied. The same rules apply for names excluded using the include_properties/exclude_properties collections. .. change:: :tags: orm :tickets: Added a new SessionExtension hook called after_attach(). This is called at the point of attachment for objects via add(), add_all(), delete(), and merge(). .. change:: :tags: orm :tickets: 1111 A mapper which inherits from another, when inheriting the columns of its inherited mapper, will use any reassigned property names specified in that inheriting mapper. Previously, if "Base" had reassigned "base_id" to the name "id", "SubBase(Base)" would still get an attribute called "base_id". This could be worked around by explicitly stating the column in each submapper as well but this is fairly unworkable and also impossible when using declarative. .. change:: :tags: orm :tickets: Fixed a series of potential race conditions in Session whereby asynchronous GC could remove unmodified, no longer referenced items from the session as they were present in a list of items to be processed, typically during session.expunge_all() and dependent methods. .. change:: :tags: orm :tickets: Some improvements to the _CompileOnAttr mechanism which should reduce the probability of "Attribute x was not replaced during compile" warnings. (this generally applies to SQLA hackers, like Elixir devs). .. change:: :tags: orm :tickets: Fixed bug whereby the "unsaved, pending instance" FlushError raised for a pending orphan would not take superclass mappers into account when generating the list of relations responsible for the error. .. 
change:: :tags: sql :tickets: func.count() with no arguments renders as COUNT(*), equivalent to func.count(text('*')). .. change:: :tags: sql :tickets: 1068 simple label names in ORDER BY expressions render as themselves, and not as a re-statement of their corresponding expression. This feature is currently enabled only for SQLite, MySQL, and PostgreSQL. It can be enabled on other dialects as each is shown to support this behavior. .. change:: :tags: ext :tickets: Class-bound attributes sent as arguments to relation()'s remote_side and foreign_keys parameters are now accepted, allowing them to be used with declarative. Additionally fixed bugs involving order_by being specified as a class-bound attribute in conjunction with eager loading. .. change:: :tags: ext :tickets: declarative initialization of Columns adjusted so that non-renamed columns initialize in the same way as a non declarative mapper. This allows an inheriting mapper to set up its same-named "id" columns in particular such that the parent "id" column is favored over the child column, reducing database round trips when this value is requested. .. change:: :tags: mysql :tickets: 1110 Quoting of MSEnum values for use in CREATE TABLE is now optional & will be quoted on demand as required. (Quoting was always optional for use with existing tables.) .. changelog:: :version: 0.5.0beta2 :released: Mon Jul 14 2008 .. change:: :tags: orm :tickets: 870 In addition to expired attributes, deferred attributes also load if their data is present in the result set. .. change:: :tags: orm :tickets: session.refresh() raises an informative error message if the list of attributes does not include any column-based attributes. .. change:: :tags: orm :tickets: query() raises an informative error message if no columns or mappers are specified. .. change:: :tags: orm :tickets: lazy loaders now trigger autoflush before proceeding. This allows expire() of a collection or scalar relation to function properly in the context of autoflush. .. change:: :tags: orm :tickets: 887 column_property() attributes which represent SQL expressions or columns that are not present in the mapped tables (such as those from views) are automatically expired after an INSERT or UPDATE, assuming they have not been locally modified, so that they are refreshed with the most recent data upon access. .. change:: :tags: orm :tickets: 1082 Fixed explicit, self-referential joins between two joined-table inheritance mappers when using query.join(cls, aliased=True). .. change:: :tags: orm :tickets: Fixed query.join() when used in conjunction with a columns-only clause and an SQL-expression ON clause in the join. .. change:: :tags: orm :tickets: The "allow_column_override" flag from mapper() has been removed. This flag is virtually always misunderstood. Its specific functionality is available via the include_properties/exclude_properties mapper arguments. .. change:: :tags: orm :tickets: 1066 Repaired `__str__()` method on Query. .. change:: :tags: orm :tickets: Session.bind gets used as a default even when table/mapper specific binds are defined. .. change:: :tags: schema :tickets: 1075 Added prefixes option to `Table` that accepts a list of strings to insert after CREATE in the CREATE TABLE statement. .. change:: :tags: schema :tickets: Unicode, UnicodeText types now set "assert_unicode" and "convert_unicode" by default, but accept overriding \**kwargs for these values. .. change:: :tags: sql :tickets: Added new match() operator that performs a full-text search. 
Supported on PostgreSQL, SQLite, MySQL, MS-SQL, and Oracle backends. .. change:: :tags: sqlite :tickets: 1090 Modified SQLite's representation of "microseconds" to match the output of str(somedatetime), i.e. in that the microseconds are represented as fractional seconds in string format. This makes SQLA's SQLite date type compatible with datetimes that were saved directly using Pysqlite (which just calls str()). Note that this is incompatible with the existing microseconds values in a SQLA 0.4 generated SQLite database file. To get the old behavior globally: from sqlalchemy.databases.sqlite import DateTimeMixin DateTimeMixin.__legacy_microseconds__ = True To get the behavior on individual DateTime types: t = sqlite.SLDateTime() t.__legacy_microseconds__ = True Then use "t" as the type on the Column. .. change:: :tags: sqlite :tickets: SQLite Date, DateTime, and Time types only accept Python datetime objects now, not strings. If you'd like to format dates as strings yourself with SQLite, use a String type. If you'd like them to return datetime objects anyway despite their accepting strings as input, make a TypeDecorator around String - SQLA doesn't encourage this pattern. .. change:: :tags: extensions :tickets: 1096 Declarative supports a __table_args__ class variable, which is either a dictionary, or tuple of the form (arg1, arg2, ..., {kwarg1:value, ...}) which contains positional + kw arguments to be passed to the Table constructor. .. changelog:: :version: 0.5.0beta1 :released: Thu Jun 12 2008 .. change:: :tags: :tickets: The "__init__" trigger/decorator added by mapper now attempts to exactly mirror the argument signature of the original __init__. The pass-through for '_sa_session' is no longer implicit- you must allow for this keyword argument in your constructor. .. change:: :tags: :tickets: ClassState is renamed to ClassManager. .. change:: :tags: :tickets: Classes may supply their own InstrumentationManager by providing a __sa_instrumentation_manager__ property. .. change:: :tags: :tickets: Custom instrumentation may use any mechanism to associate a ClassManager with a class and an InstanceState with an instance. Attributes on those objects are still the default association mechanism used by SQLAlchemy's native instrumentation. .. change:: :tags: :tickets: Moved entity_name, _sa_session_id, and _instance_key from the instance object to the instance state. These values are still available in the old way, which is now deprecated, using descriptors attached to the class. A deprecation warning will be issued when accessed. .. change:: :tags: :tickets: The _prepare_instrumentation alias for prepare_instrumentation has been removed. .. change:: :tags: :tickets: sqlalchemy.exceptions has been renamed to sqlalchemy.exc. The module may be imported under either name. .. change:: :tags: :tickets: ORM-related exceptions are now defined in sqlalchemy.orm.exc. ConcurrentModificationError, FlushError, and UnmappedColumnError compatibility aliases are installed in sqlalchemy.exc during the import of sqlalchemy.orm. .. change:: :tags: :tickets: sqlalchemy.logging has been renamed to sqlalchemy.log. .. change:: :tags: :tickets: The transitional sqlalchemy.log.SADeprecationWarning alias for the warning's definition in sqlalchemy.exc has been removed. .. change:: :tags: :tickets: exc.AssertionError has been removed and usage replaced with Python's built-in AssertionError. .. 
change:: :tags: :tickets: The behavior of MapperExtensions attached to multiple, entity_name= primary mappers for a single class has been altered. The first mapper() defined for a class is the only mapper eligible for the MapperExtension 'instrument_class', 'init_instance' and 'init_failed' events. This is backwards incompatible; previously the extensions of last mapper defined would receive these events. .. change:: :tags: firebird :tickets: Added support for returning values from inserts (2.0+ only), updates and deletes (2.1+ only). .. change:: :tags: general :tickets: global "propigate"->"propagate" change. .. change:: :tags: orm :tickets: polymorphic_union() function respects the "key" of each Column if they differ from the column's name. .. change:: :tags: orm :tickets: 1199 Fixed 0.4-only bug preventing composite columns from working properly with inheriting mappers .. change:: :tags: orm :tickets: Fixed RLock-related bug in mapper which could deadlock upon reentrant mapper compile() calls, something that occurs when using declarative constructs inside of ForeignKey objects. Ported from 0.5. .. change:: :tags: orm :tickets: 1213 Fixed bug in composite types which prevented a primary-key composite type from being mutated. .. change:: :tags: orm :tickets: 976 Added ScopedSession.is_active accessor. .. change:: :tags: orm :tickets: 939 Class-bound accessor can be used as the argument to relation() order_by. .. change:: :tags: orm :tickets: 1072 Fixed shard_id argument on ShardedSession.execute(). .. change:: :tags: sql :tickets: 1246 Connection.invalidate() checks for closed status to avoid attribute errors. .. change:: :tags: sql :tickets: 1094 NullPool supports reconnect on failure behavior. .. change:: :tags: sql :tickets: 1299 The per-dialect cache used by TypeEngine to cache dialect-specific types is now a WeakKeyDictionary. This to prevent dialect objects from being referenced forever for an application that creates an arbitrarily large number of engines or dialects. There is a small performance penalty which will be resolved in 0.6. .. change:: :tags: sql :tickets: Fixed SQLite reflection methods so that non-present cursor.description, which triggers an auto-cursor close, will be detected so that no results doesn't fail on recent versions of pysqlite which raise an error when fetchone() called with no rows present. .. change:: :tags: postgres :tickets: 714 Added Index reflection support to Postgres, using a great patch we long neglected, submitted by Ken Kuhlman. .. change:: :tags: mysql :tickets: 1241 Fixed bug in exception raise when FK columns not present during reflection. .. change:: :tags: oracle :tickets: 1265 Fixed bug which was preventing out params of certain types from being received; thanks a ton to huddlej at wwu.edu ! SQLAlchemy-1.0.11/doc/build/changelog/migration_06.rst0000664000175000017500000012662012636375552023472 0ustar classicclassic00000000000000============================== What's New in SQLAlchemy 0.6? ============================== .. admonition:: About this Document This document describes changes between SQLAlchemy version 0.5, last released January 16, 2010, and SQLAlchemy version 0.6, last released May 5, 2012. Document date: June 6, 2010 This guide documents API changes which affect users migrating their applications from the 0.5 series of SQLAlchemy to 0.6. Note that SQLAlchemy 0.6 removes some behaviors which were deprecated throughout the span of the 0.5 series, and also deprecates more behaviors specific to 0.5. 
Platform Support ================ * cPython versions 2.4 and upwards throughout the 2.xx series * Jython 2.5.1 - using the zxJDBC DBAPI included with Jython. * cPython 3.x - see [source:sqlalchemy/trunk/README.py3k] for information on how to build for python3. New Dialect System ================== Dialect modules are now broken up into distinct subcomponents, within the scope of a single database backend. Dialect implementations are now in the ``sqlalchemy.dialects`` package. The ``sqlalchemy.databases`` package still exists as a placeholder to provide some level of backwards compatibility for simple imports. For each supported database, a sub-package exists within ``sqlalchemy.dialects`` where several files are contained. Each package contains a module called ``base.py`` which defines the specific SQL dialect used by that database. It also contains one or more "driver" modules, each one corresponding to a specific DBAPI - these files are named corresponding to the DBAPI itself, such as ``pysqlite``, ``cx_oracle``, or ``pyodbc``. The classes used by SQLAlchemy dialects are first declared in the ``base.py`` module, defining all behavioral characteristics defined by the database. These include capability mappings, such as "supports sequences", "supports returning", etc., type definitions, and SQL compilation rules. Each "driver" module in turn provides subclasses of those classes as needed which override the default behavior to accommodate the additional features, behaviors, and quirks of that DBAPI. For DBAPIs that support multiple backends (pyodbc, zxJDBC, mxODBC), the dialect module will use mixins from the ``sqlalchemy.connectors`` package, which provide functionality common to that DBAPI across all backends, most typically dealing with connect arguments. This means that connecting using pyodbc, zxJDBC or mxODBC (when implemented) is extremely consistent across supported backends. The URL format used by ``create_engine()`` has been enhanced to handle any number of DBAPIs for a particular backend, using a scheme that is inspired by that of JDBC. The previous format still works, and will select a "default" DBAPI implementation, such as the Postgresql URL below that will use psycopg2: :: create_engine('postgresql://scott:tiger@localhost/test') However to specify a specific DBAPI backend such as pg8000, add it to the "protocol" section of the URL using a plus sign "+": :: create_engine('postgresql+pg8000://scott:tiger@localhost/test') Important Dialect Links: * Documentation on connect arguments: http://www.sqlalchemy.org/docs/06/dbengine.html#create-engine-url-arguments. * Reference documentation for individual dialects: http://www.sqlalchemy.org/docs/06/reference/dialects/index.html * The tips and tricks at DatabaseNotes. Other notes regarding dialects: * the type system has been changed dramatically in SQLAlchemy 0.6. This has an impact on all dialects regarding naming conventions, behaviors, and implementations. See the section on "Types" below. * the ``ResultProxy`` object now offers a 2x speed improvement in some cases thanks to some refactorings. * the ``RowProxy``, i.e. individual result row object, is now directly pickleable. * the setuptools entrypoint used to locate external dialects is now called ``sqlalchemy.dialects``. An external dialect written against 0.4 or 0.5 will need to be modified to work with 0.6 in any case so this change does not add any additional difficulties. * dialects now receive an initialize() event on initial connection to determine connection properties.
* Functions and operators generated by the compiler now use (almost) regular dispatch functions of the form "visit_" and "visit__fn" to provide custom processing. This replaces the need to copy the "functions" and "operators" dictionaries in compiler subclasses with straightforward visitor methods, and also allows compiler subclasses complete control over rendering, as the full _Function or _BinaryExpression object is passed in. Dialect Imports --------------- The import structure of dialects has changed. Each dialect now exports its base "dialect" class as well as the full set of SQL types supported on that dialect via ``sqlalchemy.dialects.``. For example, to import a set of PG types: :: from sqlalchemy.dialects.postgresql import INTEGER, BIGINT, SMALLINT,\ VARCHAR, MACADDR, DATE, BYTEA Above, ``INTEGER`` is actually the plain ``INTEGER`` type from ``sqlalchemy.types``, but the PG dialect makes it available in the same way as those types which are specific to PG, such as ``BYTEA`` and ``MACADDR``. Expression Language Changes =========================== An Important Expression Language Gotcha --------------------------------------- There's one quite significant behavioral change to the expression language which may affect some applications. The boolean value of Python boolean expressions, i.e. ``==``, ``!=``, and similar, now evaluates accurately with regards to the two clause objects being compared. As we know, comparing a ``ClauseElement`` to any other object returns another ``ClauseElement``: :: >>> from sqlalchemy.sql import column >>> column('foo') == 5 This is so that Python expressions produce SQL expressions when converted to strings: :: >>> str(column('foo') == 5) 'foo = :foo_1' But what happens if we say this? :: >>> if column('foo') == 5: ... print "yes" ... In previous versions of SQLAlchemy, the returned ``_BinaryExpression`` was a plain Python object which evaluated to ``True``. Now it evaluates to whether or not the actual ``ClauseElement`` should have the same hash value as the one being compared. Meaning: :: >>> bool(column('foo') == 5) False >>> bool(column('foo') == column('foo')) False >>> c = column('foo') >>> bool(c == c) True >>> That means code such as the following: :: if expression: print "the expression is:", expression Would not evaluate if ``expression`` was a binary clause. Since the above pattern should never be used, the base ``ClauseElement`` now raises an exception if called in a boolean context: :: >>> bool(c) Traceback (most recent call last): File "", line 1, in ... raise TypeError("Boolean value of this clause is not defined") TypeError: Boolean value of this clause is not defined Code that wants to check for the presence of a ``ClauseElement`` expression should instead say: :: if expression is not None: print "the expression is:", expression Keep in mind, **this applies to Table and Column objects too**. The rationale for the change is twofold: * Comparisons of the form ``if c1 == c2: `` can actually be written now * Support for correct hashing of ``ClauseElement`` objects now works on alternate platforms, namely Jython. Up until this point SQLAlchemy relied heavily on the specific behavior of cPython in this regard (and still had occasional problems with it).
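Since correct hashing is part of this change, clause objects now behave predictably in hash-based collections as well; a minimal sketch:

::

    from sqlalchemy.sql import column

    c1, c2 = column('foo'), column('bar')

    # distinct clause elements hash distinctly, so sets and dicts
    # of columns behave consistently, including on Jython
    seen = set([c1, c2, c1])
    assert len(seen) == 2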
Stricter "executemany" Behavior ------------------------------- An "executemany" in SQLAlchemy corresponds to a call to ``execute()``, passing along a collection of bind parameter sets: :: connection.execute(table.insert(), {'data':'row1'}, {'data':'row2'}, {'data':'row3'}) When the ``Connection`` object sends off the given ``insert()`` construct for compilation, it passes to the compiler the keynames present in the first set of binds passed along to determine the construction of the statement's VALUES clause. Users familiar with this construct will know that additional keys present in the remaining dictionaries don't have any impact. What's different now is that all subsequent dictionaries need to include at least *every* key that is present in the first dictionary. This means that a call like this no longer works: :: connection.execute(table.insert(), {'timestamp':today, 'data':'row1'}, {'timestamp':today, 'data':'row2'}, {'data':'row3'}) Because the third row does not specify the 'timestamp' column. Previous versions of SQLAlchemy would simply insert NULL for these missing columns. However, if the ``timestamp`` column in the above example contained a Python-side default value or function, it would *not* be used. This is because the "executemany" operation is optimized for maximum performance across huge numbers of parameter sets, and does not attempt to evaluate Python-side defaults for those missing keys. Because defaults are often implemented either as SQL expressions which are embedded inline with the INSERT statement, or are server side expressions which again are triggered based on the structure of the INSERT string, which by definition cannot fire off conditionally based on each parameter set, it would be inconsistent for Python side defaults to behave differently vs. SQL/server side defaults. (SQL expression based defaults are embedded inline as of the 0.5 series, again to minimize the impact of huge numbers of parameter sets). SQLAlchemy 0.6 therefore establishes predictable consistency by forbidding any subsequent parameter sets from leaving any fields blank. That way, there's no more silent failure of Python side default values and functions, which additionally are allowed to remain consistent in their behavior versus SQL and server side defaults. A corrected version of the failing call above is sketched at the end of this section. UNION and other "compound" constructs parenthesize consistently --------------------------------------------------------------- A rule that was designed to help SQLite has been removed: the first compound element within another compound (such as a ``union()`` inside of an ``except_()``) would not be parenthesized. This is inconsistent and produces the wrong results on Postgresql, which has precedence rules regarding INTERSECTION, and it's generally a surprise. When using complex composites with SQLite, you now need to turn the first element into a subquery (which is also compatible on PG). A new example is in the SQL expression tutorial at the end of [http://www.sqlalchemy.org/docs/06/sqlexpression.html#unions-and-other-set-operations]. See :ticket:`1665` and r6690 for more background.
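Returning to the stricter "executemany" rules above, the failing call can be corrected by supplying every key in each parameter set, passing an explicit None where no value is intended; a minimal runnable sketch (the table is hypothetical):

::

    from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String

    metadata = MetaData()
    t = Table('t', metadata,
              Column('id', Integer, primary_key=True),
              Column('timestamp', String(20)),
              Column('data', String(20)))

    engine = create_engine('sqlite://')
    metadata.create_all(engine)
    conn = engine.connect()

    # every dictionary supplies the same keys as the first one;
    # 'timestamp' is given explicitly as None rather than omitted
    conn.execute(t.insert(),
                 {'timestamp': 'today', 'data': 'row1'},
                 {'timestamp': 'today', 'data': 'row2'},
                 {'timestamp': None, 'data': 'row3'})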
C Extensions for Result Fetching ================================ The ``ResultProxy`` and related elements, including most common "row processing" functions such as unicode conversion, numerical/boolean conversions and date parsing, have been re-implemented as optional C extensions for the purposes of performance. This represents the beginning of SQLAlchemy's path to the "dark side" where we hope to continue improving performance by reimplementing critical sections in C. The extensions can be built by specifying ``--with-cextensions``, i.e. ``python setup.py --with-cextensions install``. The extensions have the most dramatic impact on result fetching using direct ``ResultProxy`` access, i.e. that which is returned by ``engine.execute()``, ``connection.execute()``, or ``session.execute()``. Within results returned by an ORM ``Query`` object, result fetching is not as high a percentage of overhead, so ORM performance improves more modestly, and mostly in the realm of fetching large result sets. The performance improvements highly depend on the dbapi in use and on the syntax used to access the columns of each row (eg ``row['name']`` is much faster than ``row.name``). The current extensions have no impact on the speed of inserts/updates/deletes, nor do they improve the latency of SQL execution, that is, an application that spends most of its time executing many statements with very small result sets will not see much improvement. Performance has been improved in 0.6 versus 0.5 regardless of the extensions. A quick overview of what connecting and fetching 50,000 rows looks like with SQLite, using mostly direct SQLite access, a ``ResultProxy``, and a simple mapped ORM object:

::

    sqlite select/native:   0.260s

    0.6 / C extension
    sqlalchemy.sql select:  0.360s
    sqlalchemy.orm fetch:   2.500s

    0.6 / Pure Python
    sqlalchemy.sql select:  0.600s
    sqlalchemy.orm fetch:   3.000s

    0.5 / Pure Python
    sqlalchemy.sql select:  0.790s
    sqlalchemy.orm fetch:   4.030s

Above, the ORM fetches the rows 33% faster than 0.5 due to in-python performance enhancements. With the C extensions we get another 20%. However, ``ResultProxy`` fetches improve by 67% with the C extension versus not. Other tests report as much as a 200% speed improvement for some scenarios, such as those where lots of string conversions are occurring. New Schema Capabilities ======================= The ``sqlalchemy.schema`` package has received some long-needed attention. The most visible change is the newly expanded DDL system. In SQLAlchemy, it was possible since version 0.5 to create custom DDL strings and associate them with tables or metadata objects: :: from sqlalchemy.schema import DDL DDL('CREATE TRIGGER users_trigger ...').execute_at('after-create', metadata) Now the full suite of DDL constructs are available under the same system, including those for CREATE TABLE, ADD CONSTRAINT, etc.: :: from sqlalchemy.schema import Constraint, AddConstraint AddConstraint(CheckConstraint("value > 5")).execute_at('after-create', mytable) Additionally, all the DDL objects are now regular ``ClauseElement`` objects just like any other SQLAlchemy expression object: :: from sqlalchemy.schema import CreateTable create = CreateTable(mytable) # dumps the CREATE TABLE as a string print create # executes the CREATE TABLE statement engine.execute(create) and using the ``sqlalchemy.ext.compiler`` extension you can make your own: :: from sqlalchemy.schema import DDLElement from sqlalchemy.ext.compiler import compiles class AlterColumn(DDLElement): def __init__(self, column, cmd): self.column = column self.cmd = cmd @compiles(AlterColumn) def visit_alter_column(element, compiler, **kw): return "ALTER TABLE %s ALTER COLUMN %s %s ..."
% ( element.column.table.name, element.column.name, element.cmd ) engine.execute(AlterColumn(table.c.mycolumn, "SET DEFAULT 'test'")) Deprecated/Removed Schema Elements ---------------------------------- The schema package has also been greatly streamlined. Many options and methods which were deprecated throughout 0.5 have been removed. Other little known accessors and methods have also been removed. * the "owner" keyword argument is removed from ``Table``. Use "schema" to represent any namespaces to be prepended to the table name. * deprecated ``MetaData.connect()`` and ``ThreadLocalMetaData.connect()`` have been removed - send the "bind" attribute to bind a metadata. * deprecated metadata.table_iterator() method removed (use sorted_tables) * the "metadata" argument is removed from ``DefaultGenerator`` and subclasses, but remains locally present on ``Sequence``, which is a standalone construct in DDL. * deprecated ``PassiveDefault`` - use ``DefaultClause``. * Removed public mutability from ``Index`` and ``Constraint`` objects: * ``ForeignKeyConstraint.append_element()`` * ``Index.append_column()`` * ``UniqueConstraint.append_column()`` * ``PrimaryKeyConstraint.add()`` * ``PrimaryKeyConstraint.remove()`` These should be constructed declaratively (i.e. in one construction). * Other removed things: * ``Table.key`` (no idea what this was for) * ``Column.bind`` (get via column.table.bind) * ``Column.metadata`` (get via column.table.metadata) * ``Column.sequence`` (use column.default) Other Behavioral Changes ------------------------ * ``UniqueConstraint``, ``Index``, ``PrimaryKeyConstraint`` all accept lists of column names or column objects as arguments. * The ``use_alter`` flag on ``ForeignKey`` is now a shortcut option for operations that can be hand-constructed using the ``DDL()`` event system. A side effect of this refactor is that ``ForeignKeyConstraint`` objects with ``use_alter=True`` will *not* be emitted on SQLite, which does not support ALTER for foreign keys. This has no effect on SQLite's behavior since SQLite does not actually honor FOREIGN KEY constraints. * ``Table.primary_key`` is not assignable - use ``table.append_constraint(PrimaryKeyConstraint(...))`` * A ``Column`` definition with a ``ForeignKey`` and no type, e.g. ``Column(name, ForeignKey(sometable.c.somecol))`` used to get the type of the referenced column. Now support for that automatic type inference is partial and may not work in all cases. Logging opened up ================= At the expense of a few extra method calls here and there, you can set log levels for INFO and DEBUG after an engine, pool, or mapper has been created, and logging will commence. The ``isEnabledFor(INFO)`` method is now called per-``Connection`` and ``isEnabledFor(DEBUG)`` per-``ResultProxy`` if already enabled on the parent connection. Pool logging sends to ``log.info()`` and ``log.debug()`` with no check - note that pool checkout/checkin is typically once per transaction. Reflection/Inspector API ======================== The reflection system, which allows reflection of table columns via ``Table('sometable', metadata, autoload=True)`` has been opened up into its own fine-grained API, which allows direct inspection of database elements such as tables, columns, constraints, indexes, and more. This API expresses return values as simple lists of strings, dictionaries, and ``TypeEngine`` objects. 
The internals of ``autoload=True`` now build upon this system such that the translation of raw database information into ``sqlalchemy.schema`` constructs is centralized and the contract of individual dialects greatly simplified, vastly reducing bugs and inconsistencies across different backends. To use an inspector: :: from sqlalchemy.engine.reflection import Inspector insp = Inspector.from_engine(my_engine) print insp.get_schema_names() The ``from_engine()`` method will in some cases provide a backend-specific inspector with additional capabilities, such as that of Postgresql which provides a ``get_table_oid()`` method: :: my_engine = create_engine('postgresql://...') pg_insp = Inspector.from_engine(my_engine) print pg_insp.get_table_oid('my_table') RETURNING Support ================= The ``insert()``, ``update()`` and ``delete()`` constructs now support a ``returning()`` method, which corresponds to the SQL RETURNING clause as supported by Postgresql, Oracle, MS-SQL, and Firebird. It is not supported for any other backend at this time. Given a list of column expressions in the same manner as that of a ``select()`` construct, the values of these columns will be returned as a regular result set: :: result = connection.execute( table.insert().values(data='some data').returning(table.c.id, table.c.timestamp) ) row = result.first() print "ID:", row['id'], "Timestamp:", row['timestamp'] The implementation of RETURNING across the four supported backends varies wildly, in the case of Oracle requiring an intricate usage of OUT parameters which are re-routed into a "mock" result set, and in the case of MS-SQL using an awkward SQL syntax. The usage of RETURNING is subject to limitations: * it does not work for any "executemany()" style of execution. This is a limitation of all supported DBAPIs. * Some backends, such as Oracle, only support RETURNING that returns a single row - this includes UPDATE and DELETE statements, meaning the update() or delete() construct must match only a single row, or an error is raised (by Oracle, not SQLAlchemy). RETURNING is also used automatically by SQLAlchemy, when available and when not otherwise specified by an explicit ``returning()`` call, to fetch newly generated primary key values for single-row INSERT statements. This means there's no more "SELECT nextval(sequence)" pre-execution for insert statements where the primary key value is required. Truth be told, the implicit RETURNING feature does incur more method overhead than the old "select nextval()" system, which used a quick and dirty cursor.execute() to get at the sequence value, and in the case of Oracle requires additional binding of out parameters. So if method/protocol overhead is proving to be more expensive than additional database round trips, the feature can be disabled by specifying ``implicit_returning=False`` to ``create_engine()``.
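As a concrete illustration of the paragraph above, disabling implicit RETURNING is a one-liner at engine configuration time (the URL is illustrative):

::

    from sqlalchemy import create_engine

    # fall back to the pre-execution / cursor-based approach
    # for primary key retrieval
    engine = create_engine('postgresql://scott:tiger@localhost/test',
                           implicit_returning=False)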
Type System Changes =================== New Architecture -------------- The type system has been completely reworked behind the scenes to achieve two goals: * Separate the handling of bind parameters and result row values, typically a DBAPI requirement, from the SQL specification of the type itself, which is a database requirement. This is consistent with the overall dialect refactor that separates database SQL behavior from DBAPI. * Establish a clear and consistent contract for generating DDL from a ``TypeEngine`` object and for constructing ``TypeEngine`` objects based on column reflection. Highlights of these changes include: * The construction of types within dialects has been totally overhauled. Dialects now define publicly available types as UPPERCASE names exclusively, and internal implementation types using underscore identifiers (i.e. are private). The system by which types are expressed in SQL and DDL has been moved to the compiler system. This has the effect that there are far fewer type objects within most dialects. A detailed document on this architecture for dialect authors is in [source:/lib/sqlalchemy/dialects/type_migration_guidelines.txt]. * Reflection of types now returns the exact UPPERCASE type within types.py, or the UPPERCASE type within the dialect itself if the type is not a standard SQL type. This means reflection now returns more accurate information about reflected types. * User defined types that subclass ``TypeEngine`` and wish to provide ``get_col_spec()`` should now subclass ``UserDefinedType``. * The ``result_processor()`` method on all type classes now accepts an additional argument ``coltype``. This is the DBAPI type object attached to cursor.description, and should be used when applicable to make better decisions on what kind of result-processing callable should be returned. Ideally result processor functions would never need to use ``isinstance()``, which is an expensive call at this level. Native Unicode Mode ------------------- As more DBAPIs support returning Python unicode objects directly, the base dialect now performs a check upon the first connection which establishes whether or not the DBAPI returns a Python unicode object for a basic select of a VARCHAR value. If so, the ``String`` type and all subclasses (i.e. ``Text``, ``Unicode``, etc.) will skip the "unicode" check/conversion step when result rows are received. This offers a dramatic performance increase for large result sets. The "unicode mode" currently is known to work with: * sqlite3 / pysqlite * psycopg2 - SQLA 0.6 now uses the "UNICODE" type extension by default on each psycopg2 connection object * pg8000 * cx_oracle (we use an output processor - nice feature!) Other types may choose to disable unicode processing as needed, such as the ``NVARCHAR`` type when used with MS-SQL. In particular, if porting an application based on a DBAPI that formerly returned non-unicode strings, the "native unicode" mode has a plainly different default behavior - columns that are declared as ``String`` or ``VARCHAR`` now return unicode by default whereas they would return strings before. This can break code which expects non-unicode strings. The psycopg2 "native unicode" mode can be disabled by passing ``use_native_unicode=False`` to ``create_engine()``. A more general solution for string columns that explicitly do not want a unicode object is to use a ``TypeDecorator`` that converts unicode back to utf-8, or whatever is desired: :: class UTF8Encoded(TypeDecorator): """Unicode type which coerces to utf-8.""" impl = sa.VARCHAR def process_result_value(self, value, dialect): if isinstance(value, unicode): value = value.encode('utf-8') return value Note that the ``assert_unicode`` flag is now deprecated. SQLAlchemy allows the DBAPI and backend database in use to handle Unicode parameters when available, and does not add operational overhead by checking the incoming type; modern systems like sqlite and Postgresql will raise an encoding error on their end if invalid data is passed.
In those cases where SQLAlchemy does need to coerce a bind parameter from Python Unicode to an encoded string, or when the Unicode type is used explicitly, a warning is raised if the object is a bytestring. This warning can be suppressed or converted to an exception using the Python warnings filter documented at: http://docs.python.org/library/warnings.html Generic Enum Type ----------------- We now have an ``Enum`` in the ``types`` module. This is a string type that is given a collection of "labels" which constrain the possible values to those labels. By default, this type generates a ``VARCHAR`` using the size of the largest label, and applies a CHECK constraint to the table within the CREATE TABLE statement. When using MySQL, the type by default uses MySQL's ENUM type, and when using Postgresql the type will generate a user defined type using ``CREATE TYPE AS ENUM``. In order to create the type using Postgresql, the ``name`` parameter must be specified to the constructor. The type also accepts a ``native_enum=False`` option which will issue the VARCHAR/CHECK strategy for all databases. Note that Postgresql ENUM types currently don't work with pg8000 or zxjdbc. Reflection Returns Dialect-Specific Types ----------------------------------------- Reflection now returns the most specific type possible from the database. That is, if you create a table using ``String``, then reflect it back, the reflected column will likely be ``VARCHAR``. For dialects that support a more specific form of the type, that's what you'll get. So a ``Text`` type would come back as ``oracle.CLOB`` on Oracle, a ``LargeBinary`` might be a ``mysql.MEDIUMBLOB``, etc. The obvious advantage here is that reflection preserves as much information as possible from what the database had to say. Some applications that deal heavily in table metadata may wish to compare types across reflected tables and/or non-reflected tables. There's a semi-private accessor available on ``TypeEngine`` called ``_type_affinity`` and an associated comparison helper ``_compare_type_affinity``. This accessor returns the "generic" ``types`` class which the type corresponds to: :: >>> String(50)._compare_type_affinity(postgresql.VARCHAR(50)) True >>> Integer()._compare_type_affinity(mysql.REAL) False Miscellaneous API Changes ------------------------- The usual "generic" types are still the general system in use, i.e. ``String``, ``Float``, ``DateTime``. There are a few changes there: * Types no longer make any guesses as to default parameters. In particular, ``Numeric``, ``Float``, as well as subclasses NUMERIC, FLOAT, DECIMAL don't generate any length or scale unless specified. This also continues to include the controversial ``String`` and ``VARCHAR`` types (although the MySQL dialect will pre-emptively raise when asked to render VARCHAR with no length). No defaults are assumed, and if they are used in a CREATE TABLE statement, an error will be raised if the underlying database does not allow these types without a length. * the ``Binary`` type has been renamed to ``LargeBinary``, for BLOB/BYTEA/similar types. For ``BINARY`` and ``VARBINARY``, those are present directly as ``types.BINARY``, ``types.VARBINARY``, as well as in the MySQL and MS-SQL dialects. * ``PickleType`` now uses == for comparison of values when mutable=True, unless the "comparator" argument with a comparison function is specified to the type. If you are pickling a custom object you should implement an ``__eq__()`` method so that value-based comparisons are accurate.
* The default "precision" and "scale" arguments of Numeric and Float have been removed and now default to None. NUMERIC and FLOAT will be rendered with no numeric arguments by default unless these values are provided. * DATE, TIME and DATETIME types on SQLite can now take optional "storage_format" and "regexp" argument. "storage_format" can be used to store those types using a custom string format. "regexp" allows to use a custom regular expression to match string values from the database. * ``__legacy_microseconds__`` on SQLite ``Time`` and ``DateTime`` types is not supported anymore. You should use the new "storage_format" argument instead. * ``DateTime`` types on SQLite now use by a default a stricter regular expression to match strings from the database. Use the new "regexp" argument if you are using data stored in a legacy format. ORM Changes =========== Upgrading an ORM application from 0.5 to 0.6 should require little to no changes, as the ORM's behavior remains almost identical. There are some default argument and name changes, and some loading behaviors have been improved. New Unit of Work ---------------- The internals for the unit of work, primarily ``topological.py`` and ``unitofwork.py``, have been completely rewritten and are vastly simplified. This should have no impact on usage, as all existing behavior during flush has been maintained exactly (or at least, as far as it is exercised by our testsuite and the handful of production environments which have tested it heavily). The performance of flush() now uses 20-30% fewer method calls and should also use less memory. The intent and flow of the source code should now be reasonably easy to follow, and the architecture of the flush is fairly open-ended at this point, creating room for potential new areas of sophistication. The flush process no longer has any reliance on recursion so flush plans of arbitrary size and complexity can be flushed. Additionally, the mapper's "save" process, which issues INSERT and UPDATE statements, now caches the "compiled" form of the two statements so that callcounts are further dramatically reduced with very large flushes. Any changes in behavior observed with flush versus earlier versions of 0.6 or 0.5 should be reported to us ASAP - we'll make sure no functionality is lost. Changes to ``query.update()`` and ``query.delete()`` ---------------------------------------------------- * the 'expire' option on query.update() has been renamed to 'fetch', thus matching that of query.delete() * ``query.update()`` and ``query.delete()`` both default to 'evaluate' for the synchronize strategy. * the 'synchronize' strategy for update() and delete() raises an error on failure. There is no implicit fallback onto "fetch". Failure of evaluation is based on the structure of criteria, so success/failure is deterministic based on code structure. ``relation()`` is officially named ``relationship()`` ----------------------------------------------------- This to solve the long running issue that "relation" means a "table or derived table" in relational algebra terms. The ``relation()`` name, which is less typing, will hang around for the foreseeable future so this change should be entirely painless. Subquery eager loading ---------------------- A new kind of eager loading is added called "subquery" loading. This is a load that emits a second SQL query immediately after the first which loads full collections for all the parents in the first query, joining upwards to the parent using INNER JOIN. 
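For instance, using the new ``subqueryload()`` option described just below (``User`` and its ``addresses`` collection are assumed to be an already-configured mapping):

::

    from sqlalchemy.orm import subqueryload

    # one SELECT for the users themselves, plus one more SELECT which
    # loads the 'addresses' collections for all of them at once
    users = session.query(User).options(subqueryload(User.addresses)).all()
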
Subquery loading is used similarly to the current joined-eager loading, using the ``subqueryload()`` and ``subqueryload_all()`` options as well as the ``lazy='subquery'`` setting on ``relationship()``. The subquery load is usually much more efficient for loading many larger collections as it uses INNER JOIN unconditionally and also doesn't re-load parent rows. ``eagerload()``, ``eagerload_all()`` is now ``joinedload()``, ``joinedload_all()`` ---------------------------------------------------------------------------------- To make room for the new subquery load feature, the existing ``eagerload()``/``eagerload_all()`` options are now superseded by ``joinedload()`` and ``joinedload_all()``. The old names will hang around for the foreseeable future just like ``relation()``. ``lazy=False|None|True|'dynamic'`` now accepts ``lazy='noload'|'joined'|'subquery'|'select'|'dynamic'`` --------------------------------------------------------------------------------------------------------- Continuing on the theme of loader strategies opened up, the standard keywords for the ``lazy`` option on ``relationship()`` are now ``select`` for lazy loading (via a SELECT issued on attribute access), ``joined`` for joined-eager loading, ``subquery`` for subquery-eager loading, ``noload`` for no loading, and ``dynamic`` for a "dynamic" relationship. The old ``True``, ``False``, ``None`` arguments are still accepted with identical behavior as before. innerjoin=True on relation, joinedload -------------------------------------- Joined-eagerly loaded scalars and collections can now be instructed to use INNER JOIN instead of OUTER JOIN. On Postgresql this is observed to provide a 300-600% speedup on some queries. Set this flag for any many-to-one which is on a NOT NULLable foreign key, and similarly for any collection where related items are guaranteed to exist. At the mapper level: :: mapper(Child, child) mapper(Parent, parent, properties={ 'child':relationship(Child, lazy='joined', innerjoin=True) }) At the query level: :: session.query(Parent).options(joinedload(Parent.child, innerjoin=True)).all() The ``innerjoin=True`` flag at the ``relationship()`` level will also take effect for any ``joinedload()`` option which does not override the value. Many-to-one Enhancements ------------------------ * many-to-one relations now fire off a lazyload in fewer cases, including that in most cases they will not fetch the "old" value when a new one is replaced. * many-to-one relation to a joined-table subclass now uses get() for a simple load (known as the "use_get" condition), i.e. ``Related``->``Sub(Base)``, without the need to redefine the primaryjoin condition in terms of the base table. [ticket:1186] * specifying a foreign key with a declarative column, i.e. ``ForeignKey(MyRelatedClass.id)``, doesn't prevent the "use_get" condition from taking place [ticket:1492] * relationship(), joinedload(), and joinedload_all() now feature an option called "innerjoin". Specify ``True`` or ``False`` to control whether an eager join is constructed as an INNER or OUTER join. Default is ``False`` as always. The query-level options will override whichever setting is specified on relationship(). Should generally be set for many-to-one relations on non-nullable foreign keys to allow improved join performance.
[ticket:1544] * the behavior of joined eager loading, whereby the main query is wrapped in a subquery when LIMIT/OFFSET are present, now makes an exception for the case when all eager loads are many-to-one joins. In those cases, the eager joins are against the parent table directly along with the limit/offset, without the extra overhead of a subquery, since a many-to-one join does not add rows to the result. For example, in 0.5 this query: :: session.query(Address).options(eagerload(Address.user)).limit(10) would produce SQL like: :: SELECT * FROM (SELECT * FROM addresses LIMIT 10) AS anon_1 LEFT OUTER JOIN users AS users_1 ON users_1.id = anon_1.addresses_user_id This is because the presence of any eager loaders suggests that some or all of them may relate to multi-row collections, which would necessitate wrapping any kind of rowcount-sensitive modifiers like LIMIT inside of a subquery. In 0.6, that logic is more sensitive and can detect if all eager loaders represent many-to-ones, in which case the eager joins don't affect the rowcount: :: SELECT * FROM addresses LEFT OUTER JOIN users AS users_1 ON users_1.id = addresses.user_id LIMIT 10 Mutable Primary Keys with Joined Table Inheritance -------------------------------------------------- A joined table inheritance config where the child table has a PK that foreign keys to the parent PK can now be updated on a CASCADE-capable database like Postgresql. ``mapper()`` now has an option ``passive_updates=True`` which indicates this foreign key is updated automatically. If on a non-cascading database like SQLite or MySQL/MyISAM, set this flag to ``False``. A future feature enhancement will try to get this flag to be auto-configuring based on dialect/table style in use. Beaker Caching -------------- A promising new example of Beaker integration is in ``examples/beaker_caching``. This is a straightforward recipe which applies a Beaker cache within the result-generation engine of ``Query``. Cache parameters are provided via ``query.options()``, allowing full control over the contents of the cache. SQLAlchemy 0.6 includes improvements to the ``Session.merge()`` method to support this and similar recipes, as well as to provide significantly improved performance in most scenarios. Other Changes ------------- * the "row tuple" object returned by ``Query`` when multiple column/entities are selected is now picklable as well as higher performing. * ``query.join()`` has been reworked to provide more consistent behavior and more flexibility (includes [ticket:1537]) * ``query.select_from()`` accepts multiple clauses to produce multiple comma-separated entries within the FROM clause. Useful when selecting from multiple-homed join() clauses. * the "dont_load=True" flag on ``Session.merge()`` is deprecated and is now "load=False". * added "make_transient()" helper function which transforms a persistent/detached instance into a transient one (i.e. deletes the instance_key and removes from any session.) [ticket:1052] * the allow_null_pks flag on mapper() is deprecated and has been renamed to allow_partial_pks. It is turned "on" by default. This means that a row which has a non-null value for any of its primary key columns will be considered an identity. The need for this scenario typically only occurs when mapping to an outer join.
When set to False, a PK that has NULLs in it will not be considered a primary key - in particular this means a result row will come back as None (or not be filled into a collection), and, new in 0.6, also indicates that session.merge() won't issue a round trip to the database for such a PK value. [ticket:1680] * the mechanics of "backref" have been fully merged into the finer-grained "back_populates" system, and take place entirely within the ``_generate_backref()`` method of ``RelationProperty``. This makes the initialization procedure of ``RelationProperty`` simpler and allows easier propagation of settings (such as from subclasses of ``RelationProperty``) into the reverse reference. The internal ``BackRef()`` is gone and ``backref()`` returns a plain tuple that is understood by ``RelationProperty``. * the keys attribute of ``ResultProxy`` is now a method, so references to it (``result.keys``) must be changed to method invocations (``result.keys()``) * ``ResultProxy.last_inserted_ids`` is now deprecated, use ``ResultProxy.inserted_primary_key`` instead. Deprecated/Removed ORM Elements ------------------------------- Most elements that were deprecated throughout 0.5 and raised deprecation warnings have been removed (with a few exceptions). All elements that were marked "pending deprecation" are now deprecated and will raise a warning upon use. * 'transactional' flag on sessionmaker() and others is removed. Use 'autocommit=True' to indicate 'transactional=False'. * 'polymorphic_fetch' argument on mapper() is removed. Loading can be controlled using the 'with_polymorphic' option. * 'select_table' argument on mapper() is removed. Use 'with_polymorphic=("*", )' for this functionality. * 'proxy' argument on synonym() is removed. This flag did nothing throughout 0.5, as the "proxy generation" behavior is now automatic. * Passing a single list of elements to joinedload(), joinedload_all(), contains_eager(), lazyload(), defer(), and undefer() instead of multiple positional \*args is deprecated. * Passing a single list of elements to query.order_by(), query.group_by(), query.join(), or query.outerjoin() instead of multiple positional \*args is deprecated. * ``query.iterate_instances()`` is removed. Use ``query.instances()``. * ``Query.query_from_parent()`` is removed. Use the sqlalchemy.orm.with_parent() function to produce a "parent" clause, or alternatively ``query.with_parent()``. * ``query._from_self()`` is removed, use ``query.from_self()`` instead. * the "comparator" argument to composite() is removed. Use "comparator_factory". * ``RelationProperty._get_join()`` is removed. * the 'echo_uow' flag on Session is removed. Use logging on the "sqlalchemy.orm.unitofwork" name. * ``session.clear()`` is removed. Use ``session.expunge_all()``. * ``session.save()``, ``session.update()``, ``session.save_or_update()`` are removed. Use ``session.add()`` and ``session.add_all()``. * the "objects" flag on session.flush() remains deprecated. * the "dont_load=True" flag on session.merge() is deprecated in favor of "load=False". * ``ScopedSession.mapper`` remains deprecated. See the usage recipe at http://www.sqlalchemy.org/trac/wiki/UsageRecipes/SessionAwareMapper * passing an ``InstanceState`` (internal SQLAlchemy state object) to ``attributes.init_collection()`` or ``attributes.get_history()`` is deprecated. These functions are public API and normally expect a regular mapped object instance. * the 'engine' parameter to ``declarative_base()`` is removed. Use the 'bind' keyword argument.
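As a quick orientation for the session method removals above, a minimal before/after sketch (``obj`` and ``other_obj`` are assumed to be mapped instances and ``session`` an active ``Session``):

::

    # 0.5: session.save(obj), session.update(obj), session.save_or_update(obj)
    # 0.6: session.add() handles all three cases
    session.add(obj)
    session.add_all([obj, other_obj])

    # 0.5: session.clear()
    # 0.6:
    session.expunge_all()
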
Extensions ========== SQLSoup ------- SQLSoup has been modernized and updated to reflect common 0.5/0.6 capabilities, including well-defined session integration. Please read the new docs at [http://www.sqlalchemy.org/docs/06/reference/ext/sqlsoup.html]. Declarative ----------- The ``DeclarativeMeta`` (default metaclass for ``declarative_base``) previously allowed subclasses to modify ``dict_`` to add class attributes (e.g. columns). This no longer works; the ``DeclarativeMeta`` constructor now ignores ``dict_``. Instead, the class attributes should be assigned directly, e.g. ``cls.id=Column(...)``, or the `MixIn class `_ approach should be used instead of the metaclass approach. SQLAlchemy-1.0.11/doc/build/changelog/changelog_02.rst0000664000175000017500000007454312636375552023422 0ustar classicclassic00000000000000 ============== 0.2 Changelog ============== .. changelog:: :version: 0.2.8 :released: Tue Sep 05 2006 .. change:: :tags: :tickets: cleanup on connection methods + documentation. custom DBAPI arguments specified in query string, 'connect_args' argument to 'create_engine', or custom creation function via 'creator' function to 'create_engine'. .. change:: :tags: :tickets: 274 added "recycle" argument to Pool, is "pool_recycle" on create_engine, defaults to 3600 seconds; connections after this age will be closed and replaced with a new one, to handle db's that automatically close stale connections .. change:: :tags: :tickets: 121 changed "invalidate" semantics with pooled connection; will instruct the underlying connection record to reconnect the next time it's called. "invalidate" will also automatically be called if any error is thrown in the underlying call to connection.cursor(). this will hopefully allow the connection pool to reconnect to a database that had been stopped and started without restarting the connecting application .. change:: :tags: :tickets: eesh ! the tutorial doctest was broken for quite some time. .. change:: :tags: :tickets: add_property() method on mapper does a "compile all mappers" step in case the given property references a non-compiled mapper (as it did in the case of the tutorial !) .. change:: :tags: :tickets: 277 check for pg sequence already existing before create .. change:: :tags: :tickets: if a contextual session is established via MapperExtension.get_session (as it is using the sessioncontext plugin, etc), a lazy load operation will use that session by default if the parent object is not persistent with a session already. .. change:: :tags: :tickets: lazy loads will not fire off for an object that does not have a database identity (why? see http://www.sqlalchemy.org/trac/wiki/WhyDontForeignKeysLoadData) .. change:: :tags: :tickets: unit-of-work does a better check for "orphaned" objects that are part of a "delete-orphan" cascade, for certain conditions where the parent isn't available to cascade from. .. change:: :tags: :tickets: mappers can tell if one of their objects is an "orphan" based on interactions with the attribute package. this check is based on a status flag maintained for each relationship when objects are attached and detached from each other. .. change:: :tags: :tickets: it is now invalid to declare a self-referential relationship with "delete-orphan" (as the abovementioned check would make them impossible to save) .. change:: :tags: :tickets: improved the check for objects being part of a session when the unit of work seeks to flush() them as part of a relationship.. ..
change:: :tags: :tickets: 280 statement execution supports using the same BindParam object more than once in an expression; simplified handling of positional parameters. nice job by Bill Noon figuring out the basic idea. .. change:: :tags: :tickets: 60, 71 postgres reflection moved to use pg_schema tables, can be overridden with use_information_schema=True argument to create_engine. .. change:: :tags: :tickets: 155 added case_sensitive argument to MetaData, Table, Column, determines itself automatically based on whether a parent schemaitem has a non-None setting for the flag, or if not, then whether the identifier name is all lower case or not. when set to True, quoting is applied to identifiers with mixed or uppercase identifiers. quoting is also applied automatically in all cases to identifiers that are known to be reserved words or contain other non-standard characters. various database dialects can override all of this behavior, but currently they are all using the default behavior. tested with postgres, mysql, sqlite, oracle. needs more testing with firebird, ms-sql. part of the ongoing work with .. change:: :tags: :tickets: unit tests updated to run without any pysqlite installed; pool test uses a mock DBAPI .. change:: :tags: :tickets: 281 urls support escaped characters in passwords .. change:: :tags: :tickets: added limit/offset to UNION queries (though not yet in oracle) .. change:: :tags: :tickets: added "timezone=True" flag to DateTime and Time types. postgres so far will convert this to "TIME[STAMP] (WITH|WITHOUT) TIME ZONE", so that timezone presence is more controllable (psycopg2 returns datetimes with tzinfo's if available, which can create confusion against datetimes that don't). .. change:: :tags: :tickets: 287 fix to using query.count() with distinct, \**kwargs with SelectResults count() .. change:: :tags: :tickets: 289 deregister Table from MetaData when autoload fails; .. change:: :tags: :tickets: 293 import of py2.5's sqlite3 .. change:: :tags: :tickets: 296 unicode fix for startswith()/endswith() .. changelog:: :version: 0.2.7 :released: Sat Aug 12 2006 .. change:: :tags: :tickets: quoting facilities set up so that database-specific quoting can be turned on for individual table, schema, and column identifiers when used in all queries/creates/drops. Enabled via "quote=True" in Table or Column, as well as "quote_schema=True" in Table. Thanks to Aaron Spike for the excellent efforts. .. change:: :tags: :tickets: assignmapper was setting is_primary=True, causing all sorts of mayhem by not raising an error when redundant mappers were set up, fixed .. change:: :tags: :tickets: added allow_null_pks option to Mapper, allows rows where some primary key columns are null (i.e. when mapping to outer joins etc) .. change:: :tags: :tickets: modification to unitofwork to not maintain ordering within the "new" list or within the UOWTask "objects" list; instead, new objects are tagged with an ordering identifier as they are registered as new with the session, and the INSERT statements are then sorted within the mapper save_obj. the INSERT ordering has basically been pushed all the way to the end of the flush cycle. that way the various sorts and organizations occurring within UOWTask (particularly the circular task sort) don't have to worry about maintaining order (which they weren't anyway) .. change:: :tags: :tickets: fixed reflection of foreign keys to autoload the referenced table if it was not loaded already ..
change:: :tags: :tickets: 256 - pass URL query string arguments to connect() function .. change:: :tags: :tickets: 257 - oracle boolean type .. change:: :tags: :tickets: custom primary/secondary join conditions in a relation *will* be propagated to backrefs by default. specifying a backref() will override this behavior. .. change:: :tags: :tickets: better check for ambiguous join conditions in sql.Join; propagates to a better error message in PropertyLoader (i.e. relation()/backref()) for when the join condition can't be reasonably determined. .. change:: :tags: :tickets: sqlite creates ForeignKeyConstraint objects properly upon table reflection. .. change:: :tags: :tickets: 224 adjustments to pool stemming from changes made for. overflow counter should only be decremented if the connection actually succeeded. added a test script to attempt testing this. .. change:: :tags: :tickets: fixed mysql reflection of default values to be PassiveDefault .. change:: :tags: :tickets: 263, 264 added reflected 'tinyint', 'mediumint' type to MS-SQL. .. change:: :tags: :tickets: SingletonThreadPool has a size and does a cleanup pass, so that only a given number of thread-local connections stay around (needed for sqlite applications that dispose of threads en masse) .. change:: :tags: :tickets: 267, 265 fixed small pickle bug(s) with lazy loaders .. change:: :tags: :tickets: fixed possible error in mysql reflection where certain versions return an array instead of string for SHOW CREATE TABLE call .. change:: :tags: :tickets: 1770 fix to lazy loads when mapping to joins .. change:: :tags: :tickets: all create()/drop() calls have a keyword argument of "connectable". "engine" is deprecated. .. change:: :tags: :tickets: fixed ms-sql connect() to work with adodbapi .. change:: :tags: :tickets: added "nowait" flag to Select() .. change:: :tags: :tickets: 271 inheritance check uses issubclass() instead of direct __mro__ check to make sure class A inherits from B, allowing mapper inheritance to more flexibly correspond to class inheritance .. change:: :tags: :tickets: 252 SelectResults will use a subselect, when calling an aggregate (i.e. max, min, etc.) on a SelectResults that has an ORDER BY clause .. change:: :tags: :tickets: 269 fixes to types so that database-specific types more easily used; fixes to mysql text types to work with this methodology .. change:: :tags: :tickets: some fixes to sqlite date type organization .. change:: :tags: :tickets: 263 added MSTinyInteger to MS-SQL .. changelog:: :version: 0.2.6 :released: Thu Jul 20 2006 .. change:: :tags: :tickets: 76 big overhaul to schema to allow truly composite primary and foreign key constraints, via new ForeignKeyConstraint and PrimaryKeyConstraint objects. Existing methods of primary/foreign key creation have not been changed but use these new objects behind the scenes. table creation and reflection is now more table oriented rather than column oriented. .. change:: :tags: :tickets: overhaul to MapperExtension calling scheme, wasn't working very well previously .. change:: :tags: :tickets: tweaks to ActiveMapper, supports self-referential relationships .. change:: :tags: :tickets: slight rearrangement to objectstore (in activemapper/threadlocal) so that the SessionContext is referenced by '.context' instead of subclassed directly. .. change:: :tags: :tickets: activemapper will use threadlocal's objectstore if the mod is activated when activemapper is imported .. change:: :tags: :tickets: small fix to URL regexp to allow filenames with '@' in them .. 
change:: :tags: :tickets: fixes to Session expunge/update/etc...needs more cleanup. .. change:: :tags: :tickets: select_table mappers *still* weren't always compiling .. change:: :tags: :tickets: fixed up Boolean datatype .. change:: :tags: :tickets: added count()/count_by() to list of methods proxied by assignmapper; this also adds them to activemapper .. change:: :tags: :tickets: connection exceptions wrapped in DBAPIError .. change:: :tags: :tickets: ActiveMapper now supports autoloading column definitions from the database if you supply a __autoload__ = True attribute in your mapping inner-class. Currently this does not support reflecting any relationships. .. change:: :tags: :tickets: deferred column load could screw up the connection status in a flush() under some circumstances, this was fixed .. change:: :tags: :tickets: expunge() was not working with cascade, fixed. .. change:: :tags: :tickets: potential endless loop in cascading operations fixed. .. change:: :tags: :tickets: added "synonym()" function, applied to properties to have a propname the same as another, for the purposes of overriding props and allowing the original propname to be accessible in select_by(). .. change:: :tags: :tickets: fix to typing in clause construction which specifically helps type issues with polymorphic_union (CAST/ColumnClause propagates its type to proxy columns) .. change:: :tags: :tickets: mapper compilation work ongoing, someday it'll work....moved around the initialization of MapperProperty objects to be after all mappers are created to better handle circular compilations. do_init() method is called on all properties now which are more aware of their "inherited" status if so. .. change:: :tags: :tickets: eager loads explicitly disallowed on self-referential relationships, or relationships to an inheriting mapper (which is also self-referential) .. change:: :tags: :tickets: 244 reduced bind param size in query._get to appease the picky oracle .. change:: :tags: :tickets: 234 added 'checkfirst' argument to table.create()/table.drop(), as well as table.exists() .. change:: :tags: :tickets: 245 some other ongoing fixes to inheritance .. change:: :tags: :tickets: attribute/backref/orphan/history-tracking tweaks as usual... .. changelog:: :version: 0.2.5 :released: Sat Jul 08 2006 .. change:: :tags: :tickets: fixed endless loop bug in select_by(), if the traversal hit two mappers that referenced each other .. change:: :tags: :tickets: upgraded all unittests to insert './lib/' into sys.path, working around new setuptools PYTHONPATH-killing behavior .. change:: :tags: :tickets: further fixes with attributes/dependencies/etc.... .. change:: :tags: :tickets: improved error handling for when DynamicMetaData is not connected .. change:: :tags: :tickets: MS-SQL support largely working (tested with pymssql) .. change:: :tags: :tickets: ordering of UPDATE and DELETE statements within groups is now in order of primary key values, for more deterministic ordering .. change:: :tags: :tickets: after_insert/delete/update mapper extensions now called per object, not per-object-per-table .. change:: :tags: :tickets: further fixes/refactorings to mapper compilation .. changelog:: :version: 0.2.4 :released: Tue Jun 27 2006 .. change:: :tags: :tickets: try/except when the mapper sets init.__name__ on a mapped class, supports python 2.3 .. change:: :tags: :tickets: fixed bug where threadlocal engine would still autocommit despite a transaction in progress .. 
change:: :tags: :tickets: lazy load and deferred load operations require the parent object to be in a Session to do the operation; whereas before the operation would just return a blank list or None, it now raises an exception. .. change:: :tags: :tickets: Session.update() is slightly more lenient if the session to which the given object was formerly attached was garbage collected; otherwise still requires you explicitly remove the instance from the previous Session. .. change:: :tags: :tickets: fixes to mapper compilation, checking for more error conditions .. change:: :tags: :tickets: small fix to eager loading combined with ordering/limit/offset .. change:: :tags: :tickets: 206 utterly remarkable: added a single space between 'CREATE TABLE' and '(' since *that's how MySQL indicates a non-reserved word tablename.....* .. change:: :tags: :tickets: more fixes to inheritance, related to many-to-many relations properly saving .. change:: :tags: :tickets: fixed bug when specifying explicit module to mysql dialect .. change:: :tags: :tickets: when QueuePool times out it raises a TimeoutError instead of erroneously making another connection .. change:: :tags: :tickets: Queue.Queue usage in pool has been replaced with a locally modified version (works in py2.3/2.4!) that uses a threading.RLock for a mutex. this is to fix a reported case where a ConnectionFairy's __del__() method got called within the Queue's get() method, which then returns its connection to the Queue via the put() method, causing a reentrant hang unless threading.RLock is used. .. change:: :tags: :tickets: postgres will not place SERIAL keyword on a primary key column if it has a foreign key constraint .. change:: :tags: :tickets: 221 cursor() method on ConnectionFairy allows db-specific extension arguments to be propagated .. change:: :tags: :tickets: 225 lazy load bind params properly propagate column type .. change:: :tags: :tickets: new MySQL types: MSEnum, MSTinyText, MSMediumText, MSLongText, etc. more support for MS-specific length/precision params in numeric types patch courtesy Mike Bernson .. change:: :tags: :tickets: 224 some fixes to connection pool invalidate() .. changelog:: :version: 0.2.3 :released: Sat Jun 17 2006 .. change:: :tags: :tickets: overhaul to mapper compilation to be deferred. this allows mappers to be constructed in any order, and their relationships to each other are compiled when the mappers are first used. .. change:: :tags: :tickets: fixed a pretty big speed bottleneck in cascading behavior particularly when backrefs were in use .. change:: :tags: :tickets: the attribute instrumentation module has been completely rewritten; it's now a large degree simpler and clearer, slightly faster. the "history" of an attribute is no longer micromanaged with each change and is instead part of a "CommittedState" object created when the instance is first loaded. HistoryArraySet is gone, the behavior of list attributes is now more open ended (i.e. they're not sets anymore). .. change:: :tags: :tickets: py2.4 "set" construct used internally, falls back to sets.Set when "set" not available/ordering is needed. .. change:: :tags: :tickets: fix to transaction control, so that repeated rollback() calls don't fail (was failing pretty badly when flush() would raise an exception in a larger try/except transaction block) .. change:: :tags: :tickets: 151 "foreignkey" argument to relation() can also be a list. fixed auto-foreignkey detection ..
change:: :tags: :tickets: fixed bug where tables with schema names weren't getting indexed in the MetaData object properly .. change:: :tags: :tickets: 207 fixed bug where Column with redefined "key" property wasn't getting type conversion happening in the ResultProxy .. change:: :tags: :tickets: fixed 'port' attribute of URL to be an integer if present .. change:: :tags: :tickets: fixed old bug where if a many-to-many table mapped as "secondary" had extra columns, delete operations didn't work .. change:: :tags: :tickets: bugfixes for mapping against UNION queries .. change:: :tags: :tickets: fixed incorrect exception class thrown when no DB driver present .. change:: :tags: :tickets: 138 added NonExistentTable exception thrown when reflecting a table that doesn't exist .. change:: :tags: :tickets: small fix to ActiveMapper regarding one-to-one backrefs, other refactorings .. change:: :tags: :tickets: overridden constructor in mapped classes gets __name__ and __doc__ from the original class .. change:: :tags: :tickets: 200 fixed small bug in selectresult.py regarding mapper extension .. change:: :tags: :tickets: small tweak to cascade_mappers, not very strongly supported function at the moment .. change:: :tags: :tickets: 202 some fixes to between(), column.between() to propagate typing information better .. change:: :tags: :tickets: 203 if an object fails to be constructed, is not added to the session .. change:: :tags: :tickets: CAST function has been made into its own clause object with its own compilation function in ansicompiler; allows MySQL to silently ignore most CAST calls since MySQL seems to only support the standard CAST syntax with Date types. MySQL-compatible CAST support for strings, ints, etc. a TODO .. changelog:: :version: 0.2.2 :released: Mon Jun 05 2006 .. change:: :tags: :tickets: 190 big improvements to polymorphic inheritance behavior, enabling it to work with adjacency list table structures .. change:: :tags: :tickets: major fixes and refactorings to inheritance relationships overall, more unit tests .. change:: :tags: :tickets: fixed "echo_pool" flag on create_engine() .. change:: :tags: :tickets: fix to docs, removed incorrect info that close() is unsafe to use with threadlocal strategy (it's totally safe !) .. change:: :tags: :tickets: 188 create_engine() can take URLs as string or unicode .. change:: :tags: :tickets: firebird support partially completed; thanks to James Ralston and Brad Clements for their efforts. .. change:: :tags: :tickets: Oracle url translation was broken, fixed, will feed host/port/sid into cx_oracle makedsn() if 'database' field is present, else uses straight TNS name from the 'host' field .. change:: :tags: :tickets: fix to using unicode criterion for query.get()/query.load() .. change:: :tags: :tickets: count() function on selectables now uses table primary key or first column instead of "1" for criterion, also uses label "rowcount" instead of "count". .. change:: :tags: :tickets: got rudimental "mapping to multiple tables" functionality cleaned up, more correctly documented .. change:: :tags: :tickets: restored global_connect() function, attaches to a DynamicMetaData instance called "default_metadata". leaving MetaData arg to Table out will use the default metadata. .. change:: :tags: :tickets: fixes to session cascade behavior, entity_name propagation .. change:: :tags: :tickets: reorganized unittests into subdirectories .. change:: :tags: :tickets: more fixes to threadlocal connection nesting patterns ..
changelog:: :version: 0.2.1 :released: Mon May 29 2006 .. change:: :tags: :tickets: "pool" argument to create_engine() properly propagates .. change:: :tags: :tickets: fixes to URL, raises exception if not parsed, does not pass blank fields along to the DB connect string (a string such as user:host@/db was breaking on postgres) .. change:: :tags: :tickets: small fixes to Mapper when it inserts and tries to get new primary key values back .. change:: :tags: :tickets: rewrote half of TLEngine, the ComposedSQLEngine used with 'strategy="threadlocal"'. it now properly implements engine.begin()/engine.commit(), which nest fully with connection.begin()/trans.commit(). added about six unittests. .. change:: :tags: :tickets: major "duh" in pool.Pool, forgot to put back the WeakValueDictionary. unittest which was supposed to check for this was also silently missing it. fixed unittest to ensure that ConnectionFairy properly falls out of scope. .. change:: :tags: :tickets: placeholder dispose() method added to SingletonThreadPool, doesn't do anything yet .. change:: :tags: :tickets: rollback() is automatically called when an exception is raised, but only if there's no transaction in process (i.e. works more like autocommit). .. change:: :tags: :tickets: fixed exception raise in sqlite if no sqlite module present .. change:: :tags: :tickets: added extra example detail for association object doc .. change:: :tags: :tickets: Connection adds checks for already being closed .. changelog:: :version: 0.2.0 :released: Sat May 27 2006 .. change:: :tags: :tickets: overhaul to Engine system so that what was formerly the SQLEngine is now a ComposedSQLEngine which consists of a variety of components, including a Dialect, ConnectionProvider, etc. This impacted all the db modules as well as Session and Mapper. .. change:: :tags: :tickets: create_engine now takes only RFC-1738-style strings: driver://user:password@host:port/database .. change:: :tags: :tickets: 152 total rewrite of connection-scoping methodology, Connection objects can now execute clause elements directly, added explicit "close" as well as support throughout Engine/ORM to handle closing properly, no longer relying upon __del__ internally to return connections to the pool. .. change:: :tags: :tickets: overhaul to Session interface and scoping. uses hibernate-style methods, including query(class), save(), save_or_update(), etc. no threadlocal scope is installed by default. Provides a binding interface to specific Engines and/or Connections so that underlying Schema objects do not need to be bound to an Engine. Added a basic SessionTransaction object that can simplistically aggregate transactions across multiple engines. .. change:: :tags: :tickets: overhaul to mapper's dependency and "cascade" behavior; dependency logic factored out of properties.py into a separate module "dependency.py". "cascade" behavior is now explicitly controllable, proper implementation of "delete", "delete-orphan", etc. dependency system can now determine at flush time if a child object has a parent or not so that it makes better decisions on how that child should be updated in the DB with regards to deletes. .. change:: :tags: :tickets: overhaul to Schema to build upon MetaData object instead of an Engine. Entire SQL/Schema system can be used with no Engines whatsoever, executed solely by an explicit Connection object. the "bound" methodology exists via the BoundMetaData for schema objects. ProxyEngine is generally not needed anymore and is replaced by DynamicMetaData. ..
change:: :tags: :tickets: 167 true polymorphic behavior implemented, fixes .. change:: :tags: :tickets: 147 "oid" system has been totally moved into compile-time behavior; if they are used in an order_by where they are not available, the order_by doesn't get compiled, fixes .. change:: :tags: :tickets: overhaul to packaging; "mapping" is now "orm", "objectstore" is now "session", the old "objectstore" namespace gets loaded in via the "threadlocal" mod if used .. change:: :tags: :tickets: mods now called in via "import ". extensions favored over mods as mods are globally-monkeypatching .. change:: :tags: :tickets: 154 fix to add_property so that it propagates properties to inheriting mappers .. change:: :tags: :tickets: backrefs create themselves against primary mapper of its originating property, primary/secondary join arguments can be specified to override. helps their usage with polymorphic mappers .. change:: :tags: :tickets: 31 "table exists" function has been implemented .. change:: :tags: :tickets: 98 "create_all/drop_all" added to MetaData object .. change:: :tags: :tickets: improvements and fixes to topological sort algorithm, as well as more unit tests .. change:: :tags: :tickets: tutorial page added to docs which also can be run with a custom doctest runner to ensure it's properly working. docs generally overhauled to deal with new code patterns .. change:: :tags: :tickets: many more fixes, refactorings. .. change:: :tags: :tickets: migration guide is available on the Wiki at http://www.sqlalchemy.org/trac/wiki/02Migration SQLAlchemy-1.0.11/doc/build/changelog/changelog_03.rst0000664000175000017500000024215312636375552023425 0ustar classicclassic00000000000000 ============== 0.3 Changelog ============== .. changelog:: :version: 0.3.11 :released: Sun Oct 14 2007 .. change:: :tags: sql :tickets: tweak DISTINCT precedence for clauses like `func.count(t.c.col.distinct())` .. change:: :tags: sql :tickets: 719 Fixed detection of internal '$' characters in :bind$params .. change:: :tags: sql :tickets: 768 don't assume join criterion consists only of column objects .. change:: :tags: sql :tickets: 764 adjusted operator precedence of NOT to match '==' and others, so that ~(x==y) produces NOT (x=y), which is compatible with MySQL < 5.0 (doesn't like "NOT x=y") .. change:: :tags: orm :tickets: 687 added a check for joining from A->B using join(), along two different m2m tables. this raises an error in 0.3 but is possible in 0.4 when aliases are used. .. change:: :tags: orm :tickets: fixed small exception throw bug in Session.merge() .. change:: :tags: orm :tickets: fixed bug where mapper, being linked to a join where one table had no PK columns, would not detect that the joined table had no PK. .. change:: :tags: orm :tickets: 769 fixed bugs in determining proper sync clauses from custom inherit conditions .. change:: :tags: orm :tickets: 813 backref remove object operation doesn't fail if the other-side collection doesn't contain the item, supports noload collections .. change:: :tags: engine :tickets: fixed another occasional race condition which could occur when using pool with threadlocal setting .. change:: :tags: mysql :tickets: fixed specification of YEAR columns when generating schema .. change:: :tags: mssql :tickets: 679 added support for TIME columns (simulated using DATETIME) .. change:: :tags: mssql :tickets: 721 added support for BIGINT, MONEY, SMALLMONEY, UNIQUEIDENTIFIER and SQL_VARIANT ..
change:: :tags: mssql :tickets: 684 index names are now quoted when dropping from reflected tables .. change:: :tags: mssql :tickets: can now specify a DSN for PyODBC, using a URI like mssql:///?dsn=bob .. change:: :tags: postgres :tickets: when reflecting tables from alternate schemas, the "default" placed upon the primary key, i.e. usually a sequence name, has the "schema" name unconditionally quoted, so that schema names which need quoting are fine. it's slightly unnecessary for schema names which don't need quoting but not harmful. .. change:: :tags: sqlite :tickets: passthrough for stringified dates .. change:: :tags: firebird :tickets: supports_sane_rowcount() set to False due to ticket #370 (right way). .. change:: :tags: firebird :tickets: fixed reflection of Column's nullable property. .. change:: :tags: oracle :tickets: 622, 751 removed LONG_STRING, LONG_BINARY from "binary" types, so type objects don't try to read their values as LOB. .. changelog:: :version: 0.3.10 :released: Fri Jul 20 2007 .. change:: :tags: general :tickets: a new mutex that was added in 0.3.9 causes the pool_timeout feature to fail during a race condition; threads would raise TimeoutError immediately with no delay if many threads push the pool into overflow at the same time. this issue has been fixed. .. change:: :tags: sql :tickets: got connection-bound metadata to work with implicit execution .. change:: :tags: sql :tickets: 667 foreign key specs can have any character in their identifiers .. change:: :tags: sql :tickets: 664 added commutativity-awareness to binary clause comparisons to each other, improves ORM lazy load optimization .. change:: :tags: orm :tickets: cleanup to connection-bound sessions, SessionTransaction .. change:: :tags: postgres :tickets: 571 fixed max identifier length (63) .. changelog:: :version: 0.3.9 :released: Sun Jul 15 2007 .. change:: :tags: general :tickets: 607 better error message for NoSuchColumnError .. change:: :tags: general :tickets: 428 finally figured out how to get setuptools version in, available as sqlalchemy.__version__ .. change:: :tags: general :tickets: the various "engine" arguments, such as "engine", "connectable", "engine_or_url", "bind_to", etc. are all present, but deprecated. they all get replaced by the single term "bind". you also set the "bind" of MetaData using metadata.bind = .. change:: :tags: ext :tickets: iteration over dict association proxies is now dict-like, not InstrumentedList-like (e.g. over keys instead of values) .. change:: :tags: ext :tickets: 597 association proxies no longer bind tightly to source collections, and are constructed with a thunk instead .. change:: :tags: ext :tickets: added selectone_by() to assignmapper .. change:: :tags: orm :tickets: forwards-compatibility with 0.4: added one(), first(), and all() to Query. almost all Query functionality from 0.4 is present in 0.3.9 for forwards-compat purposes. .. change:: :tags: orm :tickets: reset_joinpoint() really really works this time, promise ! lets you re-join from the root: query.join(['a', 'b']).filter().reset_joinpoint().\ join(['a', 'c']).filter().all() in 0.4 all join() calls start from the "root" .. change:: :tags: orm :tickets: 613 added synchronization to the mapper() construction step, to avoid thread collisions when pre-existing mappers are compiling in a different thread .. change:: :tags: orm :tickets: a warning is issued by Mapper when two primary key columns of the same name are munged into a single attribute.
this happens frequently when mapping to joins (or inheritance). .. change:: :tags: orm :tickets: 598 synonym() properties are fully supported by all Query joining/with_parent operations .. change:: :tags: orm :tickets: fixed very stupid bug when deleting items with many-to-many uselist=False relations .. change:: :tags: orm :tickets: remember all that stuff about polymorphic_union ? for joined table inheritance ? Funny thing... You sort of don't need it for joined table inheritance, you can just string all the tables together via outerjoin(). The UNION still applies if concrete tables are involved, though (since nothing to join them on). .. change:: :tags: orm :tickets: small fix to eager loading to better work with eager loads to polymorphic mappers that are using a straight "outerjoin" clause .. change:: :tags: sql :tickets: ForeignKey to a table in a schema that's not the default schema requires the schema to be explicit; i.e. ForeignKey('alt_schema.users.id') .. change:: :tags: sql :tickets: MetaData can now be constructed with an engine or url as the first argument, just like BoundMetaData .. change:: :tags: sql :tickets: BoundMetaData is now deprecated, and MetaData is a direct substitute. .. change:: :tags: sql :tickets: DynamicMetaData has been renamed to ThreadLocalMetaData. the DynamicMetaData name is deprecated and is an alias for ThreadLocalMetaData or a regular MetaData if threadlocal=False .. change:: :tags: sql :tickets: composite primary key is represented as a non-keyed set to allow for composite keys consisting of cols with the same name; occurs within a Join. helps inheritance scenarios formulate correct PK. .. change:: :tags: sql :tickets: 185 improved ability to get the "correct" and most minimal set of primary key columns from a join, equating foreign keys and otherwise equated columns. this is also mostly to help inheritance scenarios formulate the best choice of primary key columns. .. change:: :tags: sql :tickets: added 'bind' argument to Sequence.create()/drop(), ColumnDefault.execute() .. change:: :tags: sql :tickets: 650 columns can be overridden in a reflected table with a "key" attribute different than the column's name, including for primary key columns .. change:: :tags: sql :tickets: 657 fixed "ambiguous column" result detection, when dupe col names exist in a result .. change:: :tags: sql :tickets: some enhancements to "column targeting", the ability to match a column to a "corresponding" column in another selectable. this affects mostly ORM ability to map to complex joins .. change:: :tags: sql :tickets: 619 MetaData and all SchemaItems are safe to use with pickle. slow table reflections can be dumped into a pickled file to be reused later. Just reconnect the engine to the metadata after unpickling. .. change:: :tags: sql :tickets: added a mutex to QueuePool's "overflow" calculation to prevent a race condition that can bypass max_overflow .. change:: :tags: sql :tickets: 623 fixed grouping of compound selects to give correct results. will break on sqlite in some cases, but those cases were producing incorrect results anyway, sqlite doesn't support grouped compound selects .. change:: :tags: sql :tickets: 620 fixed precedence of operators so that parentheses are correctly applied .. change:: :tags: sql :tickets: 545 calling .in_() (i.e. with no arguments) will return "CASE WHEN ( IS NULL) THEN NULL ELSE 0 END = 1)", so that NULL or False is returned in all cases, rather than throwing an error ..
change:: :tags: sql :tickets: fixed "where"/"from" criterion of select() to accept a unicode string in addition to regular string - both convert to text() .. change:: :tags: sql :tickets: 558 added standalone distinct() function in addition to column.distinct() .. change:: :tags: sql :tickets: result.last_inserted_ids() should return a list that is identically sized to the primary key constraint of the table. values that were "passively" created and not available via cursor.lastrowid will be None. .. change:: :tags: sql :tickets: 589 long-identifier detection fixed to use > rather than >= for max ident length .. change:: :tags: sql :tickets: 593 fixed bug where selectable.corresponding_column(selectable.c.col) would not return selectable.c.col, if the selectable is a join of a table and another join involving the same table. messed up ORM decision making .. change:: :tags: sql :tickets: 595 added Interval type to types.py .. change:: :tags: mysql :tickets: 625 fixed catching of some errors that imply a dropped connection .. change:: :tags: mysql :tickets: 624 fixed escaping of the modulo operator .. change:: :tags: mysql :tickets: 590 added 'fields' to reserved words .. change:: :tags: mysql :tickets: various reflection enhancement/fixes .. change:: :tags: oracle :tickets: 604 datetime fixes: got subsecond TIMESTAMP to work, added OracleDate which supports types.Date with only year/month/day .. change:: :tags: oracle :tickets: added dialect flag "auto_convert_lobs", defaults to True; will cause any LOB objects detected in a result set to be forced into OracleBinary so that the LOB is read() automatically, if no typemap was present (i.e., if a textual execute() was issued). .. change:: :tags: oracle :tickets: 624 mod operator '%' produces MOD .. change:: :tags: oracle :tickets: 542 converts cx_oracle datetime objects to Python datetime.datetime when Python 2.3 used .. change:: :tags: oracle :tickets: fixed unicode conversion in Oracle TEXT type .. change:: :tags: postgres :tickets: 624 fixed escaping of the modulo operator .. change:: :tags: postgres :tickets: 570 added support for reflection of domains .. change:: :tags: postgres :tickets: types which are missing during reflection resolve to Null type instead of raising an error .. change:: :tags: postgres :tickets: the fix in "schema" above fixes reflection of foreign keys from an alt-schema table to a public schema table .. change:: :tags: sqlite :tickets: rearranged dialect initialization so it has time to warn about pysqlite1 being too old. .. change:: :tags: sqlite :tickets: sqlite better handles datetime/date/time objects mixed and matched with various Date/Time/DateTime columns .. change:: :tags: sqlite :tickets: 603 string PK column inserts don't get overwritten with OID .. change:: :tags: mssql :tickets: 634 fix port option handling for pyodbc .. change:: :tags: mssql :tickets: now able to reflect start and increment values for identity columns .. change:: :tags: mssql :tickets: preliminary support for using scope_identity() with pyodbc .. changelog:: :version: 0.3.8 :released: Sat Jun 02 2007 .. change:: :tags: engines :tickets: added detach() to Connection, allows underlying DBAPI connection to be detached from its pool, closing on dereference/close() instead of being reused by the pool. .. change:: :tags: engines :tickets: added invalidate() to Connection, immediately invalidates the Connection and its underlying DBAPI connection. .. 
change:: :tags: sql :tickets: _Label class overrides compare_self to return its ultimate object. meaning, if you say someexpr.label('foo') == 5, it produces the correct "someexpr == 5". .. change:: :tags: sql :tickets: _Label propagates "_hide_froms()" so that scalar selects behave more properly with regards to FROM clause #574 .. change:: :tags: sql :tickets: fix to long name generation when using oid_column as an order by (oids used heavily in mapper queries) .. change:: :tags: sql :tickets: significant speed improvement to ResultProxy, pre-caches TypeEngine dialect implementations and saves on function calls per column .. change:: :tags: sql :tickets: parentheses are applied to clauses via a new _Grouping construct. uses operator precedence to more intelligently apply parentheses to clauses, provides cleaner nesting of clauses (doesn't mutate clauses placed in other clauses, i.e. no 'parens' flag) .. change:: :tags: sql :tickets: added 'modifier' keyword, works like func. except does not add parentheses. e.g. select([modifier.DISTINCT(...)]) etc. .. change:: :tags: sql :tickets: 578 removed "no group by's in a select that's part of a UNION" restriction .. change:: :tags: orm :tickets: added reset_joinpoint() method to Query, moves the "join point" back to the starting mapper. 0.4 will change the behavior of join() to reset the "join point" in all cases so this is an interim method. for forwards compatibility, ensure joins across multiple relations are specified using a single join(), i.e. join(['a', 'b', 'c']). .. change:: :tags: orm :tickets: fixed bug in query.instances() that wouldn't handle more than one additional mapper or one additional column. .. change:: :tags: orm :tickets: "delete-orphan" no longer implies "delete". ongoing effort to separate the behavior of these two operations. .. change:: :tags: orm :tickets: many-to-many relationships properly set the type of bind params for delete operations on the association table .. change:: :tags: orm :tickets: many-to-many relationships check that the number of rows deleted from the association table by a delete operation matches the expected results .. change:: :tags: orm :tickets: session.get() and session.load() propagate \**kwargs through to query .. change:: :tags: orm :tickets: 577 fix to polymorphic query which allows the original polymorphic_union to be embedded into a correlated subquery .. change:: :tags: orm :tickets: fix to select_by(=) -style joins in conjunction with many-to-many relationships, bug introduced in r2556 .. change:: :tags: orm :tickets: the "primary_key" argument to mapper() is propagated to the "polymorphic" mapper. primary key columns in this list get normalized to that of the mapper's local table. .. change:: :tags: orm :tickets: restored logging of "lazy loading clause" under sa.orm.strategies logger, got removed in 0.3.7 .. change:: :tags: orm :tickets: improved support for eagerloading of properties off of mappers that are mapped to select() statements; i.e. eagerloader is better at locating the correct selectable with which to attach its LEFT OUTER JOIN. .. change:: :tags: mysql :tickets: Nearly all MySQL column types are now supported for declaration and reflection. Added NCHAR, NVARCHAR, VARBINARY, TINYBLOB, LONGBLOB, YEAR .. change:: :tags: mysql :tickets: The sqltypes.Binary passthrough now always builds a BLOB, avoiding problems with very old database versions ..
.. change::
    :tags: mysql
    :tickets:

    support for column-level CHARACTER SET and COLLATE declarations, as well as ASCII, UNICODE, NATIONAL and BINARY shorthand.

.. change::
    :tags: firebird
    :tickets:

    set max identifier length to 31

.. change::
    :tags: firebird
    :tickets:

    supports_sane_rowcount() set to False due to ticket #370. versioned_id_col feature won't work in FB.

.. change::
    :tags: firebird
    :tickets:

    some execution fixes

.. change::
    :tags: firebird
    :tickets:

    new association proxy implementation, implementing complete proxies to list, dict and set-based relation collections

.. change::
    :tags: firebird
    :tickets:

    added orderinglist, a custom list class that synchronizes an object attribute with that object's position in the list

.. change::
    :tags: firebird
    :tickets:

    small fix to SelectResultsExt to not bypass itself during select().

.. change::
    :tags: firebird
    :tickets:

    added filter(), filter_by() to assignmapper

.. changelog::
    :version: 0.3.7
    :released: Sun Apr 29 2007

.. change::
    :tags: engines
    :tickets:

    warnings module used for issuing warnings (instead of logging)

.. change::
    :tags: engines
    :tickets: 480

    cleanup of DBAPI import strategies across all engines

.. change::
    :tags: engines
    :tickets:

    refactoring of engine internals which reduces complexity, number of codepaths; places more state inside of ExecutionContext to allow more dialect control of cursor handling, result sets. ResultProxy totally refactored and also has two versions of "buffered" result sets used for different purposes.

.. change::
    :tags: engines
    :tickets: 514

    server side cursor support fully functional in postgres.

.. change::
    :tags: engines
    :tickets:

    improved framework for auto-invalidation of connections that have lost their underlying database, via dialect-specific detection of exceptions corresponding to that database's disconnect related error messages. Additionally, when a "connection no longer open" condition is detected, the entire connection pool is discarded and replaced with a new instance. #516

.. change::
    :tags: engines
    :tickets: 521

    the dialects within sqlalchemy.databases become setuptools entry points. loading the built-in database dialects works the same as always, but if none found will fall back to trying pkg_resources to load an external module

.. change::
    :tags: engines
    :tickets:

    Engine contains a "url" attribute referencing the url.URL object used by create_engine().

.. change::
    :tags: sql
    :tickets:

    keys() of result set columns are not lowercased, come back exactly as they're expressed in cursor.description. note this causes colnames to be all caps in oracle.

.. change::
    :tags: sql
    :tickets:

    preliminary support for unicode table names, column names and SQL statements added, for databases which can support them. Works with sqlite and postgres so far. Mysql *mostly* works except the has_table() function does not work. Reflection works too.

.. change::
    :tags: sql
    :tickets: 522

    the Unicode type is now a direct subclass of String, which now contains all the "convert_unicode" logic. This helps the variety of unicode situations that occur in db's such as MS-SQL to be better handled and allows subclassing of the Unicode datatype.

.. change::
    :tags: sql
    :tickets:

    ClauseElements can be used in in_() clauses now, such as bind parameters, etc. #476

.. change::
    :tags: sql
    :tickets:

    reverse operators implemented for `CompareMixin` elements, allows expressions like "5 + somecolumn" etc. #474
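For illustration, the reverse-operator change above lets the column element sit on the right-hand side of the expression (a sketch; the table is hypothetical)::

    from sqlalchemy import Table, Column, Integer, MetaData, select

    t = Table("t", MetaData(), Column("x", Integer))
    expr = 5 + t.c.x            # integer on the left now works
    stmt = select([expr + 10])  # usable like any other column expression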
.. change::
    :tags: sql
    :tickets:

    the "where" criterion of an update() and delete() now correlates embedded select() statements against the table being updated or deleted. this works the same as nested select() statement correlation, and can be disabled via the correlate=False flag on the embedded select().

.. change::
    :tags: sql
    :tickets: 512

    column labels are now generated in the compilation phase, which means their lengths are dialect-dependent. So on oracle a label that gets truncated to 30 chars will go out to 63 characters on postgres. Also, the true labelname is always attached as the accessor on the parent Selectable so there's no need to be aware of the "truncated" label names.

.. change::
    :tags: sql
    :tickets:

    column label and bind param "truncation" also generate deterministic names now, based on their ordering within the full statement being compiled. this means the same statement will produce the same string across application restarts, allowing DB query plan caching to work better.

.. change::
    :tags: sql
    :tickets: 513

    the "mini" column labels generated when using subqueries, which are to work around glitchy SQLite behavior that doesn't understand "foo.id" as equivalent to "id", are now only generated in the case that those named columns are selected from (part of)

.. change::
    :tags: sql
    :tickets:

    the label() method on ColumnElement will properly propagate the TypeEngine of the base element out to the label, including a label() created from a scalar=True select() statement.

.. change::
    :tags: sql
    :tickets: 513

    MS-SQL better detects when a query is a subquery and knows not to generate ORDER BY phrases for those

.. change::
    :tags: sql
    :tickets: 505

    fix for fetchmany() "size" argument being positional in most dbapis

.. change::
    :tags: sql
    :tickets:

    sending None as an argument to func. will produce an argument of NULL

.. change::
    :tags: sql
    :tickets:

    query strings in unicode URLs get keys encoded to ascii for \**kwargs compat

.. change::
    :tags: sql
    :tickets: 523

    slight tweak to raw execute() change to also support tuples for positional parameters, not just lists

.. change::
    :tags: sql
    :tickets:

    fix to case() construct to propagate the type of the first WHEN condition as the return type of the case statement

.. change::
    :tags: orm
    :tickets:

    fixed critical issue when, after options(eagerload()) is used, the mapper would then always apply query "wrapping" behavior for all subsequent LIMIT/OFFSET/DISTINCT queries, even if no eager loading was applied on those subsequent queries.

.. change::
    :tags: orm
    :tickets: 541

    added query.with_parent(someinstance) method. searches for target instance using lazy join criterion from parent instance. takes optional string "property" to isolate the desired relation. also adds static Query.query_from_parent(instance, property) version.

.. change::
    :tags: orm
    :tickets: 554

    improved query.XXX_by(someprop=someinstance) querying to use similar methodology to with_parent, i.e. using the "lazy" clause which prevents adding the remote instance's table to the SQL, thereby making more complex conditions possible

.. change::
    :tags: orm
    :tickets:

    added generative versions of aggregates, i.e. sum(), avg(), etc. to query. used via query.apply_max(), apply_sum(), etc. #552

.. change::
    :tags: orm
    :tickets:

    fix to using distinct() or distinct=True in combination with join() and similar

.. change::
    :tags: orm
    :tickets:

    corresponding to label/bindparam name generation, eager loaders generate deterministic names for the aliases they create using md5 hashes.
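For illustration, the query.with_parent() addition above might be used roughly as follows (a 0.3-era sketch; the classes and the relation name are hypothetical)::

    # children of "someuser", using the lazy join criterion of the
    # parent's relation; "property" isolates a specific relation
    q = session.query(Address).with_parent(someuser, property="addresses")

    # the static form described above
    q = Query.query_from_parent(someuser, property="addresses")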
.. change::
    :tags: orm
    :tickets:

    improved/fixed custom collection classes when giving it "set"/"sets.Set" classes or subclasses (was still looking for append() methods on them during lazy loads)

.. change::
    :tags: orm
    :tickets:

    restored old "column_property()" ORM function (used to be called "column()") to force any column expression to be added as a property on a mapper, particularly those that aren't present in the mapped selectable. this allows "scalar expressions" of any kind to be added as relations (though they have issues with eager loads).

.. change::
    :tags: orm
    :tickets: 533

    fix to many-to-many relationships targeting polymorphic mappers

.. change::
    :tags: orm
    :tickets: 543

    making progress with session.merge() as well as combining its usage with entity_name

.. change::
    :tags: orm
    :tickets:

    the usual adjustments to relationships between inheriting mappers, in this case establishing relation()s to subclass mappers where the join conditions come from the superclass' table

.. change::
    :tags: informix
    :tickets:

    informix support added! courtesy James Zhang, who put a ton of effort in.

.. change::
    :tags: sqlite
    :tickets:

    removed silly behavior where sqlite would reflect UNIQUE indexes as part of the primary key (?!)

.. change::
    :tags: oracle
    :tickets:

    small fix to allow successive compiles of the same SELECT object which features LIMIT/OFFSET. oracle dialect needs to modify the object to have ROW_NUMBER OVER and wasn't performing the full series of steps on successive compiles.

.. change::
    :tags: mysql
    :tickets:

    support for SSL arguments given as inline within URL query string, prefixed with "ssl\_", courtesy terjeros@gmail.com.

.. change::
    :tags: mysql
    :tickets:

    mysql uses "DESCRIBE.", catching exceptions if table doesn't exist, in order to determine if a table exists. this supports unicode table names as well as schema names. tested with MySQL5 but should work with 4.1 series as well. (#557)

.. change::
    :tags: extensions
    :tickets:

    big fix to AssociationProxy so that multiple AssociationProxy objects can be associated with a single association collection.

.. change::
    :tags: extensions
    :tickets:

    assign_mapper names methods according to their keys (i.e. __name__) #551

.. change::
    :tags: mssql
    :tickets:

    pyodbc is now the preferred DB-API for MSSQL, and if no module is specifically requested, will be loaded first on a module probe.

.. change::
    :tags: mssql
    :tickets:

    The @@SCOPE_IDENTITY is now used instead of @@IDENTITY. This behavior may be overridden with the engine_connect "use_scope_identity" keyword parameter, which may also be specified in the dburi.

.. changelog::
    :version: 0.3.6
    :released: Fri Mar 23 2007

.. change::
    :tags: sql
    :tickets:

    bindparam() names are now repeatable! specify two distinct bindparam()s with the same name in a single statement, and the key will be shared. proper positional/named args translate at compile time. for the old behavior of "aliasing" bind parameters with conflicting names, specify "unique=True" - this option is still used internally for all the auto-generated (value-based) bind parameters.

.. change::
    :tags: sql
    :tickets:

    slightly better support for bind params as column clauses, either via bindparam() or via literal(), i.e. select([literal('foo')])

.. change::
    :tags: sql
    :tickets:

    MetaData can bind to an engine either via "url" or "engine" kwargs to constructor, or by using connect() method. BoundMetaData is identical to MetaData except engine_or_url param is required. DynamicMetaData is the same and provides thread-local connections by default.
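For illustration, the binding styles described above (a 0.3-era sketch; the URL and engine are hypothetical placeholders)::

    from sqlalchemy import MetaData, BoundMetaData

    meta = MetaData(url="sqlite://")      # bind via "url" kwarg
    meta = MetaData(engine=some_engine)   # or via "engine" kwarg
    meta = MetaData()
    meta.connect("sqlite://")             # or after construction
    bound = BoundMetaData("sqlite://")    # engine_or_url is required here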
.. change::
    :tags: sql
    :tickets:

    exists() becomes usable as a standalone selectable, not just in a WHERE clause, i.e. exists([columns], criterion).select()

.. change::
    :tags: sql
    :tickets:

    correlated subqueries work inside of ORDER BY, GROUP BY

.. change::
    :tags: sql
    :tickets:

    fixed function execution with explicit connections, i.e. conn.execute(func.dosomething())

.. change::
    :tags: sql
    :tickets:

    use_labels flag on select() won't auto-create labels for literal text column elements, since we can make no assumptions about the text. to create labels for literal columns, you can say "somecol AS somelabel", or use literal_column("somecol").label("somelabel")

.. change::
    :tags: sql
    :tickets:

    quoting won't occur for literal columns when they are "proxied" into the column collection for their selectable (is_literal flag is propagated). literal columns are specified via literal_column("somestring").

.. change::
    :tags: sql
    :tickets:

    added "fold_equivalents" boolean argument to Join.select(), which removes 'duplicate' columns from the resulting column clause that are known to be equivalent based on the join condition. this is of great use when constructing subqueries of joins which Postgres complains about if duplicate column names are present.

.. change::
    :tags: sql
    :tickets: 503

    fixed use_alter flag on ForeignKeyConstraint

.. change::
    :tags: sql
    :tickets: 506

    fixed usage of 2.4-only "reversed" in topological.py

.. change::
    :tags: sql
    :tickets: 501

    for hackers, refactored the "visitor" system of ClauseElement and SchemaItem so that the traversal of items is controlled by the ClauseVisitor itself, using the method visitor.traverse(item). accept_visitor() methods can still be called directly but will not do any traversal of child items. ClauseElement/SchemaItem now have a configurable get_children() method to return the collection of child elements for each parent object. This allows the full traversal of items to be clear and unambiguous (as well as loggable), with an easy method of limiting a traversal (just pass flags which are picked up by appropriate get_children() methods).

.. change::
    :tags: sql
    :tickets:

    the "else\_" parameter to the case statement now properly works when set to zero.

.. change::
    :tags: orm
    :tickets:

    the full featureset of the SelectResults extension has been merged into a new set of methods available off of Query. These methods all provide "generative" behavior, whereby the Query is copied and a new one returned with additional criterion added. The new methods include:

    * filter() - applies select criterion to the query
    * filter_by() - applies "by"-style criterion to the query
    * avg() - return the avg() function on the given column
    * join() - join to a property (or across a list of properties)
    * outerjoin() - like join() but uses LEFT OUTER JOIN
    * limit()/offset() - apply LIMIT/OFFSET range-based access which applies limit/offset: session.query(Foo)[3:5]
    * distinct() - apply DISTINCT
    * list() - evaluate the criterion and return results

    no incompatible changes have been made to Query's API and no methods have been deprecated. Existing methods like select(), select_by(), get(), get_by() all execute the query at once and return results like they always did. join_to()/join_via() are still there although the generative join()/outerjoin() methods are easier to use.

.. change::
    :tags: orm
    :tickets:

    the return value for multiple mappers used with instances() now returns a cartesian product of the requested list of mappers, represented as a list of tuples. this corresponds to the documented behavior. So that instances match up properly, the "uniquing" is disabled when this feature is used.
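For illustration, the generative methods merged from SelectResults above compose like this (a 0.3-era sketch; the mapped class and relation name are hypothetical)::

    q = session.query(User)
    q = q.filter_by(name="ed").outerjoin("addresses").distinct()
    some = q[3:5]      # LIMIT/OFFSET via range-based access
    rows = q.list()    # evaluate the criterion and return results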
.. change::
    :tags: orm
    :tickets:

    Query has add_entity() and add_column() generative methods. these will add the given mapper/class or ColumnElement to the query at compile time, and apply them to the instances() method. the user is responsible for constructing reasonable join conditions (otherwise you can get full cartesian products). result set is the list of tuples, non-uniqued.

.. change::
    :tags: orm
    :tickets:

    strings and columns can also be sent to the \*args of instances() where those exact result columns will be part of the result tuples.

.. change::
    :tags: orm
    :tickets:

    a full select() construct can be passed to query.select() (which worked anyway), but also query.selectfirst(), query.selectone() which will be used as is (i.e. no query is compiled). works similarly to sending the results to instances().

.. change::
    :tags: orm
    :tickets: 495

    eager loading will not "aliasize" "order by" clauses that were placed in the select statement by something other than the eager loader itself, to fix possibility of dupe columns as illustrated in. however, this means you have to be more careful with the columns placed in the "order by" of Query.select(), that you have explicitly named them in your criterion (i.e. you can't rely on the eager loader adding them in for you)

.. change::
    :tags: orm
    :tickets:

    added a handy multi-use "identity_key()" method to Session, allowing the generation of identity keys for primary key values, instances, and rows, courtesy Daniel Miller

.. change::
    :tags: orm
    :tickets: 249

    many-to-many table will be properly handled even for operations that occur on the "backref" side of the operation

.. change::
    :tags: orm
    :tickets: 492

    added "refresh-expire" cascade. allows refresh() and expire() calls to propagate along relationships.

.. change::
    :tags: orm
    :tickets: 493

    more fixes to polymorphic relations, involving proper lazy-clause generation on many-to-one relationships to polymorphic mappers. also fixes to detection of "direction", more specific targeting of columns that belong to the polymorphic union vs. those that don't.

.. change::
    :tags: orm
    :tickets:

    some fixes to relationship calcs when using "viewonly=True" to pull in other tables into the join condition which aren't parent of the relationship's parent/child mappings

.. change::
    :tags: orm
    :tickets:

    flush fixes on cyclical-referential relationships that contain references to other instances outside of the cyclical chain, when some of the objects in the cycle are not actually part of the flush

.. change::
    :tags: orm
    :tickets: 500

    put an aggressive check for "flushing object A with a collection of B's, but you put a C in the collection" error condition - **even if C is a subclass of B**, unless B's mapper loads polymorphically. Otherwise, the collection will later load a "B" which should be a "C" (since it's not polymorphic) which breaks in bi-directional relationships (i.e. C has its A, but A's backref will lazyload it as a different instance of type "B"). This check is going to bite some of you who do this without issues, so the error message will also document a flag "enable_typechecks=False" to disable this checking. But be aware that bi-directional relationships in particular become fragile without this check.

.. change::
    :tags: extensions
    :tickets: 472

    options() method on SelectResults now implemented "generatively" like the rest of the SelectResults methods. But you're going to just use Query now anyway.
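For illustration, disabling the collection type check described above (a 0.3-era sketch; the classes and table are hypothetical, with B's mapper non-polymorphic)::

    mapper(A, a_table, properties={
        # allow instances of C, a subclass of B, into the collection
        "items": relation(B, enable_typechecks=False),
    })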
.. change::
    :tags: extensions
    :tickets:

    query() method is added by assignmapper. this helps with navigating to all the new generative methods on Query.

.. change::
    :tags: ms-sql
    :tickets:

    removed seconds input on DATE column types (probably should remove the time altogether)

.. change::
    :tags: ms-sql
    :tickets:

    null values in float fields no longer raise errors

.. change::
    :tags: ms-sql
    :tickets:

    LIMIT with OFFSET now raises an error (MS-SQL has no OFFSET support)

.. change::
    :tags: ms-sql
    :tickets: 509

    added a facility to use the MSSQL type VARCHAR(max) instead of TEXT for large unsized string fields. Use the new "text_as_varchar" to turn it on.

.. change::
    :tags: ms-sql
    :tickets:

    ORDER BY clauses without a LIMIT are now stripped in subqueries, as MS-SQL forbids this usage

.. change::
    :tags: ms-sql
    :tickets: 480

    cleanup of module importing code; specifiable DB-API module; more explicit ordering of module preferences.

.. change::
    :tags: oracle
    :tickets:

    got binary working for any size input! cx_oracle works fine; it was my fault, as BINARY was being passed and not BLOB for setinputsizes (also the unit tests weren't even setting input sizes).

.. change::
    :tags: oracle
    :tickets:

    also fixed CLOB read/write on a separate changeset.

.. change::
    :tags: oracle
    :tickets:

    auto_setinputsizes defaults to True for Oracle, fixed cases where it improperly propagated bad types.

.. change::
    :tags: mysql
    :tickets:

    added a catchall \**kwargs to MSString, to help reflection of obscure types (like "varchar() binary" in MS 4.0)

.. change::
    :tags: mysql
    :tickets:

    added explicit MSTimeStamp type which takes effect when using types.TIMESTAMP.

.. changelog::
    :version: 0.3.5
    :released: Thu Feb 22 2007

.. change::
    :tags: sql
    :tickets:

    the value of "case_sensitive" defaults to True now, regardless of the casing of the identifier, unless specifically set to False. this is because the object might be labeled as something else which does contain mixed case, and propagating "case_sensitive=False" breaks that. Other fixes to quoting when using labels and "fake" column objects

.. change::
    :tags: sql
    :tickets:

    added a "supports_execution()" method to ClauseElement, so that individual kinds of clauses can express if they are appropriate for executing...such as, you can execute a "select", but not a "Table" or a "Join".

.. change::
    :tags: sql
    :tickets:

    fixed argument passing to straight textual execute() on engine, connection. can handle \*args or a list instance for positional, \**kwargs or a dict instance for named args, or a list of lists or dicts to invoke executemany()

.. change::
    :tags: sql
    :tickets:

    small fix to BoundMetaData to accept unicode or string URLs

.. change::
    :tags: sql
    :tickets: 466

    fixed named PrimaryKeyConstraint generation courtesy andrija at gmail

.. change::
    :tags: sql
    :tickets: 464

    fixed generation of CHECK constraints on columns

.. change::
    :tags: sql
    :tickets:

    fixes to tometadata() operation to propagate Constraints at column and table level

.. change::
    :tags: oracle
    :tickets: 436

    when returning "rowid" as the ORDER BY column or in use with ROW_NUMBER OVER, oracle dialect checks the selectable it's being applied to and will switch to table PK if not applicable, i.e. for a UNION. checking for DISTINCT, GROUP BY (other places that rowid is invalid) still a TODO. allows polymorphic mappings to function.

.. change::
    :tags: oracle
    :tickets:

    sequences on a non-pk column will properly fire off on INSERT
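For illustration, a non-primary-key column with an explicit sequence, per the entry above (a sketch; the table and sequence names are hypothetical)::

    from sqlalchemy import Table, Column, Integer, MetaData, Sequence

    t = Table("stuff", MetaData(),
        Column("id", Integer, primary_key=True),
        Column("counter", Integer, Sequence("counter_seq")),
    )
    # on INSERT, counter_seq now fires for the non-PK "counter" column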
.. change::
    :tags: oracle
    :tickets: 435

    added PrefetchingResultProxy support to pre-fetch LOB columns when they are known to be present, fixes

.. change::
    :tags: oracle
    :tickets: 379

    implemented reflection of tables based on synonyms, including across dblinks

.. change::
    :tags: oracle
    :tickets: 363

    issues a log warning when a related table can't be reflected due to certain permission errors

.. change::
    :tags: mysql
    :tickets:

    fix to reflection on older DB's that might return array() type for "show variables like" statements

.. change::
    :tags: postgres
    :tickets: 442

    better reflection of sequences for alternate-schema Tables

.. change::
    :tags: postgres
    :tickets:

    sequences on a non-pk column will properly fire off on INSERT

.. change::
    :tags: postgres
    :tickets: 460, 444

    added PGInterval type, PGInet type

.. change::
    :tags: mssql
    :tickets: 419

    preliminary support for pyodbc (Yay!)

.. change::
    :tags: mssql
    :tickets: 298

    better support for NVARCHAR types added

.. change::
    :tags: mssql
    :tickets:

    fix for commit logic on pymssql

.. change::
    :tags: mssql
    :tickets: 456

    fix for query.get() with schema

.. change::
    :tags: mssql
    :tickets: 473

    fix for non-integer relationships

.. change::
    :tags: mssql
    :tickets: 419

    DB-API module now selectable at run-time

.. change::
    :tags: mssql
    :tickets: 422, 481, 415

    now passes many more unit tests

.. change::
    :tags: mssql
    :tickets: 479

    better unittest compatibility with ANSI functions

.. change::
    :tags: mssql
    :tickets: 415

    improved support for implicit sequence PK columns with auto-insert

.. change::
    :tags: mssql
    :tickets: 371

    fix for blank password in adodbapi

.. change::
    :tags: mssql
    :tickets: 481

    fixes to get unit tests working with pyodbc

.. change::
    :tags: mssql
    :tickets:

    fix to auto_identity_insert on db-url query

.. change::
    :tags: mssql
    :tickets:

    added query_timeout to db-url query params. currently works only for pymssql

.. change::
    :tags: mssql
    :tickets:

    tested with pymssql 0.8.0 (which is now LGPL)

.. change::
    :tags: orm, bugs
    :tickets: 441, 448, 439

    another refactoring to relationship calculation. Allows more accurate ORM behavior with relationships from/to/between mappers, particularly polymorphic mappers, also their usage with Query, SelectResults.

.. change::
    :tags: orm, bugs
    :tickets:

    removed deprecated method of specifying custom collections on classes; you must now use the "collection_class" option. the old way was beginning to produce conflicts when people used assign_mapper(), which now patches an "options" method, in conjunction with a relationship named "options". (relationships take precedence over monkeypatched assign_mapper methods).

.. change::
    :tags: orm, bugs
    :tickets: 454

    extension() query option propagates to Mapper._instance() method so that all loading-related methods get called

.. change::
    :tags: orm, bugs
    :tickets:

    eager relation to an inheriting mapper won't fail if no rows returned for the relationship.

.. change::
    :tags: orm, bugs
    :tickets: 486

    eager relation loading bug fixed for eager relation on multiple descendant classes

.. change::
    :tags: orm, bugs
    :tickets: 423

    fix for very large topological sorts, courtesy ants.aasma at gmail

.. change::
    :tags: orm, bugs
    :tickets:

    eager loading is slightly more strict about detecting "self-referential" relationships, specifically between polymorphic mappers. this results in an "eager degrade" to lazy loading.

.. change::
    :tags: orm, bugs
    :tickets: 449

    improved support for complex queries embedded into "where" criterion for query.select()
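For illustration, the kind of embedded "where" criterion referred to above (a 0.3-era sketch; the tables and mapped class are hypothetical, using the standalone exists() form noted earlier)::

    from sqlalchemy import exists

    crit = exists([addresses.c.id], addresses.c.user_id == users.c.id)
    results = session.query(User).select(crit)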
.. change::
    :tags: orm, bugs
    :tickets: 485

    mapper options like eagerload(), lazyload(), deferred(), will work for "synonym()" relationships

.. change::
    :tags: orm, bugs
    :tickets: 445

    fixed bug where cascade operations incorrectly included deleted collection items in the cascade

.. change::
    :tags: orm, bugs
    :tickets: 478

    fixed relationship deletion error when one-to-many child item is moved to a new parent in a single unit of work

.. change::
    :tags: orm, bugs
    :tickets:

    fixed relationship deletion error where parent/child with a single column as PK/FK on the child would raise a "blank out the primary key" error, if manually deleted or "delete" cascade without "delete-orphan" was used

.. change::
    :tags: orm, bugs
    :tickets:

    fix to deferred so that load operation doesn't mistakenly occur when only PK col attributes are set

.. change::
    :tags: orm, enhancements
    :tickets: 385

    implemented foreign_keys argument to mapper. use in conjunction with primaryjoin/secondaryjoin arguments to specify/override foreign keys defined on the Table instance.

.. change::
    :tags: orm, enhancements
    :tickets:

    contains_eager('foo') automatically implies eagerload('foo')

.. change::
    :tags: orm, enhancements
    :tickets:

    added "alias" argument to contains_eager(). use it to specify the string name or Alias instance of an alias used in the query for the eagerly loaded child items. easier to use than "decorator"

.. change::
    :tags: orm, enhancements
    :tickets:

    added "contains_alias()" option for result set mapping to an alias of the mapped table

.. change::
    :tags: orm, enhancements
    :tickets: 468

    added support for py2.5 "with" statement with SessionTransaction

.. change::
    :tags: extensions
    :tickets:

    added distinct() method to SelectResults. generally should only make a difference when using count().

.. change::
    :tags: extensions
    :tickets: 472

    added options() method to SelectResults, equivalent to query.options()

.. change::
    :tags: extensions
    :tickets: 462

    added optional __table_opts__ dictionary to ActiveMapper, will send kw options to Table objects

.. change::
    :tags: extensions
    :tickets: 467

    added selectfirst(), selectfirst_by() to assign_mapper

.. changelog::
    :version: 0.3.4
    :released: Tue Jan 23 2007

.. change::
    :tags: general
    :tickets:

    global "insure"->"ensure" change. in US english "insure" is actually largely interchangeable with "ensure" (so says the dictionary), so I'm not completely illiterate, but it's definitely sub-optimal to "ensure", which is non-ambiguous.

.. change::
    :tags: sql
    :tickets:

    added "fetchmany()" support to ResultProxy

.. change::
    :tags: sql
    :tickets:

    added support for column "key" attribute to be usable in row[]/row.

.. change::
    :tags: sql
    :tickets:

    changed "BooleanExpression" to subclass from "BinaryExpression", so that boolean expressions can also follow column-clause behaviors (i.e. label(), etc).

.. change::
    :tags: sql
    :tickets:

    trailing underscores are trimmed from func. calls, such as func.if_()

.. change::
    :tags: sql
    :tickets:

    fix to correlation of subqueries when the column list of the select statement is constructed with individual calls to append_column(); this fixes an ORM bug whereby nested select statements were not getting correlated with the main select generated by the Query object.

.. change::
    :tags: sql
    :tickets:

    another fix to subquery correlation so that a subquery which has only one FROM element will *not* correlate that single element, since at least one FROM element is required in a query.

.. change::
    :tags: sql
    :tickets: 414

    default "timezone" setting is now False. this corresponds to Python's datetime behavior as well as Postgres' timestamp/time types (which is the only timezone-sensitive dialect at the moment)
.. change::
    :tags: sql
    :tickets:

    the "op()" function is now treated as an "operation", rather than a "comparison". the difference is, an operation produces a BinaryExpression from which further operations can occur whereas comparison produces the more restrictive BooleanExpression

.. change::
    :tags: sql
    :tickets:

    trying to redefine a reflected primary key column as non-primary key raises an error

.. change::
    :tags: sql
    :tickets:

    type system slightly modified to support TypeDecorators that can be overridden by the dialect (ok, that's not very clear, it allows the mssql tweak below to be possible)

.. change::
    :tags: mssql
    :tickets:

    added an NVarchar type (produces NVARCHAR), also MSUnicode which provides Unicode-translation for the NVarchar regardless of dialect convert_unicode setting.

.. change::
    :tags: postgres
    :tickets: 424

    fix to the initial checkfirst for tables to take current schema into account

.. change::
    :tags: postgres
    :tickets:

    postgres has an optional "server_side_cursors=True" flag which will utilize server side cursors. these are appropriate for fetching only partial results and are necessary for working with very large unbounded result sets. While we'd like this to be the default behavior, different environments seem to have different results and the causes have not been isolated so we are leaving the feature off by default for now. Uses an apparently undocumented psycopg2 behavior recently discovered on the psycopg mailing list.

.. change::
    :tags: postgres
    :tickets:

    added "BIGSERIAL" support for postgres table with PGBigInteger/autoincrement

.. change::
    :tags: postgres
    :tickets: 402

    fixes to postgres reflection to better handle when schema names are present; thanks to jason (at) ncsmags.com

.. change::
    :tags: mysql
    :tickets: 420

    mysql is inconsistent with what kinds of quotes it uses in foreign keys during a SHOW CREATE TABLE, reflection updated to accommodate all three styles

.. change::
    :tags: mysql
    :tickets: 418

    mysql table create options work on a generic passthru now, i.e. Table(..., mysql_engine='InnoDB', mysql_collate="latin1_german2_ci", mysql_auto_increment="5", mysql_...), helps

.. change::
    :tags: firebird
    :tickets: 408

    order of constraint creation puts primary key first before all other constraints; required for firebird, not a bad idea for others

.. change::
    :tags: firebird
    :tickets: 409

    Firebird fix to autoload multifield foreign keys

.. change::
    :tags: firebird
    :tickets: 409

    Firebird NUMERIC type properly handles a type without precision

.. change::
    :tags: oracle
    :tickets:

    *slight* support for binary, but still need to figure out how to insert reasonably large values (over 4K). requires auto_setinputsizes=True sent to create_engine(), rows must be fully fetched individually, etc.

.. change::
    :tags: orm
    :tickets:

    poked the first hole in the can of worms: saying query.select_by(somerelationname=someinstance) will create the join of the primary key columns represented by "somerelationname"'s mapper to the actual primary key in "someinstance".

.. change::
    :tags: orm
    :tickets:

    reworked how relations interact with "polymorphic" mappers, i.e. mappers that have a select_table as well as polymorphic flags. better determination of proper join conditions, interaction with user-defined join conditions, and support for self-referential polymorphic mappers.
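For illustration, the generic MySQL table-option passthrough above (a sketch; the table contents are hypothetical)::

    t = Table("data", meta,
        Column("id", Integer, primary_key=True),
        mysql_engine="InnoDB",
        mysql_collate="latin1_german2_ci",
        mysql_auto_increment="5",
    )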
.. change::
    :tags: orm
    :tickets:

    related to polymorphic mapping relations, some deeper error checking when compiling relations, to detect an ambiguous "primaryjoin" in the case that both sides of the relationship have foreign key references in the primary join condition. also tightened down conditions used to locate "relation direction", associating the "foreignkey" of the relationship with the "primaryjoin"

.. change::
    :tags: orm
    :tickets:

    a little bit of improvement to the concept of a "concrete" inheritance mapping, though that concept is not well fleshed out yet (added test case to support concrete mappers on top of a polymorphic base).

.. change::
    :tags: orm
    :tickets:

    fix to "proxy=True" behavior on synonym()

.. change::
    :tags: orm
    :tickets: 427

    fixed bug where delete-orphan basically didn't work with many-to-many relationships, backref presence generally hid the symptom

.. change::
    :tags: orm
    :tickets:

    added a mutex to the mapper compilation step. I've been reluctant to add any kind of threading anything to SA, but this is one spot that it's really needed, since mappers are typically "global", and while their state does not change during normal operation, the initial compilation step does modify internal state significantly, and this step usually occurs not at module-level initialization time (unless you call compile()) but at first-request time

.. change::
    :tags: orm
    :tickets:

    basic idea of "session.merge()" actually implemented. needs more testing.

.. change::
    :tags: orm
    :tickets:

    added "compile_mappers()" function as a shortcut to compiling all mappers

.. change::
    :tags: orm
    :tickets:

    fix to MapperExtension create_instance so that entity_name properly associated with new instance

.. change::
    :tags: orm
    :tickets:

    speed enhancements to ORM object instantiation, eager loading of rows

.. change::
    :tags: orm
    :tickets: 406

    invalid options sent to 'cascade' string will raise an exception

.. change::
    :tags: orm
    :tickets: 407

    fixed bug in mapper refresh/expire whereby eager loaders didn't properly re-populate item lists

.. change::
    :tags: orm
    :tickets: 413

    fix to post_update to ensure rows are updated even for non insert/delete scenarios

.. change::
    :tags: orm
    :tickets: 412

    added an error message if you actually try to modify primary key values on an entity and then flush it

.. change::
    :tags: extensions
    :tickets: 426

    added "validate=False" argument to assign_mapper, if True will ensure that only mapped attributes are named

.. change::
    :tags: extensions
    :tickets:

    assign_mapper gets "options", "instances" functions added (i.e. MyClass.instances())

.. changelog::
    :version: 0.3.3
    :released: Fri Dec 15 2006

.. change::
    :tags:
    :tickets:

    string-based FROM clauses fixed, i.e. select(..., from_obj=["sometext"])

.. change::
    :tags:
    :tickets:

    fixes to passive_deletes flag, lazy=None (noload) flag

.. change::
    :tags:
    :tickets:

    added example/docs for dealing with large collections

.. change::
    :tags:
    :tickets:

    added object_session() method to sqlalchemy namespace

.. change::
    :tags:
    :tickets:

    fixed QueuePool bug whereby it's better able to reconnect to a database that was not reachable (thanks to Sébastien Lelong), also fixed dispose() method

.. change::
    :tags:
    :tickets: 396

    patch that makes MySQL rowcount work correctly!

.. change::
    :tags:
    :tickets:

    fix to MySQL catch of 2006/2014 errors to properly re-raise OperationalError exception

.. changelog::
    :version: 0.3.2
    :released: Sun Dec 10 2006

.. change::
    :tags:
    :tickets: 387

    major connection pool bug fixed. fixes MySQL out of sync errors, will also prevent transactions getting rolled back accidentally in all DBs
.. change::
    :tags:
    :tickets:

    major speed enhancements vs. 0.3.1, to bring speed back to 0.2.8 levels

.. change::
    :tags:
    :tickets:

    made conditional dozens of debug log calls whose log messages were time-intensive to generate

.. change::
    :tags:
    :tickets:

    fixed bug in cascade rules whereby the entire object graph could be unnecessarily cascaded on the save/update cascade

.. change::
    :tags:
    :tickets:

    various speedups in attributes module

.. change::
    :tags:
    :tickets: 388

    identity map in Session is by default *no longer weak referencing*. to have it be weak referencing, use create_session(weak_identity_map=True) fixes

.. change::
    :tags:
    :tickets:

    MySQL detects errors 2006 (server has gone away) and 2014 (commands out of sync) and invalidates the connection on which it occurred.

.. change::
    :tags:
    :tickets: 307

    MySQL bool type fix

.. change::
    :tags:
    :tickets: 382, 349

    postgres reflection fixes

.. change::
    :tags:
    :tickets: 247

    added keywords for EXCEPT, INTERSECT, EXCEPT ALL, INTERSECT ALL

.. change::
    :tags:
    :tickets: 2110

    assign_mapper in assignmapper extension returns the created mapper

.. change::
    :tags:
    :tickets:

    added label() function to Select class, when scalar=True is used to create a scalar subquery i.e. "select x, y, (select max(foo) from table) AS foomax from table"

.. change::
    :tags:
    :tickets:

    added onupdate and ondelete keyword arguments to ForeignKey; propagate to underlying ForeignKeyConstraint if present. (don't propagate in the other direction, however)

.. change::
    :tags:
    :tickets:

    fix to session.update() to preserve "dirty" status of incoming object

.. change::
    :tags:
    :tickets:

    sending a selectable to an IN via the in_() function no longer creates a "union" out of multiple selects; only one selectable may be passed to the in_() function now (make a union yourself if union is needed)

.. change::
    :tags:
    :tickets:

    improved support for disabling save-update cascade via cascade="none" etc.

.. change::
    :tags:
    :tickets:

    added "remote_side" argument to relation(), used only with self-referential mappers to force the direction of the parent/child relationship. replaces the usage of the "foreignkey" parameter for "switching" the direction. "foreignkey" argument is deprecated for all uses and will eventually be replaced by an argument dedicated to ForeignKey specification on mappers.

.. changelog::
    :version: 0.3.1
    :released: Mon Nov 13 2006

.. change::
    :tags: engine/pool
    :tickets:

    some new Pool utility classes, updated docs

.. change::
    :tags: engine/pool
    :tickets:

    "use_threadlocal" on Pool defaults to False (same as create_engine)

.. change::
    :tags: engine/pool
    :tickets:

    fixed direct execution of Compiled objects

.. change::
    :tags: engine/pool
    :tickets:

    create_engine() reworked to be strict about incoming \**kwargs. all keyword arguments must be consumed by one of the dialect, connection pool, and engine constructors, else a TypeError is thrown which describes the full set of invalid kwargs in relation to the selected dialect/pool/engine configuration.

.. change::
    :tags: databases/types
    :tickets:

    MySQL catches exception on "describe" and reports as NoSuchTableError

.. change::
    :tags: databases/types
    :tickets:

    further fixes to sqlite booleans, weren't working as defaults

.. change::
    :tags: databases/types
    :tickets:

    fix to postgres sequence quoting when using schemas

.. change::
    :tags: orm
    :tickets:

    the "delete" cascade will load in all child objects, if they were not loaded already. this can be turned off (i.e. the old behavior) by setting passive_deletes=True on a relation().
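For illustration, restoring the old non-loading behavior described above (a 0.3-era sketch; the classes and table are hypothetical)::

    mapper(Parent, parents, properties={
        # leave deletion of child rows to the database
        "children": relation(Child, cascade="all, delete", passive_deletes=True),
    })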
.. change::
    :tags: orm
    :tickets:

    adjustments to reworked eager query generation to not fail on circular eager-loaded relationships (like backrefs)

.. change::
    :tags: orm
    :tickets:

    fixed bug where the eagerload() (or lazyload()) option didn't properly instruct the Query whether or not to use "nesting" when producing a LIMIT query.

.. change::
    :tags: orm
    :tickets: 360

    fixed bug in circular dependency sorting at flush time; if object A contained a cyclical many-to-one relationship to object B, and object B was just attached to object A, *but* object B itself wasn't changed, the many-to-one synchronize of B's primary key attribute to A's foreign key attribute wouldn't occur.

.. change::
    :tags: orm
    :tickets: 325

    implemented from_obj argument for query.count, improves count function on selectresults

.. change::
    :tags: orm
    :tickets:

    added an assertion within the "cascade" step of ORM relationships to check that the class of object attached to a parent object is appropriate (i.e. if A.items stores B objects, raise an error if a C is appended to A.items)

.. change::
    :tags: orm
    :tickets:

    new extension sqlalchemy.ext.associationproxy, provides transparent "association object" mappings. new example examples/association/proxied_association.py illustrates.

.. change::
    :tags: orm
    :tickets:

    improvement to single table inheritance to load full hierarchies beneath the target class

.. change::
    :tags: orm
    :tickets: 362

    fix to subtle condition in topological sort where a node could appear twice, for

.. change::
    :tags: orm
    :tickets: 365

    additional rework to topological sort, refactoring, for

.. change::
    :tags: orm
    :tickets:

    "delete-orphan" for a certain type can be set on more than one parent class; the instance is an "orphan" only if it's not attached to *any* of those parents

.. changelog::
    :version: 0.3.0
    :released: Sun Oct 22 2006

.. change::
    :tags: general
    :tickets:

    logging is now implemented via standard python "logging" module. "echo" keyword parameters are still functional but set/unset log levels for their respective classes/instances. all logging can be controlled directly through the Python API by setting INFO and DEBUG levels for loggers in the "sqlalchemy" namespace. class-level logging is under "sqlalchemy..", instance-level logging under "sqlalchemy...0x..<00-FF>". Test suite includes "--log-info" and "--log-debug" arguments which work independently of --verbose/--quiet. Logging added to orm to allow tracking of mapper configurations, row iteration.

.. change::
    :tags: general
    :tickets:

    the documentation-generation system has been overhauled to be much simpler in design and more integrated with Markdown

.. change::
    :tags: sqlite
    :tickets:

    sqlite boolean datatype converts False/True to 0/1 by default

.. change::
    :tags: sqlite
    :tickets: 335

    fixes to Date/Time (SLDate/SLTime) types; works as good as postgres now

.. change::
    :tags: ms-sql
    :tickets:

    fixes bug 261 (table reflection broken for MS-SQL case-sensitive databases)

.. change::
    :tags: ms-sql
    :tickets:

    can now specify port for pymssql

.. change::
    :tags: ms-sql
    :tickets:

    introduces new "auto_identity_insert" option for auto-switching into "SET IDENTITY_INSERT" mode when values are specified for IDENTITY columns

.. change::
    :tags: ms-sql
    :tickets:

    now supports multi-column foreign keys

.. change::
    :tags: ms-sql
    :tickets:

    fix to reflecting date/datetime columns

.. change::
    :tags: ms-sql
    :tickets:

    NCHAR and NVARCHAR type support added
.. change::
    :tags: oracle
    :tickets:

    Oracle has experimental support for cx_Oracle.TIMESTAMP, which requires a setinputsizes() call on the cursor that is now enabled via the 'auto_setinputsizes' flag to the oracle dialect.

.. change::
    :tags: firebird
    :tickets:

    aliases do not use "AS"

.. change::
    :tags: firebird
    :tickets:

    correctly raises NoSuchTableError when reflecting non-existent table

.. change::
    :tags: schema
    :tickets:

    a fair amount of cleanup to the schema package, removal of ambiguous methods, methods that are no longer needed. slightly more constrained usage, greater emphasis on explicitness

.. change::
    :tags: schema
    :tickets:

    the "primary_key" attribute of Table and other selectables becomes a setlike ColumnCollection object; is ordered but not numerically indexed. a comparison clause between two pks that are derived from the same underlying tables (i.e. such as two Alias objects) can be generated via table1.primary_key==table2.primary_key

.. change::
    :tags: schema
    :tickets:

    ForeignKey(Constraint) supports "use_alter=True", to create/drop a foreign key via ALTER. this allows circular foreign key relationships to be set up.

.. change::
    :tags: schema
    :tickets:

    append_item() methods removed from Table and Column; preferably construct Table/Column/related objects inline, but if needed use append_column(), append_foreign_key(), append_constraint(), etc.

.. change::
    :tags: schema
    :tickets:

    table.create() no longer returns the Table object, instead has no return value. the usual case is that tables are created via metadata, which is preferable since it will handle table dependencies.

.. change::
    :tags: schema
    :tickets:

    added UniqueConstraint (goes at Table level), CheckConstraint (goes at Table or Column level).

.. change::
    :tags: schema
    :tickets:

    index=False/unique=True on Column now creates a UniqueConstraint, index=True/unique=False creates a plain Index, index=True/unique=True on Column creates a unique Index. 'index' and 'unique' keyword arguments to column are now boolean only; for explicit names and groupings of indexes or unique constraints, use the UniqueConstraint/Index constructs explicitly.

.. change::
    :tags: schema
    :tickets:

    added autoincrement=True to Column; will disable schema generation of SERIAL/AUTO_INCREMENT/identity seq for postgres/mysql/mssql if explicitly set to False

.. change::
    :tags: schema
    :tickets:

    TypeEngine objects now have methods to deal with copying and comparing values of their specific type. Currently used by the ORM, see below.

.. change::
    :tags: schema
    :tickets:

    fixed condition that occurred during reflection when a primary key column was explicitly overridden, where the PrimaryKeyConstraint would get both the reflected and the programmatic column doubled up

.. change::
    :tags: schema
    :tickets:

    the "foreign_key" attribute on Column and ColumnElement in general is deprecated, in favor of the "foreign_keys" list/set-based attribute, which takes into account multiple foreign keys on one column. "foreign_key" will return the first element in the "foreign_keys" list/set or None if the list is empty.

.. change::
    :tags: connections/pooling/execution
    :tickets:

    connection pool tracks open cursors and automatically closes them if connection is returned to pool with cursors still opened. Can be affected by options which cause it to raise an error instead, or to do nothing. fixes issues with MySQL, others

.. change::
    :tags: connections/pooling/execution
    :tickets:

    fixed bug where Connection wouldn't lose its Transaction after commit/rollback
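For illustration, the index/unique combinations described above (a sketch; the column names are hypothetical)::

    Column("a", Integer, unique=True)               # -> UniqueConstraint
    Column("b", Integer, index=True)                # -> plain Index
    Column("c", Integer, index=True, unique=True)   # -> unique Index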
.. change::
    :tags: connections/pooling/execution
    :tickets:

    added scalar() method to ComposedSQLEngine, ResultProxy

.. change::
    :tags: connections/pooling/execution
    :tickets:

    ResultProxy will close() the underlying cursor when the ResultProxy itself is closed. this will auto-close cursors for ResultProxy objects that have had all their rows fetched (or had scalar() called).

.. change::
    :tags: connections/pooling/execution
    :tickets:

    ResultProxy.fetchall() internally uses DBAPI fetchall() for better efficiency, added to mapper iteration as well (courtesy Michael Twomey)

.. change::
    :tags: construction, sql
    :tickets: 292

    changed "for_update" parameter to accept False/True/"nowait" and "read", the latter two of which are interpreted only by Oracle and Mysql

.. change::
    :tags: construction, sql
    :tickets:

    added extract() function to sql dialect (SELECT extract(field FROM expr))

.. change::
    :tags: construction, sql
    :tickets:

    BooleanExpression includes new "negate" argument to specify the appropriate negation operator if one is available.

.. change::
    :tags: construction, sql
    :tickets:

    calling a negation on an "IN" or "IS" clause will result in "NOT IN", "IS NOT" (as opposed to NOT (x IN y)).

.. change::
    :tags: construction, sql
    :tickets: 172

    Function objects know what to do in a FROM clause now. their behavior should be the same, except now you can also do things like select(['*'], from_obj=[func.my_function()]) to get multiple columns from the result, or even use sql.column() constructs to name the return columns

.. change::
    :tags: orm
    :tickets:

    attribute tracking modified to be more intelligent about detecting changes, particularly with mutable types. TypeEngine objects now take a greater role in defining how to compare two scalar instances, including the addition of a MutableType mixin which is implemented by PickleType. unit-of-work now tracks the "dirty" list as an expression of all persistent objects where the attribute manager detects changes. The basic issue that's fixed is detecting changes on PickleType objects, but also generalizes type handling and "modified" object checking to be more complete and extensible.

.. change::
    :tags: orm
    :tickets:

    a wide refactoring to "attribute loader" and "options" architectures. ColumnProperty and PropertyLoader define their loading behavior via switchable "strategies", and MapperOptions no longer use mapper/property copying in order to function; they are instead propagated via QueryContext and SelectionContext objects at query/instances time. All of the internal copying of mappers and properties that was used to handle inheritance as well as options() has been removed; the structure of mappers and properties is much simpler than before and is clearly laid out in the new 'interfaces' module.

.. change::
    :tags: orm
    :tickets:

    related to the mapper/property overhaul, internal refactoring to mapper instances() method to use a SelectionContext object to track state during the operation. SLIGHT API BREAKAGE: the append_result() and populate_instances() methods on MapperExtension have a slightly different method signature now as a result of the change; hoping that these methods are not in widespread use as of yet.

.. change::
    :tags: orm
    :tickets:

    instances() method moved to Query now, backwards-compatible version remains on Mapper.
.. change::
    :tags: orm
    :tickets:

    added contains_eager() MapperOption, used in conjunction with instances() to specify properties that should be eagerly loaded from the result set, using their plain column names by default, or translated given a custom row-translation function.

.. change::
    :tags: orm
    :tickets:

    more rearrangements of unit-of-work commit scheme to better allow dependencies within circular flushes to work properly...updated task traversal/logging implementation

.. change::
    :tags: orm
    :tickets: 321

    polymorphic mappers (i.e. using inheritance) now produce INSERT statements in order of tables across all inherited classes

.. change::
    :tags: orm
    :tickets:

    added an automatic "row switch" feature to mapping, which will detect a pending instance/deleted instance pair with the same identity key and convert the INSERT/DELETE to a single UPDATE

.. change::
    :tags: orm
    :tickets:

    "association" mappings simplified to take advantage of automatic "row switch" feature

.. change::
    :tags: orm
    :tickets: 212

    "custom list classes" is now implemented via the "collection_class" keyword argument to relation(). the old way still works but is deprecated

.. change::
    :tags: orm
    :tickets:

    added "viewonly" flag to relation(), allows construction of relations that have no effect on the flush() process.

.. change::
    :tags: orm
    :tickets: 292

    added "lockmode" argument to base Query select/get functions, including "with_lockmode" function to get a Query copy that has a default locking mode. Will translate "read"/"update" arguments into a for_update argument on the select side.

.. change::
    :tags: orm
    :tickets:

    implemented "version check" logic in Query/Mapper, used when version_id_col is in effect and query.with_lockmode() is used to get() an instance that's already loaded

.. change::
    :tags: orm
    :tickets: 208

    post_update behavior improved; does a better job at not updating too many rows, updates only required columns

.. change::
    :tags: orm
    :tickets: 308

    adjustments to eager loading so that its "eager chain" is kept separate from the normal mapper setup, thereby preventing conflicts with lazy loader operation, fixes

.. change::
    :tags: orm
    :tickets:

    fix to deferred group loading

.. change::
    :tags: orm
    :tickets: 346

    session.flush() won't close a connection it opened

.. change::
    :tags: orm
    :tickets:

    added "batch=True" flag to mapper; if False, save_obj will fully save one object at a time including calls to before_XXXX and after_XXXX

.. change::
    :tags: orm
    :tickets:

    added "column_prefix=None" argument to mapper; prepends the given string (typically '_') to column-based attributes automatically set up from the mapper's Table

.. change::
    :tags: orm
    :tickets: 315

    specifying joins in the from_obj argument of query.select() will replace the main table of the query, if the table is somewhere within the given from_obj. this makes it possible to produce custom joins and outerjoins in queries without the main table getting added twice.

.. change::
    :tags: orm
    :tickets:

    eagerloading is adjusted to more thoughtfully attach its LEFT OUTER JOINs to the given query, looking for custom "FROM" clauses that may have already been set up.

.. change::
    :tags: orm
    :tickets:

    added join_to and outerjoin_to transformative methods to SelectResults, to build up join/outerjoin conditions based on property names. also added select_from to explicitly set from_obj parameter.

.. change::
    :tags: orm
    :tickets:

    removed "is_primary" flag from mapper.
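For illustration, the collection_class keyword mentioned above (a 0.3-era sketch; the classes and table are hypothetical)::

    class OrderedChildren(list):
        """custom list class holding Child objects"""

    mapper(Parent, parents, properties={
        "children": relation(Child, collection_class=OrderedChildren),
    })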
SQLAlchemy-1.0.11/doc/build/changelog/changelog_04.rst0000664000175000017500000037070512636375552023433 0ustar classicclassic00000000000000
==============
0.4 Changelog
==============

.. changelog::
    :version: 0.4.8
    :released: Sun Oct 12 2008

.. change::
    :tags: orm
    :tickets: 1039

    Fixed bug regarding inherit_condition passed with "A=B" versus "B=A" leading to errors

.. change::
    :tags: orm
    :tickets:

    Changes made to new, dirty and deleted collections in SessionExtension.before_flush() will take effect for that flush.

.. change::
    :tags: orm
    :tickets:

    Added label() method to InstrumentedAttribute to establish forwards compatibility with 0.5.

.. change::
    :tags: sql
    :tickets: 1074

    column.in_(someselect) can now be used as a columns-clause expression without the subquery bleeding into the FROM clause

.. change::
    :tags: mysql
    :tickets: 1146

    Added MSMediumInteger type.

.. change::
    :tags: sqlite
    :tickets: 968

    Supplied a custom strftime() function which handles dates before 1900.

.. change::
    :tags: sqlite
    :tickets:

    String's (and Unicode's, UnicodeText's, etc.) convert_unicode logic disabled in the sqlite dialect, to adjust for pysqlite 2.5.0's new requirement that only Python unicode objects are accepted; http://itsystementwicklung.de/pipermail/list-pysqlite/2008-March/000018.html

.. change::
    :tags: oracle
    :tickets: 1155

    has_sequence() now takes schema name into account

.. change::
    :tags: oracle
    :tickets: 1121

    added BFILE to the list of reflected types

.. changelog::
    :version: 0.4.7p1
    :released: Thu Jul 31 2008

.. change::
    :tags: orm
    :tickets:

    Added "add()" and "add_all()" to scoped_session methods. Workaround for 0.4.7::

        from sqlalchemy.orm.scoping import ScopedSession, instrument
        setattr(ScopedSession, "add", instrument("add"))
        setattr(ScopedSession, "add_all", instrument("add_all"))

.. change::
    :tags: orm
    :tickets:

    Fixed non-2.3 compatible usage of set() and generator expression within relation().

.. changelog::
    :version: 0.4.7
    :released: Sat Jul 26 2008

.. change::
    :tags: orm
    :tickets: 1058

    The contains() operator when used with many-to-many will alias() the secondary (association) table so that multiple contains() calls will not conflict with each other

.. change::
    :tags: orm
    :tickets:

    fixed bug preventing merge() from functioning in conjunction with a comparable_property()

.. change::
    :tags: orm
    :tickets:

    the enable_typechecks=False setting on relation() now only allows subtypes with inheriting mappers. Totally unrelated types, or subtypes not set up with mapper inheritance against the target mapper are still not allowed.

.. change::
    :tags: orm
    :tickets: 976

    Added is_active flag to Sessions to detect when a transaction is in progress. This flag is always True with a "transactional" (in 0.5 a non-"autocommit") Session.

.. change::
    :tags: sql
    :tickets:

    Fixed bug when calling select([literal('foo')]) or select([bindparam('foo')]).

.. change::
    :tags: schema
    :tickets: 571

    create_all(), drop_all(), create(), drop() all raise an error if the table name or schema name contains more characters than that dialect's configured character limit. Some DB's can handle too-long table names during usage, and SQLA can handle this as well. But various reflection/checkfirst-during-create scenarios fail since we are looking for the name within the DB's catalog tables.

.. change::
    :tags: schema
    :tickets: 571, 820

    The index name generated when you say "index=True" on a Column is truncated to the length appropriate for the dialect. Additionally, an Index with a too-long name cannot be explicitly dropped with Index.drop(), similar to.
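For illustration, the column.in_(someselect) columns-clause usage from the 0.4.8 notes above (a sketch; the tables are hypothetical)::

    stmt = select([
        users.c.name,
        users.c.id.in_(select([addresses.c.user_id])).label("has_address"),
    ])
    # the embedded select no longer bleeds into the enclosing FROM clause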
.. change::
    :tags: postgres
    :tickets:

    Repaired server_side_cursors to properly detect text() clauses.

.. change::
    :tags: postgres
    :tickets: 1092

    Added PGCidr type.

.. change::
    :tags: mysql
    :tickets:

    Added 'CALL' to the list of SQL keywords which return result rows.

.. change::
    :tags: oracle
    :tickets:

    Oracle get_default_schema_name() "normalizes" the name before returning, meaning it returns a lower-case name when the identifier is detected as case insensitive.

.. change::
    :tags: oracle
    :tickets: 709

    creating/dropping tables takes schema name into account when searching for the existing table, so that tables in other owner namespaces with the same name do not conflict

.. change::
    :tags: oracle
    :tickets: 1062

    Cursors now have "arraysize" set to 50 by default on them, the value of which is configurable using the "arraysize" argument to create_engine() with the Oracle dialect. This is to account for cx_oracle's default setting of "1", which has the effect of many round trips being sent to Oracle. This actually works well in conjunction with BLOB/CLOB-bound cursors, of which there are any number available but only for the life of that row request (so BufferedColumnRow is still needed, but less so).

.. change::
    :tags: oracle
    :tickets:

    sqlite - add SLFloat type, which matches the SQLite REAL type affinity. Previously, only SLNumeric was provided which fulfills NUMERIC affinity, but that's not the same as REAL.

.. changelog::
    :version: 0.4.6
    :released: Sat May 10 2008

.. change::
    :tags: orm
    :tickets:

    Fix to the recent relation() refactoring which fixes exotic viewonly relations which join between local and remote table multiple times, with a common column shared between the joins.

.. change::
    :tags: orm
    :tickets:

    Also re-established viewonly relation() configurations that join across multiple tables.

.. change::
    :tags: orm
    :tickets: 610

    Added experimental relation() flag to help with primaryjoins across functions, etc., _local_remote_pairs=[tuples]. This complements a complex primaryjoin condition allowing you to provide the individual column pairs which comprise the relation's local and remote sides. Also improved lazy load SQL generation to handle placing bind params inside of functions and other expressions. (partial progress towards)

.. change::
    :tags: orm
    :tickets: 1036

    repaired single table inheritance such that you can single-table inherit from a joined-table inheriting mapper without issue.

.. change::
    :tags: orm
    :tickets: 1027

    Fixed "concatenate tuple" bug which could occur with Query.order_by() if clause adaption had taken place.

.. change::
    :tags: orm
    :tickets:

    Removed ancient assertion that mapped selectables require "alias names" - the mapper creates its own alias now if none is present. Though in this case you need to use the class, not the mapped selectable, as the source of column attributes - so a warning is still issued.

.. change::
    :tags: orm
    :tickets:

    fixes to the "exists" function involving inheritance (any(), has(), ~contains()); the full target join will be rendered into the EXISTS clause for relations that link to subclasses.

.. change::
    :tags: orm
    :tickets:

    restored usage of append_result() extension method for primary query rows, when the extension is present and only a single-entity result is being returned.
    .. change::
        :tags: orm
        :tickets: 1015

        refined mapper._save_obj() which was unnecessarily calling __ne__() on scalar values during flush

    .. change::
        :tags: orm
        :tickets: 1019

        added a feature to eager loading whereby subqueries set as column_property() with explicit label names (which is not necessary, btw) will have the label anonymized when the instance is part of the eager join, to prevent conflicts with a subquery or column of the same name on the parent object.

    .. change::
        :tags: orm
        :tickets:

        set-based collections \|=, -=, ^= and &= are stricter about their operands and only operate on sets, frozensets or subclasses of the collection type. Previously, they would accept any duck-typed set.

    .. change::
        :tags: orm
        :tickets:

        added an example dynamic_dict/dynamic_dict.py, illustrating a simple way to place dictionary behavior on top of a dynamic_loader.

    .. change::
        :tags: declarative, extension
        :tickets:

        Joined table inheritance mappers use a slightly relaxed function to create the "inherit condition" to the parent table, so that other foreign keys to not-yet-declared Table objects don't trigger an error.

    .. change::
        :tags: declarative, extension
        :tickets:

        fixed reentrant mapper compile hang when a declared attribute is used within ForeignKey, ie. ForeignKey(MyOtherClass.someattribute)

    .. change::
        :tags: sql
        :tickets:

        Added COLLATE support via the .collate(<collation>) expression operator and collate(<expr>, <collation>) sql function.

    .. change::
        :tags: sql
        :tickets:

        Fixed bug with union() when applied to non-Table connected select statements

    .. change::
        :tags: sql
        :tickets: 1014

        improved behavior of text() expressions when used as FROM clauses, such as select().select_from(text("sometext"))

    .. change::
        :tags: sql
        :tickets: 1021

        Column.copy() respects the value of "autoincrement", fixes usage with Migrate

    .. change::
        :tags: engines
        :tickets:

        Pool listeners can now be provided as a dictionary of callables or a (possibly partial) duck-type of PoolListener, your choice.

    .. change::
        :tags: engines
        :tickets:

        added "rollback_returned" option to Pool which will disable the rollback() issued when connections are returned. This flag is only safe to use with a database which does not support transactions (i.e. MySQL/MyISAM).

    .. change::
        :tags: ext
        :tickets:

        set-based association proxies \|=, -=, ^= and &= are stricter about their operands and only operate on sets, frozensets or other association proxies. Previously, they would accept any duck-typed set.

    .. change::
        :tags: mssql
        :tickets: 1005

        Added "odbc_autotranslate" parameter to engine / dburi parameters. Any given string will be passed through to the ODBC connection string as: "AutoTranslate=%s" % odbc_autotranslate

    .. change::
        :tags: mssql
        :tickets:

        Added "odbc_options" parameter to engine / dburi parameters. The given string is simply appended to the SQLAlchemy-generated odbc connection string. This should obviate the need of adding a myriad of ODBC options in the future.

    .. change::
        :tags: firebird
        :tickets:

        Handle the "SUBSTRING(:string FROM :start FOR :length)" builtin.
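    A sketch of the COLLATE support added above; the collation name here is illustrative and database-specific::

        from sqlalchemy import Column, MetaData, String, Table, collate, select

        metadata = MetaData()
        users = Table('users', metadata, Column('name', String(50)))

        # roughly: SELECT users.name FROM users
        #          ORDER BY users.name COLLATE fr_FR
        stmt = select([users.c.name]).order_by(collate(users.c.name, 'fr_FR'))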
.. changelog::
    :version: 0.4.5
    :released: Fri Apr 04 2008

    .. change::
        :tags: orm
        :tickets:

        A small change in behavior to session.merge() - existing objects are checked for based on primary key attributes, not necessarily _instance_key. So the widely requested capability, that::

            x = MyObject(id=1)
            x = sess.merge(x)

        will in fact load MyObject with id #1 from the database if present, is now available. merge() still copies the state of the given object to the persistent one, so an example like the above would typically have copied "None" from all attributes of "x" onto the persistent copy. These can be reverted using session.expire(x).

    .. change::
        :tags: orm
        :tickets:

        Also fixed behavior in merge() whereby collection elements present on the destination but not the merged collection were not being removed from the destination.

    .. change::
        :tags: orm
        :tickets: 995

        Added a more aggressive check for "uncompiled mappers", helps particularly with declarative layer

    .. change::
        :tags: orm
        :tickets:

        The methodology behind "primaryjoin"/"secondaryjoin" has been refactored. Behavior should be slightly more intelligent, primarily in terms of error messages which have been pared down to be more readable. In a slight number of scenarios it can better resolve the correct foreign key than before.

    .. change::
        :tags: orm
        :tickets:

        Added comparable_property(), adds query Comparator behavior to regular, unmanaged Python properties

    .. change::
        :tags: orm
        :tickets:

        the functionality of query.with_polymorphic() has been added to mapper() as a configuration option. It's set via several forms::

            with_polymorphic='*'
            with_polymorphic=[mappers]
            with_polymorphic=('*', selectable)
            with_polymorphic=([mappers], selectable)

        This controls the default polymorphic loading strategy for inherited mappers. When a selectable is not given, outer joins are created for all joined-table inheriting mappers requested. Note that the auto-create of joins is not compatible with concrete table inheritance. The existing select_table flag on mapper() is now deprecated and is synonymous with with_polymorphic('*', select_table). Note that the underlying "guts" of select_table have been completely removed and replaced with the newer, more flexible approach. The new approach also automatically allows eager loads to work for subclasses, if they are present, for example::

            sess.query(Company).options(
                eagerload_all([Company.employees.of_type(Engineer), 'machines']))

        to load Company objects, their employees, and the 'machines' collection of employees who happen to be Engineers. A "with_polymorphic" Query option should be introduced soon as well which would allow per-Query control of with_polymorphic() on relations.

    .. change::
        :tags: orm
        :tickets:

        added two "experimental" features to Query, "experimental" in that their specific name/behavior is not carved in stone just yet: _values() and _from_self(). We'd like feedback on these.

        - _values(\*columns) is given a list of column expressions, and returns a new Query that only returns those columns. When evaluated, the return value is a list of tuples just like when using add_column() or add_entity(), the only difference is that "entity zero", i.e. the mapped class, is not included in the results. This means it finally makes sense to use group_by() and having() on Query, which have been sitting around uselessly until now. A future change to this method may include that its ability to join, filter and allow other options not related to a "resultset" are removed, so the feedback we're looking for is how people want to use _values()...i.e. at the very end, or do people prefer to continue generating after it's called.

        - _from_self() compiles the SELECT statement for the Query (minus any eager loaders), and returns a new Query that selects from that SELECT. So basically you can query from a Query without needing to extract the SELECT statement manually. This gives meaning to operations like query[3:5]._from_self().filter(some criterion). There's not much controversial here except that you can quickly create highly nested queries that are less efficient, and we want feedback on the naming choice.
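    For readers wiring up the with_polymorphic mapper flag described above, a minimal 0.4-era sketch; the table layout and class names are illustrative, not from the changelog itself::

        from sqlalchemy import Column, ForeignKey, Integer, MetaData, String, Table
        from sqlalchemy.orm import mapper

        metadata = MetaData()
        employees = Table('employees', metadata,
            Column('id', Integer, primary_key=True),
            Column('type', String(30)),
            Column('name', String(50)))
        engineers = Table('engineers', metadata,
            Column('id', Integer, ForeignKey('employees.id'), primary_key=True),
            Column('engineer_info', String(50)))

        class Employee(object): pass
        class Engineer(Employee): pass

        # with_polymorphic='*' asks for all joined-table subclasses to be
        # loaded via OUTER JOIN by default, per the entry above
        mapper(Employee, employees, polymorphic_on=employees.c.type,
               polymorphic_identity='employee', with_polymorphic='*')
        mapper(Engineer, engineers, inherits=Employee,
               polymorphic_identity='engineer')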
    .. change::
        :tags: orm
        :tickets:

        query.order_by() and query.group_by() will accept multiple arguments using \*args (like select() already does).

    .. change::
        :tags: orm
        :tickets:

        Added some convenience descriptors to Query: query.statement returns the full SELECT construct, query.whereclause returns just the WHERE part of the SELECT construct.

    .. change::
        :tags: orm
        :tickets:

        Fixed/covered case when using a False/0 value as a polymorphic discriminator.

    .. change::
        :tags: orm
        :tickets:

        Fixed bug which was preventing synonym() attributes from being used with inheritance

    .. change::
        :tags: orm
        :tickets: 996

        Fixed SQL function truncation of trailing underscores

    .. change::
        :tags: orm
        :tickets:

        When attributes are expired on a pending instance, an error will not be raised when the "refresh" action is triggered and no result is found.

    .. change::
        :tags: orm
        :tickets:

        Session.execute can now find binds from metadata

    .. change::
        :tags: orm
        :tickets:

        Adjusted the definition of "self-referential" to be any two mappers with a common parent (this affects whether or not aliased=True is required when joining with Query).

    .. change::
        :tags: orm
        :tickets:

        Made some fixes to the "from_joinpoint" argument to query.join() so that if the previous join was aliased and this one isn't, the join still happens successfully.

    .. change::
        :tags: orm
        :tickets: 895

        Assorted "cascade deletes" fixes:

        - Fixed "cascade delete" operation of dynamic relations, which had only been implemented for foreign-key nulling behavior in 0.4.2 and not actual cascading deletes
        - Delete cascade without delete-orphan cascade on a many-to-one will not delete orphans which were disconnected from the parent before session.delete() is called on the parent (one-to-many already had this).
        - Delete cascade with delete-orphan will delete orphans whether or not it remains attached to its also-deleted parent.
        - delete-orphan cascade is properly detected on relations that are present on superclasses when using inheritance.

    .. change::
        :tags: orm
        :tickets:

        Fixed order_by calculation in Query to properly alias mapper-config'ed order_by when using select_from()

    .. change::
        :tags: orm
        :tickets:

        Refactored the diffing logic that kicks in when replacing one collection with another into collections.bulk_replace, useful to anyone building multi-level collections.

    .. change::
        :tags: orm
        :tickets:

        Cascade traversal algorithm converted from recursive to iterative to support deep object graphs.

    .. change::
        :tags: sql
        :tickets: 999

        schema-qualified tables now will place the schemaname ahead of the tablename in all column expressions as well as when generating column labels. This prevents cross-schema name collisions in all cases

    .. change::
        :tags: sql
        :tickets:

        can now allow selects which correlate all FROM clauses and have no FROM themselves. These are typically used in a scalar context, i.e. SELECT x, (SELECT x WHERE y) FROM table. Requires explicit correlate() call.
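    A sketch of the no-FROM correlated select described above, in Core terms; the table and column names are illustrative::

        from sqlalchemy import Column, Integer, MetaData, Table, select

        metadata = MetaData()
        t = Table('t', metadata,
            Column('x', Integer),
            Column('y', Integer))

        # the inner select has no FROM of its own; the explicit correlate()
        # call ties it to the enclosing statement's table
        inner = select([t.c.x]).where(t.c.y == 5).correlate(t).as_scalar()
        stmt = select([t.c.x, inner])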
    .. change::
        :tags: sql
        :tickets:

        'name' is no longer a required constructor argument for Column(). It (and .key) may now be deferred until the column is added to a Table.

    .. change::
        :tags: sql
        :tickets: 791, 993

        like(), ilike(), contains(), startswith(), endswith() take an optional keyword argument "escape=", which is set as the escape character using the syntax "x LIKE y ESCAPE '<somestring>'".

    .. change::
        :tags: sql
        :tickets:

        random() is now a generic sql function and will compile to the database's random implementation, if any.

    .. change::
        :tags: sql
        :tickets:

        update().values() and insert().values() take keyword arguments.

    .. change::
        :tags: sql
        :tickets:

        Fixed an issue in select() regarding its generation of FROM clauses, in rare circumstances two clauses could be produced when one was intended to cancel out the other. Some ORM queries with lots of eager loads might have seen this symptom.

    .. change::
        :tags: sql
        :tickets:

        The case() function now also takes a dictionary as its whens parameter. It also interprets the "THEN" expressions as values by default, meaning case([(x==y, "foo")]) will interpret "foo" as a bound value, not a SQL expression. use text(expr) for literal SQL expressions in this case. For the criterion itself, these may be literal strings only if the "value" keyword is present, otherwise SA will force explicit usage of either text() or literal().

    .. change::
        :tags: oracle
        :tickets:

        The "owner" keyword on Table is now deprecated, and is exactly synonymous with the "schema" keyword. Tables can now be reflected with alternate "owner" attributes, explicitly stated on the Table object or not using "schema".

    .. change::
        :tags: oracle
        :tickets:

        All of the "magic" searching for synonyms, DBLINKs etc. during table reflection are disabled by default unless you specify "oracle_resolve_synonyms=True" on the Table object. Resolving synonyms necessarily leads to some messy guessing which we'd rather leave off by default. When the flag is set, tables and related tables will be resolved against synonyms in all cases, meaning if a synonym exists for a particular table, reflection will use it when reflecting related tables. This is stickier behavior than before which is why it's off by default.

    .. change::
        :tags: declarative, extension
        :tickets:

        The "synonym" function is now directly usable with "declarative". Pass in the decorated property using the "descriptor" keyword argument, e.g.::

            somekey = synonym('_somekey', descriptor=property(g, s))

    .. change::
        :tags: declarative, extension
        :tickets:

        The "deferred" function is usable with "declarative". Simplest usage is to declare deferred and Column together, e.g.::

            data = deferred(Column(Text))

    .. change::
        :tags: declarative, extension
        :tickets:

        Declarative also gained @synonym_for(...) and @comparable_using(...), front-ends for synonym and comparable_property.

    .. change::
        :tags: declarative, extension
        :tickets: 995

        Improvements to mapper compilation when using declarative; already-compiled mappers will still trigger compiles of other uncompiled mappers when used

    .. change::
        :tags: declarative, extension
        :tickets:

        Declarative will complete setup for Columns lacking names, allows a more DRY syntax::

            class Foo(Base):
                __tablename__ = 'foos'
                id = Column(Integer, primary_key=True)

    .. change::
        :tags: declarative, extension
        :tickets:

        inheritance in declarative can be disabled when sending "inherits=None" to __mapper_args__.
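    Pulling the declarative entries above together, a small sketch; the class and attribute names are illustrative::

        from sqlalchemy import Column, Integer, String, Text
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import deferred, synonym

        Base = declarative_base()

        class Document(Base):
            __tablename__ = 'documents'

            id = Column(Integer, primary_key=True)
            _title = Column('title', String(100))

            # large column loaded only on first access, per the deferred entry
            body = deferred(Column(Text))

            def _get_title(self):
                return self._title

            def _set_title(self, value):
                self._title = value.strip()

            # decorated property usable in queries, per the synonym entry
            title = synonym('_title', descriptor=property(_get_title, _set_title))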
    .. change::
        :tags: declarative, extension
        :tickets:

        declarative_base() takes optional kwarg "mapper", which is any callable/class/method that produces a mapper, such as declarative_base(mapper=scopedsession.mapper). This property can also be set on individual declarative classes using the "__mapper_cls__" property.

    .. change::
        :tags: postgres
        :tickets: 1001

        Got PG server side cursors back into shape, added fixed unit tests as part of the default test suite. Added better uniqueness to the cursor ID

    .. change::
        :tags: mssql
        :tickets: 979

        Reflected tables will now automatically load other tables which are referenced by foreign keys in the auto-loaded table.

    .. change::
        :tags: mssql
        :tickets: 916

        Added executemany check to skip identity fetch.

    .. change::
        :tags: mssql
        :tickets: 884

        Added stubs for small date type.

    .. change::
        :tags: mssql
        :tickets:

        Added a new 'driver' keyword parameter for the pyodbc dialect. Will substitute into the ODBC connection string if given, defaults to 'SQL Server'.

    .. change::
        :tags: mssql
        :tickets:

        Added a new 'max_identifier_length' keyword parameter for the pyodbc dialect.

    .. change::
        :tags: mssql
        :tickets:

        Improvements to pyodbc + Unix. If you couldn't get that combination to work before, please try again.

    .. change::
        :tags: mysql
        :tickets:

        The connection.info keys the dialect uses to cache server settings have changed and are now namespaced.

.. changelog::
    :version: 0.4.4
    :released: Wed Mar 12 2008

    .. change::
        :tags: sql
        :tickets: 975

        Can again create aliases of selects against textual FROM clauses.

    .. change::
        :tags: sql
        :tickets:

        The value of a bindparam() can be a callable, in which case it's evaluated at statement execution time to get the value.

    .. change::
        :tags: sql
        :tickets: 978

        Added exception wrapping/reconnect support to result set fetching. Reconnect works for those databases that raise a catchable data error during results (i.e. doesn't work on MySQL)

    .. change::
        :tags: sql
        :tickets: 936

        Implemented two-phase API for "threadlocal" engine, via engine.begin_twophase(), engine.prepare()

    .. change::
        :tags: sql
        :tickets: 986

        Fixed bug which was preventing UNIONS from being cloneable.

    .. change::
        :tags: sql
        :tickets:

        Added "bind" keyword argument to insert(), update(), delete() and DDL(). The .bind property is now assignable on those statements as well as on select().

    .. change::
        :tags: sql
        :tickets:

        Insert statements can now be compiled with extra "prefix" words between INSERT and INTO, for vendor extensions like MySQL's INSERT IGNORE INTO table.
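    A sketch of the callable bindparam() value described above; the callable is passed as the value itself, per the entry, and the table/parameter names are illustrative::

        import datetime
        from sqlalchemy import (Column, DateTime, Integer, MetaData, Table,
                                bindparam, select)

        metadata = MetaData()
        events = Table('events', metadata,
            Column('id', Integer, primary_key=True),
            Column('created', DateTime))

        # the callable is invoked at statement execution time, so each
        # execution sees a fresh "now" timestamp
        stmt = select([events]).where(
            events.c.created > bindparam('since', datetime.datetime.now))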
    .. change::
        :tags: orm
        :tickets:

        any(), has(), contains(), ~contains(), attribute level == and != now work properly with self-referential relations - the clause inside the EXISTS is aliased on the "remote" side to distinguish it from the parent table. This applies to single table self-referential as well as inheritance-based self-referential.

    .. change::
        :tags: orm
        :tickets: 985

        Repaired behavior of == and != operators at the relation() level when compared against NULL for one-to-one relations

    .. change::
        :tags: orm
        :tickets:

        Fixed bug whereby session.expire() attributes were not loading on a polymorphically-mapped instance mapped by a select_table mapper.

    .. change::
        :tags: orm
        :tickets:

        Added query.with_polymorphic() - specifies a list of classes which descend from the base class, which will be added to the FROM clause of the query. Allows subclasses to be used within filter() criterion as well as eagerly loads the attributes of those subclasses.

    .. change::
        :tags: orm
        :tickets:

        Your cries have been heard: removing a pending item from an attribute or collection with delete-orphan expunges the item from the session; no FlushError is raised. Note that if you session.save()'ed the pending item explicitly, the attribute/collection removal still knocks it out.

    .. change::
        :tags: orm
        :tickets:

        session.refresh() and session.expire() raise an error when called on instances which are not persistent within the session

    .. change::
        :tags: orm
        :tickets:

        Fixed potential generative bug when the same Query was used to generate multiple Query objects using join().

    .. change::
        :tags: orm
        :tickets:

        Fixed bug which was introduced in 0.4.3, whereby loading an already-persistent instance mapped with joined table inheritance would trigger a useless "secondary" load from its joined table, when using the default "select" polymorphic_fetch. This was due to attributes being marked as expired during its first load and not getting unmarked from the previous "secondary" load. Attributes are now unexpired based on presence in __dict__ after any load or commit operation succeeds.

    .. change::
        :tags: orm
        :tickets:

        Deprecated Query methods apply_sum(), apply_max(), apply_min(), apply_avg(). Better methodologies are coming....

    .. change::
        :tags: orm
        :tickets:

        relation() can accept a callable for its first argument, which returns the class to be related. This is in place to assist declarative packages to define relations without classes yet being in place.

    .. change::
        :tags: orm
        :tickets:

        Added a new "higher level" operator called "of_type()": used in join() as well as with any() and has(), qualifies the subclass which will be used in filter criterion, e.g.::

            query.filter(Company.employees.of_type(Engineer).any(Engineer.name == 'foo'))

            # or

            query.join(Company.employees.of_type(Engineer)).filter(Engineer.name == 'foo')

    .. change::
        :tags: orm
        :tickets:

        Preventive code against a potential lost-reference bug in flush().

    .. change::
        :tags: orm
        :tickets:

        Expressions used in filter(), filter_by() and others, when they make usage of a clause generated from a relation using the identity of a child object (e.g., filter(Parent.child==<some child>)), evaluate the actual primary key value of <some child> at execution time so that the autoflush step of the Query can complete, thereby populating the PK value of <some child> in the case that <some child> was pending.

    .. change::
        :tags: orm
        :tickets:

        setting the relation()-level order by to a column in the many-to-many "secondary" table will now work with eager loading, previously the "order by" wasn't aliased against the secondary table's alias.
    .. change::
        :tags: orm
        :tickets:

        Synonyms riding on top of existing descriptors are now full proxies to those descriptors.

    .. change::
        :tags: dialects
        :tickets:

        Invalid SQLite connection URLs now raise an error.

    .. change::
        :tags: dialects
        :tickets: 981

        postgres TIMESTAMP renders correctly

    .. change::
        :tags: dialects
        :tickets:

        postgres PGArray is a "mutable" type by default; when used with the ORM, mutable-style equality/copy-on-write techniques are used to test for changes.

    .. change::
        :tags: extensions
        :tickets:

        a new super-small "declarative" extension has been added, which allows Table and mapper() configuration to take place inline underneath a class declaration. This extension differs from ActiveMapper and Elixir in that it does not redefine any SQLAlchemy semantics at all; literal Column, Table and relation() constructs are used to define the class behavior and table definition.

.. changelog::
    :version: 0.4.3
    :released: Thu Feb 14 2008

    .. change::
        :tags: sql
        :tickets:

        Added "schema.DDL", an executable free-form DDL statement. DDLs can be executed in isolation or attached to Table or MetaData instances and executed automatically when those objects are created and/or dropped.

    .. change::
        :tags: sql
        :tickets:

        Table columns and constraints can be overridden on an existing table (such as a table that was already reflected) using the 'useexisting=True' flag, which now takes into account the arguments passed along with it.

    .. change::
        :tags: sql
        :tickets:

        Added a callable-based DDL events interface, adds hooks before and after Tables and MetaData create and drop.

    .. change::
        :tags: sql
        :tickets:

        Added generative where() method to delete() and update() constructs which return a new object with criterion joined to existing criterion via AND, just like select().where().

    .. change::
        :tags: sql
        :tickets: 727

        Added "ilike()" operator to column operations. Compiles to ILIKE on postgres, lower(x) LIKE lower(y) on all others.

    .. change::
        :tags: sql
        :tickets: 943

        Added "now()" as a generic function; on SQLite, Oracle and MSSQL compiles as "CURRENT_TIMESTAMP"; "now()" on all others.

    .. change::
        :tags: sql
        :tickets: 962

        The startswith(), endswith(), and contains() operators now concatenate the wildcard operator with the given operand in SQL, i.e. "'%' || <operand>" in all cases, accept text('something') operands properly

    .. change::
        :tags: sql
        :tickets: 962

        cast() accepts text('something') and other non-literal operands properly

    .. change::
        :tags: sql
        :tickets:

        fixed bug in result proxy where anonymously generated column labels would not be accessible using their straight string name

    .. change::
        :tags: sql
        :tickets:

        Deferrable constraints can now be defined.

    .. change::
        :tags: sql
        :tickets: 915

        Added "autocommit=True" keyword argument to select() and text(), as well as generative autocommit() method on select(); for statements which modify the database through some user-defined means other than the usual INSERT/UPDATE/DELETE etc. This flag will enable "autocommit" behavior during execution if no transaction is in progress.

    .. change::
        :tags: sql
        :tickets:

        The '.c.' attribute on a selectable now gets an entry for every column expression in its columns clause. Previously, "unnamed" columns like functions and CASE statements weren't getting put there. Now they will, using their full string representation if no 'name' is available.

    .. change::
        :tags: sql
        :tickets:

        a CompositeSelect, i.e. any union(), union_all(), intersect(), etc. now asserts that each selectable contains the same number of columns. This conforms to the corresponding SQL requirement.
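    A sketch of the free-form schema.DDL entry above, using the 0.4-era execute_at() attachment hook; the index name and statement are illustrative::

        from sqlalchemy import Column, Integer, MetaData, String, Table
        from sqlalchemy.schema import DDL

        metadata = MetaData()
        users = Table('users', metadata,
            Column('id', Integer, primary_key=True),
            Column('name', String(50)))

        # emitted automatically right after CREATE TABLE users
        DDL("CREATE INDEX ad_hoc_idx ON users (name)").execute_at(
            'after-create', users)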
    .. change::
        :tags: sql
        :tickets:

        The anonymous 'label' generated for otherwise unlabeled functions and expressions now propagates outwards at compile time for expressions like select([select([func.foo()])]).

    .. change::
        :tags: sql
        :tickets:

        Building on the above ideas, CompositeSelects now build up their ".c." collection based on the names present in the first selectable only; corresponding_column() now works fully for all embedded selectables.

    .. change::
        :tags: sql
        :tickets:

        Oracle and others properly encode SQL used for defaults like sequences, etc., even if no unicode idents are used since identifier preparer may return a cached unicode identifier.

    .. change::
        :tags: sql
        :tickets:

        Column and clause comparisons to datetime objects on the left hand side of the expression now work (d < table.c.col). (datetimes on the RHS have always worked, the LHS exception is a quirk of the datetime implementation.)

    .. change::
        :tags: orm
        :tickets:

        Every Session.begin() must now be accompanied by a corresponding commit() or rollback() unless the session is closed with Session.close(). This also includes the begin() which is implicit to a session created with transactional=True. The biggest change introduced here is that when a Session created with transactional=True raises an exception during flush(), you must call Session.rollback() or Session.close() in order for that Session to continue after an exception.

    .. change::
        :tags: orm
        :tickets: 961

        Fixed merge() collection-doubling bug when merging transient entities with backref'ed collections.

    .. change::
        :tags: orm
        :tickets:

        merge(dont_load=True) does not accept transient entities, this is in continuation with the fact that merge(dont_load=True) does not accept any "dirty" objects either.

    .. change::
        :tags: orm
        :tickets:

        Added standalone "query" class attribute generated by a scoped_session. This provides MyClass.query without using Session.mapper. Use via::

            MyClass.query = Session.query_property()

    .. change::
        :tags: orm
        :tickets:

        The proper error message is raised when trying to access expired instance attributes with no session present

    .. change::
        :tags: orm
        :tickets:

        dynamic_loader() / lazy="dynamic" now accepts and uses the order_by parameter in the same way in which it works with relation().

    .. change::
        :tags: orm
        :tickets:

        Added expire_all() method to Session. Calls expire() for all persistent instances. This is handy in conjunction with...

    .. change::
        :tags: orm
        :tickets:

        Instances which have been partially or fully expired will have their expired attributes populated during a regular Query operation which affects those objects, preventing a needless second SQL statement for each instance.

    .. change::
        :tags: orm
        :tickets: 938

        Dynamic relations, when referenced, create a strong reference to the parent object so that the query still has a parent to call against even if the parent is only created (and otherwise dereferenced) within the scope of a single expression.

    .. change::
        :tags: orm
        :tickets:

        Added a mapper() flag "eager_defaults". When set to True, defaults that are generated during an INSERT or UPDATE operation are post-fetched immediately, instead of being deferred until later. This mimics the old 0.3 behavior.

    .. change::
        :tags: orm
        :tickets:

        query.join() can now accept class-mapped attributes as arguments. These can be used in place of, or in any combination with, strings. In particular this allows construction of joins to subclasses on a polymorphic relation, i.e.::

            query(Company).join(['employees', Engineer.name])
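    A sketch of the scoped-session "query" attribute described above; the session configuration and class are illustrative, and a mapper is assumed to be configured for the class::

        from sqlalchemy.orm import scoped_session, sessionmaker

        Session = scoped_session(sessionmaker())

        class MyClass(object):
            pass

        # ... mapper(MyClass, my_table) configured elsewhere ...

        MyClass.query = Session.query_property()
        # MyClass.query.filter_by(...) now works without an explicit session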
    .. change::
        :tags: orm
        :tickets:

        query.join() can also accept tuples of attribute name/some selectable as arguments. This allows construction of joins *from* subclasses of a polymorphic relation, i.e.::

            query(Company).join([('employees', people.join(engineer)), Engineer.name])

    .. change::
        :tags: orm
        :tickets:

        General improvements to the behavior of join() in conjunction with polymorphic mappers, i.e. joining from/to polymorphic mappers and properly applying aliases.

    .. change::
        :tags: orm
        :tickets: 933

        Fixed/improved behavior when a mapper determines the natural "primary key" of a mapped join, it will more effectively reduce columns which are equivalent via foreign key relation. This affects how many arguments need to be sent to query.get(), among other things.

    .. change::
        :tags: orm
        :tickets: 946

        The lazy loader can now handle a join condition where the "bound" column (i.e. the one that gets the parent id sent as a bind parameter) appears more than once in the join condition. Specifically this allows the common task of a relation() which contains a parent-correlated subquery, such as "select only the most recent child item".

    .. change::
        :tags: orm
        :tickets:

        Fixed bug in polymorphic inheritance where an incorrect exception is raised when base polymorphic_on column does not correspond to any columns within the local selectable of an inheriting mapper more than one level deep

    .. change::
        :tags: orm
        :tickets:

        Fixed bug in polymorphic inheritance which made it difficult to set a working "order_by" on a polymorphic mapper.

    .. change::
        :tags: orm
        :tickets:

        Fixed a rather expensive call in Query that was slowing down polymorphic queries.

    .. change::
        :tags: orm
        :tickets: 954

        "Passive defaults" and other "inline" defaults can now be loaded during a flush() call if needed; in particular, this allows constructing relations() where a foreign key column references a server-side-generated, non-primary-key column.

    .. change::
        :tags: orm
        :tickets:

        Additional Session transaction fixes/changes:

        - Fixed bug with session transaction management: parent transactions weren't started on the connection when adding a connection to a nested transaction.
        - session.transaction now always refers to the innermost active transaction, even when commit/rollback are called directly on the session transaction object.
        - Two-phase transactions can now be prepared.
        - When preparing a two-phase transaction fails on one connection, all the connections are rolled back.
        - session.close() didn't close all transactions when nested transactions were used.
        - rollback() previously erroneously set the current transaction directly to the parent of the transaction that could be rolled back to. Now it rolls back the next transaction up that can handle it, but sets the current transaction to its parent and inactivates the transactions in between. Inactive transactions can only be rolled back or closed, any other call results in an error.
        - autoflush for commit() wasn't flushing for simple subtransactions.
        - unitofwork flush didn't close the failed transaction when the session was not in a transaction and committing the transaction failed.

    .. change::
        :tags: orm
        :tickets: 964, 940

        Miscellaneous tickets:

    .. change::
        :tags: general
        :tickets:

        Fixed a variety of hidden and some not-so-hidden compatibility issues for Python 2.3, thanks to new support for running the full test suite on 2.3.
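    A minimal sketch of the nested-transaction flow those Session fixes concern; the engine URL and control flow are illustrative::

        from sqlalchemy import create_engine
        from sqlalchemy.orm import sessionmaker

        engine = create_engine('sqlite://')
        Session = sessionmaker(bind=engine, transactional=True)
        sess = Session()

        sess.begin_nested()        # SAVEPOINT
        try:
            # ... flush some work ...
            sess.commit()          # releases the SAVEPOINT
        except:
            sess.rollback()        # rolls back to the SAVEPOINT
            raise
        sess.commit()              # commits the enclosing transaction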
    .. change::
        :tags: general
        :tickets:

        Warnings are now issued as type exceptions.SAWarning.

    .. change::
        :tags: dialects
        :tickets:

        Better support for schemas in SQLite (linked in by ATTACH DATABASE ... AS name). In some cases in the past, schema names were omitted from generated SQL for SQLite. This is no longer the case.

    .. change::
        :tags: dialects
        :tickets:

        table_names on SQLite now picks up temporary tables as well.

    .. change::
        :tags: dialects
        :tickets:

        Auto-detect an unspecified MySQL ANSI_QUOTES mode during reflection operations, support for changing the mode midstream. Manual mode setting is still required if no reflection is used.

    .. change::
        :tags: dialects
        :tickets:

        Fixed reflection of TIME columns on SQLite.

    .. change::
        :tags: dialects
        :tickets: 580

        Finally added PGMacAddr type to postgres

    .. change::
        :tags: dialects
        :tickets:

        Reflect the sequence associated to a PK field (typically with a BEFORE INSERT trigger) under Firebird

    .. change::
        :tags: dialects
        :tickets: 941

        Oracle assembles the correct columns in the result set column mapping when generating a LIMIT/OFFSET subquery, allows columns to map properly to result sets even if long-name truncation kicks in

    .. change::
        :tags: dialects
        :tickets:

        MSSQL now includes EXEC in the _is_select regexp, which should allow row-returning stored procedures to be used.

    .. change::
        :tags: dialects
        :tickets:

        MSSQL now includes an experimental implementation of LIMIT/OFFSET using the ANSI SQL row_number() function, so it requires MSSQL-2005 or higher. To enable the feature, add "has_window_funcs" to the keyword arguments for connect, or add "?has_window_funcs=1" to your dburi query arguments.

    .. change::
        :tags: ext
        :tickets:

        Changed ext.activemapper to use a non-transactional session for the objectstore.

    .. change::
        :tags: ext
        :tickets:

        Fixed output order of "['a'] + obj.proxied" binary operation on association-proxied lists.

.. changelog::
    :version: 0.4.2p3
    :released: Wed Jan 09 2008

    .. change::
        :tags: general
        :tickets:

        sub version numbering scheme changed to suit setuptools version number rules; easy_install -u should now get this version over 0.4.2.

    .. change::
        :tags: sql
        :tickets: 912

        Text type is properly exported now and does not raise a warning on DDL create; String types with no length only raise warnings during CREATE TABLE

    .. change::
        :tags: sql
        :tickets:

        new UnicodeText type is added, to specify an encoded, unlengthed Text type

    .. change::
        :tags: sql
        :tickets:

        fixed bug in union() so that select() statements which don't derive from FromClause objects can be unioned

    .. change::
        :tags: orm
        :tickets:

        fixed bug with session.dirty when using "mutable scalars" (such as PickleTypes)

    .. change::
        :tags: orm
        :tickets:

        added a more descriptive error message when flushing on a relation() that has non-locally-mapped columns in its primary or secondary join condition

    .. change::
        :tags: dialects
        :tickets:

        Fixed reflection of mysql empty string column defaults.

    .. change::
        :tags: sql
        :tickets: 912

        changed name of TEXT to Text since it's a "generic" type; TEXT name is deprecated until 0.5. The "upgrading" behavior of String to Text when no length is present is also deprecated until 0.5; will issue a warning when used for CREATE TABLE statements (String with no length for SQL expression purposes is still fine)

    .. change::
        :tags: sql
        :tickets: 924

        generative select.order_by(None) / group_by(None) was not managing to reset order by/group by criterion, fixed

    .. change::
        :tags: orm
        :tickets:

        suppressing *all* errors in InstanceState.__cleanup() now.
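    A sketch of the order_by(None) reset described above; the table is illustrative::

        from sqlalchemy import Column, Integer, MetaData, Table, select

        metadata = MetaData()
        t = Table('t', metadata, Column('x', Integer))

        stmt = select([t]).order_by(t.c.x)
        stmt = stmt.order_by(None)   # the ORDER BY criterion is removed again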
    .. change::
        :tags: orm
        :tickets: 922

        fixed an attribute history bug whereby assigning a new collection to a collection-based attribute which already had pending changes would generate incorrect history

    .. change::
        :tags: orm
        :tickets: 925

        fixed delete-orphan cascade bug whereby setting the same object twice to a scalar attribute could log it as an orphan

    .. change::
        :tags: orm
        :tickets:

        Fixed cascades on a += assignment to a list-based relation.

    .. change::
        :tags: orm
        :tickets: 919

        synonyms can now be created against props that don't exist yet, which are later added via add_property(). This commonly includes backrefs. (i.e. you can make synonyms for backrefs without worrying about the order of operations)

    .. change::
        :tags: orm
        :tickets:

        fixed bug which could occur with polymorphic "union" mapper which falls back to "deferred" loading of inheriting tables

    .. change::
        :tags: orm
        :tickets:

        the "columns" collection on a mapper/mapped class (i.e. 'c') is against the mapped table, not the select_table in the case of polymorphic "union" loading (this shouldn't be noticeable).

    .. change::
        :tags: ext
        :tickets:

        '+', '*', '+=' and '\*=' support for association proxied lists.

    .. change::
        :tags: dialects
        :tickets: 923

        mssql - narrowed down the test for "date"/"datetime" in MSDate/MSDateTime subclasses so that incoming "datetime" objects don't get mis-interpreted as "date" objects and vice versa.

    .. change::
        :tags: orm
        :tickets:

        fixed fairly critical bug whereby the same instance could be listed more than once in the unitofwork.new collection; most typically reproduced when using a combination of inheriting mappers and ScopedSession.mapper, as the multiple __init__ calls per instance could save() the object with distinct _state objects

    .. change::
        :tags: orm
        :tickets:

        added very rudimentary yielding iterator behavior to Query. Call query.yield_per() and evaluate the Query in an iterative context; every collection of N rows will be packaged up and yielded. Use this method with extreme caution since it does not attempt to reconcile eagerly loaded collections across result batch boundaries, nor will it behave nicely if the same instance occurs in more than one batch. This means that an eagerly loaded collection will get cleared out if it's referenced in more than one batch, and in all cases attributes will be overwritten on instances that occur in more than one batch.

    .. change::
        :tags: orm
        :tickets: 920

        Fixed in-place set mutation operators for set collections and association proxied sets.

    .. change::
        :tags: dialects
        :tickets: 913

        Fixed the missing call to subtype result processor for the PGArray type.

.. changelog::
    :version: 0.4.2
    :released: Wed Jan 02 2008

    .. change::
        :tags: sql
        :tickets: 615

        generic functions! we introduce a database of known SQL functions, such as current_timestamp, coalesce, and create explicit function objects representing them. These objects have constrained argument lists, are type aware, and can compile in a dialect-specific fashion. So saying func.char_length("foo", "bar") raises an error (too many args), func.coalesce(datetime.date(2007, 10, 5), datetime.date(2005, 10, 15)) knows that its return type is a Date. We only have a few functions represented so far but will continue to add to the system

    .. change::
        :tags: sql
        :tickets:

        auto-reconnect support improved; a Connection can now automatically reconnect after its underlying connection is invalidated, without needing to connect() again from the engine. This allows an ORM session bound to a single Connection to not need a reconnect. Open transactions on the Connection must be rolled back after an invalidation of the underlying connection else an error is raised. Also fixed bug where disconnect detect was not being called for cursor(), rollback(), or commit().
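    A sketch of the yield_per() batching described above; a configured mapping is assumed, and Thing and process are illustrative names::

        # rows are fetched and instances constructed 100 at a time; note the
        # caveats above regarding eager loads across batch boundaries
        for thing in session.query(Thing).yield_per(100):
            process(thing)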
    .. change::
        :tags: sql
        :tickets:

        added new flag to String and create_engine(), assert_unicode=(True|False|'warn'\|None). Defaults to `False` or `None` on create_engine() and String, `'warn'` on the Unicode type. When `True`, results in all unicode conversion operations raising an exception when a non-unicode bytestring is passed as a bind parameter. 'warn' results in a warning. It is strongly advised that all unicode-aware applications make proper use of Python unicode objects (i.e. u'hello' and not 'hello') so that data round trips accurately.

    .. change::
        :tags: sql
        :tickets:

        generation of "unique" bind parameters has been simplified to use the same "unique identifier" mechanisms as everything else. This doesn't affect user code, except any code that might have been hardcoded against the generated names. Generated bind params now have the form "<paramname>_<num>", whereas before only the second bind of the same name would have this form.

    .. change::
        :tags: sql
        :tickets:

        select().as_scalar() will raise an exception if the select does not have exactly one expression in its columns clause.

    .. change::
        :tags: sql
        :tickets:

        bindparam() objects themselves can be used as keys for execute(), i.e. statement.execute({bind1:'foo', bind2:'bar'})

    .. change::
        :tags: sql
        :tickets:

        added new methods to TypeDecorator, process_bind_param() and process_result_value(), which automatically take advantage of the processing of the underlying type. Ideal for using with Unicode or Pickletype. TypeDecorator should now be the primary way to augment the behavior of any existing type including other TypeDecorator subclasses such as PickleType.

    .. change::
        :tags: sql
        :tickets:

        selectables (and others) will issue a warning when two columns in their exported columns collection conflict based on name.

    .. change::
        :tags: sql
        :tickets: 890

        tables with schemas can still be used in sqlite, firebird, schema name just gets dropped

    .. change::
        :tags: sql
        :tickets:

        changed the various "literal" generation functions to use an anonymous bind parameter. not much changes here except their labels now look like ":param_1", ":param_2" instead of ":literal"

    .. change::
        :tags: sql
        :tickets:

        column labels in the form "tablename.columnname", i.e. with a dot, are now supported.

    .. change::
        :tags: sql
        :tickets:

        from_obj keyword argument to select() can be a scalar or a list.

    .. change::
        :tags: orm
        :tickets: 871

        a major behavioral change to collection-based backrefs: they no longer trigger lazy loads! "reverse" adds and removes are queued up and are merged with the collection when it is actually read from and loaded; but do not trigger a load beforehand. For users who have noticed this behavior, this should be much more convenient than using dynamic relations in some cases; for those who have not, you might notice your apps using a lot fewer queries than before in some situations.

    .. change::
        :tags: orm
        :tickets:

        mutable primary key support is added. primary key columns can be changed freely, and the identity of the instance will change upon flush. In addition, update cascades of foreign key referents (primary key or not) along relations are supported, either in tandem with the database's ON UPDATE CASCADE (required for DB's like Postgres) or issued directly by the ORM in the form of UPDATE statements, by setting the flag "passive_cascades=False".
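    A sketch of the TypeDecorator hooks described above; the prefixing behavior is purely illustrative::

        from sqlalchemy import types

        class Prefixed(types.TypeDecorator):
            """Adds/strips a fixed prefix around string values (illustrative)."""

            impl = types.Unicode

            def process_bind_param(self, value, dialect):
                # Python value -> database value
                if value is None:
                    return None
                return u"pfx:" + value

            def process_result_value(self, value, dialect):
                # database value -> Python value
                if value is None:
                    return None
                return value[4:]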
    .. change::
        :tags: orm
        :tickets: 490

        inheriting mappers now inherit the MapperExtensions of their parent mapper directly, so that all methods for a particular MapperExtension are called for subclasses as well. As always, any MapperExtension can return either EXT_CONTINUE to continue extension processing or EXT_STOP to stop processing. The order of mapper resolution is: . Note that if you instantiate the same extension class separately and then apply it individually for two mappers in the same inheritance chain, the extension will be applied twice to the inheriting class, and each method will be called twice. To apply a mapper extension explicitly to each inheriting class but have each method called only once per operation, use the same instance of the extension for both mappers.

    .. change::
        :tags: orm
        :tickets: 907

        MapperExtension.before_update() and after_update() are now called symmetrically; previously, an instance that had no modified column attributes (but had a relation() modification) could be called with before_update() but not after_update()

    .. change::
        :tags: orm
        :tickets:

        columns which are missing from a Query's select statement now get automatically deferred during load.

    .. change::
        :tags: orm
        :tickets: 908

        mapped classes which extend "object" and do not provide an __init__() method will now raise TypeError if non-empty \*args or \**kwargs are present at instance construction time (and are not consumed by any extensions such as the scoped_session mapper), consistent with the behavior of normal Python classes

    .. change::
        :tags: orm
        :tickets: 899

        fixed Query bug when filter_by() compares a relation against None

    .. change::
        :tags: orm
        :tickets:

        improved support for pickling of mapped entities. Per-instance lazy/deferred/expired callables are now serializable so that they serialize and deserialize with _state.

    .. change::
        :tags: orm
        :tickets: 801

        new synonym() behavior: an attribute will be placed on the mapped class, if one does not exist already, in all cases. if a property already exists on the class, the synonym will decorate the property with the appropriate comparison operators so that it can be used in column expressions just like any other mapped attribute (i.e. usable in filter(), etc.) the "proxy=True" flag is deprecated and no longer means anything. Additionally, the flag "map_column=True" will automatically generate a ColumnProperty corresponding to the name of the synonym, i.e.::

            'somename': synonym('_somename', map_column=True)

        will map the column named 'somename' to the attribute '_somename'. See the example in the mapper docs.

    .. change::
        :tags: orm
        :tickets:

        Query.select_from() now replaces all existing FROM criterion with the given argument; the previous behavior of constructing a list of FROM clauses was generally not useful as it required filter() calls to create join criterion, and new tables introduced within filter() already add themselves to the FROM clause. The new behavior allows not just joins from the main table, but select statements as well. Filter criterion, order bys, eager load clauses will be "aliased" against the given statement.
    .. change::
        :tags: orm
        :tickets:

        this month's refactoring of attribute instrumentation changes the "copy-on-load" behavior we've had since midway through 0.3 with "copy-on-modify" in most cases. This takes a sizable chunk of latency out of load operations and overall does less work as only attributes which are actually modified get their "committed state" copied. Only "mutable scalar" attributes (i.e. a pickled object or other mutable item), the reason for the copy-on-load change in the first place, retain the old behavior.

    .. change::
        :tags: orm
        :tickets:

        a slight behavioral change to attributes is, del'ing an attribute does *not* cause the lazyloader of that attribute to fire off again; the "del" makes the effective value of the attribute "None". To re-trigger the "loader" for an attribute, use session.expire(instance, [attrname]).

    .. change::
        :tags: orm
        :tickets:

        query.filter(SomeClass.somechild == None), when comparing a many-to-one property to None, properly generates "id IS NULL" including that the NULL is on the right side.

    .. change::
        :tags: orm
        :tickets:

        query.order_by() takes into account aliased joins, i.e. query.join('orders', aliased=True).order_by(Order.id)

    .. change::
        :tags: orm
        :tickets:

        eagerload(), lazyload(), eagerload_all() take an optional second class-or-mapper argument, which will select the mapper to apply the option towards. This can select among other mappers which were added using add_entity().

    .. change::
        :tags: orm
        :tickets:

        eagerloading will work with mappers added via add_entity().

    .. change::
        :tags: orm
        :tickets:

        added "cascade delete" behavior to "dynamic" relations just like that of regular relations. if passive_deletes flag (also just added) is not set, a delete of the parent item will trigger a full load of the child items so that they can be deleted or updated accordingly.

    .. change::
        :tags: orm
        :tickets:

        also with dynamic, implemented correct count() behavior as well as other helper methods.

    .. change::
        :tags: orm
        :tickets:

        fix to cascades on polymorphic relations, such that cascades from an object to a polymorphic collection continue cascading along the set of attributes specific to each element in the collection.

    .. change::
        :tags: orm
        :tickets: 893

        query.get() and query.load() do not take existing filter or other criterion into account; these methods *always* look up the given id in the database or return the current instance from the identity map, disregarding any existing filter, join, group_by or other criterion which has been configured.

    .. change::
        :tags: orm
        :tickets: 883

        added support for version_id_col in conjunction with inheriting mappers. version_id_col is typically set on the base mapper in an inheritance relationship where it takes effect for all inheriting mappers.

    .. change::
        :tags: orm
        :tickets:

        relaxed rules on column_property() expressions having labels; any ColumnElement is accepted now, as the compiler auto-labels non-labeled ColumnElements now. a selectable, like a select() statement, still requires conversion to ColumnElement via as_scalar() or label().

    .. change::
        :tags: orm
        :tickets:

        fixed backref bug where you could not del instance.attr if attr was None

    .. change::
        :tags: orm
        :tickets:

        several ORM attributes have been removed or made private: mapper.get_attr_by_column(), mapper.set_attr_by_column(), mapper.pks_by_table, mapper.cascade_callable(), MapperProperty.cascade_callable(), mapper.canload(), mapper.save_obj(), mapper.delete_obj(), mapper._mapper_registry, attributes.AttributeManager
    .. change::
        :tags: orm
        :tickets:

        Assigning an incompatible collection type to a relation attribute now raises TypeError instead of sqlalchemy's ArgumentError.

    .. change::
        :tags: orm
        :tickets: 886

        Bulk assignment of a MappedCollection now raises an error if a key in the incoming dictionary does not match the key that the collection's keyfunc would use for that value.

    .. change::
        :tags: orm
        :tickets:

        Custom collections can now specify a @converter method to translate objects used in "bulk" assignment into a stream of values, as in::

            obj.col = [newval1, newval2]
            # or
            obj.dictcol = {'foo': newval1, 'bar': newval2}

        The MappedCollection uses this hook to ensure that incoming key/value pairs are sane from the collection's perspective.

    .. change::
        :tags: orm
        :tickets: 872

        fixed endless loop issue when using lazy="dynamic" on both sides of a bi-directional relationship

    .. change::
        :tags: orm
        :tickets: 904

        more fixes to the LIMIT/OFFSET aliasing applied with Query + eagerloads, in this case when mapped against a select statement

    .. change::
        :tags: orm
        :tickets:

        fix to self-referential eager loading such that if the same mapped instance appears in two or more distinct sets of columns in the same result set, its eagerly loaded collection will be populated regardless of whether or not all of the rows contain a set of "eager" columns for that collection. this would also show up as a KeyError when fetching results with join_depth turned on.

    .. change::
        :tags: orm
        :tickets:

        fixed bug where Query would not apply a subquery to the SQL when LIMIT was used in conjunction with an inheriting mapper where the eager loader was only in the parent mapper.

    .. change::
        :tags: orm
        :tickets:

        clarified the error message which occurs when you try to update() an instance with the same identity key as an instance already present in the session.

    .. change::
        :tags: orm
        :tickets:

        some clarifications and fixes to merge(instance, dont_load=True). fixed bug where lazy loaders were getting disabled on returned instances. Also, we currently do not support merging an instance which has uncommitted changes on it, in the case that dont_load=True is used....this will now raise an error. This is due to complexities in merging the "committed state" of the given instance to correctly correspond to the newly copied instance, as well as other modified state. Since the use case for dont_load=True is caching, the given instances shouldn't have any uncommitted changes on them anyway. We also copy the instances over without using any events now, so that the 'dirty' list on the new session remains unaffected.

    .. change::
        :tags: orm
        :tickets:

        fixed bug which could arise when using session.begin_nested() in conjunction with more than one level deep of enclosing session.begin() statements

    .. change::
        :tags: orm
        :tickets: 914

        fixed session.refresh() with instance that has custom entity_name

    .. change::
        :tags: dialects
        :tickets:

        sqlite SLDate type will not erroneously render "microseconds" portion of a datetime or time object.

    .. change::
        :tags: dialects
        :tickets: 902

        oracle

        - added disconnect detection support for Oracle
        - some cleanup to binary/raw types so that cx_oracle.LOB is detected on an ad-hoc basis

    .. change::
        :tags: dialects
        :tickets: 824, 839, 842, 901

        MSSQL

        - PyODBC no longer has a global "set nocount on".
        - Fix non-identity integer PKs on autoload
        - Better support for convert_unicode
        - Less strict date conversion for pyodbc/adodbapi
        - Schema-qualified tables / autoload
    .. change::
        :tags: firebird, backend
        :tickets: 410

        does properly reflect domains (partially fixing) and PassiveDefaults

    .. change::
        :tags: firebird, backend
        :tickets:

        reverted to use default poolclass (was set to SingletonThreadPool in 0.4.0 for test purposes)

    .. change::
        :tags: firebird, backend
        :tickets:

        map func.length() to 'char_length' (easily overridable with the UDF 'strlen' on old versions of Firebird)

.. changelog::
    :version: 0.4.1
    :released: Sun Nov 18 2007

    .. change::
        :tags: sql
        :tickets:

        the "shortname" keyword parameter on bindparam() has been deprecated.

    .. change::
        :tags: sql
        :tickets:

        Added contains operator (generates a "LIKE %<other>%" clause).

    .. change::
        :tags: sql
        :tickets:

        anonymous column expressions are automatically labeled. e.g. select([x * 5]) produces "SELECT x * 5 AS anon_1". This allows the labelname to be present in the cursor.description which can then be appropriately matched to result-column processing rules. (we can't reliably use positional tracking for result-column matches since text() expressions may represent multiple columns).

    .. change::
        :tags: sql
        :tickets:

        operator overloading is now controlled by TypeEngine objects - the one built-in operator overload so far is String types overloading '+' to be the string concatenation operator. User-defined types can also define their own operator overloading by overriding the adapt_operator(self, op) method.

    .. change::
        :tags: sql
        :tickets: 819

        untyped bind parameters on the right side of a binary expression will be assigned the type of the left side of the operation, to better enable the appropriate bind parameter processing to take effect

    .. change::
        :tags: sql
        :tickets: 833

        Removed regular expression step from most statement compilations. Also fixes

    .. change::
        :tags: sql
        :tickets:

        Fixed empty (zero column) sqlite inserts, allowing inserts on autoincrementing single column tables.

    .. change::
        :tags: sql
        :tickets:

        Fixed expression translation of text() clauses; this repairs various ORM scenarios where literal text is used for SQL expressions

    .. change::
        :tags: sql
        :tickets:

        Removed ClauseParameters object; compiled.params returns a regular dictionary now, as well as result.last_inserted_params() / last_updated_params().

    .. change::
        :tags: sql
        :tickets:

        Fixed INSERT statements w.r.t. primary key columns that have SQL-expression based default generators on them; SQL expression executes inline as normal but will not trigger a "postfetch" condition for the column, for those DB's who provide it via cursor.lastrowid

    .. change::
        :tags: sql
        :tickets: 844

        func. objects can be pickled/unpickled

    .. change::
        :tags: sql
        :tickets:

        rewrote and simplified the system used to "target" columns across selectable expressions. On the SQL side this is represented by the "corresponding_column()" method. This method is used heavily by the ORM to "adapt" elements of an expression to similar, aliased expressions, as well as to target result set columns originally bound to a table or selectable to an aliased, "corresponding" expression. The new rewrite features completely consistent and accurate behavior.

    .. change::
        :tags: sql
        :tickets: 573

        Added a field ("info") for storing arbitrary data on schema items

    .. change::
        :tags: sql
        :tickets:

        The "properties" collection on Connections has been renamed "info" to match schema's writable collections. Access is still available via the "properties" name until 0.5.

    .. change::
        :tags: sql
        :tickets:

        fixed the close() method on Transaction when using strategy='threadlocal'
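    A sketch of the new "info" field described above; the keys and values are illustrative, and the constructor-keyword spelling follows later releases::

        from sqlalchemy import Column, Integer, MetaData, Table

        metadata = MetaData()
        accounts = Table('accounts', metadata,
            Column('id', Integer, primary_key=True, info={'sensitive': False}),
            info={'owner_team': 'billing'})

        assert accounts.info['owner_team'] == 'billing'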
    .. change::
        :tags: sql
        :tickets: 853

        fix to compiled bind parameters to not mistakenly populate None

    .. change::
        :tags: sql
        :tickets:

        ._execute_clauseelement becomes a public method Connectable.execute_clauseelement

    .. change::
        :tags: orm
        :tickets: 843

        eager loading with LIMIT/OFFSET applied no longer adds the primary table joined to a limited subquery of itself; the eager loads now join directly to the subquery which also provides the primary table's columns to the result set. This eliminates a JOIN from all eager loads with LIMIT/OFFSET.

    .. change::
        :tags: orm
        :tickets: 802

        session.refresh() and session.expire() now support an additional argument "attribute_names", a list of individual attribute keynames to be refreshed or expired, allowing partial reloads of attributes on an already-loaded instance.

    .. change::
        :tags: orm
        :tickets: 767

        added op() operator to instrumented attributes; i.e. User.name.op('ilike')('%somename%')

    .. change::
        :tags: orm
        :tickets: 676

        Mapped classes may now define __eq__, __hash__, and __nonzero__ methods with arbitrary semantics. The orm now handles all mapped instances on an identity-only basis. (e.g. 'is' vs '==')

    .. change::
        :tags: orm
        :tickets:

        the "properties" accessor on Mapper is removed; it now throws an informative exception explaining the usage of mapper.get_property() and mapper.iterate_properties

    .. change::
        :tags: orm
        :tickets:

        added having() method to Query, applies HAVING to the generated statement in the same way as filter() appends to the WHERE clause.

    .. change::
        :tags: orm
        :tickets: 777

        The behavior of query.options() is now fully based on paths, i.e. an option such as eagerload_all('x.y.z.y.x') will apply eagerloading to only those paths, i.e. and not 'x.y.x'; eagerload('children.children') applies only to exactly two-levels deep, etc.

    .. change::
        :tags: orm
        :tickets:

        PickleType will compare using `==` when set up with mutable=False, and not the `is` operator. To use `is` or any other comparator, send in a custom comparison function using PickleType(comparator=my_custom_comparator).

    .. change::
        :tags: orm
        :tickets: 848

        query doesn't throw an error if you use distinct() and an order_by() containing UnaryExpressions (or other) together

    .. change::
        :tags: orm
        :tickets: 786

        order_by() expressions from joined tables are properly added to columns clause when using distinct()

    .. change::
        :tags: orm
        :tickets: 858

        fixed error where Query.add_column() would not accept a class-bound attribute as an argument; Query also raises an error if an invalid argument was sent to add_column() (at instances() time)

    .. change::
        :tags: orm
        :tickets:

        added a little more checking for garbage-collection dereferences in InstanceState.__cleanup() to reduce "gc ignored" errors on app shutdown

    .. change::
        :tags: orm
        :tickets:

        The session API has been solidified:

    .. change::
        :tags: orm
        :tickets: 840

        It's an error to session.save() an object which is already persistent

    .. change::
        :tags: orm
        :tickets:

        It's an error to session.delete() an object which is *not* persistent.

    .. change::
        :tags: orm
        :tickets:

        session.update() and session.delete() raise an error when updating or deleting an instance that is already in the session with a different identity.

    .. change::
        :tags: orm
        :tickets:

        The session checks more carefully when determining "object X already in another session"; e.g. if you pickle a series of objects and unpickle (i.e. as in a Pylons HTTP session or similar), they can go into a new session without any conflict
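    A sketch of the new Query.having() described above; a configured Order mapping is assumed, and the names are illustrative::

        from sqlalchemy import func

        # keep only the customers having more than five orders
        q = (session.query(Order)
                .group_by(Order.customer_id)
                .having(func.count(Order.id) > 5))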
.. change::
    :tags: orm
    :tickets:

    merge() includes a keyword argument "dont_load=True".  Setting this flag will cause the merge operation to not load any data from the database in response to incoming detached objects, and will accept the incoming detached object as though it were already present in that session.  Use this to merge detached objects from external caching systems into the session.

.. change::
    :tags: orm
    :tickets:

    Deferred column attributes no longer trigger a load operation when the attribute is assigned to.  In those cases, the newly assigned value will be present in the flush's UPDATE statement unconditionally.

.. change::
    :tags: orm
    :tickets: 834

    Fixed a truncation error when re-assigning a subset of a collection (obj.relation = obj.relation[1:])

.. change::
    :tags: orm
    :tickets: 832

    De-cruftified backref configuration code, backrefs which step on existing properties now raise an error

.. change::
    :tags: orm
    :tickets: 831

    Improved behavior of add_property() etc., fixed involving synonym/deferred.

.. change::
    :tags: orm
    :tickets:

    Fixed clear_mappers() behavior to better clean up after itself.

.. change::
    :tags: orm
    :tickets: 841

    Fix to "row switch" behavior, i.e. when an INSERT/DELETE is combined into a single UPDATE; many-to-many relations on the parent object update properly.

.. change::
    :tags: orm
    :tickets:

    Fixed __hash__ for association proxy - these collections are unhashable, just like their mutable Python counterparts.

.. change::
    :tags: orm
    :tickets:

    Added proxying of save_or_update, __contains__ and __iter__ methods for scoped sessions.

.. change::
    :tags: orm
    :tickets: 852

    fixed very hard-to-reproduce issue whereby the FROM clause of Query could get polluted by certain generative calls

.. change::
    :tags: dialects
    :tickets:

    Added experimental support for MaxDB (versions >= 7.6.03.007 only).

.. change::
    :tags: dialects
    :tickets:

    oracle will now reflect "DATE" as an OracleDateTime column, not OracleDate

.. change::
    :tags: dialects
    :tickets: 847

    added awareness of schema name in oracle table_names() function, fixes metadata.reflect(schema='someschema')

.. change::
    :tags: dialects
    :tickets:

    MSSQL anonymous labels for selection of functions made deterministic

.. change::
    :tags: dialects
    :tickets:

    sqlite will reflect "DECIMAL" as a numeric column.

.. change::
    :tags: dialects
    :tickets: 828

    Made access dao detection more reliable

.. change::
    :tags: dialects
    :tickets:

    Renamed the Dialect attribute 'preexecute_sequences' to 'preexecute_pk_sequences'.  An attribute proxy is in place for out-of-tree dialects using the old name.

.. change::
    :tags: dialects
    :tickets:

    Added test coverage for unknown type reflection.  Fixed sqlite/mysql handling of type reflection for unknown types.

.. change::
    :tags: dialects
    :tickets:

    Added REAL for mysql dialect (for folks exploiting the REAL_AS_FLOAT sql mode).

.. change::
    :tags: dialects
    :tickets:

    mysql Float, MSFloat and MSDouble constructed without arguments now produce no-argument DDL, e.g. 'FLOAT'.

.. change::
    :tags: misc
    :tickets:

    Removed unused util.hash().

.. changelog::
    :version: 0.4.0
    :released: Wed Oct 17 2007

.. change::
    :tags:
    :tickets:

    (see 0.4.0beta1 for the start of major changes against 0.3, as well as http://www.sqlalchemy.org/trac/wiki/WhatsNewIn04 )

.. change::
    :tags:
    :tickets: 785

    Added initial Sybase support (mxODBC so far)

.. change::
    :tags:
    :tickets:

    Added partial index support for PostgreSQL.  Use the postgres_where keyword on the Index.
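    A minimal sketch of the partial index support described above; note the keyword was spelled postgres_where in this era and postgresql_where in later releases, which is the spelling used here (table and index names are illustrative)::

        from sqlalchemy import Column, Integer, MetaData, String, Table, Index
        from sqlalchemy.dialects import postgresql
        from sqlalchemy.schema import CreateIndex

        metadata = MetaData()
        accounts = Table(
            "accounts", metadata,
            Column("id", Integer, primary_key=True),
            Column("status", String(20)),
        )

        # a partial index: only rows matching the WHERE clause are indexed
        idx = Index("ix_active_accounts", accounts.c.status,
                    postgresql_where=accounts.c.status == 'active')

        # renders CREATE INDEX ... WHERE status = 'active'
        print(CreateIndex(idx).compile(dialect=postgresql.dialect()))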
.. change::
    :tags:
    :tickets: 817

    string-based query param parsing/config file parser understands wider range of string values for booleans

.. change::
    :tags:
    :tickets: 813

    backref remove object operation doesn't fail if the other-side collection doesn't contain the item, supports noload collections

.. change::
    :tags:
    :tickets: 818

    removed __len__ from "dynamic" collection as it would require issuing a SQL "count()" operation, thus forcing all list evaluations to issue redundant SQL

.. change::
    :tags:
    :tickets: 816

    inline optimizations added to locate_dirty() which can greatly speed up repeated calls to flush(), as occurs with autoflush=True

.. change::
    :tags:
    :tickets:

    The IdentifierPreparer's _requires_quotes test is now regex based.  Any out-of-tree dialects that provide custom sets of legal_characters or illegal_initial_characters will need to move to regexes or override _requires_quotes.

.. change::
    :tags:
    :tickets:

    Firebird has supports_sane_rowcount and supports_sane_multi_rowcount set to False due to ticket #370 (right way).

.. change::
    :tags:
    :tickets:

    Improvements and fixes on Firebird reflection:

    * FBDialect now mimics OracleDialect, regarding case-sensitivity of TABLE and COLUMN names (see 'case_sensitive removal' topic on this current file).
    * FBDialect.table_names() doesn't bring system tables (ticket:796).
    * FB now reflects Column's nullable property correctly.

.. change::
    :tags:
    :tickets:

    Fixed SQL compiler's awareness of top-level column labels as used in result-set processing; nested selects which contain the same column names don't affect the result or conflict with result-column metadata.

.. change::
    :tags:
    :tickets:

    query.get() and related functions (like many-to-one lazyloading) use compile-time-aliased bind parameter names, to prevent name conflicts with bind parameters that already exist in the mapped selectable.

.. change::
    :tags:
    :tickets: 795

    Fixed three- and multi-level select and deferred inheritance loading (i.e. abc inheritance with no select_table).

.. change::
    :tags:
    :tickets:

    Ident passed to id_chooser in shard.py always a list.

.. change::
    :tags:
    :tickets:

    The no-arg ResultProxy._row_processor() is now the class attribute `_process_row`.

.. change::
    :tags:
    :tickets: 797

    Added support for returning values from inserts and updates for PostgreSQL 8.2+.

.. change::
    :tags:
    :tickets:

    PG reflection, upon seeing the default schema name being used explicitly as the "schema" argument in a Table, will assume that this is the user's desired convention, and will explicitly set the "schema" argument in foreign-key-related reflected tables, thus making them match only with Table constructors that also use the explicit "schema" argument (even though it's the default schema).  In other words, SA assumes the user is being consistent in this usage.

.. change::
    :tags:
    :tickets: 808

    fixed sqlite reflection of BOOL/BOOLEAN

.. change::
    :tags:
    :tickets:

    Added support for UPDATE with LIMIT on mysql.

.. change::
    :tags:
    :tickets: 803

    null foreign key on a m2o doesn't trigger a lazyload

.. change::
    :tags:
    :tickets: 800

    oracle does not implicitly convert to unicode for non-typed result sets (i.e. when no TypeEngine/String/Unicode type is even being used; previously it was detecting DBAPI types and converting regardless). should fix

.. change::
    :tags:
    :tickets: 806

    fix to anonymous label generation of long table/column names

.. change::
    :tags:
    :tickets:

    Firebird dialect now uses SingletonThreadPool as poolclass.
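    A minimal sketch of the PostgreSQL RETURNING support noted above (ticket 797), using the returning() spelling that was standardized in later releases; this requires a running PostgreSQL 8.2+ server, and the URL and names here are illustrative::

        from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine

        metadata = MetaData()
        widgets = Table(
            "widgets", metadata,
            Column("id", Integer, primary_key=True),
            Column("name", String(50)),
        )

        engine = create_engine("postgresql://scott:tiger@localhost/test")
        metadata.create_all(engine)

        # INSERT .. RETURNING id; the generated value comes back in the result
        result = engine.execute(
            widgets.insert().returning(widgets.c.id),
            name="gadget",
        )
        print(result.fetchone())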
.. change::
    :tags:
    :tickets:

    Firebird now uses dialect.preparer to format sequence names

.. change::
    :tags:
    :tickets: 810

    Fixed breakage with postgres and multiple two-phase transactions.  Two-phase commits and rollbacks didn't automatically end up with a new transaction as the usual dbapi commits/rollbacks do.

.. change::
    :tags:
    :tickets:

    Added an option to the _ScopedExt mapper extension to not automatically save new objects to session on object initialization.

.. change::
    :tags:
    :tickets:

    fixed Oracle non-ansi join syntax

.. change::
    :tags:
    :tickets:

    PickleType and Interval types (on databases not supporting them natively) are now slightly faster.

.. change::
    :tags:
    :tickets:

    Added Float and Time types to Firebird (FBFloat and FBTime).  Fixed BLOB SUB_TYPE for TEXT and Binary types.

.. change::
    :tags:
    :tickets:

    Changed the API for the in\_ operator.  in_() now accepts a single argument that is a sequence of values or a selectable.  The old API of passing in values as varargs still works but is deprecated.

.. changelog::
    :version: 0.4.0beta6
    :released: Thu Sep 27 2007

.. change::
    :tags:
    :tickets:

    The Session identity map is now *weak referencing* by default, use weak_identity_map=False to use a regular dict.  The weak dict we are using is customized to detect instances which are "dirty" and maintain a temporary strong reference to those instances until changes are flushed.

.. change::
    :tags:
    :tickets: 758

    Mapper compilation has been reorganized such that most compilation occurs upon mapper construction.  This allows us to have fewer calls to mapper.compile() and also to allow class-based properties to force a compilation (i.e. User.addresses == 7 will compile all mappers; this is).  The only caveat here is that an inheriting mapper now looks for its inherited mapper upon construction; so mappers within inheritance relationships need to be constructed in inheritance order (which should be the normal case anyway).

.. change::
    :tags:
    :tickets:

    added "FETCH" to the keywords detected by Postgres to indicate a result-row holding statement (i.e. in addition to "SELECT").

.. change::
    :tags:
    :tickets:

    Added full list of SQLite reserved keywords so that they get escaped properly.

.. change::
    :tags:
    :tickets:

    Tightened up the relationship between the Query's generation of "eager load" aliases, and Query.instances() which actually grabs the eagerly loaded rows.  If the aliases were not specifically generated for that statement by EagerLoader, the EagerLoader will not take effect when the rows are fetched.  This prevents columns from being grabbed accidentally as being part of an eager load when they were not meant for such, which can happen with textual SQL as well as some inheritance situations.  It's particularly important since the "anonymous aliasing" of columns uses simple integer counts now to generate labels.

.. change::
    :tags:
    :tickets:

    Removed "parameters" argument from clauseelement.compile(), replaced with "column_keys".  The parameters sent to execute() only interact with the insert/update statement compilation process in terms of the column names present but not the values for those columns.  Produces more consistent execute/executemany behavior, simplifies things a bit internally.

.. change::
    :tags:
    :tickets: 560

    Added 'comparator' keyword argument to PickleType.  By default, "mutable" PickleType does a "deep compare" of objects using their dumps() representation.  But this doesn't work for dictionaries.
    Pickled objects which provide an adequate __eq__() implementation can be set up with "PickleType(comparator=operator.eq)"

.. change::
    :tags:
    :tickets:

    Added session.is_modified(obj) method; performs the same "history" comparison operation as occurs within a flush operation; setting include_collections=False gives the same result as is used when the flush determines whether or not to issue an UPDATE for the instance's row.

.. change::
    :tags:
    :tickets: 584, 761

    Added "schema" argument to Sequence; use this with Postgres/Oracle when the sequence is located in an alternate schema.  Implements part of, should fix.

.. change::
    :tags:
    :tickets:

    Fixed reflection of the empty string for mysql enums.

.. change::
    :tags:
    :tickets: 794

    Changed MySQL dialect to use the older LIMIT <offset>, <limit> syntax instead of LIMIT <limit> OFFSET <offset>, for folks using 3.23.

.. change::
    :tags:
    :tickets:

    Added 'passive_deletes="all"' flag to relation(), disables all nulling-out of foreign key attributes during a flush where the parent object is deleted.

.. change::
    :tags:
    :tickets:

    Column defaults and onupdates, executing inline, will add parentheses for subqueries and other parenthesis-requiring expressions

.. change::
    :tags:
    :tickets: 793

    The behavior of String/Unicode types regarding that they auto-convert to TEXT/CLOB when no length is present now occurs *only* for an exact type of String or Unicode with no arguments.  If you use VARCHAR or NCHAR (subclasses of String/Unicode) with no length, they will be interpreted by the dialect as VARCHAR/NCHAR; no "magic" conversion happens there.  This is less surprising behavior and in particular this helps Oracle keep string-based bind parameters as VARCHARs and not CLOBs.

.. change::
    :tags:
    :tickets: 771

    Fixes to ShardedSession to work with deferred columns.

.. change::
    :tags:
    :tickets:

    User-defined shard_chooser() function must accept "clause=None" argument; this is the ClauseElement passed to session.execute(statement) and can be used to determine correct shard id (since execute() doesn't take an instance.)

.. change::
    :tags:
    :tickets: 764

    Adjusted operator precedence of NOT to match '==' and others, so that ~(x <op> y) produces NOT (x <op> y), which is better compatible with older MySQL versions.  This doesn't apply to "~(x==y)" as it does in 0.3, since ~(x==y) compiles to "x != y", but still applies to operators like BETWEEN.

.. change::
    :tags:
    :tickets: 757, 768, 779, 728

    Other tickets: 757, 768, 779, 728.

.. changelog::
    :version: 0.4.0beta5
    :released:

.. change::
    :tags:
    :tickets: 754

    Connection pool fixes; the better performance of beta4 remains but fixes "connection overflow" and other bugs which were present (like).

.. change::
    :tags:
    :tickets: 769

    Fixed bugs in determining proper sync clauses from custom inherit conditions.

.. change::
    :tags:
    :tickets: 763

    Extended 'engine_from_config' coercion for QueuePool size / overflow.

.. change::
    :tags:
    :tickets: 748

    mysql views can be reflected again.

.. change::
    :tags:
    :tickets:

    AssociationProxy can now take custom getters and setters.

.. change::
    :tags:
    :tickets:

    Fixed malfunctioning BETWEEN in orm queries.

.. change::
    :tags:
    :tickets: 762

    Fixed OrderedProperties pickling

.. change::
    :tags:
    :tickets:

    SQL-expression defaults and sequences now execute "inline" for all non-primary key columns during an INSERT or UPDATE, and for all columns during an executemany()-style call.  inline=True flag on any insert/update statement also forces the same behavior with a single execute().
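    A minimal sketch of this inline behavior, using present-day Core spellings (the table, columns, and default expression are illustrative)::

        from sqlalchemy import (Column, Integer, MetaData, String, Table,
                                create_engine, func)

        metadata = MetaData()
        docs = Table(
            "docs", metadata,
            Column("id", Integer, primary_key=True),
            # a SQL-expression default; rendered inline within the INSERT
            Column("created", String(50), default=func.datetime('now')),
            Column("body", String(200)),
        )

        engine = create_engine("sqlite://")
        metadata.create_all(engine)

        # inline=True forces inline rendering even for a single execute()
        result = engine.execute(docs.insert(inline=True), body="hello")

        # columns whose value came from a SQL-side expression in that
        # statement; typically includes "created" here
        print(result.postfetch_cols())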
    result.postfetch_cols() is a collection of columns for which the previous single insert or update statement contained a SQL-side default expression.

.. change::
    :tags:
    :tickets: 759

    Fixed PG executemany() behavior.

.. change::
    :tags:
    :tickets:

    postgres reflects tables with autoincrement=False for primary key columns which have no defaults.

.. change::
    :tags:
    :tickets:

    postgres no longer wraps executemany() with individual execute() calls, instead favoring performance.  "rowcount"/"concurrency" checks with deleted items (which use executemany) are disabled with PG since psycopg2 does not report proper rowcount for executemany().

.. change::
    :tags: tickets, fixed
    :tickets: 742

.. change::
    :tags: tickets, fixed
    :tickets: 748

.. change::
    :tags: tickets, fixed
    :tickets: 760

.. change::
    :tags: tickets, fixed
    :tickets: 762

.. change::
    :tags: tickets, fixed
    :tickets: 763

.. changelog::
    :version: 0.4.0beta4
    :released: Wed Aug 22 2007

.. change::
    :tags:
    :tickets:

    Tidied up what ends up in your namespace when you 'from sqlalchemy import \*':

.. change::
    :tags:
    :tickets:

    'table' and 'column' are no longer imported.  They remain available by direct reference (as in 'sql.table' and 'sql.column') or a glob import from the sql package.  It was too easy to accidentally use a sql.expressions.table instead of schema.Table when just starting out with SQLAlchemy, likewise column.

.. change::
    :tags:
    :tickets:

    Internal-ish classes like ClauseElement, FromClause, NullTypeEngine, etc., are also no longer imported into your namespace

.. change::
    :tags:
    :tickets:

    The 'Smallinteger' compatibility name (small i!) is no longer imported, but remains in schema.py for now.  SmallInteger (big I!) is still imported.

.. change::
    :tags:
    :tickets:

    The connection pool uses a "threadlocal" strategy internally to return the same connection already bound to a thread, for "contextual" connections; these are the connections used when you do a "connectionless" execution like insert().execute().  This is like a "partial" version of the "threadlocal" engine strategy but without the thread-local transaction part of it.  We're hoping it reduces connection pool overhead as well as database usage.  However, if it proves to impact stability in a negative way, we'll roll it right back.

.. change::
    :tags:
    :tickets:

    Fix to bind param processing such that "False" values (like blank strings) still get processed/encoded.

.. change::
    :tags:
    :tickets: 752

    Fix to select() "generative" behavior, such that calling column(), select_from(), correlate(), and with_prefix() does not modify the original select object

.. change::
    :tags:
    :tickets:

    Added a "legacy" adapter to types, such that user-defined TypeEngine and TypeDecorator classes which define convert_bind_param() and/or convert_result_value() will continue to function.  Also supports calling the super() version of those methods.

.. change::
    :tags:
    :tickets:

    Added session.prune(), trims away instances cached in a session that are no longer referenced elsewhere. (A utility for strong-ref identity maps).

.. change::
    :tags:
    :tickets:

    Added close() method to Transaction.  Closes out a transaction using rollback if it's the outermost transaction, otherwise just ends without affecting the outer transaction.

.. change::
    :tags:
    :tickets:

    Transactional and non-transactional Session integrates better with bound connection; a close() will ensure that connection transactional state is the same as that which existed on it before being bound to the Session.
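    A minimal sketch of the generative select() behavior referenced above - the original statement is left unchanged by generative calls (table and column names are illustrative)::

        from sqlalchemy import Column, Integer, MetaData, Table, select

        metadata = MetaData()
        t = Table("t", metadata,
                  Column("id", Integer),
                  Column("x", Integer))

        s1 = select([t.c.id])
        s2 = s1.column(t.c.x)   # returns a *new* select object

        print(s1)   # SELECT t.id FROM t  -- unchanged
        print(s2)   # SELECT t.id, t.x FROM t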
.. change::
    :tags:
    :tickets: 735

    Modified SQL operator functions to be module-level operators, allowing SQL expressions to be pickleable.

.. change::
    :tags:
    :tickets:

    Small adjustment to mapper class.__init__ to allow for Py2.6 object.__init__() behavior.

.. change::
    :tags:
    :tickets:

    Fixed 'prefix' argument for select()

.. change::
    :tags:
    :tickets:

    Connection.begin() no longer accepts nested=True; this logic is now all in begin_nested().

.. change::
    :tags:
    :tickets:

    Fixes to new "dynamic" relation loader involving cascades

.. change::
    :tags: tickets, fixed
    :tickets: 735

.. change::
    :tags: tickets, fixed
    :tickets: 752

.. changelog::
    :version: 0.4.0beta3
    :released: Thu Aug 16 2007

.. change::
    :tags:
    :tickets:

    SQL types optimization:

.. change::
    :tags:
    :tickets:

    New performance tests show a combined mass-insert/mass-select test as having 68% fewer function calls than the same test run against 0.3.

.. change::
    :tags:
    :tickets:

    General performance improvement of result set iteration is around 10-20%.

.. change::
    :tags:
    :tickets:

    In types.AbstractType, convert_bind_param() and convert_result_value() have migrated to callable-returning bind_processor() and result_processor() methods.  If no callable is returned, no pre/post processing function is called.

.. change::
    :tags:
    :tickets:

    Hooks added throughout base/sql/defaults to optimize the calling of bind param/result processors so that method call overhead is minimized.

.. change::
    :tags:
    :tickets:

    Support added for executemany() scenarios such that unneeded "last row id" logic doesn't kick in, parameters aren't excessively traversed.

.. change::
    :tags:
    :tickets:

    Added 'inherit_foreign_keys' arg to mapper().

.. change::
    :tags:
    :tickets:

    Added support for string date passthrough in sqlite.

.. change::
    :tags: tickets, fixed
    :tickets: 738

.. change::
    :tags: tickets, fixed
    :tickets: 739

.. change::
    :tags: tickets, fixed
    :tickets: 743

.. change::
    :tags: tickets, fixed
    :tickets: 744

.. changelog::
    :version: 0.4.0beta2
    :released: Tue Aug 14 2007

.. change::
    :tags: oracle, improvements.
    :tickets:

    Auto-commit after LOAD DATA INFILE for mysql.

.. change::
    :tags: oracle, improvements.
    :tickets:

    A rudimental SessionExtension class has been added, allowing user-defined functionality to take place at flush(), commit(), and rollback() boundaries.

.. change::
    :tags: oracle, improvements.
    :tickets:

    Added engine_from_config() function for helping to create_engine() from an .ini style config.

.. change::
    :tags: oracle, improvements.
    :tickets:

    base_mapper() becomes a plain attribute.

.. change::
    :tags: oracle, improvements.
    :tickets:

    session.execute() and scalar() can search for a Table with which to bind from using the given ClauseElement.

.. change::
    :tags: oracle, improvements.
    :tickets:

    Session automatically extrapolates tables from mappers with binds, also uses base_mapper so that inheritance hierarchies bind automatically.

.. change::
    :tags: oracle, improvements.
    :tickets:

    Moved ClauseVisitor traversal back to inlined non-recursive.

.. change::
    :tags: tickets, fixed
    :tickets: 730

.. change::
    :tags: tickets, fixed
    :tickets: 732

.. change::
    :tags: tickets, fixed
    :tickets: 733

.. change::
    :tags: tickets, fixed
    :tickets: 734

.. changelog::
    :version: 0.4.0beta1
    :released: Sun Aug 12 2007

.. change::
    :tags: orm
    :tickets:

    Speed! Along with recent speedups to ResultProxy, total number of function calls significantly reduced for large loads.
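    A minimal sketch of the engine_from_config() helper noted under 0.4.0beta2 above; the dict here stands in for a parsed .ini section, and the keys and values are illustrative::

        from sqlalchemy import engine_from_config

        config = {
            'sqlalchemy.url': 'sqlite://',
            'sqlalchemy.echo': 'false',         # string values are coerced
            'sqlalchemy.pool_recycle': '3600',  # ...to bool/int as needed
        }
        engine = engine_from_config(config, prefix='sqlalchemy.')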
.. change::
    :tags: orm
    :tickets:

    test/perf/masseagerload.py reports 0.4 as having the fewest number of function calls across all SA versions (0.1, 0.2, and 0.3).

.. change::
    :tags: orm
    :tickets: 213

    New collection_class api and implementation.  Collections are now instrumented via decorations rather than proxying.  You can now have collections that manage their own membership, and your class instance will be directly exposed on the relation property.  The changes are transparent for most users.

.. change::
    :tags: orm
    :tickets:

    InstrumentedList (as it was) is removed, and relation properties no longer have 'clear()', '.data', or any other added methods beyond those provided by the collection type.  You are free, of course, to add them to a custom class.

.. change::
    :tags: orm
    :tickets:

    __setitem__-like assignments now fire remove events for the existing value, if any.

.. change::
    :tags: orm
    :tickets:

    dict-likes used as collection classes no longer need to change __iter__ semantics - itervalues() is used by default instead.  This is a backwards incompatible change.

.. change::
    :tags: orm
    :tickets:

    Subclassing dict for a mapped collection is no longer needed in most cases.  orm.collections provides canned implementations that key objects by a specified column or a custom function of your choice.

.. change::
    :tags: orm
    :tickets:

    Collection assignment now requires a compatible type - assigning None to clear a collection or assigning a list to a dict collection will now raise an argument error.

.. change::
    :tags: orm
    :tickets:

    AttributeExtension moved to interfaces, and .delete is now .remove.  The event method signature has also been swapped around.

.. change::
    :tags: orm
    :tickets:

    Major overhaul for Query:

.. change::
    :tags: orm
    :tickets:

    All selectXXX methods are deprecated.  Generative methods are now the standard way to do things, i.e. filter(), filter_by(), all(), one(), etc.  Deprecated methods are docstring'ed with their new replacements.

.. change::
    :tags: orm
    :tickets: 643

    Class-level properties are now usable as query elements... no more '.c.'!  "Class.c.propname" is now superseded by "Class.propname".  All clause operators are supported, as well as higher level operators such as Class.prop == <some value> for scalar attributes, Class.prop.contains() and Class.prop.any() for collection-based attributes (all are also negatable).  Table-based column expressions as well as columns mounted on mapped classes via 'c' are of course still fully available and can be freely mixed with the new attributes.

.. change::
    :tags: orm
    :tickets:

    Removed ancient query.select_by_<attributename>() capability.

.. change::
    :tags: orm
    :tickets:

    The aliasing logic used by eager loading has been generalized, so that it also adds full automatic aliasing support to Query.  It's no longer necessary to create an explicit Alias to join to the same tables multiple times; *even for self-referential relationships*.

    - join() and outerjoin() take arguments "aliased=True".  This causes their joins to be built on aliased tables; subsequent calls to filter() and filter_by() will translate all table expressions (yes, real expressions using the original mapped Table) to be that of the Alias for the duration of that join() (i.e. until reset_joinpoint() or another join() is called).

    - join() and outerjoin() take arguments "id=<some_id>".  When used with "aliased=True", the id can be referenced by add_entity(cls, id=<some_id>) so that you can select the joined instances even if they're from an alias.

    - join() and outerjoin() now work with self-referential relationships!
Using "aliased=True", you can join as many levels deep as desired, i.e. query.join(['children', 'children'], aliased=True); filter criterion will be against the rightmost joined table .. change:: :tags: orm :tickets: 660 Added query.populate_existing(), marks the query to reload all attributes and collections of all instances touched in the query, including eagerly-loaded entities. .. change:: :tags: orm :tickets: Added eagerload_all(), allows eagerload_all('x.y.z') to specify eager loading of all properties in the given path. .. change:: :tags: orm :tickets: Major overhaul for Session: .. change:: :tags: orm :tickets: New function which "configures" a session called "sessionmaker()". Send various keyword arguments to this function once, returns a new class which creates a Session against that stereotype. .. change:: :tags: orm :tickets: SessionTransaction removed from "public" API. You now can call begin()/ commit()/rollback() on the Session itself. .. change:: :tags: orm :tickets: Session also supports SAVEPOINT transactions; call begin_nested(). .. change:: :tags: orm :tickets: Session supports two-phase commit behavior when vertically or horizontally partitioning (i.e., using more than one engine). Use twophase=True. .. change:: :tags: orm :tickets: Session flag "transactional=True" produces a session which always places itself into a transaction when first used. Upon commit(), rollback() or close(), the transaction ends; but begins again on the next usage. .. change:: :tags: orm :tickets: Session supports "autoflush=True". This issues a flush() before each query. Use in conjunction with transactional, and you can just save()/update() and then query, the new objects will be there. Use commit() at the end (or flush() if non-transactional) to flush remaining changes. .. change:: :tags: orm :tickets: New scoped_session() function replaces SessionContext and assignmapper. Builds onto "sessionmaker()" concept to produce a class whos Session() construction returns the thread-local session. Or, call all Session methods as class methods, i.e. Session.save(foo); Session.commit(). just like the old "objectstore" days. .. change:: :tags: orm :tickets: Added new "binds" argument to Session to support configuration of multiple binds with sessionmaker() function. .. change:: :tags: orm :tickets: A rudimental SessionExtension class has been added, allowing user-defined functionality to take place at flush(), commit(), and rollback() boundaries. .. change:: :tags: orm :tickets: Query-based relation()s available with dynamic_loader(). This is a *writable* collection (supporting append() and remove()) which is also a live Query object when accessed for reads. Ideal for dealing with very large collections where only partial loading is desired. .. change:: :tags: orm :tickets: flush()-embedded inline INSERT/UPDATE expressions. Assign any SQL expression, like "sometable.c.column + 1", to an instance's attribute. Upon flush(), the mapper detects the expression and embeds it directly in the INSERT or UPDATE statement; the attribute gets deferred on the instance so it loads the new value the next time you access it. .. change:: :tags: orm :tickets: 618 A rudimental sharding (horizontal scaling) system is introduced. This system uses a modified Session which can distribute read and write operations among multiple databases, based on user-defined functions defining the "sharding strategy". 
    Instances and their dependents can be distributed and queried among multiple databases based on attribute values, round-robin approaches or any other user-defined system.

.. change::
    :tags: orm
    :tickets: 659

    Eager loading has been enhanced to allow even more joins in more places.  It now functions at any arbitrary depth along self-referential and cyclical structures.  When loading cyclical structures, specify "join_depth" on relation() indicating how many times you'd like the table to join to itself; each level gets a distinct table alias.  The alias names themselves are generated at compile time using a simple counting scheme now and are a lot easier on the eyes, as well as of course completely deterministic.

.. change::
    :tags: orm
    :tickets: 211

    Added composite column properties.  This allows you to create a type which is represented by more than one column, when using the ORM.  Objects of the new type are fully functional in query expressions, comparisons, query.get() clauses, etc. and act as though they are regular single-column scalars... except they're not!  Use the function composite(cls, \*columns) inside of the mapper's "properties" dict, and instances of cls will be created/mapped to a single attribute, comprised of the values corresponding to \*columns.

.. change::
    :tags: orm
    :tickets:

    Improved support for custom column_property() attributes which feature correlated subqueries, works better with eager loading now.

.. change::
    :tags: orm
    :tickets: 611

    Primary key "collapse" behavior; the mapper will analyze all columns in its given selectable for primary key "equivalence", that is, columns which are equivalent via foreign key relationship or via an explicit inherit_condition.  Primarily for joined-table inheritance scenarios where different named PK columns in inheriting tables should "collapse" into a single-valued (or fewer-valued) primary key.  Fixes things like.

.. change::
    :tags: orm
    :tickets:

    Joined-table inheritance will now generate the primary key columns of all inherited classes against the root table of the join only.  This implies that each row in the root table is distinct to a single instance.  If for some rare reason this is not desirable, explicit primary_key settings on individual mappers will override it.

.. change::
    :tags: orm
    :tickets:

    When "polymorphic" flags are used with joined-table or single-table inheritance, all identity keys are generated against the root class of the inheritance hierarchy; this allows query.get() to work polymorphically using the same caching semantics as a non-polymorphic get.  Note that this currently does not work with concrete inheritance.

.. change::
    :tags: orm
    :tickets:

    Secondary inheritance loading: polymorphic mappers can be constructed *without* a select_table argument.  Inheriting mappers whose tables were not represented in the initial load will issue a second SQL query immediately, once per instance (i.e. not very efficient for large lists), in order to load the remaining columns.

.. change::
    :tags: orm
    :tickets:

    Secondary inheritance loading can also move its second query into a column-level "deferred" load, via the "polymorphic_fetch" argument, which can be set to 'select' or 'deferred'

.. change::
    :tags: orm
    :tickets: 696

    It's now possible to map only a subset of available selectable columns onto mapper properties, using include_columns/exclude_columns.

.. change::
    :tags: orm
    :tickets:

    Added undefer_group() MapperOption, sets a set of "deferred" columns joined by a "group" to load as "undeferred".
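    A minimal sketch of grouped deferred columns and undefer_group() as described above, in the declarative spelling of later releases (the model is illustrative)::

        from sqlalchemy import Column, Integer, String, Text, create_engine
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import deferred, sessionmaker, undefer_group

        Base = declarative_base()

        class Book(Base):
            __tablename__ = 'books'
            id = Column(Integer, primary_key=True)
            title = Column(String(100))
            # two columns deferred together under one group name
            summary = deferred(Column(Text), group='text')
            excerpt = deferred(Column(Text), group='text')

        engine = create_engine('sqlite://')
        Base.metadata.create_all(engine)
        session = sessionmaker(bind=engine)()

        # undefer the entire group with one option
        books = session.query(Book).options(undefer_group('text')).all()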
.. change::
    :tags: orm
    :tickets:

    Rewrite of the "deterministic alias name" logic to be part of the SQL layer, produces much simpler alias and label names more in the style of Hibernate

.. change::
    :tags: sql
    :tickets:

    Speed! Clause compilation as well as the mechanics of SQL constructs have been streamlined and simplified to a significant degree, for a 20-30% improvement of the statement construction/compilation overhead of 0.3.

.. change::
    :tags: sql
    :tickets:

    All "type" keyword arguments, such as those to bindparam(), column(), Column(), and func.<name>(), renamed to "type\_".  Those objects still name their "type" attribute as "type".

.. change::
    :tags: sql
    :tickets:

    case_sensitive=(True|False) setting removed from schema items, since checking this state added a lot of method call overhead and there was no decent reason to ever set it to False.  Table and column names which are all lower case will be treated as case-insensitive (yes we adjust for Oracle's UPPERCASE style too).

.. change::
    :tags: transactions
    :tickets:

    Added context manager (with statement) support for transactions.

.. change::
    :tags: transactions
    :tickets:

    Added support for two phase commit, works with mysql and postgres so far.

.. change::
    :tags: transactions
    :tickets:

    Added a subtransaction implementation that uses savepoints.

.. change::
    :tags: transactions
    :tickets:

    Added support for savepoints.

.. change::
    :tags: metadata
    :tickets:

    Tables can be reflected from the database en-masse without declaring them in advance.  MetaData(engine, reflect=True) will load all tables present in the database, or use metadata.reflect() for finer control.

.. change::
    :tags: metadata
    :tickets:

    DynamicMetaData has been renamed to ThreadLocalMetaData

.. change::
    :tags: metadata
    :tickets:

    The ThreadLocalMetaData constructor now takes no arguments.

.. change::
    :tags: metadata
    :tickets:

    BoundMetaData has been removed - regular MetaData is equivalent

.. change::
    :tags: metadata
    :tickets: 646

    Numeric and Float types now have an "asdecimal" flag; defaults to True for Numeric, False for Float.  When True, values are returned as decimal.Decimal objects; when False, values are returned as float().  The defaults of True/False are already the behavior for PG and MySQL's DBAPI modules.

.. change::
    :tags: metadata
    :tickets: 475

    New SQL operator implementation which removes all hardcoded operators from expression structures and moves them into compilation; allows greater flexibility of operator compilation; for example, "+" compiles to "||" when used in a string context, or "concat(a,b)" on MySQL; whereas in a numeric context it compiles to "+".  Fixes.

.. change::
    :tags: metadata
    :tickets:

    "Anonymous" alias and label names are now generated at SQL compilation time in a completely deterministic fashion... no more random hex IDs

.. change::
    :tags: metadata
    :tickets:

    Significant architectural overhaul to SQL elements (ClauseElement).  All elements share a common "mutability" framework which allows a consistent approach to in-place modifications of elements as well as generative behavior.  Improves stability of the ORM which makes heavy usage of mutations to SQL expressions.

.. change::
    :tags: metadata
    :tickets:

    select() and union() now have "generative" behavior.  Methods like order_by() and group_by() return a *new* instance - the original instance is left unchanged.  Non-generative methods remain as well.
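    A minimal sketch of the en-masse reflection described above, using the metadata.reflect() spelling (the table here is created ad hoc purely for illustration)::

        from sqlalchemy import MetaData, create_engine

        engine = create_engine('sqlite://')
        engine.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)")

        metadata = MetaData()
        metadata.reflect(bind=engine)   # load all tables present in the DB

        users = metadata.tables['users']
        print(users.columns.keys())     # ['id', 'name']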
.. change::
    :tags: metadata
    :tickets: 569, 52

    The internals of select/union vastly simplified - all decision making regarding "is subquery" and "correlation" pushed to SQL generation phase.  select() elements are now *never* mutated by their enclosing containers or by any dialect's compilation process

.. change::
    :tags: metadata
    :tickets:

    select(scalar=True) argument is deprecated; use select(..).as_scalar().  The resulting object obeys the full "column" interface and plays better within expressions.

.. change::
    :tags: metadata
    :tickets: 504

    Added select().with_prefix('foo') allowing any set of keywords to be placed before the columns clause of the SELECT

.. change::
    :tags: metadata
    :tickets: 686

    Added array slice support to row[]

.. change::
    :tags: metadata
    :tickets:

    Result sets make a better attempt at matching the DBAPI types present in cursor.description to the TypeEngine objects defined by the dialect, which are then used for result-processing.  Note this only takes effect for textual SQL; constructed SQL statements always have an explicit type map.

.. change::
    :tags: metadata
    :tickets:

    Result sets from CRUD operations close their underlying cursor immediately and will also autoclose the connection if defined for the operation; this allows more efficient usage of connections for successive CRUD operations with less chance of "dangling connections".

.. change::
    :tags: metadata
    :tickets: 559

    Column defaults and onupdate Python functions (i.e. passed to ColumnDefault) may take zero or one arguments; the one argument is the ExecutionContext, from which you can call "context.parameters[someparam]" to access the other bind parameter values affixed to the statement.  The connection used for the execution is available as well so that you can pre-execute statements.

.. change::
    :tags: metadata
    :tickets:

    Added explicit create/drop/execute support for sequences (i.e. you can pass a "connectable" to each of those methods on Sequence).

.. change::
    :tags: metadata
    :tickets:

    Better quoting of identifiers when manipulating schemas.

.. change::
    :tags: metadata
    :tickets:

    Standardized the behavior for table reflection where types can't be located; NullType is substituted instead, warning is raised.

.. change::
    :tags: metadata
    :tickets: 606

    ColumnCollection (i.e. the 'c' attribute on tables) follows dictionary semantics for "__contains__"

.. change::
    :tags: engines
    :tickets:

    Speed! The mechanics of result processing and bind parameter processing have been overhauled, streamlined and optimized to issue as few method calls as possible.  Bench tests for mass INSERT and mass rowset iteration both show 0.4 to be over twice as fast as 0.3, using 68% fewer function calls.

.. change::
    :tags: engines
    :tickets:

    You can now hook into the pool lifecycle and run SQL statements or other logic at each new DBAPI connection, pool check-out and check-in.

.. change::
    :tags: engines
    :tickets:

    Connections gain a .properties collection, with contents scoped to the lifetime of the underlying DBAPI connection

.. change::
    :tags: engines
    :tickets:

    Removed auto_close_cursors and disallow_open_cursors arguments from Pool; reduces overhead as cursors are normally closed by ResultProxy and Connection.

.. change::
    :tags: extensions
    :tickets:

    proxyengine is temporarily removed, pending an actually working replacement.

.. change::
    :tags: extensions
    :tickets:

    SelectResults has been replaced by Query.  SelectResults / SelectResultsExt still exist but just return a slightly modified Query object for backwards-compatibility.
    join_to() method from SelectResults isn't present anymore, need to use join().

.. change::
    :tags: mysql
    :tickets:

    Table and column names loaded via reflection are now Unicode.

.. change::
    :tags: mysql
    :tickets:

    All standard column types are now supported, including SET.

.. change::
    :tags: mysql
    :tickets:

    Table reflection can now be performed in as little as one round-trip.

.. change::
    :tags: mysql
    :tickets:

    ANSI and ANSI_QUOTES sql modes are now supported.

.. change::
    :tags: mysql
    :tickets:

    Indexes are now reflected.

.. change::
    :tags: postgres
    :tickets:

    Added PGArray datatype for using postgres array datatypes.

.. change::
    :tags: oracle
    :tickets: 507

    Very rudimental support for OUT parameters added; use sql.outparam(name, type) to set up an OUT parameter, just like bindparam(); after execution, values are available via result.out_parameters dictionary.

SQLAlchemy-1.0.11/doc/build/changelog/migration_08.rst0000664000175000017500000014563012636375552023476 0ustar classicclassic00000000000000
==============================
What's New in SQLAlchemy 0.8?
==============================

.. admonition:: About this Document

    This document describes changes between SQLAlchemy version 0.7, undergoing maintenance releases as of October, 2012, and SQLAlchemy version 0.8, which is expected for release in early 2013.

    Document date: October 25, 2012
    Updated: March 9, 2013

Introduction
============

This guide introduces what's new in SQLAlchemy version 0.8, and also documents changes which affect users migrating their applications from the 0.7 series of SQLAlchemy to 0.8.

SQLAlchemy releases are closing in on 1.0, and each new version since 0.5 features fewer major usage changes.  Most applications that are settled into modern 0.7 patterns should be movable to 0.8 with no changes.  Applications that use 0.6 and even 0.5 patterns should be directly migratable to 0.8 as well, though larger applications may want to test with each interim version.

Platform Support
================

Targeting Python 2.5 and Up Now
-------------------------------

SQLAlchemy 0.8 will target Python 2.5 and forward; compatibility for Python 2.4 is being dropped.

The internals will be able to make usage of Python ternaries (that is, ``x if y else z``) which will improve things versus the usage of ``y and x or z``, which naturally has been the source of some bugs, as well as context managers (that is, ``with:``) and perhaps in some cases ``try:/except:/else:`` blocks which will help with code readability.

SQLAlchemy will eventually drop 2.5 support as well - when 2.6 is reached as the baseline, SQLAlchemy will move to use 2.6/3.3 in-place compatibility, removing the usage of the ``2to3`` tool and maintaining a source base that works with Python 2 and 3 at the same time.

New ORM Features
================

.. _feature_relationship_08:

Rewritten :func:`.relationship` mechanics
-----------------------------------------

0.8 features a much improved and capable system regarding how :func:`.relationship` determines how to join between two entities.  The new system includes these features:

* The ``primaryjoin`` argument is **no longer needed** when constructing a :func:`.relationship` against a class that has multiple foreign key paths to the target.
  Only the ``foreign_keys`` argument is needed to specify those columns which should be included:

  ::

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        child_id_one = Column(Integer, ForeignKey('child.id'))
        child_id_two = Column(Integer, ForeignKey('child.id'))

        child_one = relationship("Child", foreign_keys=child_id_one)
        child_two = relationship("Child", foreign_keys=child_id_two)

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)

* relationships against self-referential, composite foreign keys where **a column points to itself** are now supported.  The canonical case is as follows:

  ::

    class Folder(Base):
        __tablename__ = 'folder'
        __table_args__ = (
            ForeignKeyConstraint(
                ['account_id', 'parent_id'],
                ['folder.account_id', 'folder.folder_id']),
        )

        account_id = Column(Integer, primary_key=True)
        folder_id = Column(Integer, primary_key=True)
        parent_id = Column(Integer)
        name = Column(String)

        parent_folder = relationship("Folder",
                            backref="child_folders",
                            remote_side=[account_id, folder_id]
                        )

  Above, the ``Folder`` refers to its parent ``Folder`` joining from ``account_id`` to itself, and ``parent_id`` to ``folder_id``.  When SQLAlchemy constructs an auto-join, no longer can it assume all columns on the "remote" side are aliased, and all columns on the "local" side are not - the ``account_id`` column is **on both sides**.  So the internal relationship mechanics were totally rewritten to support an entirely different system whereby two copies of ``account_id`` are generated, each containing different *annotations* to determine their role within the statement.  Note the join condition within a basic eager load:

  ::

    SELECT
        folder.account_id AS folder_account_id,
        folder.folder_id AS folder_folder_id,
        folder.parent_id AS folder_parent_id,
        folder.name AS folder_name,
        folder_1.account_id AS folder_1_account_id,
        folder_1.folder_id AS folder_1_folder_id,
        folder_1.parent_id AS folder_1_parent_id,
        folder_1.name AS folder_1_name
    FROM folder
        LEFT OUTER JOIN folder AS folder_1
        ON folder_1.account_id = folder.account_id
            AND folder.folder_id = folder_1.parent_id
    WHERE folder.folder_id = ? AND folder.account_id = ?

* Previously difficult custom join conditions, like those involving functions and/or CASTing of types, will now function as expected in most cases::

    class HostEntry(Base):
        __tablename__ = 'host_entry'

        id = Column(Integer, primary_key=True)
        ip_address = Column(INET)
        content = Column(String(50))

        # relationship() using explicit foreign_keys, remote_side
        parent_host = relationship("HostEntry",
                            primaryjoin=ip_address == cast(content, INET),
                            foreign_keys=content,
                            remote_side=ip_address
                        )

The new :func:`.relationship` mechanics make use of a SQLAlchemy concept known as :term:`annotations`.  These annotations are also available to application code explicitly via the :func:`.foreign` and :func:`.remote` functions, either as a means to improve readability for advanced configurations or to directly inject an exact configuration, bypassing the usual join-inspection heuristics::

    from sqlalchemy.orm import foreign, remote

    class HostEntry(Base):
        __tablename__ = 'host_entry'

        id = Column(Integer, primary_key=True)
        ip_address = Column(INET)
        content = Column(String(50))

        # relationship() using explicit foreign() and remote() annotations
        # in lieu of separate arguments
        parent_host = relationship("HostEntry",
                            primaryjoin=remote(ip_address) == \
                                    cast(foreign(content), INET),
                        )
.. seealso::

    :ref:`relationship_configure_joins` - a newly revised section on :func:`.relationship` detailing the latest techniques for customizing related attributes and collection access.

:ticket:`1401` :ticket:`610`

.. _feature_orminspection_08:

New Class/Object Inspection System
----------------------------------

Lots of SQLAlchemy users are writing systems that require the ability to inspect the attributes of a mapped class, including being able to get at the primary key columns, object relationships, plain attributes, and so forth, typically for the purpose of building data-marshalling systems, like JSON/XML conversion schemes and of course form libraries galore.

Originally, the :class:`.Table` and :class:`.Column` model were the original inspection points, which have a well-documented system.  While SQLAlchemy ORM models are also fully introspectable, this has never been a fully stable and supported feature, and users tended to not have a clear idea how to get at this information.

0.8 now provides a consistent, stable and fully documented API for this purpose, including an inspection system which works on mapped classes, instances, attributes, and other Core and ORM constructs.  The entrypoint to this system is the core-level :func:`.inspect` function.  In most cases, the object being inspected is one already part of SQLAlchemy's system, such as :class:`.Mapper`, :class:`.InstanceState`, :class:`.Inspector`.  In some cases, new objects have been added with the job of providing the inspection API in certain contexts, such as :class:`.AliasedInsp` and :class:`.AttributeState`.

A walkthrough of some key capabilities follows::

    >>> class User(Base):
    ...     __tablename__ = 'user'
    ...     id = Column(Integer, primary_key=True)
    ...     name = Column(String)
    ...     name_syn = synonym(name)
    ...     addresses = relationship("Address")
    ...

    >>> # universal entry point is inspect()
    >>> b = inspect(User)

    >>> # b in this case is the Mapper
    >>> b

    >>> # Column namespace
    >>> b.columns.id
    Column('id', Integer(), table=<user>, primary_key=True, nullable=False)

    >>> # mapper's perspective of the primary key
    >>> b.primary_key
    (Column('id', Integer(), table=<user>, primary_key=True, nullable=False),)

    >>> # MapperProperties available from .attrs
    >>> b.attrs.keys()
    ['name_syn', 'addresses', 'id', 'name']

    >>> # .column_attrs, .relationships, etc. filter this collection
    >>> b.column_attrs.keys()
    ['id', 'name']

    >>> list(b.relationships)
    []

    >>> # they are also namespaces
    >>> b.column_attrs.id

    >>> b.relationships.addresses

    >>> # point inspect() at a mapped, class level attribute,
    >>> # returns the attribute itself
    >>> b = inspect(User.addresses)
    >>> b

    >>> # From here we can get the mapper:
    >>> b.mapper

    >>> # the parent inspector, in this case a mapper
    >>> b.parent

    >>> # an expression
    >>> print b.expression
    "user".id = address.user_id

    >>> # inspect works on instances
    >>> u1 = User(id=3, name='x')
    >>> b = inspect(u1)

    >>> # it returns the InstanceState
    >>> b

    >>> # similar attrs accessor refers to the
    >>> b.attrs.keys()
    ['id', 'name_syn', 'addresses', 'name']

    >>> # attribute interface - from attrs, you get a state object
    >>> b.attrs.id

    >>> # this object can give you, current value...
    >>> b.attrs.id.value
    3

    >>> # ... current history
    >>> b.attrs.id.history
    History(added=[3], unchanged=(), deleted=())

    >>> # InstanceState can also provide session state information
    >>> # let's assume the object is persistent
    >>> s = Session()
    >>> s.add(u1)
    >>> s.commit()

    >>> # now we can get primary key identity, always
    >>> # works in query.get()
    >>> b.identity
    (3,)

    >>> # the mapper level key
    >>> b.identity_key
    (<class '__main__.User'>, (3,))

    >>> # state within the session
    >>> b.persistent, b.transient, b.deleted, b.detached
    (True, False, False, False)

    >>> # owning session
    >>> b.session

.. seealso::

    :ref:`core_inspection_toplevel`

:ticket:`2208`

New with_polymorphic() feature, can be used anywhere
----------------------------------------------------

The :meth:`.Query.with_polymorphic` method allows the user to specify which tables should be present when querying against a joined-table entity.  Unfortunately the method is awkward and only applies to the first entity in the list, and otherwise has awkward behaviors both in usage as well as within the internals.  A new enhancement to the :func:`.aliased` construct has been added called :func:`.with_polymorphic` which allows any entity to be "aliased" into a "polymorphic" version of itself, freely usable anywhere:

::

    from sqlalchemy.orm import with_polymorphic
    palias = with_polymorphic(Person, [Engineer, Manager])
    session.query(Company).\
        join(palias, Company.employees).\
        filter(or_(Engineer.language=='java', Manager.hair=='pointy'))

.. seealso::

    :ref:`with_polymorphic` - newly updated documentation for polymorphic loading control.

:ticket:`2333`

of_type() works with alias(), with_polymorphic(), any(), has(), joinedload(), subqueryload(), contains_eager()
--------------------------------------------------------------------------------------------------------------

The :meth:`.PropComparator.of_type` method is used to specify a specific subtype to use when constructing SQL expressions along a :func:`.relationship` that has a :term:`polymorphic` mapping as its target.  This method can now be used to target *any number* of target subtypes, by combining it with the new :func:`.with_polymorphic` function::

    # use eager loading in conjunction with with_polymorphic targets
    Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True)
    q = s.query(DataContainer).\
        join(DataContainer.jobs.of_type(Job_P)).\
        options(contains_eager(DataContainer.jobs.of_type(Job_P)))

The method now works equally well in most places a regular relationship attribute is accepted, including with loader functions like :func:`.joinedload`, :func:`.subqueryload`, :func:`.contains_eager`, and comparison methods like :meth:`.PropComparator.any` and :meth:`.PropComparator.has`::

    # use eager loading in conjunction with with_polymorphic targets
    Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True)
    q = s.query(DataContainer).\
        join(DataContainer.jobs.of_type(Job_P)).\
        options(contains_eager(DataContainer.jobs.of_type(Job_P)))

    # pass subclasses to eager loads (implicitly applies with_polymorphic)
    q = s.query(ParentThing).\
        options(
            joinedload_all(
                ParentThing.container,
                DataContainer.jobs.of_type(SubJob)
            ))

    # control self-referential aliasing with any()/has()
    Job_A = aliased(Job)
    q = s.query(Job).join(DataContainer.jobs).\
        filter(
            DataContainer.jobs.of_type(Job_A).\
                any(and_(Job_A.id < Job.id, Job_A.type=='fred')
            )
        )
.. seealso::

    :ref:`of_type`

:ticket:`2438` :ticket:`1106`

Events Can Be Applied to Unmapped Superclasses
----------------------------------------------

Mapper and instance events can now be associated with an unmapped superclass, where those events will be propagated to subclasses as those subclasses are mapped.  The ``propagate=True`` flag should be used.  This feature allows events to be associated with a declarative base class::

    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    @event.listens_for(Base, "load", propagate=True)
    def on_load(target, context):
        print "New instance loaded:", target

    # on_load() will be applied to SomeClass
    class SomeClass(Base):
        __tablename__ = 'sometable'

        # ...

:ticket:`2585`

Declarative Distinguishes Between Modules/Packages
--------------------------------------------------

A key feature of Declarative is the ability to refer to other mapped classes using their string name.  The registry of class names is now sensitive to the owning module and package of a given class.  The classes can be referred to via dotted name in expressions::

    class Snack(Base):
        # ...

        peanuts = relationship("nuts.Peanut",
                primaryjoin="nuts.Peanut.snack_id == Snack.id")

The resolution allows that any full or partial disambiguating package name can be used.  If the path to a particular class is still ambiguous, an error is raised.

:ticket:`2338`

New DeferredReflection Feature in Declarative
---------------------------------------------

The "deferred reflection" example has been moved to a supported feature within Declarative.  This feature allows the construction of declarative mapped classes with only placeholder ``Table`` metadata, until a ``prepare()`` step is called, given an ``Engine`` with which to reflect fully all tables and establish actual mappings.  The system supports overriding of columns, single and joined inheritance, as well as distinct bases-per-engine.  A full declarative configuration can now be created against an existing table that is assembled upon engine creation time in one step:

::

    class ReflectedOne(DeferredReflection, Base):
        __abstract__ = True

    class ReflectedTwo(DeferredReflection, Base):
        __abstract__ = True

    class MyClass(ReflectedOne):
        __tablename__ = 'mytable'

    class MyOtherClass(ReflectedOne):
        __tablename__ = 'myothertable'

    class YetAnotherClass(ReflectedTwo):
        __tablename__ = 'yetanothertable'

    ReflectedOne.prepare(engine_one)
    ReflectedTwo.prepare(engine_two)

.. seealso::

    :class:`.DeferredReflection`

:ticket:`2485`

ORM Classes Now Accepted by Core Constructs
-------------------------------------------

While the SQL expressions used with :meth:`.Query.filter`, such as ``User.id == 5``, have always been compatible for use with core constructs such as :func:`.select`, the mapped class itself would not be recognized when passed to :func:`.select`, :meth:`.Select.select_from`, or :meth:`.Select.correlate`.  A new SQL registration system allows a mapped class to be accepted as a FROM clause within the core::

    from sqlalchemy import select

    stmt = select([User]).where(User.id == 5)

Above, the mapped ``User`` class will expand into the :class:`.Table` to which ``User`` is mapped.

:ticket:`2245`

Query.update() supports UPDATE..FROM
-------------------------------------

The new UPDATE..FROM mechanics work in query.update().
Below, we emit an UPDATE against ``SomeEntity``, adding a FROM clause (or equivalent, depending on backend) against ``SomeOtherEntity``::

    query(SomeEntity).\
        filter(SomeEntity.id==SomeOtherEntity.id).\
        filter(SomeOtherEntity.foo=='bar').\
        update({"data":"x"})

In particular, updates to joined-inheritance entities are supported, provided the target of the UPDATE is local to the table being filtered on, or if the parent and child tables are mixed, they are joined explicitly in the query.  Below, given ``Engineer`` as a joined subclass of ``Person``:

::

    query(Engineer).\
        filter(Person.id==Engineer.id).\
        filter(Person.name=='dilbert').\
        update({"engineer_data":"java"})

would produce:

::

    UPDATE engineer SET engineer_data='java' FROM person
    WHERE person.id=engineer.id AND person.name='dilbert'

:ticket:`2365`

rollback() will only roll back "dirty" objects from a begin_nested()
--------------------------------------------------------------------

A behavioral change that should improve efficiency for those users using SAVEPOINT via ``Session.begin_nested()`` - upon ``rollback()``, only those objects that were made dirty since the last flush will be expired, the rest of the ``Session`` remains intact.  This is because a ROLLBACK to a SAVEPOINT does not terminate the containing transaction's isolation, so no expiry is needed except for those changes that were not flushed in the current transaction.

:ticket:`2452`

Caching Example now uses dogpile.cache
---------------------------------------

The caching example now uses `dogpile.cache <http://dogpilecache.readthedocs.org/>`_.  Dogpile.cache is a rewrite of the caching portion of Beaker, featuring vastly simpler and faster operation, as well as support for distributed locking.

Note that the SQLAlchemy APIs used by the Dogpile example as well as the previous Beaker example have changed slightly, in particular this change is needed as illustrated in the Beaker example::

    --- examples/beaker_caching/caching_query.py
    +++ examples/beaker_caching/caching_query.py
    @@ -222,7 +222,8 @@

            """
            if query._current_path:
    -            mapper, key = query._current_path[-2:]
    +            mapper, prop = query._current_path[-2:]
    +            key = prop.key

                for cls in mapper.class_.__mro__:
                    if (cls, key) in self._relationship_options:

.. seealso::

    :mod:`dogpile_caching`

:ticket:`2589`

New Core Features
==================

Fully extensible, type-level operator support in Core
-----------------------------------------------------

The Core has to date never had any system of adding support for new SQL operators to Column and other expression constructs, other than the :meth:`.ColumnOperators.op` method which is "just enough" to make things work.  There has also never been any system in place for Core which allows the behavior of existing operators to be overridden.

Up until now, the only way operators could be flexibly redefined was in the ORM layer, using :func:`.column_property` given a ``comparator_factory`` argument.  Third party libraries like GeoAlchemy therefore were forced to be ORM-centric and rely upon an array of hacks to apply new operations as well as to get them to propagate correctly.

The new operator system in Core adds the one hook that's been missing all along, which is to associate new and overridden operators with *types*.  Since after all, it's not really a column, CAST operator, or SQL function that really drives what kinds of operations are present, it's the *type* of the expression.
The implementation details are minimal - only a few extra methods are added to the core :class:`.ColumnElement` type so that it consults its :class:`.TypeEngine` object for an optional set of operators. New or revised operations can be associated with any type, either via subclassing of an existing type, by using :class:`.TypeDecorator`, or "globally across-the-board" by attaching a new :class:`.TypeEngine.Comparator` object to an existing type class. For example, to add logarithm support to :class:`.Numeric` types: :: from sqlalchemy.types import Numeric from sqlalchemy.sql import func class CustomNumeric(Numeric): class comparator_factory(Numeric.Comparator): def log(self, other): return func.log(self.expr, other) The new type is usable like any other type: :: data = Table('data', metadata, Column('id', Integer, primary_key=True), Column('x', CustomNumeric(10, 5)), Column('y', CustomNumeric(10, 5)) ) stmt = select([data.c.x.log(data.c.y)]).where(data.c.x.log(2) < value) print conn.execute(stmt).fetchall() New features which have come from this immediately include support for Postgresql's HSTORE type, as well as new operations associated with Postgresql's ARRAY type. It also paves the way for existing types to acquire lots more operators that are specific to those types, such as more string, integer and date operators. .. seealso:: :ref:`types_operators` :class:`.HSTORE` :ticket:`2547` .. _feature_2623: Multiple-VALUES support for Insert ----------------------------------- The :meth:`.Insert.values` method now supports a list of dictionaries, which will render a multi-VALUES statement such as ``VALUES (...), (...), ...``. This is only relevant to backends which support this syntax, including Postgresql, SQLite, and MySQL. It is not the same thing as the usual ``executemany()`` style of INSERT which remains unchanged:: users.insert().values([ {"name": "some name"}, {"name": "some other name"}, {"name": "yet another name"}, ]) .. seealso:: :meth:`.Insert.values` :ticket:`2623` Type Expressions ----------------- SQL expressions can now be associated with types. Historically, :class:`.TypeEngine` has always allowed Python-side functions which receive both bound parameters as well as result row values, passing them through a Python-side conversion function on the way to/back from the database. The new feature allows similar functionality, except on the database side:: from sqlalchemy.types import String from sqlalchemy import func, Table, Column, MetaData class LowerString(String): def bind_expression(self, bindvalue): return func.lower(bindvalue) def column_expression(self, col): return func.lower(col) metadata = MetaData() test_table = Table( 'test_table', metadata, Column('data', LowerString) ) Above, the ``LowerString`` type defines a SQL expression that will be emitted whenever the ``test_table.c.data`` column is rendered in the columns clause of a SELECT statement:: >>> print select([test_table]).where(test_table.c.data == 'HI') SELECT lower(test_table.data) AS data FROM test_table WHERE test_table.data = lower(:data_1) This feature is also used heavily by the new release of GeoAlchemy, to embed PostGIS expressions inline in SQL based on type rules. .. seealso:: :ref:`types_sql_value_processing` :ticket:`1534` Core Inspection System ----------------------- The :func:`.inspect` function introduced in :ref:`feature_orminspection_08` also applies to the core.
Applied to an :class:`.Engine`, it produces an :class:`.Inspector` object:: from sqlalchemy import inspect from sqlalchemy import create_engine engine = create_engine("postgresql://scott:tiger@localhost/test") insp = inspect(engine) print insp.get_table_names() It can also be applied to any :class:`.ClauseElement`, which returns the :class:`.ClauseElement` itself, such as :class:`.Table`, :class:`.Column`, :class:`.Select`, etc. This allows it to work fluently between Core and ORM constructs. New Method :meth:`.Select.correlate_except` ------------------------------------------- :func:`.select` now has a method :meth:`.Select.correlate_except` which specifies "correlate on all FROM clauses except those specified". It can be used for mapping scenarios where a related subquery should correlate normally, except against a particular target selectable:: class SnortEvent(Base): __tablename__ = "event" id = Column(Integer, primary_key=True) signature = Column(Integer, ForeignKey("signature.id")) signatures = relationship("Signature", lazy=False) class Signature(Base): __tablename__ = "signature" id = Column(Integer, primary_key=True) sig_count = column_property( select([func.count('*')]).\ where(SnortEvent.signature == id). correlate_except(SnortEvent) ) .. seealso:: :meth:`.Select.correlate_except` Postgresql HSTORE type ---------------------- Support for Postgresql's ``HSTORE`` type is now available as :class:`.postgresql.HSTORE`. This type makes great use of the new operator system to provide a full range of operators for HSTORE types, including index access, concatenation, and containment methods such as :meth:`~.HSTORE.comparator_factory.has_key`, :meth:`~.HSTORE.comparator_factory.has_any`, and :meth:`~.HSTORE.comparator_factory.matrix`:: from sqlalchemy.dialects.postgresql import HSTORE data = Table('data_table', metadata, Column('id', Integer, primary_key=True), Column('hstore_data', HSTORE) ) engine.execute( select([data.c.hstore_data['some_key']]) ).scalar() engine.execute( select([data.c.hstore_data.matrix()]) ).scalar() .. seealso:: :class:`.postgresql.HSTORE` :class:`.postgresql.hstore` :ticket:`2606` Enhanced Postgresql ARRAY type ------------------------------ The :class:`.postgresql.ARRAY` type will accept an optional "dimensions" argument, pinning it to a fixed number of dimensions and greatly improving efficiency when retrieving results: :: # old way, still works since PG supports N-dimensions per row: Column("my_array", postgresql.ARRAY(Integer)) # new way, will render ARRAY with correct number of [] in DDL, # will process binds and results more efficiently as we don't need # to guess how many levels deep to go Column("my_array", postgresql.ARRAY(Integer, dimensions=2)) The type also introduces new operators, using the new type-specific operator framework. New operations include indexed access:: result = conn.execute( select([mytable.c.arraycol[2]]) ) slice access in SELECT:: result = conn.execute( select([mytable.c.arraycol[2:4]]) ) slice updates in UPDATE:: conn.execute( mytable.update().values({mytable.c.arraycol[2:3]: [7, 8]}) ) freestanding array literals:: >>> from sqlalchemy.dialects import postgresql >>> conn.scalar( ... select([ ... postgresql.array([1, 2]) + postgresql.array([3, 4, 5]) ... ]) ... ) [1, 2, 3, 4, 5] array concatenation, where below, the right side ``[4, 5, 6]`` is coerced into an array literal:: select([mytable.c.arraycol + [4, 5, 6]]) ..
seealso:: :class:`.postgresql.ARRAY` :class:`.postgresql.array` :ticket:`2441` New, configurable DATE, TIME types for SQLite --------------------------------------------- SQLite has no built-in DATE, TIME, or DATETIME types, and instead provides some support for storage of date and time values either as strings or integers. The date and time types for SQLite are enhanced in 0.8 to be much more configurable as to the specific format, including that the "microseconds" portion is optional, as well as pretty much everything else. :: Column('sometimestamp', sqlite.DATETIME(truncate_microseconds=True)) Column('sometimestamp', sqlite.DATETIME( storage_format=( "%(year)04d%(month)02d%(day)02d" "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d" ), regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})" ) ) Column('somedate', sqlite.DATE( storage_format="%(month)02d/%(day)02d/%(year)04d", regexp="(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)", ) ) Huge thanks to Nate Dub for sprinting on this at Pycon 2012. .. seealso:: :class:`.sqlite.DATETIME` :class:`.sqlite.DATE` :class:`.sqlite.TIME` :ticket:`2363` "COLLATE" supported across all dialects; in particular MySQL, Postgresql, SQLite -------------------------------------------------------------------------------- The "collate" keyword, long accepted by the MySQL dialect, is now established on all :class:`.String` types and will render on any backend, including when features such as :meth:`.MetaData.create_all` and :func:`.cast` are used:: >>> stmt = select([cast(sometable.c.somechar, String(20, collation='utf8'))]) >>> print stmt SELECT CAST(sometable.somechar AS VARCHAR(20) COLLATE "utf8") AS anon_1 FROM sometable .. seealso:: :class:`.String` :ticket:`2276` "Prefixes" now supported for :func:`.update`, :func:`.delete` ------------------------------------------------------------- Geared towards MySQL, a "prefix" can be rendered within any of these constructs. E.g.:: stmt = table.delete().prefix_with("LOW_PRIORITY", dialect="mysql") stmt = table.update().prefix_with("LOW_PRIORITY", dialect="mysql") The method is new in addition to those which already existed on :func:`.insert`, :func:`.select` and :class:`.Query`. .. seealso:: :meth:`.Update.prefix_with` :meth:`.Delete.prefix_with` :meth:`.Insert.prefix_with` :meth:`.Select.prefix_with` :meth:`.Query.prefix_with` :ticket:`2431` Behavioral Changes ================== .. _legacy_is_orphan_addition: The consideration of a "pending" object as an "orphan" has been made more aggressive ------------------------------------------------------------------------------------ This is a late addition to the 0.8 series; however, it is hoped that the new behavior is generally more consistent and intuitive in a wider variety of situations. The ORM has since at least version 0.4 included behavior such that an object that's "pending", meaning that it's associated with a :class:`.Session` but hasn't been inserted into the database yet, is automatically expunged from the :class:`.Session` when it becomes an "orphan", which means it has been de-associated from a parent object that refers to it with ``delete-orphan`` cascade on the configured :func:`.relationship`. This behavior is intended to approximately mirror the behavior of a persistent (that is, already inserted) object, where the ORM will emit a DELETE for such objects that become orphans based on the interception of detachment events.
The behavioral change comes into play for objects that are referred to by multiple kinds of parents that each specify ``delete-orphan``; the typical example is an :ref:`association object <association_pattern>` that bridges two other kinds of objects in a many-to-many pattern. Previously, the behavior was such that the pending object would be expunged only when de-associated from *all* of its parents. With the behavioral change, the pending object is expunged as soon as it is de-associated from *any* of the parents that it was previously associated with. This behavior is intended to more closely match that of persistent objects, which are deleted as soon as they are de-associated from any parent. The rationale for the older behavior dates back at least to version 0.4, and was basically a defensive decision to try to alleviate confusion when an object was still being constructed for INSERT. But the reality is that the object is re-associated with the :class:`.Session` as soon as it is attached to any new parent in any case. It's still possible to flush an object that is not associated with all of its required parents: either the object was never associated with those parents in the first place, or it was expunged but then re-associated with a :class:`.Session` via a subsequent attachment event, while still not fully associated. In this situation, it is expected that the database would emit an integrity error, as there are likely NOT NULL foreign key columns that are unpopulated. The ORM makes the decision to let these INSERT attempts occur, based on the judgment that an object that is only partially associated with its required parents but has been actively associated with some of them, is more often than not a user error, rather than an intentional omission which should be silently skipped - silently skipping the INSERT here would make user errors of this nature very hard to debug. The old behavior, for applications that might have been relying upon it, can be re-enabled for any :class:`.Mapper` by specifying the flag ``legacy_is_orphan`` as a mapper option. The new behavior allows the following test case to work:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) class UserKeyword(Base): __tablename__ = 'user_keyword' user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) user = relationship(User, backref=backref("user_keywords", cascade="all, delete-orphan") ) keyword = relationship("Keyword", backref=backref("user_keywords", cascade="all, delete-orphan") ) # uncomment this to enable the old behavior # __mapper_args__ = {"legacy_is_orphan": True} class Keyword(Base): __tablename__ = 'keyword' id = Column(Integer, primary_key=True) keyword = Column('keyword', String(64)) from sqlalchemy import create_engine from sqlalchemy.orm import Session # note we're using Postgresql to ensure that referential integrity # is enforced, for demonstration purposes.
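# (an aside not in the original example: any other backend that
# enforces the foreign key constraints would demonstrate the same
# error; SQLite, for instance, requires "PRAGMA foreign_keys=ON"
# to be issued per connection before it will enforce them)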
e = create_engine("postgresql://scott:tiger@localhost/test", echo=True) Base.metadata.drop_all(e) Base.metadata.create_all(e) session = Session(e) u1 = User(name="u1") k1 = Keyword(keyword="k1") session.add_all([u1, k1]) uk1 = UserKeyword(keyword=k1, user=u1) # previously, if session.flush() were called here, # this operation would succeed, but if session.flush() # were not called here, the operation would fail with an # integrity error. # session.flush() del u1.user_keywords[0] session.commit() :ticket:`2655` The after_attach event fires after the item is associated with the Session instead of before; before_attach added ----------------------------------------------------------------------------------------------------------------- Event handlers which use after_attach can now assume the given instance is associated with the given session: :: @event.listens_for(Session, "after_attach") def after_attach(session, instance): assert instance in session Some use cases require that it work this way. However, other use cases require that the item is *not* yet part of the session, such as when a query, intended to load some state required for an instance, emits autoflush first and would otherwise prematurely flush the target object. Those use cases should use the new "before_attach" event: :: @event.listens_for(Session, "before_attach") def before_attach(session, instance): instance.some_necessary_attribute = session.query(Widget).\ filter_by(name=instance.widget_name).\ first() :ticket:`2464` Query now auto-correlates like a select() does ---------------------------------------------- Previously it was necessary to call :meth:`.Query.correlate` in order to have a column- or WHERE-subquery correlate to the parent: :: subq = session.query(Entity.value).\ filter(Entity.id==Parent.entity_id).\ correlate(Parent).\ as_scalar() session.query(Parent).filter(subq=="some value") This was the opposite behavior of a plain ``select()`` construct which would assume auto-correlation by default. The above statement in 0.8 will correlate automatically: :: subq = session.query(Entity.value).\ filter(Entity.id==Parent.entity_id).\ as_scalar() session.query(Parent).filter(subq=="some value") As in ``select()``, correlation can be disabled by calling ``query.correlate(None)``, or set manually by passing an entity, ``query.correlate(someentity)``. :ticket:`2179` .. _correlation_context_specific: Correlation is now always context-specific ------------------------------------------ To allow a wider variety of correlation scenarios, the behavior of :meth:`.Select.correlate` and :meth:`.Query.correlate` has changed slightly such that the SELECT statement will omit the "correlated" target from the FROM clause only if the statement is actually used in that context. Additionally, it's no longer possible for a SELECT statement that's placed as a FROM in an enclosing SELECT statement to "correlate" (i.e. omit) a FROM clause. This change only makes things better as far as rendering SQL, in that it's no longer possible to render illegal SQL where there are insufficient FROM objects relative to what's being selected:: from sqlalchemy.sql import table, column, select t1 = table('t1', column('x')) t2 = table('t2', column('y')) s = select([t1, t2]).correlate(t1) print(s) Prior to this change, the above would return:: SELECT t1.x, t2.y FROM t2 which is invalid SQL as "t1" is not referred to in any FROM clause.
Now, in the absence of an enclosing SELECT, it returns:: SELECT t1.x, t2.y FROM t1, t2 Within a SELECT, the correlation takes effect as expected:: s2 = select([t1, t2]).where(t1.c.x == t2.c.y).where(t1.c.x == s) print(s2) SELECT t1.x, t2.y FROM t1, t2 WHERE t1.x = t2.y AND t1.x = (SELECT t1.x, t2.y FROM t2) This change is not expected to impact any existing applications, as the correlation behavior remains identical for properly constructed expressions. Only an application that relies, most likely within a testing scenario, on the invalid string output of a correlated SELECT used in a non-correlating context would see any change. :ticket:`2668` .. _metadata_create_drop_tables: create_all() and drop_all() will now honor an empty list as such ---------------------------------------------------------------- The methods :meth:`.MetaData.create_all` and :meth:`.MetaData.drop_all` will now accept a list of :class:`.Table` objects that is empty, and will not emit any CREATE or DROP statements. Previously, an empty list was interpreted the same as passing ``None`` for a collection, and CREATE/DROP would be emitted for all items unconditionally. This is a bug fix but some applications may have been relying upon the previous behavior. :ticket:`2664` Repaired the Event Targeting of :class:`.InstrumentationEvents` ---------------------------------------------------------------- The :class:`.InstrumentationEvents` series of event targets have documented that the events will only be fired off according to the actual class passed as a target. Through 0.7, this wasn't the case, and any event listener applied to :class:`.InstrumentationEvents` would be invoked for all mapped classes. In 0.8, additional logic has been added so that the events will only invoke for those classes sent in. The ``propagate`` flag here is set to ``True`` by default as class instrumentation events are typically used to intercept classes that aren't yet created. :ticket:`2590` No more magic coercion of "=" to IN when comparing to subquery in MS-SQL ------------------------------------------------------------------------ We found a very old behavior in the MSSQL dialect which would attempt to rescue users from themselves when doing something like this: :: scalar_subq = select([someothertable.c.id]).where(someothertable.c.data=='foo') select([sometable]).where(sometable.c.id==scalar_subq) SQL Server doesn't allow an equality comparison to a scalar SELECT, that is, "x = (SELECT something)". The MSSQL dialect would convert this to an IN. The same thing would happen, however, upon a comparison like "(SELECT something) = x", and overall this level of guessing is outside of SQLAlchemy's usual scope, so the behavior is removed. :ticket:`2277` Fixed the behavior of :meth:`.Session.is_modified` -------------------------------------------------- The :meth:`.Session.is_modified` method accepts an argument ``passive`` which basically should not be necessary; the argument in all cases should be the value ``True``. When left at its default of ``False``, it would have the effect of hitting the database, and often triggering autoflush, which would itself change the results. In 0.8 the ``passive`` argument will have no effect, and unloaded attributes will never be checked for history since by definition there can be no pending state change on an unloaded attribute. ..
seealso:: :meth:`.Session.is_modified` :ticket:`2320` :attr:`.Column.key` is honored in the :attr:`.Select.c` attribute of :func:`.select` with :meth:`.Select.apply_labels` ----------------------------------------------------------------------------------------------------------------------- Users of the expression system know that :meth:`.Select.apply_labels` prepends the table name to each column name, affecting the names that are available from :attr:`.Select.c`: :: s = select([table1]).apply_labels() s.c.table1_col1 s.c.table1_col2 Before 0.8, if the :class:`.Column` had a different :attr:`.Column.key`, this key would be ignored, inconsistently versus when :meth:`.Select.apply_labels` was not used: :: # before 0.8 table1 = Table('t1', metadata, Column('col1', Integer, key='column_one') ) s = select([table1]) s.c.column_one # would be accessible like this s.c.col1 # would raise AttributeError s = select([table1]).apply_labels() s.c.table1_column_one # would raise AttributeError s.c.table1_col1 # would be accessible like this In 0.8, :attr:`.Column.key` is honored in both cases: :: # with 0.8 table1 = Table('t1', metadata, Column('col1', Integer, key='column_one') ) s = select([table1]) s.c.column_one # works s.c.col1 # AttributeError s = select([table1]).apply_labels() s.c.table1_column_one # works s.c.table1_col1 # AttributeError All other behavior regarding "name" and "key" is the same, including that the rendered SQL will still use the form ``<tablename>_<columnname>`` - the emphasis here was on preventing the :attr:`.Column.key` contents from being rendered into the ``SELECT`` statement so that there are no issues with special/non-ascii characters used in the :attr:`.Column.key`. :ticket:`2397` single_parent warning is now an error ------------------------------------- A :func:`.relationship` that is many-to-one or many-to-many and specifies "cascade='all, delete-orphan'", which is an awkward but nonetheless supported use case (with restrictions), will now raise an error if the relationship does not specify the ``single_parent=True`` option. Previously it would only emit a warning, but a failure would follow almost immediately within the attribute system in any case. :ticket:`2405` Adding the ``inspector`` argument to the ``column_reflect`` event ----------------------------------------------------------------- 0.7 added a new event called ``column_reflect``, provided so that the reflection of columns could be augmented as each one is reflected. We got this event slightly wrong in that the event gave no way to get at the current ``Inspector`` and ``Connection`` being used for the reflection, in the case that additional information from the database is needed. As this is a new event not widely used yet, we'll be adding the ``inspector`` argument into it directly: :: @event.listens_for(Table, "column_reflect") def listen_for_col(inspector, table, column_info): # ... :ticket:`2418` Disabling auto-detect of collations, casing for MySQL ----------------------------------------------------- The MySQL dialect does two calls, one very expensive, to load all possible collations from the database as well as information on casing, the first time an ``Engine`` connects. Neither of these collections is used for any SQLAlchemy functions, so these calls will be changed to no longer be emitted automatically. Applications that might have relied on these collections being present on ``engine.dialect`` will need to call upon ``_detect_collations()`` and ``_detect_casing()`` directly.
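For applications that do want these collections loaded, a minimal sketch of calling the methods directly might look like this (note these are private, underscore-prefixed APIs; the URL below is a placeholder, and we're assuming here that both methods accept a ``Connection`` and return the detected data):

::

    from sqlalchemy import create_engine

    # placeholder URL; substitute your own MySQL database
    engine = create_engine("mysql://scott:tiger@localhost/test")

    conn = engine.connect()
    try:
        # run the detection steps that are no longer emitted
        # automatically upon first connect
        collations = engine.dialect._detect_collations(conn)
        casing = engine.dialect._detect_casing(conn)
    finally:
        conn.close()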
:ticket:`2404` "Unconsumed column names" warning becomes an exception ------------------------------------------------------ Referring to a non-existent column in an ``insert()`` or ``update()`` construct will raise an error instead of a warning: :: t1 = table('t1', column('x')) t1.insert().values(x=5, z=5) # raises "Unconsumed column names: z" :ticket:`2415` Inspector.get_primary_keys() is deprecated, use Inspector.get_pk_constraint --------------------------------------------------------------------------- These two methods on ``Inspector`` were redundant, where ``get_primary_keys()`` would return the same information as ``get_pk_constraint()`` minus the name of the constraint: :: >>> insp.get_primary_keys() ["a", "b"] >>> insp.get_pk_constraint() {"name":"pk_constraint", "constrained_columns":["a", "b"]} :ticket:`2422` Case-insensitive result row names will be disabled in most cases ---------------------------------------------------------------- A very old behavior, the column names in ``RowProxy`` were always compared case-insensitively: :: >>> row = result.fetchone() >>> row['foo'] == row['FOO'] == row['Foo'] True This was for the benefit of a few dialects which in the early days needed this, like Oracle and Firebird, but in modern usage we have more accurate ways of dealing with the case-insensitive behavior of these two platforms. Going forward, this behavior will be available only optionally, by passing the flag ``case_sensitive=False`` to ``create_engine()``, but otherwise column names requested from the row must match with respect to casing. :ticket:`2423` ``InstrumentationManager`` and alternate class instrumentation is now an extension ---------------------------------------------------------------------------------- The ``sqlalchemy.orm.interfaces.InstrumentationManager`` class is moved to ``sqlalchemy.ext.instrumentation.InstrumentationManager``. The "alternate instrumentation" system was built for the benefit of a very small number of installations that needed to work with existing or unusual class instrumentation systems, and generally is very seldom used. The complexity of this system has been exported to an ``ext.`` module. It remains unused until imported, typically when a third-party library imports ``InstrumentationManager``; at that point it is injected back into ``sqlalchemy.orm`` by replacing the default ``InstrumentationFactory`` with ``ExtendedInstrumentationRegistry``. Removed ======= SQLSoup ------- SQLSoup is a handy package that presents an alternative interface on top of the SQLAlchemy ORM. SQLSoup is now moved into its own project and documented/released separately; see https://bitbucket.org/zzzeek/sqlsoup. SQLSoup is a very simple tool that could also benefit from contributors who are interested in its style of usage. :ticket:`2262` MutableType ----------- The older "mutable" system within the SQLAlchemy ORM has been removed. This refers to the ``MutableType`` interface which was applied to types such as ``PickleType`` and conditionally to ``TypeDecorator``, and since very early SQLAlchemy versions has provided a way for the ORM to detect changes in so-called "mutable" data structures such as JSON structures and pickled objects. However, the implementation was never reasonable and forced a very inefficient mode of usage on the unit of work, which caused an expensive scan of all objects to take place during flush.
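For comparison with the approach described next, here's a minimal sketch of change tracking using the ``sqlalchemy.ext.mutable`` extension that replaces the old system (the ``MyDataRecord`` class and the pairing with ``PickleType`` are illustrative only, not from the original text):

::

    from sqlalchemy import Column, Integer, PickleType
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.ext.mutable import MutableDict

    Base = declarative_base()

    class MyDataRecord(Base):
        __tablename__ = 'data_record'

        id = Column(Integer, primary_key=True)

        # MutableDict intercepts dict mutation events and flags the
        # parent object as dirty at the moment of change, rather than
        # the removed MutableType approach of scanning every object
        # at flush time
        data = Column(MutableDict.as_mutable(PickleType))

With this in place, an in-place operation such as ``record.data['key'] = 'value'`` marks ``record`` as changed in the unit of work.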
In 0.7, the ``sqlalchemy.ext.mutable`` extension was introduced so that user-defined datatypes can appropriately send events to the unit of work as changes occur. Today, usage of ``MutableType`` is expected to be low, as warnings have been in place for some years now regarding its inefficiency. :ticket:`2442` sqlalchemy.exceptions (has been sqlalchemy.exc for years) --------------------------------------------------------- We had left in an alias ``sqlalchemy.exceptions`` to attempt to make it slightly easier for some very old libraries that hadn't yet been upgraded to use ``sqlalchemy.exc``. Some users are still being confused by it, however, so in 0.8 we're taking it out entirely to eliminate any of that confusion. :ticket:`2433` SQLAlchemy-1.0.11/doc/build/changelog/migration_05.rst0000664000175000017500000006332112636375552023467 0ustar classicclassic00000000000000============================= What's new in SQLAlchemy 0.5? ============================= .. admonition:: About this Document This document describes changes between SQLAlchemy version 0.4, last released October 12, 2008, and SQLAlchemy version 0.5, last released January 16, 2010. Document date: August 4, 2009 This guide documents API changes which affect users migrating their applications from the 0.4 series of SQLAlchemy to 0.5. It's also recommended for those working from *Essential SQLAlchemy*, which only covers 0.4 and seems to even have some old 0.3isms in it. Note that SQLAlchemy 0.5 removes many behaviors which were deprecated throughout the span of the 0.4 series, and also deprecates more behaviors specific to 0.4. Major Documentation Changes =========================== Some sections of the documentation have been completely rewritten and can serve as an introduction to new ORM features. The ``Query`` and ``Session`` objects in particular have some distinct differences in API and behavior which fundamentally change many of the basic ways things are done, particularly with regards to constructing highly customized ORM queries and dealing with stale session state, commits and rollbacks. * ORM Tutorial * Session Documentation Deprecations Source =================== Another source of information is documented within a series of unit tests illustrating up-to-date usages of some common ``Query`` patterns; this file can be viewed at [source:sqlalchemy/trunk/test/orm/test_deprecations.py]. Requirements Changes ==================== * Python 2.4 or higher is required. The SQLAlchemy 0.4 line is the last version with Python 2.3 support. Object Relational Mapping ========================= * **Column level expressions within Query.** - as detailed in the tutorial, ``Query`` has the capability to create specific SELECT statements, not just those against full rows: :: session.query(User.name, func.count(Address.id).label("numaddresses")).join(Address).group_by(User.name) The tuples returned by any multi-column/entity query are *named* tuples: :: for row in session.query(User.name, func.count(Address.id).label('numaddresses')).join(Address).group_by(User.name): print "name", row.name, "number", row.numaddresses ``Query`` has a ``statement`` accessor, as well as a ``subquery()`` method, which allow ``Query`` to be used to create more complex combinations: :: subq = session.query(Keyword.id.label('keyword_id')).filter(Keyword.name.in_(['beans', 'carrots'])).subquery() recipes = session.query(Recipe).filter(exists(). where(Recipe.id==recipe_keywords.c.recipe_id).
where(recipe_keywords.c.keyword_id==subq.c.keyword_id) ) * **Explicit ORM aliases are recommended for aliased joins** - The ``aliased()`` function produces an "alias" of a class, which allows fine-grained control of aliases in conjunction with ORM queries. While a table-level alias (i.e. ``table.alias()``) is still usable, an ORM-level alias retains the semantics of the ORM mapped object which is significant for inheritance mappings, options, and other scenarios. E.g.: :: Friend = aliased(Person) session.query(Person, Friend).join((Friend, Person.friends)).all() * **query.join() greatly enhanced.** - You can now specify the target and ON clause for a join in multiple ways. A target class alone can be provided where SQLA will attempt to form a join to it via foreign key in the same way as ``table.join(someothertable)``. A target and an explicit ON condition can be provided, where the ON condition can be a ``relation()`` name, an actual class descriptor, or a SQL expression. Or the old way of just a ``relation()`` name or class descriptor works too. See the ORM tutorial which has several examples. * **Declarative is recommended for applications which don't require (and don't prefer) abstraction between tables and mappers** - The [/docs/05/reference/ext/declarative.html Declarative] module, which is used to combine the expression of ``Table``, ``mapper()``, and user defined class objects together, is highly recommended as it simplifies application configuration, ensures the "one mapper per class" pattern, and allows the full range of configuration available to distinct ``mapper()`` calls. Separate ``mapper()`` and ``Table`` usage is now referred to as "classical SQLAlchemy usage" and of course is freely mixable with declarative. * **The .c. attribute has been removed** from classes (i.e. ``MyClass.c.somecolumn``). As is the case in 0.4, class-level properties are usable as query elements, i.e. ``Class.c.propname`` is now superseded by ``Class.propname``, and the ``c`` attribute continues to remain on ``Table`` objects where it indicates the namespace of ``Column`` objects present on the table. To get at the Table for a mapped class (if you didn't keep it around already): :: table = class_mapper(someclass).mapped_table Iterate through columns: :: for col in table.c: print col Work with a specific column: :: table.c.somecolumn The class-bound descriptors support the full set of Column operators as well as the documented relation-oriented operators like ``has()``, ``any()``, ``contains()``, etc. The reason for the hard removal of ``.c.`` is that in 0.5, class-bound descriptors carry potentially different meaning, as well as information regarding class mappings, versus plain ``Column`` objects - and there are use cases where you'd specifically want to use one or the other. Generally, using class-bound descriptors invokes a set of mapping/polymorphic aware translations, and using table-bound columns does not. In 0.4, these translations were applied across the board to all expressions, but 0.5 differentiates completely between columns and mapped descriptors, only applying translations to the latter. So in many cases, particularly when dealing with joined table inheritance configurations as well as when using ``query()``, ``Class.propname`` and ``table.c.colname`` are not interchangeable.
For example, ``session.query(users.c.id, users.c.name)`` is different versus ``session.query(User.id, User.name)``; in the latter case, the ``Query`` is aware of the mapper in use and further mapper-specific operations like ``query.join()``, ``query.with_parent()`` etc. may be used, but in the former case they cannot. Additionally, in polymorphic inheritance scenarios, the class-bound descriptors refer to the columns present in the polymorphic selectable in use, not necessarily the table column which directly corresponds to the descriptor. For example, a set of classes related by joined-table inheritance to the ``person`` table along the ``person_id`` column of each table will all have their ``Class.person_id`` attribute mapped to the ``person_id`` column in ``person``, and not their subclass table. Version 0.4 would map this behavior onto table-bound ``Column`` objects automatically. In 0.5, this automatic conversion has been removed, so that you in fact *can* use table-bound columns as a means to override the translations which occur with polymorphic querying; this allows ``Query`` to be able to create optimized selects among joined-table or concrete-table inheritance setups, as well as portable subqueries, etc. * **Session Now Synchronizes Automatically with Transactions.** Session now synchronizes against the transaction automatically by default, including autoflush and autoexpire. A transaction is present at all times unless disabled using the ``autocommit`` option. When all three flags are set to their default, the Session recovers gracefully after rollbacks and it's very difficult to get stale data into the session. See the new Session documentation for details. * **Implicit Order By Is Removed**. This will impact ORM users who rely upon SA's "implicit ordering" behavior, which states that all Query objects which don't have an ``order_by()`` will ORDER BY the "id" or "oid" column of the primary mapped table, and all lazy/eagerly loaded collections apply a similar ordering. In 0.5, automatic ordering must be explicitly configured on ``mapper()`` and ``relation()`` objects (if desired), or otherwise when using ``Query``. To convert a 0.4 mapping to 0.5, such that its ordering behavior will be extremely similar to 0.4 or previous, use the ``order_by`` setting on ``mapper()`` and ``relation()``: :: mapper(User, users, properties={ 'addresses':relation(Address, order_by=addresses.c.id) }, order_by=users.c.id) To set ordering on a backref, use the ``backref()`` function: :: 'keywords':relation(Keyword, secondary=item_keywords, order_by=keywords.c.name, backref=backref('items', order_by=items.c.id)) Using declarative? To help with the new ``order_by`` requirement, ``order_by`` and friends can now be set using strings which are evaluated in Python later on (this works **only** with declarative, not plain mappers): :: class MyClass(MyDeclarativeBase): ... 'addresses':relation("Address", order_by="Address.id") It's generally a good idea to set ``order_by`` on ``relation()s`` which load list-based collections of items, since that ordering cannot otherwise be affected. Other than that, the best practice is to use ``Query.order_by()`` to control ordering of the primary entities being loaded. * **Session is now autoflush=True/autoexpire=True/autocommit=False.** - To set it up, just call ``sessionmaker()`` with no arguments. The name ``transactional=True`` is now ``autocommit=False``.
Flushes occur upon each query issued (disable with ``autoflush=False``), within each ``commit()`` (as always), and before each ``begin_nested()`` (so rolling back to the SAVEPOINT is meaningful). All objects are expired after each ``commit()`` and after each ``rollback()``. After rollback, pending objects are expunged, and deleted objects move back to persistent. These defaults work together very nicely and there's really no more need for old techniques like ``clear()`` (which is renamed to ``expunge_all()`` as well). P.S.: sessions are now reusable after a ``rollback()``. Scalar and collection attribute changes, adds and deletes are all rolled back. * **session.add() replaces session.save(), session.update(), session.save_or_update().** - the ``session.add(someitem)`` and ``session.add_all([list of items])`` methods replace ``save()``, ``update()``, and ``save_or_update()``. Those methods will remain deprecated throughout 0.5. * **backref configuration made less verbose.** - The ``backref()`` function now uses the ``primaryjoin`` and ``secondaryjoin`` arguments of the forwards-facing ``relation()`` when they are not explicitly stated. It's no longer necessary to specify ``primaryjoin``/``secondaryjoin`` in both directions separately. * **Simplified polymorphic options.** - The ORM's "polymorphic load" behavior has been simplified. In 0.4, mapper() had an argument called ``polymorphic_fetch`` which could be configured as ``select`` or ``deferred``. This option is removed; the mapper will now just defer any columns which were not present in the SELECT statement. The actual SELECT statement used is controlled by the ``with_polymorphic`` mapper argument (which is also in 0.4 and replaces ``select_table``), as well as the ``with_polymorphic()`` method on ``Query`` (also in 0.4). An improvement to the deferred loading of inheriting classes is that the mapper now produces the "optimized" version of the SELECT statement in all cases; that is, if class B inherits from A, and several attributes only present on class B have been expired, the refresh operation will only include B's table in the SELECT statement and will not JOIN to A. * The ``execute()`` method on ``Session`` converts plain strings into ``text()`` constructs, so that bind parameters may all be specified as ":bindname" without needing to call ``text()`` explicitly. If "raw" SQL is desired here, use ``session.connection().execute("raw text")``. * ``session.Query().iterate_instances()`` has been renamed to just ``instances()``. The old ``instances()`` method returning a list instead of an iterator no longer exists. If you were relying on that behavior, you should use ``list(your_query.instances())``. Extending the ORM ================= In 0.5 we're moving forward with more ways to modify and extend the ORM. Here's a summary: * **MapperExtension.** - This is the classic extension class, which remains. Methods which should rarely be needed are ``create_instance()`` and ``populate_instance()``. To control the initialization of an object when it's loaded from the database, use the ``reconstruct_instance()`` method, or more easily the ``@reconstructor`` decorator described in the documentation. * **SessionExtension.** - This is an easy-to-use extension class for session events. In particular, it provides ``before_flush()``, ``after_flush()`` and ``after_flush_postexec()`` methods.
This usage is recommended over ``MapperExtension.before_XXX`` in many cases, since within ``before_flush()`` you can modify the flush plan of the session freely, something which cannot be done from within ``MapperExtension``. * **AttributeExtension.** - This class is now part of the public API, and allows the interception of userland events on attributes, including attribute set and delete operations, and collection appends and removes. It also allows the value to be set or appended to be modified. The ``@validates`` decorator, described in the documentation, provides a quick way to mark any mapped attributes as being "validated" by a particular class method. * **Attribute Instrumentation Customization.** - An API is provided for ambitious efforts to entirely replace SQLAlchemy's attribute instrumentation, or just to augment it in some cases. This API was produced for the purposes of the Trellis toolkit, but is available as a public API. Some examples are provided in the distribution in the ``/examples/custom_attributes`` directory. Schema/Types ============ * **String with no length no longer generates TEXT; it generates VARCHAR** - The ``String`` type no longer magically converts into a ``Text`` type when specified with no length. This only has an effect when CREATE TABLE is issued, as it will issue ``VARCHAR`` with no length parameter, which is not valid on many (but not all) databases. To create a TEXT (or CLOB, i.e. unbounded string) column, use the ``Text`` type. * **PickleType() with mutable=True requires an __eq__() method** - The ``PickleType`` type needs to compare values when mutable=True. The method of comparing ``pickle.dumps()`` is inefficient and unreliable. If an incoming object does not implement ``__eq__()`` and is also not ``None``, the ``dumps()`` comparison is used but a warning is raised. For types which implement ``__eq__()``, which includes all dictionaries, lists, etc., comparison will use ``==`` and is now reliable by default. * **convert_bind_param() and convert_result_value() methods of TypeEngine/TypeDecorator are removed.** - The O'Reilly book unfortunately documented these methods even though they were deprecated post 0.3. For a user-defined type which subclasses ``TypeEngine``, the ``bind_processor()`` and ``result_processor()`` methods should be used for bind/result processing. Any user defined type, whether extending ``TypeEngine`` or ``TypeDecorator``, which uses the old 0.3 style can be easily adapted to the new style using the following adapter: :: class AdaptOldConvertMethods(object): """A mixin which adapts 0.3-style convert_bind_param and convert_result_value methods """ def bind_processor(self, dialect): def convert(value): return self.convert_bind_param(value, dialect) return convert def result_processor(self, dialect): def convert(value): return self.convert_result_value(value, dialect) return convert def convert_result_value(self, value, dialect): return value def convert_bind_param(self, value, dialect): return value To use the above mixin: :: class MyType(AdaptOldConvertMethods, TypeEngine): # ... * The ``quote`` flag on ``Column`` and ``Table`` as well as the ``quote_schema`` flag on ``Table`` now control quoting both positively and negatively. The default is ``None``, meaning let regular quoting rules take effect. When ``True``, quoting is forced on. When ``False``, quoting is forced off.
* Column ``DEFAULT`` value DDL can now be more conveniently specified with ``Column(..., server_default='val')``, deprecating ``Column(..., PassiveDefault('val'))``. ``default=`` is now exclusively for Python-initiated default values, and can coexist with server_default. A new ``server_default=FetchedValue()`` replaces the ``PassiveDefault('')`` idiom for marking columns as subject to influence from external triggers and has no DDL side effects. * SQLite's ``DateTime``, ``Time`` and ``Date`` types now **only accept datetime objects, not strings** as bind parameter input. If you'd like to create your own "hybrid" type which accepts strings and returns results as date objects (from whatever format you'd like), create a ``TypeDecorator`` that builds on ``String``. If you only want string-based dates, just use ``String``. * Additionally, the ``DateTime`` and ``Time`` types, when used with SQLite, now represent the "microseconds" field of the Python ``datetime.datetime`` object in the same manner as ``str(datetime)`` - as fractional seconds, not a count of microseconds. That is: :: dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125) # 125 usec # old way '2008-06-27 12:00:00.125' # new way '2008-06-27 12:00:00.000125' So if an existing SQLite file-based database intends to be used across 0.4 and 0.5, you either have to upgrade the datetime columns to store the new format (NOTE: please test this; I'm pretty sure it's correct): :: UPDATE mytable SET somedatecol = substr(somedatecol, 0, 19) || '.' || substr((substr(somedatecol, 21, -1) / 1000000), 3, -1); or, enable "legacy" mode as follows: :: from sqlalchemy.databases.sqlite import DateTimeMixin DateTimeMixin.__legacy_microseconds__ = True Connection Pool no longer threadlocal by default ================================================ 0.4 has an unfortunate default setting of "pool_threadlocal=True", leading to surprising behavior when, for example, using multiple Sessions within a single thread. This flag is now off in 0.5. To re-enable 0.4's behavior, specify ``pool_threadlocal=True`` to ``create_engine()``, or alternatively use the "threadlocal" strategy via ``strategy="threadlocal"``. \*args Accepted, \*args No Longer Accepted ========================================== The policy with ``method(\*args)`` vs. ``method([args])`` is: if the method accepts a variable-length set of items which represent a fixed structure, it takes ``\*args``. If the method accepts a variable-length set of items that are data-driven, it takes ``[args]``. * The various Query.options() functions ``eagerload()``, ``eagerload_all()``, ``lazyload()``, ``contains_eager()``, ``defer()``, ``undefer()`` all accept variable-length ``\*keys`` as their argument now, which allows a path to be formulated using descriptors, i.e.: :: query.options(eagerload_all(User.orders, Order.items, Item.keywords)) A single array argument is still accepted for backwards compatibility. * Similarly, the ``Query.join()`` and ``Query.outerjoin()`` methods accept a variable-length \*args, with a single array accepted for backwards compatibility: :: query.join('orders', 'items') query.join(User.orders, Order.items) * the ``in_()`` method on columns and similar only accepts a list argument now. It no longer accepts ``\*args``. Removed ======= * **entity_name** - This feature was always problematic and rarely used. 0.5's more deeply fleshed out use cases revealed further issues with ``entity_name`` which led to its removal.
If different mappings are required for a single class, break the class into separate subclasses and map them separately. An example of this is at [wiki:UsageRecipes/EntityName]. More information regarding rationale is described at http://groups.google.com/group/sqlalchemy/browse_thread/thread/9e23a0641a88b96d?hl=en . * **get()/load() cleanup** The ``load()`` method has been removed. Its functionality was kind of arbitrary and basically copied from Hibernate, where it's also not a particularly meaningful method. To get equivalent functionality: :: x = session.query(SomeClass).populate_existing().get(7) ``Session.get(cls, id)`` and ``Session.load(cls, id)`` have been removed. ``Session.get()`` is redundant vs. ``session.query(cls).get(id)``. ``MapperExtension.get()`` is also removed (as is ``MapperExtension.load()``). To override the functionality of ``Query.get()``, use a subclass: :: class MyQuery(Query): def get(self, ident): # ... session = sessionmaker(query_cls=MyQuery)() ad1 = session.query(Address).get(1) * ``sqlalchemy.orm.relation()`` The following deprecated keyword arguments have been removed: foreignkey, association, private, attributeext, is_backref In particular, ``attributeext`` is replaced with ``extension`` - the ``AttributeExtension`` class is now in the public API. * ``session.Query()`` The following deprecated functions have been removed: list, scalar, count_by, select_whereclause, get_by, select_by, join_by, selectfirst, selectone, select, execute, select_statement, select_text, join_to, join_via, selectfirst_by, selectone_by, apply_max, apply_min, apply_avg, apply_sum Additionally, the ``id`` keyword argument to ``join()``, ``outerjoin()``, ``add_entity()`` and ``add_column()`` has been removed. To target table aliases in ``Query`` to result columns, use the ``aliased`` construct: :: from sqlalchemy.orm import aliased address_alias = aliased(Address) print session.query(User, address_alias).join((address_alias, User.addresses)).all() * ``sqlalchemy.orm.Mapper`` * instances() * get_session() - this method was not very noticeable, but had the effect of associating lazy loads with a particular session even if the parent object was entirely detached, when an extension such as ``scoped_session()`` or the old ``SessionContextExt`` was used. It's possible that some applications which relied upon this behavior will no longer work as expected; but the better programming practice here is to always ensure objects are present within sessions if database access from their attributes is required. * ``mapper(MyClass, mytable)`` Mapped classes are no longer instrumented with a "c" class attribute; e.g. ``MyClass.c`` * ``sqlalchemy.orm.collections`` The _prepare_instrumentation alias for prepare_instrumentation has been removed. * ``sqlalchemy.orm`` Removed the ``EXT_PASS`` alias of ``EXT_CONTINUE``. * ``sqlalchemy.engine`` The alias from ``DefaultDialect.preexecute_sequences`` to ``.preexecute_pk_sequences`` has been removed. The deprecated engine_descriptors() function has been removed. * ``sqlalchemy.ext.activemapper`` Module removed. * ``sqlalchemy.ext.assignmapper`` Module removed. * ``sqlalchemy.ext.associationproxy`` Pass-through of keyword args on the proxy's ``.append(item, \**kw)`` has been removed and is now simply ``.append(item)`` * ``sqlalchemy.ext.selectresults``, ``sqlalchemy.mods.selectresults`` Modules removed. * ``sqlalchemy.ext.declarative`` ``declared_synonym()`` removed. * ``sqlalchemy.ext.sessioncontext`` Module removed.
* ``sqlalchemy.log`` The ``SADeprecationWarning`` alias to ``sqlalchemy.exc.SADeprecationWarning`` has been removed. * ``sqlalchemy.exc`` ``exc.AssertionError`` has been removed and usage replaced by the Python built-in of the same name. * ``sqlalchemy.databases.mysql`` The deprecated ``get_version_info`` dialect method has been removed. Renamed or Moved ================ * ``sqlalchemy.exceptions`` is now ``sqlalchemy.exc`` The module may still be imported under the old name until 0.6. * ``FlushError``, ``ConcurrentModificationError``, ``UnmappedColumnError`` -> sqlalchemy.orm.exc These exceptions moved to the orm package. Importing 'sqlalchemy.orm' will install aliases in sqlalchemy.exc for compatibility until 0.6. * ``sqlalchemy.logging`` -> ``sqlalchemy.log`` This internal module was renamed. No longer needs to be special cased when packaging SA with py2app and similar tools that scan imports. * ``session.Query().iterate_instances()`` -> ``session.Query().instances()``. Deprecated ========== * ``Session.save()``, ``Session.update()``, ``Session.save_or_update()`` All three replaced by ``Session.add()`` * ``sqlalchemy.PassiveDefault`` Use ``Column(server_default=...)`` Translates to sqlalchemy.DefaultClause() under the hood. * ``session.Query().iterate_instances()``. It has been renamed to ``instances()``. SQLAlchemy-1.0.11/doc/build/changelog/index.rst0000664000175000017500000000121312636375552022271 0ustar classicclassic00000000000000.. _changelog_toplevel: Changes and Migration ===================== SQLAlchemy changelogs and migration guides are now integrated within the main documentation. Current Migration Guide ------------------------ .. toctree:: :titlesonly: migration_10 Change logs ----------- .. toctree:: :titlesonly: changelog_10 changelog_09 changelog_08 changelog_07 changelog_06 changelog_05 changelog_04 changelog_03 changelog_02 changelog_01 Older Migration Guides ---------------------- .. toctree:: :titlesonly: migration_09 migration_08 migration_07 migration_06 migration_05 migration_04 SQLAlchemy-1.0.11/doc/build/changelog/changelog_08.rst0000664000175000017500000037725512636375552023446 0ustar classicclassic00000000000000 ============== 0.8 Changelog ============== .. changelog_imports:: .. include:: changelog_07.rst :start-line: 5 .. changelog:: :version: 0.8.7 :released: July 22, 2014 .. change:: :tags: bug, mssql :versions: 1.0.0b1, 0.9.7 Added statement encoding to the "SET IDENTITY_INSERT" statements which operate when an explicit INSERT is being interjected into an IDENTITY column, to support non-ascii table identifiers on drivers such as pyodbc + unix + py2k that don't support unicode statements. .. change:: :tags: bug, mssql :versions: 1.0.0b1, 0.9.7 :tickets: 3091 In the SQL Server pyodbc dialect, repaired the implementation for the ``description_encoding`` dialect parameter, which when not explicitly set was preventing cursor.description from being parsed correctly in the case of result sets that contained names in alternate encodings. This parameter shouldn't be needed going forward. .. change:: :tags: bug, sql :versions: 1.0.0b1, 0.9.7 :tickets: 3124 Fixed bug in :class:`.Enum` and other :class:`.SchemaType` subclasses where direct association of the type with a :class:`.MetaData` would lead to a hang when events (like create events) were emitted on the :class:`.MetaData`. .. 
change:: :tags: bug, sql :versions: 1.0.0b1, 0.9.7 :tickets: 3102 Fixed a bug within the custom operator plus :meth:`.TypeEngine.with_variant` system, whereby using a :class:`.TypeDecorator` in conjunction with variant would fail with an MRO error when a comparison operator was used. .. change:: :tags: bug, mysql :versions: 1.0.0b1, 0.9.7 :tickets: 3101 MySQL error 2014 "commands out of sync" appears to be raised as a ProgrammingError, not OperationalError, in modern MySQL-Python versions; all MySQL error codes that are tested for "is disconnect" are now checked within OperationalError and ProgrammingError regardless. .. change:: :tags: bug, mysql :versions: 1.0.0b1, 0.9.5 :tickets: 3085 Fixed bug where column names added to ``mysql_length`` parameter on an index needed to have the same quoting for quoted names in order to be recognized. The fix makes the quotes optional but also provides the old behavior for backwards compatibility with those using the workaround. .. change:: :tags: bug, declarative :versions: 1.0.0b1, 0.9.5 :tickets: 3062 The ``__mapper_args__`` dictionary is copied from a declarative mixin or abstract class when accessed, so that modifications made to this dictionary by declarative itself won't conflict with that of other mappings. The dictionary is modified regarding the ``version_id_col`` and ``polymorphic_on`` arguments, replacing the column within with the one that is officially mapped to the local class/table. .. change:: :tags: bug, sql :versions: 0.9.5, 1.0.0b1 :tickets: 3044 Fixed bug in INSERT..FROM SELECT construct where selecting from a UNION would wrap the union in an anonymous (e.g. unlabeled) subquery. .. change:: :tags: bug, postgresql :versions: 0.9.5, 1.0.0b1 :tickets: 3053 Added the ``hashable=False`` flag to the PG :class:`.HSTORE` type, which is needed to allow the ORM to skip over trying to "hash" an ORM-mapped HSTORE column when requesting it in a mixed column/entity list. Patch courtesy Gunnlaugur Þór Briem. .. change:: :tags: bug, orm :versions: 0.9.5, 1.0.0b1 :tickets: 3055 Fixed bug in subquery eager loading where a long chain of eager loads across a polymorphic-subclass boundary in conjunction with polymorphic loading would fail to locate the subclass-link in the chain, erroring out with a missing property name on an :class:`.AliasedClass`. .. change:: :tags: bug, ext :versions: 0.9.5, 1.0.0b1 :tickets: 3051, 3093 Fixed bug in mutable extension where :class:`.MutableDict` did not report change events for the ``setdefault()`` dictionary operation. .. change:: :tags: bug, ext :versions: 0.9.5, 1.0.0b1 :pullreq: bitbucket:24 :tickets: 3093, 3051 Fixed bug where :meth:`.MutableDict.setdefault` didn't return the existing or new value (this bug was not released in any 0.8 version). Pull request courtesy Thomas Hervé. .. change:: :tags: bug, mysql :versions: 0.9.5, 1.0.0b1 :pullreq: bitbucket:15 Added support for reflecting tables where an index includes KEY_BLOCK_SIZE using an equal sign. Pull request courtesy Sean McGivern. .. change:: :tags: bug, orm :tickets: 3047 :versions: 0.9.5, 1.0.0b1 Fixed ORM bug where the :func:`.class_mapper` function would mask AttributeErrors or KeyErrors that should raise during mapper configuration due to user errors. The catch for attribute/keyerror has been made more specific to not include the configuration step. ..
change:: :tags: bug, sql :tickets: 3045 :versions: 0.9.5, 1.0.0b1 Fixed bug where :meth:`.Table.update` and :meth:`.Table.delete` would produce an empty WHERE clause when an empty :func:`.and_()` or :func:`.or_()` or other blank expression was applied. This is now consistent with that of :func:`.select`. .. change:: :tags: bug, postgresql :pullreq: bitbucket:13 :versions: 0.9.5, 1.0.0b1 Added a new "disconnect" message "connection has been closed unexpectedly". This appears to be related to newer versions of SSL. Pull request courtesy Antti Haapala. .. changelog:: :version: 0.8.6 :released: March 28, 2014 .. change:: :tags: bug, orm :tickets: 3006 :versions: 0.9.4 Fixed ORM bug where changing the primary key of an object, then marking it for DELETE would fail to target the correct row for DELETE. .. change:: :tags: feature, postgresql :versions: 0.9.4 Enabled "sane multi-row count" checking for the psycopg2 DBAPI, as this seems to be supported as of psycopg2 2.0.9. .. change:: :tags: bug, postgresql :tickets: 3000 :versions: 0.9.4 Fixed regression caused by release 0.8.5 / 0.9.3's compatibility enhancements where index reflection on Postgresql versions specific to only the 8.1, 8.2 series again broke, surrounding the ever problematic int2vector type. While int2vector supports array operations as of 8.1, apparently it only supports CAST to a varchar as of 8.3. .. change:: :tags: bug, orm :tickets: 2995 :versions: 0.9.4 Fixed regression from 0.8.3 as a result of :ticket:`2818` where :meth:`.Query.exists` wouldn't work on a query that only had a :meth:`.Query.select_from` entry but no other entities. .. change:: :tags: bug, general :tickets: 2986 :versions: 0.9.4 Adjusted ``setup.py`` file to support the possible future removal of the ``setuptools.Feature`` extension from setuptools. If this keyword isn't present, the setup will still succeed with setuptools rather than falling back to distutils. C extension building can be disabled now also by setting the DISABLE_SQLALCHEMY_CEXT environment variable. This variable works whether or not setuptools is even available. .. change:: :tags: bug, ext :versions: 0.9.4 :tickets: 2997 Fixed bug in mutable extension as well as :func:`.attributes.flag_modified` where the change event would not be propagated if the attribute had been reassigned to itself. .. change:: :tags: bug, orm :versions: 0.9.4 Improved an error message which would occur if a query() were made against a non-selectable, such as a :func:`.literal_column`, and then an attempt was made to use :meth:`.Query.join` such that the "left" side would be determined as ``None`` and then fail. This condition is now detected explicitly. .. change:: :tags: bug, sql :versions: 0.9.4 :tickets: 2977 Fixed bug in :func:`.tuple_` construct where the "type" of essentially the first SQL expression would be applied as the "comparison type" to a compared tuple value; this has the effect in some cases of an inappropriate "type coercion" occurring, such as when a tuple that has a mix of String and Binary values improperly coerces target values to Binary even though that's not what they are on the left side. :func:`.tuple_` now expects heterogeneous types within its list of values. .. change:: :tags: orm, bug :versions: 0.9.4 :tickets: 2975 Removed stale names from ``sqlalchemy.orm.interfaces.__all__`` and refreshed with current names, so that an ``import *`` from this module again works. .. changelog:: :version: 0.8.5 :released: February 19, 2014 ..
change:: :tags: postgresql, bug :versions: 0.9.3 :tickets: 2936 Added an additional message to psycopg2 disconnect detection, "could not send data to server", which complements the existing "could not receive data from server" and has been observed by users. .. change:: :tags: postgresql, bug :versions: 0.9.3 Support has been improved for Postgresql reflection behavior on very old (pre 8.1) versions of Postgresql, and potentially other PG engines such as Redshift (assuming Redshift reports the version as < 8.1). The query for "indexes" as well as "primary keys" relies upon inspecting a so-called "int2vector" datatype, which refuses to coerce to an array prior to 8.1 causing failures regarding the "ANY()" operator used in the query. Extensive googling has located the very hacky, but recommended-by-PG-core-developer query to use when PG version < 8.1 is in use, so index and primary key constraint reflection now work on these versions. .. change:: :tags: feature, mysql :versions: 0.9.3 :tickets: 2941 Added new MySQL-specific :class:`.mysql.DATETIME` which includes fractional seconds support; also added fractional seconds support to :class:`.mysql.TIMESTAMP`. DBAPI support is limited, though fractional seconds are known to be supported by MySQL Connector/Python. Patch courtesy Geert JM Vanderkelen. .. change:: :tags: bug, mysql :versions: 0.9.3 :tickets: 2966 :pullreq: bitbucket:12 Added support for the ``PARTITION BY`` and ``PARTITIONS`` MySQL table keywords, specified as ``mysql_partition_by='value'`` and ``mysql_partitions='value'`` to :class:`.Table`. Pull request courtesy Marcus McCurdy. .. change:: :tags: bug, sql :versions: 0.9.3 :tickets: 2944 Fixed bug where calling :meth:`.Insert.values` with an empty list or tuple would raise an IndexError. It now produces an empty insert construct as would be the case with an empty dictionary. .. change:: :tags: bug, engine, pool :versions: 0.9.3 :tickets: 2880, 2964 Fixed a critical regression caused by :ticket:`2880` where the newly concurrent ability to return connections from the pool means that the "first_connect" event is now no longer synchronized either, thus leading to dialect mis-configurations under even minimal concurrency situations. .. change:: :tags: bug, sqlite :pullreq: github:72 Restored a change that was missed in the backport of unique constraint reflection to 0.8, where :class:`.UniqueConstraint` with SQLite would fail if reserved keywords were included in the names of columns. Pull request courtesy Roman Podolyaka. .. change:: :tags: bug, postgresql :tickets: 2291 :versions: 0.9.3 Revised this very old issue where the Postgresql "get primary key" reflection query was updated to take into account primary key constraints that were renamed; the newer query fails on very old versions of Postgresql such as version 7, so the old query is restored in those cases when server_version_info < (8, 0) is detected. .. change:: :tags: bug, sql :tickets: 2957 :versions: 0.9.3 Fixed bug where :meth:`.in_()` would go into an endless loop if erroneously passed a column expression whose comparator included the ``__getitem__()`` method, such as a column that uses the :class:`.postgresql.ARRAY` type. .. change:: :tags: bug, orm :tickets: 2951 :versions: 0.9.3 Fixed bug where :meth:`.Query.get` would fail to consistently raise the :class:`.InvalidRequestError` that is raised when it is called on a query with existing criterion, when the given identity is already present in the identity map.
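As a brief sketch of the :meth:`.Query.get` behavior above (``User`` is a hypothetical mapped class)::

    q = session.query(User).filter(User.name == 'someuser')

    # now consistently raises InvalidRequestError, since the Query
    # already has criterion attached, even when identity 5 is already
    # present in the identity map
    q.get(5)

..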
change:: :tags: bug, mysql :tickets: 2933 :versions: 0.9.3 Fixed bug which prevented MySQLdb-based dialects (e.g. pymysql) from working in Py3K, where a check for "connection charset" would fail due to Py3K's more strict value comparison rules. The call in question wasn't taking the database version into account in any case as the server version was still None at that point, so the method overall has been simplified to rely upon connection.character_set_name(). .. change:: :tags: bug, mysql :pullreq: github:61 :versions: 0.9.2 Some missing methods added to the cymysql dialect, including _get_server_version_info() and _detect_charset(). Pullreq courtesy Hajime Nakagami. .. change:: :tags: bug, py3k :pullreq: github:63 Fixed Py3K bug where a missing import would cause "literal binary" mode to fail to import "util.binary_type" when rendering a bound parameter. 0.9 handles this differently. Pull request courtesy Andreas Zeidler. .. change:: :tags: bug, orm :versions: 0.9.2 :pullreq: github:58 Fixed error message when an iterator object is passed to :func:`.class_mapper` or similar, where the error would fail to render on string formatting. Pullreq courtesy Kyle Stark. .. change:: :tags: bug, firebird :versions: 0.9.0 :tickets: 2897 The firebird dialect will quote identifiers which begin with an underscore. Courtesy Treeve Jelbert. .. change:: :tags: bug, firebird :versions: 0.9.0 Fixed bug in Firebird index reflection where the columns within the index were not sorted correctly; they are now sorted in order of RDB$FIELD_POSITION. .. change:: :tags: bug, mssql, firebird :versions: 0.9.0 The "asdecimal" flag used with the :class:`.Float` type will now work with Firebird as well as the mssql+pyodbc dialects; previously the decimal conversion was not occurring. .. change:: :tags: bug, mssql, pymssql :versions: 0.9.0 :pullreq: github:51 Added "Net-Lib error during Connection reset by peer" message to the list of messages checked for "disconnect" within the pymssql dialect. Courtesy John Anderson. .. change:: :tags: bug, sql :versions: 0.9.0 :tickets: 2896 Fixed issue where a primary key column that has a Sequence on it, yet the column is not the "auto increment" column, either because it has a foreign key constraint or ``autoincrement=False`` set, would attempt to fire the Sequence on INSERT for backends that don't support sequences, when presented with an INSERT missing the primary key value. This would take place on non-sequence backends like SQLite, MySQL. .. change:: :tags: bug, sql :versions: 0.9.0 :tickets: 2895 Fixed bug with :meth:`.Insert.from_select` method where the order of the given names would not be taken into account when generating the INSERT statement, thus producing a mismatch versus the column names in the given SELECT statement. Also noted that :meth:`.Insert.from_select` implies that Python-side insert defaults cannot be used, since the statement has no VALUES clause. .. change:: :tags: enhancement, sql :versions: 0.9.0 The exception raised when a :class:`.BindParameter` is present in a compiled statement without a value now includes the key name of the bound parameter in the error message. .. change:: :tags: bug, orm :versions: 0.9.0 :tickets: 2887 An adjustment to the :func:`.subqueryload` strategy which ensures that the query runs after the loading process has begun; this is so that the subqueryload takes precedence over other loaders that may be hitting the same attribute due to other eager/noload situations at the wrong time. .. 
change:: :tags: bug, orm :versions: 0.9.0 :tickets: 2885 Fixed bug when using joined table inheritance from a table to a select/alias on the base, where the PK columns were also not same named; the persistence system would fail to copy primary key values from the base table to the inherited table upon INSERT. .. change:: :tags: bug, orm :versions: 0.9.0 :tickets: 2889 :func:`.composite` will raise an informative error message when the columns/attribute (names) passed don't resolve to a Column or mapped attribute (such as an erroneous tuple); previously an ``UnboundLocalError`` was raised. .. change:: :tags: bug, declarative :versions: 0.9.0 :tickets: 2888 Error message when a string arg sent to :func:`.relationship` which doesn't resolve to a class or mapper has been corrected to work the same way as when a non-string arg is received, which indicates the name of the relationship which had the configurational error. .. changelog:: :version: 0.8.4 :released: December 8, 2013 .. change:: :tags: bug, engine :versions: 0.9.0 :tickets: 2881 A DBAPI that raises an error on ``connect()`` which is not a subclass of dbapi.Error (such as ``TypeError``, ``NotImplementedError``, etc.) will propagate the exception unchanged. Previously, the error handling specific to the ``connect()`` routine would both inappropriately run the exception through the dialect's :meth:`.Dialect.is_disconnect` routine as well as wrap it in a :class:`sqlalchemy.exc.DBAPIError`. It is now propagated unchanged in the same way as occurs within the execute process. .. change:: :tags: bug, engine, pool :versions: 0.9.0 :tickets: 2880 The :class:`.QueuePool` has been enhanced to not block new connection attempts when an existing connection attempt is blocking. Previously, the production of new connections was serialized within the block that monitored overflow; the overflow counter is now altered within its own critical section outside of the connection process itself. .. change:: :tags: bug, engine, pool :versions: 0.9.0 :tickets: 2522 Made a slight adjustment to the logic which waits for a pooled connection to be available, such that for a connection pool with no timeout specified, it will every half a second break out of the wait to check for the so-called "abort" flag, which allows the waiter to break out in case the whole connection pool was dumped; normally the waiter should break out due to a notify_all() but it's possible this notify_all() is missed in very slim cases. This is an extension of logic first introduced in 0.8.0, and the issue has only been observed occasionally in stress tests. .. change:: :tags: bug, mssql :versions: 0.9.0 :pullreq: bitbucket:7 Fixed bug introduced in 0.8.0 where the ``DROP INDEX`` statement for an index in MSSQL would render incorrectly if the index were in an alternate schema; the schemaname/tablename would be reversed. The format has also been revised to match current MSSQL documentation. Courtesy Derek Harland. .. change:: :tags: feature, sql :tickets: 1443 :versions: 0.9.0b1 Added support for "unique constraint" reflection, via the :meth:`.Inspector.get_unique_constraints` method. Thanks to Roman Podolyaka for the patch. .. change:: :tags: bug, oracle :tickets: 2864 :versions: 0.9.0 Added ORA-02396 "maximum idle time" error code to list of "is disconnect" codes with cx_oracle. ..
change:: :tags: bug, engine :tickets: 2871 :versions: 0.9.0 Fixed bug where a SQL statement would be improperly ASCII-encoded when a pre-DBAPI :class:`.StatementError` was raised within :meth:`.Connection.execute`, causing encoding errors for non-ASCII statements. The stringification now remains within Python unicode thus avoiding encoding errors. .. change:: :tags: bug, oracle :tickets: 2870 :versions: 0.9.0 Fixed bug where Oracle ``VARCHAR`` types given with no length (e.g. for a ``CAST`` or similar) would incorrectly render ``None CHAR`` or similar. .. change:: :tags: bug, ext :tickets: 2869 :versions: 0.9.0 Fixed bug which prevented the ``serializer`` extension from working correctly with table or column names that contain non-ASCII characters. .. change:: :tags: bug, orm :tickets: 2818 :versions: 0.9.0 Fixed a regression introduced by :ticket:`2818` where the EXISTS query being generated would produce a "columns being replaced" warning for a statement with two same-named columns, as the internal SELECT wouldn't have use_labels set. .. change:: :tags: bug, postgresql :tickets: 2855 :versions: 0.9.0 Fixed bug where index reflection would mis-interpret indkey values when using the pypostgresql adapter, which returns these values as lists vs. psycopg2's return type of string. .. changelog:: :version: 0.8.3 :released: October 26, 2013 .. change:: :tags: bug, oracle :tickets: 2853 :versions: 0.9.0b1 Fixed bug where Oracle table reflection using synonyms would fail if the synonym and the table were in different remote schemas. Patch courtesy Kyle Derr. .. change:: :tags: bug, sql :tickets: 2849 :versions: 0.9.0b1 Fixed bug where :func:`.type_coerce` would not interpret ORM elements with a ``__clause_element__()`` method properly. .. change:: :tags: bug, sql :tickets: 2842 :versions: 0.9.0b1 The :class:`.Enum` and :class:`.Boolean` types now bypass any custom (e.g. TypeDecorator) type in use when producing the CHECK constraint for the "non native" type. This is so that the custom type isn't involved in the expression within the CHECK, since this expression is against the "impl" value and not the "decorated" value. .. change:: :tags: bug, postgresql :tickets: 2844 :versions: 0.9.0b1 Removed a 128-character truncation from the reflection of the server default for a column; this code was originally from PG system views which truncated the string for readability. .. change:: :tags: bug, mysql :tickets: 2721, 2839 :versions: 0.9.0b1 The change in :ticket:`2721`, which is that the ``deferrable`` keyword of :class:`.ForeignKeyConstraint` is silently ignored on the MySQL backend, will be reverted as of 0.9; this keyword will now render again, raising errors on MySQL as it is not understood - the same behavior will also apply to the ``initially`` keyword. In 0.8, the keywords will remain ignored but a warning is emitted. Additionally, the ``match`` keyword now raises a :exc:`.CompileError` on 0.9 and emits a warning on 0.8; this keyword is not only silently ignored by MySQL but also breaks the ON UPDATE/ON DELETE options. To use a :class:`.ForeignKeyConstraint` that does not render or renders differently on MySQL, use a custom compilation option. An example of this usage has been added to the documentation, see :ref:`mysql_foreign_keys`. .. change:: :tags: bug, sql :tickets: 2825 :versions: 0.9.0b1 The ``.unique`` flag on :class:`.Index` could be produced as ``None`` if it was generated from a :class:`.Column` that didn't specify ``unique`` (where it defaults to ``None``).
The flag will now always be ``True`` or ``False``. .. change:: :tags: feature, orm :tickets: 2836 :versions: 0.9.0b1 Added new option to :func:`.relationship` ``distinct_target_key``. This enables the subquery eager loader strategy to apply a DISTINCT to the innermost SELECT subquery, to assist in the case where duplicate rows are generated by the innermost query which corresponds to this relationship (there's not yet a general solution to the issue of dupe rows within subquery eager loading, however, when joins outside of the innermost subquery produce dupes). When the flag is set to ``True``, the DISTINCT is rendered unconditionally, and when it is set to ``None``, DISTINCT is rendered if the innermost relationship targets columns that do not comprise a full primary key. The option defaults to False in 0.8 (i.e. off by default in all cases), None in 0.9 (i.e. automatic by default). Thanks to Alexander Koval for help with this. .. seealso:: :ref:`change_2836` .. change:: :tags: bug, mysql :tickets: 2515 :versions: 0.9.0b1 MySQL-connector dialect now allows options in the create_engine query string to override those defaults set up in the connect, including "buffered" and "raise_on_warnings". .. change:: :tags: bug, postgresql :tickets: 2742 :versions: 0.9.0b1 Parentheses will be applied to a compound SQL expression as rendered in the column list of a CREATE INDEX statement. .. change:: :tags: bug, sql :tickets: 2742 :versions: 0.9.0b1 Fixed bug in default compiler plus those of postgresql, mysql, and mssql to ensure that any literal SQL expression values are rendered directly as literals, instead of as bound parameters, within a CREATE INDEX statement. This also changes the rendering scheme for other DDL such as constraints. .. change:: :tags: bug, sql :tickets: 2815 :versions: 0.9.0b1 A :func:`.select` that is made to refer to itself in its FROM clause, typically via in-place mutation, will raise an informative error message rather than causing a recursion overflow. .. change:: :tags: bug, orm :tickets: 2813 :versions: 0.9.0b1 Fixed bug where using an annotation such as :func:`.remote` or :func:`.foreign` on a :class:`.Column` before association with a parent :class:`.Table` could produce issues related to the parent table not rendering within joins, due to the inherent copy operation performed by an annotation. .. change:: :tags: bug, sql :tickets: 2831 Non-working "schema" argument on :class:`.ForeignKey` is deprecated; raises a warning. Removed in 0.9. .. change:: :tags: bug, postgresql :tickets: 2819 :versions: 0.9.0b1 Fixed bug where Postgresql version strings that had a prefix preceding the words "Postgresql" or "EnterpriseDB" would not parse. Courtesy Scott Schaefer. .. change:: :tags: feature, engine :tickets: 2821 :versions: 0.9.0b1 ``repr()`` for the :class:`.URL` of an :class:`.Engine` will now conceal the password using asterisks. Courtesy Gunnlaugur Þór Briem. .. change:: :tags: bug, orm :tickets: 2818 :versions: 0.9.0b1 Fixed bug where :meth:`.Query.exists` failed to work correctly without any WHERE criterion. Courtesy Vladimir Magamedov. .. change:: :tags: bug, sql :tickets: 2811 :versions: 0.9.0b1 Fixed bug where using the ``column_reflect`` event to change the ``.key`` of the incoming :class:`.Column` would prevent primary key constraints, indexes, and foreign key constraints from being correctly reflected.
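A minimal sketch of the ``column_reflect`` event referenced above (``engine`` and the table name are illustrative assumptions)::

    from sqlalchemy import MetaData, Table

    def reflect_col(inspector, table, column_info):
        # assign a .key distinct from the column name; primary key
        # constraints, indexes and foreign keys now reflect correctly
        # under this usage
        column_info['key'] = 'attr_%s' % column_info['name']

    meta = MetaData()
    t = Table('some_table', meta,
              autoload=True, autoload_with=engine,
              listeners=[('column_reflect', reflect_col)])

..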
change:: :tags: feature :versions: 0.9.0b1 Added a new flag ``system=True`` to :class:`.Column`, which marks the column as a "system" column which is automatically made present by the database (such as Postgresql ``oid`` or ``xmin``). The column will be omitted from the ``CREATE TABLE`` statement but will otherwise be available for querying. In addition, the :class:`.CreateColumn` construct can be applied to a custom compilation rule which allows skipping of columns, by producing a rule that returns ``None``. .. change:: :tags: bug, orm :tickets: 2779 Backported a change from 0.9 whereby the iteration of a hierarchy of mappers used in polymorphic inheritance loads is sorted, which allows the SELECT statements generated for polymorphic queries to have deterministic rendering, which in turn helps with caching schemes that cache on the SQL string itself. .. change:: :tags: bug, orm :tickets: 2794 :versions: 0.9.0b1 Fixed a potential issue in an ordered sequence implementation used by the ORM to iterate mapper hierarchies; under the Jython interpreter this implementation wasn't ordered, even though cPython and Pypy maintained ordering. .. change:: :tags: bug, examples :versions: 0.9.0b1 Added "autoincrement=False" to the history table created in the versioning example, as this table shouldn't have autoinc on it in any case, courtesy Patrick Schmid. .. change:: :tags: bug, sql :versions: 0.9.0b1 The :meth:`.ColumnOperators.notin_` operator added in 0.8 now properly produces the negation of the expression "IN" returns when used against an empty collection. .. change:: :tags: feature, examples :versions: 0.9.0b1 Improved the examples in ``examples/generic_associations``, including that ``discriminator_on_association.py`` makes use of single table inheritance to do the work with the "discriminator". Also added a true "generic foreign key" example, which works similarly to other popular frameworks in that it uses an open-ended integer to point to any other table, foregoing traditional referential integrity. While we don't recommend this pattern, information wants to be free. .. change:: :tags: feature, orm, declarative :versions: 0.9.0b1 Added a convenience class decorator :func:`.as_declarative`, which is a wrapper for :func:`.declarative_base` which allows an existing base class to be applied using a nifty class-decorated approach. .. change:: :tags: bug, orm :tickets: 2786 :versions: 0.9.0b1 Fixed bug in ORM-level event registration where the "raw" or "propagate" flags could potentially be mis-configured in some "unmapped base class" configurations. .. change:: :tags: bug, orm :tickets: 2778 :versions: 0.9.0b1 A performance fix related to the usage of the :func:`.defer` option when loading mapped entities. The function overhead of applying a per-object deferred callable to an instance at load time was significantly higher than that of just loading the data from the row (note that ``defer()`` is meant to reduce DB/network overhead, not necessarily function call count); the function call overhead is now less than that of loading data from the column in all cases. There is also a reduction in the number of "lazy callable" objects created per load from N (total deferred values in the result) to 1 (total number of deferred cols). .. change:: :tags: bug, sqlite :tickets: 2781 :versions: 0.9.0b1 The newly added SQLite DATETIME arguments storage_format and regexp apparently were not fully implemented correctly; while the arguments were accepted, in practice they would have no effect; this has been fixed.
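A short sketch of the SQLite ``storage_format`` / ``regexp`` arguments mentioned above (the particular format shown is illustrative)::

    from sqlalchemy.dialects.sqlite import DATETIME

    # store datetimes as "YYYY/MM/DD HH:MM:SS" and parse them back
    # with the matching regular expression
    dt = DATETIME(
        storage_format="%(year)04d/%(month)02d/%(day)02d "
                       "%(hour)02d:%(minute)02d:%(second)02d",
        regexp=r"(\d+)/(\d+)/(\d+) (\d+):(\d+):(\d+)"
    )

..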
change:: :tags: bug, sql, postgresql :tickets: 2780 :versions: 0.9.0b1 Fixed bug where the expression system relied upon the ``str()`` form of some expressions when referring to the ``.c`` collection on a ``select()`` construct, but the ``str()`` form isn't available since the element relies on dialect-specific compilation constructs, notably the ``__getitem__()`` operator as used with a Postgresql ``ARRAY`` element. The fix also adds a new exception class :exc:`.UnsupportedCompilationError` which is raised in those cases where a compiler is asked to compile something it doesn't know how to. .. change:: :tags: bug, engine, oracle :tickets: 2776 :versions: 0.9.0b1 Dialect.initialize() is not called a second time if an :class:`.Engine` is recreated, due to a disconnect error. This fixes a particular issue in the Oracle 8 dialect, but in general the dialect.initialize() phase should only occur once per dialect. .. change:: :tags: feature, sql :tickets: 722 Added new method to the :func:`.insert` construct :meth:`.Insert.from_select`. Given a list of columns and a selectable, renders ``INSERT INTO <table> (<columns>) SELECT ...``. .. change:: :tags: feature, sql :versions: 0.9.0b1 The :func:`.update`, :func:`.insert`, and :func:`.delete` constructs will now interpret ORM entities as target tables to be operated upon, e.g.::

    from sqlalchemy import insert, update, delete

    ins = insert(SomeMappedClass).values(x=5)

    del_ = delete(SomeMappedClass).where(SomeMappedClass.id == 5)

    upd = update(SomeMappedClass).where(SomeMappedClass.id == 5).values(name='ed')

.. change:: :tags: bug, orm :tickets: 2773 :versions: 0.9.0b1 Fixed bug whereby attribute history functions would fail when an object was moved from "persistent" to "pending" using the :func:`.make_transient` function, for operations involving collection-based backrefs. .. change:: :tags: bug, engine, pool :tickets: 2772 :versions: 0.9.0b1 Fixed bug where :class:`.QueuePool` would lose the correct checked out count if an existing pooled connection failed to reconnect after an invalidate or recycle event. .. changelog:: :version: 0.8.2 :released: July 3, 2013 .. change:: :tags: bug, mysql :tickets: 2768 :versions: 0.9.0b1 Fixed bug when using multi-table UPDATE where a supplemental table is a SELECT with its own bound parameters, where the positioning of the bound parameters would be reversed versus the statement itself when using MySQL's special syntax. .. change:: :tags: bug, sqlite :tickets: 2764 :versions: 0.9.0b1 Added :class:`sqlalchemy.types.BIGINT` to the list of type names that can be reflected by the SQLite dialect; courtesy Russell Stuart. .. change:: :tags: feature, orm, declarative :tickets: 2761 :versions: 0.9.0b1 ORM descriptors such as hybrid properties can now be referenced by name in a string argument used with ``order_by``, ``primaryjoin``, or similar in :func:`.relationship`, in addition to column-bound attributes. .. change:: :tags: feature, firebird :tickets: 2763 :versions: 0.9.0b1 Added new flag ``retaining=True`` to the kinterbasdb and fdb dialects. This controls the value of the ``retaining`` flag sent to the ``commit()`` and ``rollback()`` methods of the DBAPI connection. Due to historical concerns, this flag defaults to ``True`` in 0.8.2, however in 0.9.0b1 this flag defaults to ``False``. .. change:: :tags: requirements :versions: 0.9.0b1 The Python `mock <https://pypi.python.org/pypi/mock>`_ library is now required in order to run the unit test suite.
While part of the standard library as of Python 3.3, previous Python installations will need to install this in order to run unit tests or to use the ``sqlalchemy.testing`` package for external dialects. .. change:: :tags: bug, orm :tickets: 2750 :versions: 0.9.0b1 A warning is emitted when trying to flush an object of an inherited class where the polymorphic discriminator has been assigned to a value that is invalid for the class. .. change:: :tags: bug, postgresql :tickets: 2740 :versions: 0.9.0b1 The behavior of :func:`.extract` has been simplified on the Postgresql dialect to no longer inject a hardcoded ``::timestamp`` or similar cast into the given expression, as this interfered with types such as timezone-aware datetimes, but also does not appear to be at all necessary with modern versions of psycopg2. .. change:: :tags: bug, firebird :tickets: 2757 :versions: 0.9.0b1 Type lookup when reflecting the Firebird types LONG and INT64 has been fixed so that LONG is treated as INTEGER, INT64 treated as BIGINT, unless the type has a "precision" in which case it's treated as NUMERIC. Patch courtesy Russell Stuart. .. change:: :tags: bug, postgresql :tickets: 2766 :versions: 0.9.0b1 Fixed bug in HSTORE type where keys/values that contained backslashed quotes would not be escaped correctly when using the "non native" (i.e. non-psycopg2) means of translating HSTORE data. Patch courtesy Ryan Kelly. .. change:: :tags: bug, postgresql :tickets: 2767 :versions: 0.9.0b1 Fixed bug where the order of columns in a multi-column Postgresql index would be reflected in the wrong order. Courtesy Roman Podolyaka. .. change:: :tags: bug, sql :tickets: 2746, 2668 :versions: 0.9.0b1 Multiple fixes to the correlation behavior of :class:`.Select` constructs, first introduced in 0.8.0: * To satisfy the use case where FROM entries should be correlated outwards to a SELECT that encloses another, which then encloses this one, correlation now works across multiple levels when explicit correlation is established via :meth:`.Select.correlate`, provided that the target select is somewhere along the chain contained by a WHERE/ORDER BY/columns clause, not just nested FROM clauses. This makes :meth:`.Select.correlate` act more compatibly to that of 0.7 again while still maintaining the new "smart" correlation. * When explicit correlation is not used, the usual "implicit" correlation limits its behavior to just the immediate enclosing SELECT, to maximize compatibility with 0.7 applications, and also prevents correlation across nested FROMs in this case, maintaining compatibility with 0.8.0/0.8.1. * The :meth:`.Select.correlate_except` method was not preventing the given FROM clauses from correlation in all cases, and also would cause FROM clauses to be incorrectly omitted entirely (more like what 0.7 would do), this has been fixed. * Calling `select.correlate_except(None)` will enter all FROM clauses into correlation as would be expected. .. change:: :tags: bug, ext :versions: 0.9.0b1 Fixed bug whereby if a composite type were set up with a function instead of a class, the mutable extension would trip up when it tried to check that column for being a :class:`.MutableComposite` (which it isn't). Courtesy asldevi. .. 
change:: :tags: feature, sql :tickets: 2744, 2734 Provided a new attribute for :class:`.TypeDecorator` called :attr:`.TypeDecorator.coerce_to_is_types`, to make it easier to control how comparisons using ``==`` or ``!=`` to ``None`` and boolean types go about producing an ``IS`` expression, or a plain equality expression with a bound parameter. .. change:: :tags: feature, postgresql :versions: 0.9.0b1 Support for Postgresql 9.2 range types has been added. Currently, no type translation is provided, so it works directly with strings or psycopg2 2.5 range extension types at the moment. Patch courtesy Chris Withers. .. change:: :tags: bug, examples :versions: 0.9.0b1 Fixed an issue with the "versioning" recipe whereby a many-to-one reference could produce a meaningless version for the target, even though it was not changed, when backrefs were present. Patch courtesy Matt Chisholm. .. change:: :tags: feature, postgresql :tickets: 2072 :versions: 0.9.0b1 Added support for "AUTOCOMMIT" isolation when using the psycopg2 DBAPI. The keyword is available via the ``isolation_level`` execution option. Patch courtesy Roman Podolyaka. .. change:: :tags: bug, orm :tickets: 2759 :versions: 0.9.0b1 Fixed bug in polymorphic SQL generation where multiple joined-inheritance entities against the same base class joined to each other as well would not track columns on the base table independently of each other if the string of joins were more than two entities long. .. change:: :tags: bug, engine :pullreq: github:6 :versions: 0.9.0b1 Fixed bug where the ``reset_on_return`` argument to various :class:`.Pool` implementations would not be propagated when the pool was regenerated. Courtesy Eevee. .. change:: :tags: bug, orm :tickets: 2754 :versions: 0.9.0b1 Fixed bug where sending a composite attribute into :meth:`.Query.order_by` would produce a parenthesized expression not accepted by some databases. .. change:: :tags: bug, orm :tickets: 2755 :versions: 0.9.0b1 Fixed the interaction between composite attributes and the :func:`.aliased` function. Previously, composite attributes wouldn't work correctly in comparison operations when aliasing was applied. .. change:: :tags: bug, mysql :tickets: 2715 :versions: 0.9.0b1 Added another conditional to the ``mysql+gaerdbms`` dialect to detect so-called "development" mode, where we should use the ``rdbms_mysqldb`` DBAPI. Patch courtesy Brett Slatkin. .. change:: :tags: feature, mysql :tickets: 2704 :versions: 0.9.0b1 The ``mysql_length`` parameter used with :class:`.Index` can now be passed as a dictionary of column names/lengths, for use with composite indexes; a short sketch follows this group of entries. Big thanks to Roman Podolyaka for the patch. .. change:: :tags: bug, mssql :tickets: 2747 :versions: 0.9.0b1 When querying the information schema on SQL Server 2000, removed a CAST call that was added in 0.8.1 to help with driver issues, which apparently is not compatible on 2000. The CAST remains in place for SQL Server 2005 and greater. .. change:: :tags: bug, mysql :tickets: 2721 :versions: 0.9.0b1 The ``deferrable`` keyword argument on :class:`.ForeignKey` and :class:`.ForeignKeyConstraint` will not render the ``DEFERRABLE`` keyword on the MySQL dialect. For a long time we left this in place because a non-deferrable foreign key would act very differently than a deferrable one, but some environments just disable FKs on MySQL, so we'll be less opinionated here.
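As referenced in the ``mysql_length`` entry above, a sketch of the dictionary form (table and column names are illustrative)::

    from sqlalchemy import Index

    # per-column prefix lengths for a composite MySQL index
    Index('some_composite_idx',
          sometable.c.name, sometable.c.description,
          mysql_length={'name': 10, 'description': 25})

..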
change:: :tags: bug, ext, orm :tickets: 2730 :versions: 0.9.0b1 Fixed bug where :class:`.MutableDict` didn't report a change event when ``clear()`` was called. .. change:: :tags: bug, sql :tickets: 2738 :versions: 0.9.0b1 Fixed bug whereby joining a select() of a table "A" with multiple foreign key paths to a table "B", to that table "B", would fail to produce the "ambiguous join condition" error that would be reported if you join table "A" directly to "B"; it would instead produce a join condition with multiple criteria. .. change:: :tags: bug, sql, reflection :tickets: 2728 :versions: 0.9.0b1 Fixed bug whereby using :meth:`.MetaData.reflect` across a remote schema as well as a local schema could produce wrong results in the case where both schemas had a table of the same name. .. change:: :tags: bug, sql :tickets: 2726 :versions: 0.9.0b1 Removed the "not implemented" ``__iter__()`` call from the base :class:`.ColumnOperators` class. While this was introduced in 0.8.0 to prevent an endless, memory-growing loop when one also implements a ``__getitem__()`` method on a custom operator and then erroneously calls ``list()`` on that object, it had the effect of causing column elements to report that they were in fact iterable types, which would then throw an error when you tried to iterate. There's no real way to have both sides here so we stick with Python best practices. Careful with implementing ``__getitem__()`` on your custom operators! .. change:: :tags: feature, orm :tickets: 2736 Added a new method :meth:`.Query.select_entity_from` which will in 0.9 replace part of the functionality of :meth:`.Query.select_from`. In 0.8, the two methods perform the same function, so that code can be migrated to use the :meth:`.Query.select_entity_from` method as appropriate. See the 0.9 migration guide for details. .. change:: :tags: bug, orm :tickets: 2737 Fixed a regression caused by :ticket:`2682` whereby the evaluation invoked by :meth:`.Query.update` and :meth:`.Query.delete` would hit upon unsupported ``True`` and ``False`` symbols which now appear due to the usage of ``IS``. .. change:: :tags: bug, postgresql :pullreq: github:2 :tickets: 2735 Fixed the HSTORE type to correctly encode/decode for unicode. This is always on, as the hstore is a textual type, and matches the behavior of psycopg2 when using Python 3. Courtesy Dmitry Mugtasimov. .. change:: :tags: bug, examples Fixed a small bug in the dogpile example where the generation of SQL cache keys wasn't applying deduping labels to the statement the same way :class:`.Query` normally does. .. change:: :tags: bug, engine, sybase :tickets: 2732 Fixed a bug where the routine to detect the correct kwargs being sent to :func:`.create_engine` would fail in some cases, such as with the Sybase dialect. .. change:: :tags: bug, orm :tickets: 2481 Fixed a regression from 0.7 caused by this ticket, which made the check for recursion overflow in self-referential eager joining too loose, missing a particular circumstance where a subclass had lazy="joined" or "subquery" configured and the load was a "with_polymorphic" against the base. .. change:: :tags: bug, orm :tickets: 2718 Fixed a regression from 0.7 where the contextmanager feature of :meth:`.Session.begin_nested` would fail to correctly roll back the transaction when a flush error occurred, instead raising its own exception while leaving the session still pending a rollback.
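A sketch of the :meth:`.Session.begin_nested` context-manager pattern addressed by the fix above (``session`` and ``some_object`` are illustrative)::

    try:
        with session.begin_nested():
            session.add(some_object)
            session.flush()  # a flush error here now rolls back the
                             # nested transaction cleanly
    except Exception:
        # the session is usable again rather than being left
        # pending a rollback
        pass

..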
change:: :tags: bug, mysql Updated mysqlconnector dialect to check for disconnect based on the apparent string message sent in the exception; tested against mysqlconnector 1.0.9. .. change:: :tags: bug, sql, mssql :tickets: 2682 Regression from this ticket caused the unsupported keyword "true" to render, added logic to convert this to 1/0 for SQL server. .. changelog:: :version: 0.8.1 :released: April 27, 2013 .. change:: :tags: bug, orm :tickets: 2698 Fixes to the ``sqlalchemy.ext.serializer`` extension, including that the "id" passed from the pickler is turned into a string to prevent against bytes being parsed on Py3K, as well as that ``relationship()`` and ``orm.join()`` constructs are now properly serialized. .. change:: :tags: bug, orm :tickets: 2714 A significant improvement to the inner workings of query.join(), such that the decisionmaking involved on how to join has been dramatically simplified. New test cases now pass such as multiple joins extending from the middle of an already complex series of joins involving inheritance and such. Joining from deeply nested subquery structures is still complicated and not without caveats, but with these improvements the edge cases are hopefully pushed even farther out to the edges. .. change:: :tags: feature, orm :tickets: 2673 Added a convenience method to Query that turns a query into an EXISTS subquery of the form ``EXISTS (SELECT 1 FROM ... WHERE ...)``. .. change:: :tags: bug, orm Added a conditional to the unpickling process for ORM mapped objects, such that if the reference to the object were lost when the object was pickled, we don't erroneously try to set up _sa_instance_state - fixes a NoneType error. .. change:: :tags: bug, postgresql :tickets: 2712 Opened up the checking for "disconnect" with psycopg2/libpq to check for all the various "disconnect" messages within the full exception hierarchy. Specifically the "closed the connection unexpectedly" message has now been seen in at least three different exception types. Courtesy Eli Collins. .. change:: :tags: bug, sql, mysql :tickets: 2682 Fully implemented the IS and IS NOT operators with regards to the True/False constants. An expression like ``col.is_(True)`` will now render ``col IS true`` on the target platform, rather than converting the True/ False constant to an integer bound parameter. This allows the ``is_()`` operator to work on MySQL when given True/False constants. .. change:: :tags: bug, postgresql :tickets: 2681 The operators for the Postgresql ARRAY type supports input types of sets, generators, etc. even when a dimension is not specified, by turning the given iterable into a collection unconditionally. .. change:: :tags: bug, mysql Fixes to support the latest cymysql DBAPI, courtesy Hajime Nakagami. .. change:: :tags: bug, mysql :tickets: 2663 Improvements to the operation of the pymysql dialect on Python 3, including some important decode/bytes steps. Issues remain with BLOB types due to driver issues. Courtesy Ben Trofatter. .. change:: :tags: bug, orm :tickets: 2710 Fixed bug where many-to-many relationship with uselist=False would fail to delete the association row and raise an error if the scalar attribute were set to None. This was a regression introduced by the changes for :ticket:`2229`. .. 
change:: :tags: bug, orm :tickets: 2708 Improved the behavior of instance management regarding the creation of strong references within the Session; an object will no longer have an internal reference cycle created if it's in the transient state or moves into the detached state - the strong ref is created only when the object is attached to a Session and is removed when the object is detached. This makes it somewhat safer for an object to have a ``__del__()`` method, even though this is not recommended, as relationships with backrefs produce cycles too. A warning has been added when a class with a ``__del__()`` method is mapped. .. change:: :tags: bug, sql :tickets: 2702 A major fix to the way in which a select() object produces labeled columns when apply_labels() is used; this mode produces a SELECT where each column is labeled as in ``<tablename>_<columnname>``, to remove column name collisions for a multiple table select. The fix is that if two labels collide when combined with the table name, i.e. "foo.bar_id" and "foo_bar.id", anonymous aliasing will be applied to one of the dupes. This allows the ORM to handle both columns independently; previously, 0.7 would in some cases silently emit a second SELECT for the column that was "duped", and in 0.8 an ambiguous column error would be emitted. The "keys" applied to the .c. collection of the select() will also be deduped, so that the "column being replaced" warning will no longer emit for any select() that specifies use_labels, though the dupe key will be given an anonymous label which isn't generally user-friendly. .. change:: :tags: bug, mysql Updated a regexp to correctly extract error code on google app engine v1.7.5 and newer. Courtesy Dan Ring. .. change:: :tags: bug, examples Fixed a long-standing bug in the caching example, where the limit/offset parameter values wouldn't be taken into account when computing the cache key. The _key_from_query() function has been simplified to work directly from the final compiled statement in order to get at both the full statement as well as the fully processed parameter list. .. change:: :tags: bug, mssql :tickets: 2355 Part of a longer series of fixes needed for pyodbc+mssql, a CAST to NVARCHAR(max) has been added to the bound parameter for the table name and schema name in all information schema queries to avoid the issue of comparing NVARCHAR to NTEXT, which seems to be rejected by the ODBC driver in some cases, such as FreeTDS (0.91 only?) plus unicode bound parameters being passed. The issue seems to be specific to the SQL Server information schema tables and the workaround is harmless for those cases where the problem doesn't exist in the first place. .. change:: :tags: bug, sql :tickets: 2691 Fixed bug where disconnect detection on error would raise an attribute error if the error were being raised after the Connection object had already been closed. .. change:: :tags: bug, sql :tickets: 2703 Reworked internal exception raises that emit a rollback() before re-raising, so that the stack trace is preserved from sys.exc_info() before entering the rollback. This is so that the traceback is preserved when using coroutine frameworks which may have switched contexts before the rollback function returns. .. change:: :tags: bug, orm :tickets: 2697 Fixed bug whereby ORM would run the wrong kind of query when refreshing an inheritance-mapped class where the superclass was mapped to a non-Table object, like a custom join() or a select(), running a query that assumed a hierarchy that's mapped to individual Table-per-class. ..
change:: :tags: bug, orm Fixed `__repr__()` on mapper property constructs to work before the object is initialized, so that Sphinx builds with recent Sphinx versions can read them. .. change:: :tags: bug, sql, postgresql The _Binary base type now converts values through the bytes() callable when run on Python 3; in particular psycopg2 2.5 with Python 3.3 seems to now be returning the "memoryview" type, so this is converted to bytes before return. .. change:: :tags: bug, sql :tickets: 2695 Improvements to Connection auto-invalidation handling. If a non-disconnect error occurs, but leads to a delayed disconnect error within error handling (happens with MySQL), the disconnect condition is detected. The Connection can now also be closed when in an invalid state, meaning it will raise "closed" on next usage, and additionally the "close with result" feature will work even if the autorollback in an error handling routine fails and regardless of whether the condition is a disconnect or not. .. change:: :tags: bug, orm, declarative :tickets: 2656 Fixed indirect regression regarding :func:`.has_inherited_table`, where since it considers the current class' ``__table__``, was sensitive to when it was called. This is 0.7's behavior also, but in 0.7 things tended to "work out" within events like ``__mapper_args__()``. :func:`.has_inherited_table` now only considers superclasses, so should return the same answer regarding the current class no matter when it's called (obviously assuming the state of the superclass). .. change:: :tags: bug, mssql Added support for additional "disconnect" messages to the pymssql dialect. Courtesy John Anderson. .. change:: :tags: feature, sql Loosened the check on dialect-specific argument names passed to Table(); since we want to support external dialects and also want to support args without a certain dialect being installed, it only checks the format of the arg now, rather than looking for that dialect in sqlalchemy.dialects. .. change:: :tags: bug, sql Fixed bug whereby a DBAPI that can return "0" for cursor.lastrowid would not function correctly in conjunction with :attr:`.ResultProxy.inserted_primary_key`. .. change:: :tags: bug, mssql :tickets: 2683 Fixed Py3K bug regarding "binary" types and pymssql. Courtesy Marc Abramowitz. .. change:: :tags: bug, postgresql :tickets: 2680 Added missing HSTORE type to postgresql type names so that the type can be reflected. .. changelog:: :version: 0.8.0 :released: March 9, 2013 .. note:: There are some new behavioral changes as of 0.8.0 not present in 0.8.0b2. They are present in the migration document as follows: * :ref:`legacy_is_orphan_addition` * :ref:`metadata_create_drop_tables` * :ref:`correlation_context_specific` .. change:: :tags: feature, orm :tickets: 2675 A meaningful :attr:`.QueryableAttribute.info` attribute is added, which proxies down to the ``.info`` attribute on either the :class:`.schema.Column` object if directly present, or the :class:`.MapperProperty` otherwise. The full behavior is documented and ensured by tests to remain stable. .. 
change:: :tags: bug, sql :tickets: 2668 The behavior of SELECT correlation has been improved such that the :meth:`.Select.correlate` and :meth:`.Select.correlate_except` methods, as well as their ORM analogues, will still retain "auto-correlation" behavior in that the FROM clause is modified only if the output would be legal SQL; that is, the FROM clause is left intact if the correlated SELECT is not used in the context of an enclosing SELECT inside of the WHERE, columns, or HAVING clause. The two methods now only specify conditions to the default "auto correlation", rather than absolute FROM lists. .. change:: :tags: feature, mysql New dialect for CyMySQL added, courtesy Hajime Nakagami. .. change:: :tags: bug, orm :tickets: 2674 Improved checking for an existing backref name conflict during mapper configuration; will now test for name conflicts on superclasses and subclasses, in addition to the current mapper, as these conflicts break things just as much. This is new for 0.8, but see below for a warning that will also be triggered in 0.7.11. .. change:: :tags: bug, orm :tickets: 2674 Improved the error message emitted when a "backref loop" is detected, that is when an attribute event triggers a bidirectional assignment between two other attributes with no end. This condition can occur not just when an object of the wrong type is assigned, but also when an attribute is mis-configured to backref into an existing backref pair. Also in 0.7.11. .. change:: :tags: bug, orm :tickets: 2674 A warning is emitted when a MapperProperty is assigned to a mapper that replaces an existing property, if the properties in question aren't plain column-based properties. Replacement of relationship properties is rarely (ever?) what is intended and usually refers to a mapper mis-configuration. Also in 0.7.11. .. change:: :tags: feature, orm Can set/change the "cascade" attribute on a :func:`.relationship` construct after it's been constructed already. This is not a pattern for normal use but we like to change the setting for demonstration purposes in tutorials. .. change:: :tags: bug, schema :tickets: 2664 :meth:`.MetaData.create_all` and :meth:`.MetaData.drop_all` will now accommodate an empty list as an instruction to not create/drop any items, rather than ignoring the collection. .. change:: :tags: bug, tests :tickets: 2669 Fixed an import of "logging" in test_execute which was not working on some linux platforms. Also in 0.7.11. .. change:: :tags: bug, orm :tickets: 2662 A clear error message is emitted if an event handler attempts to emit SQL on a Session within the after_commit() handler, where there is not a viable transaction in progress. .. change:: :tags: bug, orm :tickets: 2665 Detection of a primary key change within the process of cascading a natural primary key update will succeed even if the key is composite and only some of the attributes have changed. .. change:: :tags: feature, orm :tickets: 2658 Added new helper function :func:`.was_deleted`, returns True if the given object was the subject of a :meth:`.Session.delete` operation. .. change:: :tags: bug, orm :tickets: 2658 An object that's deleted from a session will be de-associated with that session fully after the transaction is committed, that is the :func:`.object_session` function will return None. .. change:: :tags: bug, oracle The cx_oracle dialect will no longer run the bind parameter names through ``encode()``, as this is not valid on Python 3, and prevented statements from functioning correctly on Python 3. 
We now encode only if ``supports_unicode_binds`` is False, which is not the case for cx_oracle when at least version 5 of cx_oracle is used. .. change:: :tags: bug, orm :tickets: 2661 Fixed bug whereby :meth:`.Query.yield_per` would set the execution options incorrectly, thereby breaking subsequent usage of the :meth:`.Query.execution_options` method. Courtesy Ryan Kelly. .. change:: :tags: bug, orm :tickets: 1768 Fixed the consideration of the ``between()`` operator so that it works correctly with the new relationship local/remote system. .. change:: :tags: bug, sql :tickets: 2660, 1768 Fixed a bug regarding column annotations which in particular could impact some usages of the new :func:`.orm.remote` and :func:`.orm.local` annotation functions, where annotations could be lost when the column were used in a subsequent expression. .. change:: :tags: bug, mysql, gae :tickets: 2649 Added a conditional import to the ``gaerdbms`` dialect which attempts to import rdbms_apiproxy vs. rdbms_googleapi to work on both dev and production platforms. Also now honors the ``instance`` attribute. Courtesy Sean Lynch. Also in 0.7.10. .. change:: :tags: bug, sql :tickets: 2496 The :meth:`.ColumnOperators.in_` operator will now coerce values of ``None`` to :func:`.null`. .. change:: :tags: feature, sql :tickets: 2657 Added a new argument to :class:`.Enum` and its base :class:`.SchemaType` ``inherit_schema``. When set to ``True``, the type will set its ``schema`` attribute of that of the :class:`.Table` to which it is associated. This also occurs during a :meth:`.Table.tometadata` operation; the :class:`.SchemaType` is now copied in all cases when :meth:`.Table.tometadata` happens, and if ``inherit_schema=True``, the type will take on the new schema name passed to the method. The ``schema`` is important when used with the Postgresql backend, as the type results in a ``CREATE TYPE`` statement. .. change:: :tags: feature, postgresql Added :meth:`.postgresql.ARRAY.Comparator.any` and :meth:`.postgresql.ARRAY.Comparator.all` methods, as well as standalone expression constructs. Big thanks to Audrius Kažukauskas for the terrific work here. .. change:: :tags: sql, bug :tickets: 2643 Fixed bug where :meth:`.Table.tometadata` would fail if a :class:`.Column` had both a foreign key as well as an alternate ".key" name for the column. Also in 0.7.10. .. change:: :tags: sql, bug :tickets: 2629 insert().returning() raises an informative CompileError if attempted to compile on a dialect that doesn't support RETURNING. .. change:: :tags: orm, bug :tickets: 2655 the consideration of a pending object as an "orphan" has been modified to more closely match the behavior as that of persistent objects, which is that the object is expunged from the :class:`.Session` as soon as it is de-associated from any of its orphan-enabled parents. Previously, the pending object would be expunged only if de-associated from all of its orphan-enabled parents. The new flag ``legacy_is_orphan`` is added to :func:`.orm.mapper` which re-establishes the legacy behavior. See the change note and example case at :ref:`legacy_is_orphan_addition` for a detailed discussion of this change. .. change:: :tags: orm, bug :tickets: 2653 Fixed the (most likely never used) "@collection.link" collection method, which fires off each time the collection is associated or de-associated with a mapped object - the decorator was not tested or functional. The decorator method is now named :meth:`.collection.linker` though the name "link" remains for backwards compatibility. 
Courtesy Luca Wehrstedt. .. change:: :tags: orm, bug :tickets: 2654 Made some fixes to the system of producing custom instrumented collections, mainly that the usage of the @collection decorators will now honor the __mro__ of the given class, applying the logic of the sub-most classes' version of a particular collection method. Previously, it wasn't predictable when subclassing an existing instrumented class such as :class:`.MappedCollection` whether or not custom methods would resolve correctly. .. change:: :tags: orm, removed The undocumented (and hopefully unused) system of producing custom collections using an ``__instrumentation__`` datastructure associated with the collection has been removed, as this was a complex and untested feature which was also essentially redundant versus the decorator approach. Other internal simplifications to the orm.collections module have been made as well. .. change:: :tags: mssql, feature Added ``mssql_include`` and ``mssql_clustered`` options to :class:`.Index`, which render the ``INCLUDE`` and ``CLUSTERED`` keywords, respectively. Courtesy Derek Harland. .. change:: :tags: sql, feature :tickets: 695 :class:`.Index` now supports arbitrary SQL expressions and/or functions, in addition to straight columns. Common modifiers include using ``somecolumn.desc()`` for a descending index and ``func.lower(somecolumn)`` for a case-insensitive index, depending on the capabilities of the target backend. .. change:: :tags: mssql, bug :tickets: 2638 Added a py3K conditional around unnecessary .decode() call in mssql information schema, fixes reflection in Py3K. Also in 0.7.10. .. change:: :tags: orm, bug :tickets: 2650 Fixed potential memory leak which could occur if an arbitrary number of :class:`.sessionmaker` objects were created. The anonymous subclass created by the sessionmaker, when dereferenced, would not be garbage collected due to remaining class-level references from the event package. This issue also applies to any custom system that made use of ad-hoc subclasses in conjunction with an event dispatcher. Also in 0.7.10. .. change:: :tags: mssql, bug Fixed a regression whereby the "collation" parameter of the character types CHAR, NCHAR, etc. stopped working, as "collation" is now supported by the base string types. The TEXT, NCHAR, CHAR, VARCHAR types within the MSSQL dialect are now synonyms for the base types. .. change:: :tags: mssql, feature :tickets: 2644 DDL for IDENTITY columns is now supported on non-primary key columns, by establishing a :class:`.Sequence` construct on any integer column. Courtesy Derek Harland. .. change:: :tags: examples, bug Fixed a regression in the examples/dogpile_caching example which was due to the change in :ticket:`2614`. .. change:: :tags: orm, bug :tickets: 2640 :meth:`.Query.merge_result` can now load rows from an outer join where an entity may be ``None`` without throwing an error. Also in 0.7.10. .. change:: :tags: sql, bug :tickets: 2648 Tweaked the "REQUIRED" symbol used by the compiler to identify INSERT/UPDATE bound parameters that need to be passed, so that it's more easily identifiable when writing custom bind-handling code. .. change:: :tags: postgresql, bug Fixed bug in :class:`~sqlalchemy.dialects.postgresql.array()` construct whereby using it inside of an :func:`.expression.insert` construct would produce an error regarding a parameter issue in the ``self_group()`` method.
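A sketch of the :func:`~sqlalchemy.dialects.postgresql.array` usage from the entry above (``sometable`` and its ``data`` column are illustrative)::

    from sqlalchemy.dialects.postgresql import array

    # previously raised a parameter error in self_group(); now compiles
    stmt = sometable.insert().values(data=array([1, 2, 3]))

..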
.. change:: :tags: orm, feature Extended the :doc:`/core/inspection` system so that all Python descriptors associated with the ORM or its extensions can be retrieved. This fulfills the common request of being able to inspect all :class:`.QueryableAttribute` descriptors in addition to extension types such as :class:`.hybrid_property` and :class:`.AssociationProxy`. See :attr:`.Mapper.all_orm_descriptors`. .. change:: :tags: mysql, feature GAE dialect now accepts username/password arguments in the URL, courtesy Owen Nelson. .. change:: :tags: mysql, bug GAE dialect won't fail on None match if the error code can't be extracted from the exception thrown; courtesy Owen Nelson. .. change:: :tags: orm, bug :tickets: 2637 Fixes to the "dynamic" loader on :func:`.relationship`, which include that backrefs will work properly even when autoflush is disabled, and that history events are more accurate in scenarios where multiple add/remove of the same object occurs. .. changelog:: :version: 0.8.0b2 :released: December 14, 2012 .. change:: :tags: orm, bug :tickets: 2635 The :meth:`.Query.select_from` method can now be used with a :func:`.aliased` construct without it interfering with the entities being selected. Basically, a statement like this:: ua = aliased(User) session.query(User.name).select_from(ua).join(User, User.name > ua.name) will maintain the columns clause of the SELECT as coming from the unaliased "user", as specified; the select_from only takes place in the FROM clause:: SELECT users.name AS users_name FROM users AS users_1 JOIN users ON users.name > users_1.name Note that this behavior is in contrast to the original, older use case for :meth:`.Query.select_from`, which is that of restating the mapped entity in terms of a different selectable:: session.query(User.name).\ select_from(user_table.select().where(user_table.c.id > 5)) Which produces:: SELECT anon_1.name AS anon_1_name FROM (SELECT users.id AS id, users.name AS name FROM users WHERE users.id > :id_1) AS anon_1 It was the "aliasing" behavior of the latter use case that was getting in the way of the former use case. The method now specifically considers a SQL expression like :func:`.expression.select` or :func:`.expression.alias` separately from a mapped entity like a :func:`.aliased` construct. .. change:: :tags: sql, bug :tickets: 2633 Fixed a regression caused by :ticket:`2410` whereby a :class:`.CheckConstraint` would apply itself back to the original table during a :meth:`.Table.tometadata` operation, as it would parse the SQL expression for a parent table. The operation now copies the given expression to correspond to the new table. .. change:: :tags: oracle, bug :tickets: 2619 Fixed table reflection for Oracle when accessing a synonym that refers to a DBLINK remote database; while the syntax has been present in the Oracle dialect for some time, up until now it has never been tested. The syntax has been tested against a sample database linking to itself, however there's still some uncertainty as to what should be used for the "owner" when querying the remote database for table information. Currently, the value of "username" from user_db_links is used to match the "owner". .. change:: :tags: orm, feature :tickets: 2601 Added :meth:`.KeyedTuple._asdict` and :attr:`.KeyedTuple._fields` to the :class:`.KeyedTuple` class to provide some degree of compatibility with the Python standard library ``collections.namedtuple()``.
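A quick sketch of the namedtuple-style accessors, assuming a hypothetical ``User`` mapped class and an open session::

    row = session.query(User.id, User.name).first()

    row._fields    # e.g. ('id', 'name')
    row._asdict()  # e.g. {'id': 1, 'name': 'ed'}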
.. change:: :tags: sql, bug :tickets: 2610 Fixed bug whereby using a label_length on a dialect that was smaller than the size of actual column identifiers would fail to render the columns correctly in a SELECT statement. .. change:: :tags: sql, feature :tickets: 2623 The :class:`.Insert` construct now supports multi-valued inserts, that is, an INSERT that renders like "INSERT INTO table VALUES (...), (...), ...". Supported by Postgresql, SQLite, and MySQL. Big thanks to Idan Kamara for doing the legwork on this one. A short sketch of the feature appears below, following this group of entries. .. seealso:: :ref:`feature_2623` .. change:: :tags: oracle, bug :tickets: 2620 The Oracle LONG type, while an unbounded text type, does not appear to use the cx_Oracle.LOB type when result rows are returned, so the dialect has been repaired to exclude LONG from having cx_Oracle.LOB filtering applied. Also in 0.7.10. .. change:: :tags: oracle, bug :tickets: 2611 Repaired the usage of ``.prepare()`` in conjunction with cx_Oracle so that a return value of ``False`` will result in no call to ``connection.commit()``, hence avoiding "no transaction" errors. Two-phase transactions have now been shown to work in a rudimentary fashion with SQLAlchemy and cx_oracle, however they are subject to caveats observed with the driver; check the documentation for details. Also in 0.7.10. .. change:: :tags: sql, bug :tickets: 2618 The :class:`~sqlalchemy.types.DECIMAL` type now honors the "precision" and "scale" arguments when rendering DDL. .. change:: :tags: orm, bug :tickets: 2624 The :class:`.MutableComposite` type did not allow for the :meth:`.MutableBase.coerce` method to be used, even though the code seemed to indicate this intent, so this now works and a brief example is added. As a side-effect, the mechanics of this event handler have been changed so that new :class:`.MutableComposite` types no longer add per-type global event handlers. Also in 0.7.10. .. change:: :tags: sql, bug :tickets: 2621 Made an adjustment to the "boolean" (i.e. ``__nonzero__``) evaluation of binary expressions, i.e. ``x1 == x2``, such that the "auto-grouping" applied by :class:`.BinaryExpression` in some cases won't get in the way of this comparison. Previously, an expression like:: expr1 = mycolumn > 2 bool(expr1 == expr1) would evaluate as ``False``, even though this is an identity comparison, because ``mycolumn > 2`` would be "grouped" before being placed into the :class:`.BinaryExpression`, thus changing its identity. :class:`.BinaryExpression` now keeps track of the "original" objects passed in. Additionally, the ``__nonzero__`` method now only returns a value if the operator is ``==`` or ``!=``; all others raise ``TypeError``. .. change:: :tags: firebird, bug :tickets: 2622 Added missing import for "fdb" to the experimental "firebird+fdb" dialect. .. change:: :tags: orm, feature Allow synonyms to be used when defining primary and secondary joins for relationships. .. change:: :tags: orm, bug :tickets: 2614 A second overhaul of aliasing/internal pathing mechanics now allows two subclasses to have different relationships of the same name, supported with subquery or joined eager loading on both simultaneously when a full polymorphic load is used. .. change:: :tags: orm, bug :tickets: 2617 Fixed bug whereby a multi-hop subqueryload within a particular with_polymorphic load would produce a KeyError. Takes advantage of the same internal pathing overhaul as :ticket:`2614`.
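The multi-valued INSERT sketch referenced above; ``mytable`` and ``conn`` are hypothetical::

    stmt = mytable.insert().values([
        {'name': 'spongebob'},
        {'name': 'sandy'},
        {'name': 'patrick'},
    ])
    # renders a single INSERT statement with three VALUES groups
    conn.execute(stmt)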
.. change:: :tags: sql, bug Fixed a gotcha where inadvertently calling list() on a :class:`.ColumnElement` would go into an endless loop, if :meth:`.ColumnOperators.__getitem__` were implemented. A new NotImplementedError is now raised via ``__iter__()``. .. change:: :tags: orm, extensions, feature The :mod:`sqlalchemy.ext.mutable` extension now includes the example :class:`.MutableDict` class as part of the extension. .. change:: :tags: postgresql, feature :tickets: 2606 :class:`.HSTORE` is now available in the Postgresql dialect. Will also use psycopg2's extensions if available. Courtesy Audrius Kažukauskas. .. change:: :tags: sybase, feature :tickets: 1753 Reflection support has been added to the Sybase dialect. Big thanks to Ben Trofatter for all the work developing and testing this. .. change:: :tags: engine, feature The :meth:`.Connection.connect` and :meth:`.Connection.contextual_connect` methods now return a "branched" version so that the :meth:`.Connection.close` method can be called on the returned connection without affecting the original. Allows symmetry when using :class:`.Engine` and :class:`.Connection` objects as context managers:: with conn.connect() as c: # leaves the Connection open c.execute("...") with engine.connect() as c: # closes the Connection c.execute("...") .. change:: :tags: engine The "reflect=True" argument to :class:`~sqlalchemy.schema.MetaData` is deprecated. Please use the :meth:`.MetaData.reflect` method. .. change:: :tags: sql, bug :tickets: 2603 Fixed bug in type_coerce() whereby typing information could be lost if the statement were used as a subquery inside of another statement, as well as other similar situations. Among other things, this would cause typing information to be lost when the Oracle/mssql dialects would apply limit/offset wrappings. .. change:: :tags: orm, bug :tickets: 2602 Fixed regression where query.update() would produce an error if an object matched by the "fetch" synchronization strategy wasn't locally present. Courtesy Scott Torborg. .. change:: :tags: sql, bug :tickets: 2597 Fixed bug whereby the ".key" of a Column wasn't being used when producing a "proxy" of the column against a selectable. This probably didn't occur in 0.7 since 0.7 doesn't respect the ".key" in as wide a range of scenarios. .. change:: :tags: mssql, feature :tickets: 2600 Support for reflection of the "name" of primary key constraints added, courtesy Dave Moore. .. change:: :tags: informix Some cruft regarding informix transaction handling has been removed, including a feature that would skip calling commit()/rollback() as well as some hardcoded isolation level assumptions on begin(). The status of this dialect is not well understood as we don't have any users working with it, nor any access to an Informix database. If someone with access to Informix wants to help test this dialect, please let us know. .. change:: :tags: pool, feature The :class:`.Pool` will now log all connection.close() operations equally, including closes which occur for invalidated connections, detached connections, and connections beyond the pool capacity. .. change:: :tags: pool, feature :tickets: 2611 The :class:`.Pool` now consults the :class:`.Dialect` for functionality regarding how the connection should be "auto rolled back", as well as closed. This grants more control of transaction scope to the dialect, so that we will be better able to implement transactional workarounds like those potentially needed for pysqlite and cx_oracle.
.. change:: :tags: pool, feature Added new :meth:`.PoolEvents.reset` hook to capture the event before a connection is auto-rolled back, upon return to the pool. Together with :meth:`.ConnectionEvents.rollback` this allows all rollback events to be intercepted. .. changelog:: :version: 0.8.0b1 :released: October 30, 2012 .. change:: :tags: sql, bug :tickets: 2593 Fixed bug where keyword arguments passed to :meth:`.Compiler.process` wouldn't get propagated to the column expressions present in the columns clause of a SELECT statement. In particular this would come up when used by custom compilation schemes that relied upon special flags. .. change:: :tags: sql, feature Added a new method :meth:`.Engine.execution_options` to :class:`.Engine`. This method works similarly to :meth:`.Connection.execution_options` in that it creates a copy of the parent object which will refer to the new set of options. The method can be used to build sharding schemes where each engine shares the same underlying pool of connections. The method has been tested against the horizontal shard recipe in the ORM as well. .. seealso:: :meth:`.Engine.execution_options` .. change:: :tags: sql, orm, bug :tickets: 2595 The auto-correlation feature of :func:`.select`, and by proxy that of :class:`.Query`, will not take effect for a SELECT statement that is being rendered directly in the FROM list of the enclosing SELECT. Correlation in SQL only applies to column expressions such as those in the WHERE, ORDER BY, and columns clauses. .. change:: :tags: sqlite :changeset: c3addcc9ffad Added :class:`.types.NCHAR`, :class:`.types.NVARCHAR` to the SQLite dialect's list of recognized type names for reflection. SQLite returns whatever name was given to a type as the reflected type name. .. change:: :tags: examples :tickets: 2589 The Beaker caching example has been converted to use ``dogpile.cache``. This is a new caching library written by the same creator of Beaker's caching internals, and represents a vastly improved, simplified, and modernized system of caching. .. seealso:: :ref:`examples_caching` .. change:: :tags: general :tickets: SQLAlchemy 0.8 now targets Python 2.5 and above. Python 2.4 is no longer supported. .. change:: :tags: removed, general :tickets: 2433 The "sqlalchemy.exceptions" synonym for "sqlalchemy.exc" is removed fully. .. change:: :tags: removed, orm :tickets: 2442 The legacy "mutable" system of the ORM, including the MutableType class as well as the mutable=True flag on PickleType and postgresql.ARRAY has been removed. In-place mutations are detected by the ORM using the sqlalchemy.ext.mutable extension, introduced in 0.7. The removal of MutableType and associated constructs removes a great deal of complexity from SQLAlchemy's internals. The approach performed poorly as it would incur a scan of the full contents of the Session when in use. .. change:: :tags: orm, moved :tickets: The InstrumentationManager interface and the entire related system of alternate class implementation is now moved out to sqlalchemy.ext.instrumentation. This is a seldom used system that adds significant complexity and overhead to the mechanics of class instrumentation. The new architecture allows it to remain unused until InstrumentationManager is actually imported, at which point it is bootstrapped into the core. .. change:: :tags: orm, feature :tickets: 1401 Major rewrite of relationship() internals now allow join conditions which include columns pointing to themselves within composite foreign keys.
A new API for very specialized primaryjoin conditions is added, allowing conditions based on SQL functions, CAST, etc. to be handled by placing the annotation functions remote() and foreign() inline within the expression when necessary. Previous recipes using the semi-private _local_remote_pairs approach can be upgraded to this new approach. .. seealso:: :ref:`feature_relationship_08` .. change:: :tags: orm, bug :tickets: 2527 ORM will perform extra effort to determine that an FK dependency between two tables is not significant during flush if the tables are related via joined inheritance and the FK dependency is not part of the inherit_condition, saving the user a use_alter directive. .. change:: :tags: orm, feature :tickets: 2333 New standalone function with_polymorphic() provides the functionality of query.with_polymorphic() in a standalone form. It can be applied to any entity within a query, including as the target of a join in place of the "of_type()" modifier. .. change:: :tags: orm, feature :tickets: 1106, 2438 The of_type() construct on attributes now accepts aliased() class constructs as well as with_polymorphic constructs, and works with query.join(), any(), has(), and also the eager loaders subqueryload(), joinedload(), and contains_eager(). .. change:: :tags: orm, feature :tickets: 2585 Improvements to event listening for mapped classes allow unmapped classes to be specified for instance- and mapper-events. The established events will be automatically set up on subclasses of that class when the propagate=True flag is passed, and the events will be set up for that class itself if and when it is ultimately mapped. .. change:: :tags: orm, bug :tickets: 2590 The instrumentation events class_instrument(), class_uninstrument(), and attribute_instrument() will now fire off only for descendant classes of the class assigned to listen(). Previously, an event listener would be assigned to listen for all classes in all cases regardless of the "target" argument passed. .. change:: :tags: orm, bug :tickets: 1900 with_polymorphic() produces JOINs in the correct order and with correct inheriting tables in the case of sending multi-level subclasses in an arbitrary order or with intermediary classes missing. .. change:: :tags: orm, feature :tickets: 2485 The "deferred declarative reflection" system has been moved into the declarative extension itself, using the new DeferredReflection class. This class is now tested with both single and joined table inheritance use cases. .. change:: :tags: orm, feature :tickets: 2208 Added new core function "inspect()", which serves as a generic gateway to introspection into mappers, objects, and others. The Mapper and InstanceState objects have been enhanced with a public API that allows inspection of mapped attributes, including filters for column-bound or relationship-bound properties, inspection of current object state, history of attributes, etc. .. change:: :tags: orm, feature :tickets: 2452 Calling rollback() within a session.begin_nested() will now only expire those objects that had net changes within the scope of that transaction, that is objects which were dirty or were modified on a flush. This allows the typical use case for begin_nested(), that of altering a small subset of objects, to leave in place the data from the larger enclosing set of objects that weren't modified in that sub-transaction. .. change:: :tags: orm, feature :tickets: 2372 Added utility feature Session.enable_relationship_loading(), which supersedes relationship.load_on_pending.
Both features should be avoided, however. .. change:: :tags: orm, feature :tickets: Added support for .info dictionary argument to column_property(), relationship(), composite(). All MapperProperty classes have an auto-creating .info dict available overall. .. change:: :tags: orm, feature :tickets: 2229 Adding/removing None from a mapped collection now generates attribute events. Previously, a None append would be ignored in some cases. Related to. .. change:: :tags: orm, feature :tickets: 2229 The presence of None in a mapped collection now raises an error during flush. Previously, None values in collections would be silently ignored. .. change:: :tags: orm, feature :tickets: The Query.update() method is now more lenient as to the table being updated. Plain Table objects are better supported now, and additionally a joined-inheritance subclass may be used with update(); the subclass table will be the target of the update, and if the parent table is referenced in the WHERE clause, the compiler will call upon UPDATE..FROM syntax as allowed by the dialect to satisfy the WHERE clause. MySQL's multi-table update feature is also supported if columns are specified by object in the "values" dictionary. PG's DELETE..USING is not available in Core yet. .. change:: :tags: orm, feature :tickets: New session events after_transaction_create and after_transaction_end allow tracking of new SessionTransaction objects. The object can be inspected to determine when a session first becomes active and when it deactivates. .. change:: :tags: orm, feature :tickets: 2592 The Query can now load entity/scalar-mixed "tuple" rows that contain types which aren't hashable, by setting the flag "hashable=False" on the corresponding TypeEngine object in use. Custom types that return unhashable types (typically lists) can set this flag to False. .. change:: :tags: orm, bug :tickets: 2481 Improvements to joined/subquery eager loading dealing with chains of subclass entities sharing a common base, with no specific "join depth" provided. Will chain out to each subclass mapper individually before detecting a "cycle", rather than considering the base class to be the source of the "cycle". .. change:: :tags: orm, bug :tickets: 2320 The "passive" flag on Session.is_modified() no longer has any effect. is_modified() in all cases looks only at local in-memory modified flags and will not emit any SQL or invoke loader callables/initializers. .. change:: :tags: orm, bug :tickets: 2405 The warning emitted when using delete-orphan cascade with one-to-many or many-to-many without single-parent=True is now an error. The ORM would fail to function subsequent to this warning in any case. .. change:: :tags: orm, bug :tickets: 2350 Lazy loads emitted within flush events such as before_flush(), before_update(), etc. will now function as they would within non-event code, regarding consideration of the PK/FK values used in the lazy-emitted query. Previously, special flags would be established that would cause lazy loads to load related items based on the "previous" value of the parent PK/FK values specifically when called upon within a flush; the signal to load in this way is now localized to where the unit of work actually needs to load that way.
Note that the UOW does sometimes load these collections before the before_update() event is called, so the usage of "passive_updates" or not can affect whether or not a collection will represent the "old" or "new" data, when accessed within a flush event, based on when the lazy load was emitted. The change is backwards incompatible in the exceedingly small chance that user event code depended on the old behavior. .. change:: :tags: orm, feature :tickets: 2179 Query now "auto correlates" by default in the same way as select() does. Previously, a Query used as a subquery in another would require the correlate() method be called explicitly in order to correlate a table on the inside to the outside. As always, correlate(None) disables correlation. .. change:: :tags: orm, feature :tickets: 2464 The after_attach event is now emitted after the object is established in Session.new or Session.identity_map upon Session.add(), Session.merge(), etc., so that the object is represented in these collections when the event is called. Added before_attach event to accommodate use cases that need autoflush with a pre-attached object. .. change:: :tags: orm, feature :tickets: The Session will produce warnings when unsupported methods are used inside the "execute" portion of the flush. These are the familiar methods add(), delete(), etc. as well as collection and related-object manipulations, as called within mapper-level flush events like after_insert(), after_update(), etc. It's been prominently documented for a long time that SQLAlchemy cannot guarantee results when the Session is manipulated within the execution of the flush plan, however users are still doing it, so now there's a warning. Maybe someday the Session will be enhanced to support these operations inside of the flush, but for now, results can't be guaranteed. .. change:: :tags: orm, bug :tickets: 2582, 2566 Continuing with the theme of extra state post-flush due to event listeners; any states that are marked as "dirty" from an attribute perspective, usually via column-attribute set events within after_insert(), after_update(), etc., will get the "history" flag reset in all cases, instead of only those instances that were part of the flush. This has the effect that this "dirty" state doesn't carry over after the flush and won't result in UPDATE statements. A warning is emitted to this effect; the set_committed_state() method can be used to assign attributes on objects without producing history events. .. change:: :tags: orm, feature :tickets: 2245 ORM entities can be passed to the core select() construct as well as to the select_from(), correlate(), and correlate_except() methods of select(), where they will be unwrapped into selectables. .. change:: :tags: orm, feature :tickets: 2245 Some support for auto-rendering of a relationship join condition based on the mapped attribute, with usage of core SQL constructs. E.g. select([SomeClass]).where(SomeClass.somerelationship) would render SELECT from "someclass" and use the primaryjoin of "somerelationship" as the WHERE clause. This changes the previous meaning of "SomeClass.somerelationship" when used in a core SQL context; previously, it would "resolve" to the parent selectable, which wasn't generally useful. Also works with query.filter(). Related to. .. change:: :tags: orm, feature :tickets: 2526 The registry of classes in declarative_base() is now a WeakValueDictionary.
So subclasses of "Base" that are dereferenced will be garbage collected, *if they are not referred to by any other mappers/superclass mappers*. See the next note for this ticket. .. change:: :tags: orm, feature :tickets: 2472 Conflicts between columns on single-inheritance declarative subclasses, with or without using a mixin, can be resolved using a new @declared_attr usage described in the documentation. .. change:: :tags: orm, feature :tickets: 2472 declared_attr can now be used on non-mixin classes, even though this is generally only useful for single-inheritance subclass column conflict resolution. .. change:: :tags: orm, feature :tickets: 2517 declared_attr can now be used with attributes that are not Column or MapperProperty; including any user-defined value as well as association proxy objects. .. change:: :tags: orm, bug :tickets: 2565 Fixed a disconnect that slowly evolved between a @declared_attr Column and a directly-defined Column on a mixin. In both cases, the Column will be applied to the declared class' table, but not to that of a joined inheritance subclass. Previously, the directly-defined Column would be placed on both the base and the sub table, which isn't typically what's desired. .. change:: :tags: orm, feature :tickets: 2526 *Very limited* support for inheriting mappers to be GC'ed when the class itself is deferenced. The mapper must not have its own table (i.e. single table inh only) without polymorphic attributes in place. This allows for the use case of creating a temporary subclass of a declarative mapped class, with no table or mapping directives of its own, to be garbage collected when dereferenced by a unit test. .. change:: :tags: orm, feature :tickets: 2338 Declarative now maintains a registry of classes by string name as well as by full module-qualified name. Multiple classes with the same name can now be looked up based on a module-qualified string within relationship(). Simple class name lookups where more than one class shares the same name now raises an informative error message. .. change:: :tags: orm, feature :tickets: 2535 Can now provide class-bound attributes that override columns which are of any non-ORM type, not just descriptors. .. change:: :tags: orm, feature :tickets: 1729 Added with_labels and reduce_columns keyword arguments to Query.subquery(), to provide two alternate strategies for producing queries with uniquely- named columns. . .. change:: :tags: orm, feature :tickets: 2476 A warning is emitted when a reference to an instrumented collection is no longer associated with the parent class due to expiration/attribute refresh/collection replacement, but an append or remove operation is received on the now-detached collection. .. change:: :tags: orm, bug :tickets: 2549 Declarative can now propagate a column declared on a single-table inheritance subclass up to the parent class' table, when the parent class is itself mapped to a join() or select() statement, directly or via joined inheritance, and not just a Table. .. change:: :tags: orm, bug :tickets: An error is emitted when uselist=False is combined with a "dynamic" loader. This is a warning in 0.7.9. .. change:: :tags: removed, orm :tickets: Deprecated identifiers removed: * allow_null_pks mapper() argument (use allow_partial_pks) * _get_col_to_prop() mapper method (use get_property_by_column()) * dont_load argument to Session.merge() (use load=True) * sqlalchemy.orm.shard module (use sqlalchemy.ext.horizontal_shard) .. 
.. change:: :tags: engine, feature :tickets: 2511 Connection event listeners can now be associated with individual Connection objects, not just Engine objects. .. change:: :tags: engine, feature :tickets: 2459 The before_cursor_execute event fires off for so-called "_cursor_execute" events, which are usually special-case executions of primary-key bound sequences and default-generation SQL phrases that are invoked separately when RETURNING is not used with INSERT. .. change:: :tags: engine, feature :tickets: The libraries used by the test suite have been moved around a bit so that they are part of the SQLAlchemy install again. In addition, a new suite of tests is present in the new sqlalchemy.testing.suite package. This is an under-development system that hopes to provide a universal testing suite for external dialects. Dialects which are maintained outside of SQLAlchemy can use the new test fixture as the framework for their own tests, and will get for free a "compliance" suite of dialect-focused tests, including an improved "requirements" system where specific capabilities and features can be enabled or disabled for testing. .. change:: :tags: engine, bug :tickets: The Inspector.get_table_names() order_by="foreign_key" feature now sorts tables by dependee first, to be consistent with util.sort_tables and metadata.sorted_tables. .. change:: :tags: engine, bug :tickets: 2522 Fixed bug whereby if a database restart affected multiple connections, each connection would individually invoke a new disposal of the pool, even though only one disposal is needed. .. change:: :tags: engine, feature :tickets: 2462 Added a new system for registration of new dialects in-process without using an entrypoint. See the docs for "Registering New Dialects". .. change:: :tags: engine, feature :tickets: 2556 The "required" flag is set to True by default, if not passed explicitly, on bindparam() if the "value" or "callable" parameters are not passed. This will cause statement execution to check for the parameter being present in the final collection of bound parameters, rather than implicitly assigning None. .. change:: :tags: engine, feature :tickets: Various API tweaks to the "dialect" API to better support highly specialized systems such as the Akiban database, including more hooks to allow an execution context to access type processors. .. change:: :tags: engine, bug :tickets: 2397 The names of the columns on the .c. attribute of a select().apply_labels() are now based on ``<tablename>_<colkey>`` instead of ``<tablename>_<colname>``, for those columns that have a distinctly named .key. .. change:: :tags: engine, feature :tickets: 2422 Inspector.get_primary_keys() is deprecated; use Inspector.get_pk_constraint(). Courtesy Diana Clarke. .. change:: :tags: engine, bug :tickets: The autoload_replace flag on Table, when False, will cause any reflected foreign key constraints which refer to already-declared columns to be skipped, assuming that the in-Python declared column will take over the task of specifying in-Python ForeignKey or ForeignKeyConstraint declarations. .. change:: :tags: engine, bug :tickets: 2498 The ResultProxy methods inserted_primary_key, last_updated_params(), last_inserted_params(), postfetch_cols(), prefetch_cols() all assert that the given statement is a compiled construct, and is an insert() or update() statement as is appropriate, else raise InvalidRequestError. .. change:: :tags: engine, feature :tickets: New C extension module "utils" has been added for additional function speedups as we have time to implement.
.. change:: :tags: engine :tickets: ResultProxy.last_inserted_ids is removed, replaced by inserted_primary_key. .. change:: :tags: feature, sql :tickets: 2547 Major rework of operator system in Core, to allow redefinition of existing operators as well as addition of new operators at the type level. New types can be created from existing ones which add or redefine operations that are exported out to column expressions, in a similar manner to how the ORM has allowed comparator_factory. The new architecture moves this capability into the Core so that it is consistently usable in all cases, propagating cleanly using existing type propagation behavior. .. change:: :tags: feature, sql :tickets: 1534, 2547 To complement, types can now provide "bind expressions" and "column expressions" which allow compile-time injection of SQL expressions into statements on a per-column or per-bind level. This is to suit the use case of a type which needs to augment bind- and result- behavior at the SQL level, as opposed to in the Python level. Allows for schemes like transparent encryption/ decryption, usage of Postgis functions, etc. .. change:: :tags: feature, sql :tickets: The Core operator system now includes the `getitem` operator, i.e. the bracket operator in Python. This is used at first to provide index and slice behavior to the Postgresql ARRAY type, and also provides a hook for end-user definition of custom __getitem__ schemes which can be applied at the type level as well as within ORM-level custom operator schemes. `lshift` (<<) and `rshift` (>>) are also supported as optional operators. Note that this change has the effect that descriptor-based __getitem__ schemes used by the ORM in conjunction with synonym() or other "descriptor-wrapped" schemes will need to start using a custom comparator in order to maintain this behavior. .. change:: :tags: feature, sql :tickets: 2537 Revised the rules used to determine the operator precedence for the user-defined operator, i.e. that granted using the ``op()`` method. Previously, the smallest precedence was applied in all cases; now the default precedence is zero, lower than all operators except "comma" (such as is used in the argument list of a ``func`` call) and "AS", and is also customizable via the "precedence" argument on the ``op()`` method. .. change:: :tags: feature, sql :tickets: 2276 Added "collation" parameter to all String types. When present, renders as ``COLLATE <collation>``. This is to support the COLLATE keyword now supported by several databases including MySQL, SQLite, and Postgresql. .. change:: :tags: change, sql :tickets: The Text() type renders the length given to it, if a length was specified. .. change:: :tags: feature, sql :tickets: Custom unary operators can now be used by combining operators.custom_op() with UnaryExpression(). .. change:: :tags: bug, sql :tickets: 2564 A tweak to column precedence which moves the "concat" and "match" operators to be the same as that of "is", "like", and others; this helps with parenthesization rendering when used in conjunction with "IS". .. change:: :tags: feature, sql :tickets: Enhanced GenericFunction and func.* to allow for user-defined GenericFunction subclasses to be available via the func.* namespace automatically by classname, optionally using a package name, as well as with the ability to have the rendered name different from the identified name in func.*.
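For instance, a minimal sketch of a user-defined GenericFunction subclass; the ``as_utc`` function name is an illustrative assumption::

    from sqlalchemy.sql.functions import GenericFunction
    from sqlalchemy.types import DateTime
    from sqlalchemy import select, func

    class as_utc(GenericFunction):
        # the return type of the function
        type = DateTime

    # the subclass is now addressable via func.* by its class name,
    # rendering as "as_utc()"
    print(select([func.as_utc()]))

The ``name``, ``identifier`` and ``package`` class attributes can be used to control the rendered name, the ``func.`` accessor name, and an optional ``func.<package>.`` namespace, respectively.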
.. change:: :tags: feature, sql :tickets: 2562 The cast() and extract() constructs will now be produced via the func.* accessor as well, as users naturally try to access these names from func.* they might as well do what's expected, even though the returned object is not a FunctionElement. .. change:: :tags: changed, sql :tickets: Most classes in expression.sql are no longer preceded with an underscore, i.e. Label, SelectBase, Generative, CompareMixin. _BindParamClause is also renamed to BindParameter. The old underscore names for these classes will remain available as synonyms for the foreseeable future. .. change:: :tags: feature, sql :tickets: 2208 The Inspector object can now be acquired using the new inspect() service, part of :ticket:`2208`. .. change:: :tags: feature, sql :tickets: 2418 The column_reflect event now accepts the Inspector object as the first argument, preceding "table". Code which uses the 0.7 version of this very new event will need modification to add the "inspector" object as the first argument. .. change:: :tags: feature, sql :tickets: 2423 The behavior of column targeting in result sets is now case sensitive by default. SQLAlchemy for many years would run a case-insensitive conversion on these values, probably to alleviate early case sensitivity issues with dialects like Oracle and Firebird. These issues have been more cleanly solved in more modern versions so the performance hit of calling lower() on identifiers is removed. The case insensitive comparisons can be re-enabled by setting "case_sensitive=False" on create_engine(). .. change:: :tags: bug, sql :tickets: 2591 Applying a column expression to a select statement using a label with or without other modifying constructs will no longer "target" that expression to the underlying Column; this affects ORM operations that rely upon Column targeting in order to retrieve results. That is, a query like query(User.id, User.id.label('foo')) will now track the value of each "User.id" expression separately instead of munging them together. It is not expected that any users will be impacted by this; however, a usage that uses select() in conjunction with query.from_statement() and attempts to load fully composed ORM entities may not function as expected if the select() named Column objects with arbitrary .label() names, as these will no longer target to the Column objects mapped by that entity. .. change:: :tags: feature, sql :tickets: 2415 The "unconsumed column names" warning emitted when keys are present in insert.values() or update.values() that aren't in the target table is now an exception. .. change:: :tags: feature, sql :tickets: 2502 Added "MATCH" clause to ForeignKey, ForeignKeyConstraint, courtesy Ryan Kelly. .. change:: :tags: feature, sql :tickets: 2507 Added support for DELETE and UPDATE from an alias of a table, which would presumably be related to itself elsewhere in the query, courtesy Ryan Kelly. .. change:: :tags: feature, sql :tickets: select() features a correlate_except() method, which auto-correlates all selectables except those passed. .. change:: :tags: feature, sql :tickets: 2431 The prefix_with() method is now available on each of select(), insert(), update(), delete(), all with the same API, accepting multiple prefix calls, as well as a "dialect name" so that the prefix can be limited to one kind of dialect. .. change:: :tags: feature, sql :tickets: 1729 Added reduce_columns() method to select() construct, replaces columns inline using the util.reduce_columns utility function to remove equivalent columns.
reduce_columns() also adds "with_only_synonyms" to limit the reduction just to those columns which have the same name. The deprecated fold_equivalents() feature is removed. .. change:: :tags: feature, sql :tickets: 2470 Reworked the startswith(), endswith(), contains() operators to do a better job with negation (NOT LIKE), and also to assemble them at compilation time so that their rendered SQL can be altered, such as in the case of Firebird's STARTING WITH. .. change:: :tags: feature, sql :tickets: 2463 Added a hook to the system of rendering CREATE TABLE that provides access to the render for each Column individually, by constructing a @compiles function against the new schema.CreateColumn construct. .. change:: :tags: feature, sql :tickets: "scalar" selects now have a where() method to help with generative building. Also a slight adjustment regarding how scalar selects "correlate" columns; the new methodology no longer applies meaning to the underlying Table column being selected. This improves some fairly esoteric situations, and the logic that was there didn't seem to have any purpose. .. change:: :tags: bug, sql :tickets: 2520 Fixes to the interpretation of the Column "default" parameter as a callable to not pass ExecutionContext into a keyword argument parameter. .. change:: :tags: bug, sql :tickets: 2410 All of UniqueConstraint, ForeignKeyConstraint, CheckConstraint, and PrimaryKeyConstraint will attach themselves to their parent table automatically when they refer to a Table-bound Column object directly (i.e. not just a string column name), and refer to one and only one Table. Prior to 0.8 this behavior occurred for UniqueConstraint and PrimaryKeyConstraint, but not ForeignKeyConstraint or CheckConstraint. .. change:: :tags: bug, sql :tickets: 2594 TypeDecorator now includes a generic repr() that works in terms of the "impl" type by default. This is a behavioral change for those TypeDecorator classes that specify a custom __init__ method; those types will need to re-define __repr__() if they need __repr__() to provide a faithful constructor representation. .. change:: :tags: bug, sql :tickets: 2168 column.label(None) now produces an anonymous label, instead of returning the column object itself, consistent with the behavior of label(column, None). .. change:: :tags: feature, sql :tickets: 2455 An explicit error is raised when a ForeignKeyConstraint() that was constructed to refer to multiple remote tables is first used. .. change:: :tags: access, feature :tickets: The MS Access dialect has been moved to its own project on Bitbucket, taking advantage of the new SQLAlchemy dialect compliance suite. The dialect is still in very rough shape and probably not ready for general use yet, however it does have *extremely* rudimentary functionality now. https://bitbucket.org/zzzeek/sqlalchemy-access .. change:: :tags: maxdb, moved :tickets: The MaxDB dialect, which hasn't been functional for several years, is moved out to a pending bitbucket project, https://bitbucket.org/zzzeek/sqlalchemy-maxdb. .. change:: :tags: sqlite, feature :tickets: 2363 The SQLite date and time types have been overhauled to support a more open ended format for input and output, using name based format strings and regexps. A new argument "microseconds" also provides the option to omit the "microseconds" portion of timestamps. Thanks to Nathan Wright for the work and tests on this. .. change:: :tags: mssql, feature :tickets: SQL Server dialect can be given database-qualified schema names, i.e.
"schema='mydatabase.dbo'"; reflection operations will detect this, split the schema among the "." to get the owner separately, and emit a "USE mydatabase" statement before reflecting targets within the "dbo" owner; the existing database returned from DB_NAME() is then restored. .. change:: :tags: mssql, bug :tickets: 2277 removed legacy behavior whereby a column comparison to a scalar SELECT via == would coerce to an IN with the SQL server dialect. This is implicit behavior which fails in other scenarios so is removed. Code which relies on this needs to be modified to use column.in_(select) explicitly. .. change:: :tags: mssql, feature :tickets: updated support for the mxodbc driver; mxodbc 3.2.1 is recommended for full compatibility. .. change:: :tags: postgresql, feature :tickets: 2441 postgresql.ARRAY features an optional "dimension" argument, will assign a specific number of dimensions to the array which will render in DDL as ARRAY[][]..., also improves performance of bind/result processing. .. change:: :tags: postgresql, feature :tickets: postgresql.ARRAY now supports indexing and slicing. The Python [] operator is available on all SQL expressions that are of type ARRAY; integer or simple slices can be passed. The slices can also be used on the assignment side in the SET clause of an UPDATE statement by passing them into Update.values(); see the docs for examples. .. change:: :tags: postgresql, feature :tickets: Added new "array literal" construct postgresql.array(). Basically a "tuple" that renders as ARRAY[1,2,3]. .. change:: :tags: postgresql, feature :tickets: 2506 Added support for the Postgresql ONLY keyword, which can appear corresponding to a table in a SELECT, UPDATE, or DELETE statement. The phrase is established using with_hint(). Courtesy Ryan Kelly .. change:: :tags: postgresql, feature :tickets: The "ischema_names" dictionary of the Postgresql dialect is "unofficially" customizable. Meaning, new types such as PostGIS types can be added into this dictionary, and the PG type reflection code should be able to handle simple types with variable numbers of arguments. The functionality here is "unofficial" for three reasons: 1. this is not an "official" API. Ideally an "official" API would allow custom type-handling callables at the dialect or global level in a generic way. 2. This is only implemented for the PG dialect, in particular because PG has broad support for custom types vs. other database backends. A real API would be implemented at the default dialect level. 3. The reflection code here is only tested against simple types and probably has issues with more compositional types. patch courtesy Éric Lemoine. .. change:: :tags: firebird, feature :tickets: 2470 The "startswith()" operator renders as "STARTING WITH", "~startswith()" renders as "NOT STARTING WITH", using FB's more efficient operator. .. change:: :tags: firebird, bug :tickets: 2505 CompileError is raised when VARCHAR with no length is attempted to be emitted, same way as MySQL. .. change:: :tags: firebird, bug :tickets: Firebird now uses strict "ansi bind rules" so that bound parameters don't render in the columns clause of a statement - they render literally instead. .. change:: :tags: firebird, bug :tickets: Support for passing datetime as date when using the DateTime type with Firebird; other dialects support this. .. change:: :tags: firebird, feature :tickets: 2504 An experimental dialect for the fdb driver is added, but is untested as I cannot get the fdb package to build. .. 
.. change:: :tags: bug, mysql :tickets: 2404 The dialect no longer emits an expensive server collations query, as well as server casing, on first connect. These functions are still available as semi-private. .. change:: :tags: feature, mysql :tickets: 2534 Added TIME type to mysql dialect, which accepts the "fsp" argument, the new "fractional seconds" specifier for recent MySQL versions. The datatype will interpret a microseconds portion received from the driver, however note that at this time most/all MySQL DBAPIs do not support returning this value. .. change:: :tags: oracle, bug :tickets: 2437 Quoting information is now passed along from a Column with quote=True when generating a same-named bound parameter to the bindparam() object, as is the case in generated INSERT and UPDATE statements, so that unknown reserved names can be fully supported. .. change:: :tags: oracle, feature :tickets: 2561 The types of columns excluded from the setinputsizes() set can be customized by sending a list of string DBAPI type names to exclude, using the exclude_setinputsizes dialect parameter. This list was previously fixed. The list also now defaults to STRING, UNICODE, removing CLOB, NCLOB from the list. .. change:: :tags: oracle, bug :tickets: The CreateIndex construct in Oracle will now schema-qualify the name of the index to be that of the parent table. Previously this name was omitted which apparently creates the index in the default schema, rather than that of the table. .. change:: :tags: sql, feature :tickets: 2580 Added :meth:`.ColumnOperators.notin_`, :meth:`.ColumnOperators.notlike`, :meth:`.ColumnOperators.notilike` to :class:`.ColumnOperators`. .. change:: :tags: sql, removed The long-deprecated and non-functional ``assert_unicode`` flag on :func:`.create_engine` as well as :class:`.String` is removed. SQLAlchemy-1.0.11/doc/build/changelog/migration_10.rst ============================== What's New in SQLAlchemy 1.0? ============================== .. admonition:: About this Document This document describes changes between SQLAlchemy version 0.9, undergoing maintenance releases as of May, 2014, and SQLAlchemy version 1.0, released in April, 2015. Document last updated: June 9, 2015 Introduction ============ This guide introduces what's new in SQLAlchemy version 1.0, and also documents changes which affect users migrating their applications from the 0.9 series of SQLAlchemy to 1.0. Please carefully review the sections on behavioral changes for potentially backwards-incompatible changes in behavior. New Features and Improvements - ORM =================================== New Session Bulk INSERT/UPDATE API ---------------------------------- A new series of :class:`.Session` methods which provide hooks directly into the unit of work's facility for emitting INSERT and UPDATE statements has been created. When used correctly, this expert-oriented system can allow ORM-mappings to be used to generate bulk insert and update statements batched into executemany groups, allowing the statements to proceed at speeds that rival direct use of the Core.
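As a minimal sketch of the new API, assuming a hypothetical ``User`` mapped class and an established session::

    session.bulk_insert_mappings(
        User,
        [{"name": "user %d" % i} for i in range(10000)],
    )
    session.commit()

Each dictionary supplies the attributes for one row; the unit of work batches the resulting INSERT statements into executemany groups.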
.. seealso:: :ref:`bulk_operations` - introduction and full documentation :ticket:`3100` New Performance Example Suite ------------------------------ Inspired by the benchmarking done for the :ref:`bulk_operations` feature as well as for the :ref:`faq_how_to_profile` section of the FAQ, a new example section has been added which features several scripts designed to illustrate the relative performance profile of various Core and ORM techniques. The scripts are organized into use cases, and are packaged under a single console interface such that any combination of demonstrations can be run, dumping out timings, Python profile results and/or RunSnake profile displays. .. seealso:: :ref:`examples_performance` "Baked" Queries --------------- The "baked" query feature is an unusual new approach which allows for straightforward construction and invocation of :class:`.Query` objects using caching, which upon successive calls features vastly reduced Python function call overhead (over 75%). By specifying a :class:`.Query` object as a series of lambdas which are only invoked once, a query as a pre-compiled unit begins to be feasible:: from sqlalchemy.ext import baked from sqlalchemy import bindparam bakery = baked.bakery() def search_for_user(session, username, email=None): baked_query = bakery(lambda session: session.query(User)) baked_query += lambda q: q.filter(User.name == bindparam('username')) baked_query += lambda q: q.order_by(User.id) if email: baked_query += lambda q: q.filter(User.email == bindparam('email')) result = baked_query(session).params(username=username, email=email).all() return result .. seealso:: :ref:`baked_toplevel` :ticket:`3054` .. _feature_3150: Improvements to declarative mixins, ``@declared_attr`` and related features ---------------------------------------------------------------------------- The declarative system in conjunction with :class:`.declared_attr` has been overhauled to support new capabilities. A function decorated with :class:`.declared_attr` is now called only **after** any mixin-based column copies are generated. This means the function can call upon mixin-established columns and will receive a reference to the correct :class:`.Column` object:: class HasFooBar(object): foobar = Column(Integer) @declared_attr def foobar_prop(cls): return column_property('foobar: ' + cls.foobar) class SomeClass(HasFooBar, Base): __tablename__ = 'some_table' id = Column(Integer, primary_key=True) Above, ``SomeClass.foobar_prop`` will be invoked against ``SomeClass``, and ``SomeClass.foobar`` will be the final :class:`.Column` object that is to be mapped to ``SomeClass``, as opposed to the non-copied object present directly on ``HasFooBar``, even though the columns aren't mapped yet. The :class:`.declared_attr` function now **memoizes** the value that's returned on a per-class basis, so that repeated calls to the same attribute will return the same value. We can alter the example to illustrate this:: class HasFooBar(object): @declared_attr def foobar(cls): return Column(Integer) @declared_attr def foobar_prop(cls): return column_property('foobar: ' + cls.foobar) class SomeClass(HasFooBar, Base): __tablename__ = 'some_table' id = Column(Integer, primary_key=True) Previously, ``SomeClass`` would be mapped with one particular copy of the ``foobar`` column, but the ``foobar_prop`` by calling upon ``foobar`` a second time would produce a different column.
The value of ``SomeClass.foobar`` is now memoized during declarative setup time, so that even before the attribute is mapped by the mapper, the interim column value will remain consistent no matter how many times the :class:`.declared_attr` is called upon. The two behaviors above should help considerably with declarative definition of many types of mapper properties that derive from other attributes, where the :class:`.declared_attr` function is called upon from other :class:`.declared_attr` functions locally present before the class is actually mapped. For a pretty slim edge case where one wishes to build a declarative mixin that establishes distinct columns per subclass, a new modifier :attr:`.declared_attr.cascading` is added. With this modifier, the decorated function will be invoked individually for each class in the mapped inheritance hierarchy. While this is already the behavior for special attributes such as ``__table_args__`` and ``__mapper_args__``, for columns and other properties the behavior by default assumes that the attribute is affixed to the base class only, and just inherited from subclasses. With :attr:`.declared_attr.cascading`, individual behaviors can be applied:: class HasSomeAttribute(object): @declared_attr.cascading def some_id(cls): if has_inherited_table(cls): return Column(ForeignKey('myclass.id'), primary_key=True) else: return Column(Integer, primary_key=True) class MyClass(HasSomeAttribute, Base): "" # ... class MySubClass(MyClass): "" # ... .. seealso:: :ref:`mixin_inheritance_columns` Finally, the :class:`.AbstractConcreteBase` class has been reworked so that a relationship or other mapper property can be set up inline on the abstract base:: from sqlalchemy import Column, Integer, ForeignKey from sqlalchemy.orm import relationship from sqlalchemy.ext.declarative import (declarative_base, declared_attr, AbstractConcreteBase) Base = declarative_base() class Something(Base): __tablename__ = u'something' id = Column(Integer, primary_key=True) class Abstract(AbstractConcreteBase, Base): id = Column(Integer, primary_key=True) @declared_attr def something_id(cls): return Column(ForeignKey(Something.id)) @declared_attr def something(cls): return relationship(Something) class Concrete(Abstract): __tablename__ = u'cca' __mapper_args__ = {'polymorphic_identity': 'cca', 'concrete': True} The above mapping will set up a table ``cca`` with both an ``id`` and a ``something_id`` column, and ``Concrete`` will also have a relationship ``something``. The new feature is that ``Abstract`` will also have an independently configured relationship ``something`` that builds against the polymorphic union of the base. :ticket:`3150` :ticket:`2670` :ticket:`3149` :ticket:`2952` :ticket:`3050` ORM full object fetches 25% faster ---------------------------------- The mechanics of the ``loading.py`` module as well as the identity map have undergone several passes of inlining, refactoring, and pruning, so that a raw load of rows now populates ORM-based objects around 25% faster.
Assuming a 1M row table, a script like the following illustrates the type of load that's improved the most:: import time from sqlalchemy import Integer, Column, create_engine, Table from sqlalchemy.orm import Session from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Foo(Base): __table__ = Table( 'foo', Base.metadata, Column('id', Integer, primary_key=True), Column('a', Integer(), nullable=False), Column('b', Integer(), nullable=False), Column('c', Integer(), nullable=False), ) engine = create_engine( 'mysql+mysqldb://scott:tiger@localhost/test', echo=True) sess = Session(engine) now = time.time() # avoid using all() so that we don't have the overhead of building # a large list of full objects in memory for obj in sess.query(Foo).yield_per(100).limit(1000000): pass print("Total time: %d" % (time.time() - now)) Local MacBookPro results drop from 19 seconds on 0.9 to 14 seconds on 1.0. The :meth:`.Query.yield_per` call is always a good idea when batching huge numbers of rows, as it prevents the Python interpreter from having to allocate a huge amount of memory for all objects and their instrumentation at once. Without the :meth:`.Query.yield_per`, the above script on the MacBookPro takes 31 seconds on 0.9 and 26 seconds on 1.0, with the extra time spent setting up very large memory buffers. .. _feature_3176: New KeyedTuple implementation dramatically faster ------------------------------------------------- We took a look into the :class:`.KeyedTuple` implementation in the hopes of improving queries like this:: rows = sess.query(Foo.a, Foo.b, Foo.c).all() The :class:`.KeyedTuple` class is used rather than Python's ``collections.namedtuple()``, because the latter has a very complex type-creation routine that benchmarks much slower than :class:`.KeyedTuple`. However, when fetching hundreds of thousands of rows, ``collections.namedtuple()`` quickly overtakes :class:`.KeyedTuple`, which becomes dramatically slower as the number of instance invocations goes up. What to do? A new type that hedges between the approaches of both. Benching all three types for "size" (number of rows returned) and "num" (number of distinct queries), the new "lightweight keyed tuple" either outperforms both, or lags very slightly behind the faster object, depending on the scenario. In the "sweet spot", where we are both creating a good number of new types as well as fetching a good number of rows, the lightweight object totally smokes both namedtuple and KeyedTuple:: ----------------- size=10 num=10000 # few rows, lots of queries namedtuple: 3.60302400589 # namedtuple falls over keyedtuple: 0.255059957504 # KeyedTuple very fast lw keyed tuple: 0.582715034485 # lw keyed trails right on KeyedTuple ----------------- size=100 num=1000 # <--- sweet spot namedtuple: 0.365247011185 keyedtuple: 0.24896979332 lw keyed tuple: 0.0889317989349 # lw keyed blows both away! ----------------- size=10000 num=100 namedtuple: 0.572599887848 keyedtuple: 2.54251694679 lw keyed tuple: 0.613876104355 ----------------- size=1000000 num=10 # few queries, lots of rows namedtuple: 5.79669594765 # namedtuple very fast keyedtuple: 28.856498003 # KeyedTuple falls over lw keyed tuple: 6.74346804619 # lw keyed trails right on namedtuple :ticket:`3176` .. _feature_slots: Significant Improvements in Structural Memory Use -------------------------------------------------- Structural memory use has been improved via much more significant use of ``__slots__`` for many internal objects.
This optimization is particularly geared towards the base memory size of
large applications that have lots of tables and columns, and reduces memory
size for a variety of high-volume objects including event listening
internals, comparator objects, and parts of the ORM attribute and loader
strategy system.

A bench that makes use of heapy to measure the startup size of Nova
illustrates a difference of about 3.7 fewer megs, or 46%, taken up by
SQLAlchemy's objects, associated dictionaries, as well as weakrefs, within
a basic import of "nova.db.sqlalchemy.models"::

    # reported by heapy, summation of SQLAlchemy objects +
    # associated dicts + weakref-related objects with core of Nova imported:

    Before: total count 26477 total bytes 7975712
    After: total count 18181 total bytes 4236456

    # reported for the Python module space overall with the
    # core of Nova imported:

    Before: Partition of a set of 355558 objects. Total size = 61661760 bytes.
    After: Partition of a set of 346034 objects. Total size = 57808016 bytes.

.. _feature_updatemany:

UPDATE statements are now batched with executemany() in a flush
----------------------------------------------------------------

UPDATE statements can now be batched within an ORM flush
into a more performant executemany() call, similarly to how INSERT
statements can be batched; this will be invoked within flush
based on the following criteria:

* Two or more UPDATE statements in sequence involve the identical set of
  columns to be modified.

* The statement has no embedded SQL expressions in the SET clause.

* The mapping does not use a :paramref:`~.orm.mapper.version_id_col`, or
  the backend dialect supports a "sane" rowcount for an executemany()
  operation; most DBAPIs support this correctly now.

.. _bug_3035:

Session.get_bind() handles a wider variety of inheritance scenarios
-------------------------------------------------------------------

The :meth:`.Session.get_bind` method is invoked whenever a query or unit
of work flush process seeks to locate the database engine that corresponds
to a particular class.   The method has been improved to handle a variety
of inheritance-oriented scenarios, including:

* Binding to a Mixin or Abstract Class::

    class MyClass(SomeMixin, Base):
        __tablename__ = 'my_table'
        # ...

    session = Session(binds={SomeMixin: some_engine})

* Binding to inherited concrete subclasses individually based on table::

    class BaseClass(Base):
        __tablename__ = 'base'

        # ...

    class ConcreteSubClass(BaseClass):
        __tablename__ = 'concrete'

        # ...

        __mapper_args__ = {'concrete': True}

    session = Session(binds={
        base_table: some_engine,
        concrete_table: some_other_engine
    })

:ticket:`3035`

.. _bug_3227:

Session.get_bind() will receive the Mapper in all relevant Query cases
-----------------------------------------------------------------------

A series of issues were repaired where the :meth:`.Session.get_bind`
would not receive the primary :class:`.Mapper` of the :class:`.Query`,
even though this mapper was readily available (the primary mapper is the
single mapper, or alternatively the first mapper, that is associated with
a :class:`.Query` object).
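For context, a user-defined :meth:`.Session.get_bind` of the kind affected
might look like the following; this is purely a hypothetical routing
sketch, where ``engines`` is an assumed dictionary of pre-configured
:class:`.Engine` objects::

    from sqlalchemy.orm import Session

    class RoutingSession(Session):
        """Hypothetical Session that picks an engine per mapped class."""

        def get_bind(self, mapper=None, clause=None):
            # with 1.0, "mapper" is reliably passed for count(),
            # update(), delete() and column-based queries as well
            if mapper is not None and mapper.class_.__name__ in engines:
                return engines[mapper.class_.__name__]
            return engines['default']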
The :class:`.Mapper` object, when passed to :meth:`.Session.get_bind`,
is typically used by sessions that make use of the
:paramref:`.Session.binds` parameter to associate mappers with a
series of engines (although in this use case, things frequently
"worked" in most cases anyway as the bind would be located via the
mapped table object), or more specifically implement a user-defined
:meth:`.Session.get_bind` method that provides some pattern of
selecting engines based on mappers, such as horizontal sharding or a
so-called "routing" session that routes queries to different backends.

These scenarios include:

* :meth:`.Query.count`::

    session.query(User).count()

* :meth:`.Query.update` and :meth:`.Query.delete`, both for the UPDATE/DELETE
  statement as well as for the SELECT used by the "fetch" strategy::

    session.query(User).filter(User.id == 15).update(
        {"name": "foob"}, synchronize_session='fetch')

    session.query(User).filter(User.id == 15).delete(
        synchronize_session='fetch')

* Queries against individual columns::

    session.query(User.id, User.name).all()

* SQL functions and other expressions against indirect mappings such as
  :obj:`.column_property`::

    class User(Base):
        # ...

        score = column_property(func.coalesce(users.c.name, None))

    session.query(func.max(User.score)).scalar()

:ticket:`3227` :ticket:`3242` :ticket:`1326`

.. _feature_2963:

.info dictionary improvements
-----------------------------

The :attr:`.InspectionAttr.info` collection is now available on every kind
of object that one would retrieve from the :attr:`.Mapper.all_orm_descriptors`
collection.  This includes :class:`.hybrid_property` and
:func:`.association_proxy`.  However, as these objects are class-bound
descriptors, they must be accessed **separately** from the class to which
they are attached in order to get at the attribute.  Below this is
illustrated using the :attr:`.Mapper.all_orm_descriptors` namespace::

    class SomeObject(Base):
        # ...

        @hybrid_property
        def some_prop(self):
            return self.value + 5


    inspect(SomeObject).all_orm_descriptors.some_prop.info['foo'] = 'bar'

It is also available as a constructor argument for all :class:`.SchemaItem`
objects (e.g. :class:`.ForeignKey`, :class:`.UniqueConstraint` etc.) as well
as remaining ORM constructs such as :func:`.orm.synonym`.

:ticket:`2971` :ticket:`2963`

.. _bug_3188:

ColumnProperty constructs work a lot better with aliases, order_by
-------------------------------------------------------------------

A variety of issues regarding :func:`.column_property` have been fixed,
most specifically with regards to the :func:`.aliased` construct as well
as the "order by label" logic introduced in 0.9 (see :ref:`migration_1068`).
Given a mapping like the following::

    class A(Base):
        __tablename__ = 'a'

        id = Column(Integer, primary_key=True)

    class B(Base):
        __tablename__ = 'b'

        id = Column(Integer, primary_key=True)
        a_id = Column(ForeignKey('a.id'))


    A.b = column_property(
        select([func.max(B.id)]).where(B.a_id == A.id).correlate(A)
    )

A simple scenario that included "A.b" twice would fail to render
correctly::

    a1 = aliased(A)
    print sess.query(A, a1).order_by(a1.b)

This would order by the wrong column::

    SELECT a.id AS a_id, (SELECT max(b.id) AS max_1 FROM b
    WHERE b.a_id = a.id) AS anon_1, a_1.id AS a_1_id,
    (SELECT max(b.id) AS max_2
    FROM b WHERE b.a_id = a_1.id) AS anon_2
    FROM a, a AS a_1 ORDER BY anon_1

New output::

    SELECT a.id AS a_id, (SELECT max(b.id) AS max_1
    FROM b WHERE b.a_id = a.id) AS anon_1, a_1.id AS a_1_id,
    (SELECT max(b.id) AS max_2
    FROM b WHERE b.a_id = a_1.id) AS anon_2
    FROM a, a AS a_1 ORDER BY anon_2

There were also many scenarios where the "order by" logic would fail
to order by label, for example if the mapping were "polymorphic"::

    class A(Base):
        __tablename__ = 'a'

        id = Column(Integer, primary_key=True)
        type = Column(String)

        __mapper_args__ = {'polymorphic_on': type, 'with_polymorphic': '*'}

The order_by would fail to use the label, as it would be anonymized due
to the polymorphic loading::

    SELECT a.id AS a_id, a.type AS a_type, (SELECT max(b.id) AS max_1
    FROM b WHERE b.a_id = a.id) AS anon_1
    FROM a ORDER BY (SELECT max(b.id) AS max_2
    FROM b WHERE b.a_id = a.id)

Now that the order by label tracks the anonymized label, this now works::

    SELECT a.id AS a_id, a.type AS a_type, (SELECT max(b.id) AS max_1
    FROM b WHERE b.a_id = a.id) AS anon_1
    FROM a ORDER BY anon_1

Included in these fixes are a variety of heisenbugs that could corrupt
the state of an ``aliased()`` construct such that the labeling logic
would again fail; these have also been fixed.

:ticket:`3148` :ticket:`3188`

New Features and Improvements - Core
====================================

.. _feature_3034:

Select/Query LIMIT / OFFSET may be specified as an arbitrary SQL expression
----------------------------------------------------------------------------

The :meth:`.Select.limit` and :meth:`.Select.offset` methods now accept
any SQL expression, in addition to integer values, as arguments.  The ORM
:class:`.Query` object also passes through any expression to the underlying
:class:`.Select` object.   Typically this is used to allow a bound parameter
to be passed, which can be substituted with a value later::

    sel = select([table]).limit(bindparam('mylimit')).offset(bindparam('myoffset'))

Dialects which don't support non-integer LIMIT or OFFSET expressions may
continue to not support this behavior; third party dialects may also need
modification in order to take advantage of the new behavior.  A dialect
which currently uses the ``._limit`` or ``._offset`` attributes will
continue to function for those cases where the limit/offset was specified
as a simple integer value.  However, when a SQL expression is specified,
these two attributes will instead raise a :class:`.CompileError` on access.
A third-party dialect which wishes to support the new feature should now
call upon the ``._limit_clause`` and ``._offset_clause`` attributes to
receive the full SQL expression, rather than the integer value.
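As a brief usage sketch, the bound parameters above can then be supplied
per execution; the ``table`` and the :class:`.Connection` ``conn`` are
assumed to be already established::

    from sqlalchemy import bindparam, select

    sel = select([table]).limit(bindparam('mylimit')).offset(bindparam('myoffset'))

    # LIMIT / OFFSET values are supplied at execution time
    result = conn.execute(sel, mylimit=10, myoffset=50)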
.. _feature_3282:

The ``use_alter`` flag on ``ForeignKeyConstraint`` is (usually) no longer needed
--------------------------------------------------------------------------------

The :meth:`.MetaData.create_all` and :meth:`.MetaData.drop_all` methods will
now make use of a system that automatically renders an ALTER statement
for foreign key constraints that are involved in mutually-dependent cycles
between tables, without the
need to specify :paramref:`.ForeignKeyConstraint.use_alter`.   Additionally,
the foreign key constraints no longer need to have a name in order to be
created via ALTER; only the DROP operation requires a name.  In the case
of a DROP, the feature will ensure that only constraints which have
explicit names are actually included as ALTER statements.  In the
case of an unresolvable cycle within a DROP, the system emits
a succinct and clear error message now if the DROP cannot proceed.

The :paramref:`.ForeignKeyConstraint.use_alter` and
:paramref:`.ForeignKey.use_alter` flags remain in place, and continue to have
the same effect of establishing those constraints for which ALTER is
required during a CREATE/DROP scenario.

As of version 1.0.1, special logic takes over in the case of SQLite, which
does not support ALTER, in the case that during a DROP, the given tables have
an unresolvable cycle; in this case a warning is emitted, and the tables
are dropped with **no** ordering, which is usually fine on SQLite unless
constraints are enabled.  To resolve the warning and proceed with at least
a partial ordering on a SQLite database, particularly one where constraints
are enabled, re-apply "use_alter" flags to those
:class:`.ForeignKey` and :class:`.ForeignKeyConstraint` objects which should
be explicitly omitted from the sort.

.. seealso::

    :ref:`use_alter` - full description of the new behavior.

:ticket:`3282`

.. _change_3330:

ResultProxy "auto close" is now a "soft" close
----------------------------------------------

For many releases, the :class:`.ResultProxy` object has always been
automatically closed out at the point at which all result rows have been
fetched.  This was to allow usage of the object without the need to call
upon :meth:`.ResultProxy.close` explicitly; as all DBAPI resources had been
freed, the object was safe to discard.   However, the object maintained
a strict "closed" behavior, which meant that any subsequent calls to
:meth:`.ResultProxy.fetchone`, :meth:`.ResultProxy.fetchmany` or
:meth:`.ResultProxy.fetchall` would now raise a
:class:`.ResourceClosedError`::

    >>> result = connection.execute(stmt)
    >>> result.fetchone()
    (1, 'x')
    >>> result.fetchone()
    None  # indicates no more rows
    >>> result.fetchone()
    exception: ResourceClosedError

This behavior is inconsistent vs. what pep-249 states, which is
that you can call upon the fetch methods repeatedly even after results
are exhausted.  It also interferes with behavior for some implementations of
result proxy, such as the :class:`.BufferedColumnResultProxy` used by the
cx_oracle dialect for certain datatypes.

To solve this, the "closed" state of the :class:`.ResultProxy` has been
broken into two states; a "soft close" which does the majority of what
"close" does, in that it releases the DBAPI cursor and in the case of a
"close with result" object will also release the connection, and a
"closed" state which is everything included by "soft close" as well as
establishing the fetch methods as "closed".
The :meth:`.ResultProxy.close` method is now never called implicitly, only
the :meth:`.ResultProxy._soft_close` method, which is non-public::

    >>> result = connection.execute(stmt)
    >>> result.fetchone()
    (1, 'x')
    >>> result.fetchone()
    None  # indicates no more rows
    >>> result.fetchone()
    None  # still None
    >>> result.fetchall()
    []
    >>> result.close()
    >>> result.fetchone()
    exception: ResourceClosedError  # *now* it raises

:ticket:`3330` :ticket:`3329`

CHECK Constraints now support the ``%(column_0_name)s`` token in naming conventions
-----------------------------------------------------------------------------------

The ``%(column_0_name)s`` token will derive from the first column found in the
expression of a :class:`.CheckConstraint`::

    metadata = MetaData(
        naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}
    )

    foo = Table('foo', metadata,
        Column('value', Integer),
    )

    CheckConstraint(foo.c.value > 5)

Will render::

    CREATE TABLE foo (
        value INTEGER,
        CONSTRAINT ck_foo_value CHECK (value > 5)
    )

The combination of naming conventions with the constraint produced by a
:class:`.SchemaType` such as :class:`.Boolean` or :class:`.Enum` will also
now make use of all CHECK constraint conventions.

.. seealso::

    :ref:`naming_check_constraints`

    :ref:`naming_schematypes`

:ticket:`3299`

.. _change_3341:

Constraints referring to unattached Columns can auto-attach to the Table when their referred columns are attached
-------------------------------------------------------------------------------------------------------------------

Since at least version 0.8, a :class:`.Constraint` has had the ability to
"auto-attach" itself to a :class:`.Table` based on being passed
table-attached columns::

    from sqlalchemy import Table, Column, MetaData, Integer, UniqueConstraint

    m = MetaData()

    t = Table('t', m,
        Column('a', Integer),
        Column('b', Integer)
    )

    uq = UniqueConstraint(t.c.a, t.c.b)  # will auto-attach to Table

    assert uq in t.constraints

In order to assist with some cases that tend to come up with declarative,
this same auto-attachment logic can now function even if the
:class:`.Column` objects are not yet associated with the :class:`.Table`;
additional events are established such that when those :class:`.Column`
objects are associated, the :class:`.Constraint` is also added::

    from sqlalchemy import Table, Column, MetaData, Integer, UniqueConstraint

    m = MetaData()

    a = Column('a', Integer)
    b = Column('b', Integer)

    uq = UniqueConstraint(a, b)

    t = Table('t', m, a, b)

    assert uq in t.constraints  # constraint auto-attached

The above feature was a late add as of version 1.0.0b3.  A fix as of
version 1.0.4 for :ticket:`3411` ensures that this logic
does not occur if the :class:`.Constraint` refers to a mixture of
:class:`.Column` objects and string column names, as we do not yet have
tracking for the addition of names to a :class:`.Table`::

    from sqlalchemy import Table, Column, MetaData, Integer, UniqueConstraint

    m = MetaData()

    a = Column('a', Integer)
    b = Column('b', Integer)

    uq = UniqueConstraint(a, 'b')

    t = Table('t', m, a, b)

    # constraint *not* auto-attached, as we do not have tracking
    # to locate when a name 'b' becomes available on the table
    assert uq not in t.constraints

Above, the attachment event for column "a" to table "t" will fire off before
column "b" is attached (as "a" is stated in the :class:`.Table` constructor
before "b"), and the constraint will fail to locate "b" if it were to attempt
an attachment.  For consistency, if the constraint refers to any string names,
the autoattach-on-column-attach logic is skipped.
The original auto-attach logic of course remains in place, if the
:class:`.Table` already contains all the target :class:`.Column` objects
at the time the :class:`.Constraint` is constructed::

    from sqlalchemy import Table, Column, MetaData, Integer, UniqueConstraint

    m = MetaData()

    a = Column('a', Integer)
    b = Column('b', Integer)


    t = Table('t', m, a, b)

    uq = UniqueConstraint(a, 'b')

    # constraint auto-attached normally as in older versions
    assert uq in t.constraints

:ticket:`3341` :ticket:`3411`

.. _change_2051:

.. _feature_insert_from_select_defaults:

INSERT FROM SELECT now includes Python and SQL-expression defaults
-------------------------------------------------------------------

:meth:`.Insert.from_select` now includes Python and SQL-expression defaults if
otherwise unspecified; the limitation where non-server column defaults
aren't included in an INSERT FROM SELECT is now lifted and these
expressions are rendered as constants into the SELECT statement::

    from sqlalchemy import Table, Column, MetaData, Integer, select, func

    m = MetaData()

    t = Table(
        't', m,
        Column('x', Integer),
        Column('y', Integer, default=func.somefunction()))

    stmt = select([t.c.x])

    print t.insert().from_select(['x'], stmt)

Will render::

    INSERT INTO t (x, y) SELECT t.x, somefunction() AS somefunction_1
    FROM t

The feature can be disabled using
:paramref:`.Insert.from_select.include_defaults`.

.. _change_3087:

Column server defaults now render literal values
------------------------------------------------

The "literal binds" compiler flag is switched on when a
:class:`.DefaultClause`, set up by :paramref:`.Column.server_default`,
is present as a SQL expression to be compiled.  This allows literals
embedded in SQL to render correctly, such as::

    from sqlalchemy import Table, Column, MetaData, Text
    from sqlalchemy.schema import CreateTable
    from sqlalchemy.dialects.postgresql import ARRAY, array
    from sqlalchemy.dialects import postgresql

    metadata = MetaData()

    tbl = Table("derp", metadata,
        Column("arr", ARRAY(Text),
                    server_default=array(["foo", "bar", "baz"])),
    )

    print(CreateTable(tbl).compile(dialect=postgresql.dialect()))

Now renders::

    CREATE TABLE derp (
        arr TEXT[] DEFAULT ARRAY['foo', 'bar', 'baz']
    )

Previously, the literal values ``"foo", "bar", "baz"`` would render as bound
parameters, which are useless in DDL.

:ticket:`3087`

.. _feature_3184:

UniqueConstraint is now part of the Table reflection process
------------------------------------------------------------

A :class:`.Table` object populated using ``autoload=True`` will now include
:class:`.UniqueConstraint` constructs as well as :class:`.Index` constructs.
This logic has a few caveats for Postgresql and MySQL:

Postgresql
^^^^^^^^^^

Postgresql has the behavior such that when a UNIQUE constraint is
created, it implicitly creates a UNIQUE INDEX corresponding to that
constraint as well.  The :meth:`.Inspector.get_indexes` and the
:meth:`.Inspector.get_unique_constraints` methods will continue to
**both** return these entries distinctly, where
:meth:`.Inspector.get_indexes` now features a token
``duplicates_constraint`` within the index entry indicating the
corresponding constraint when detected.   However, when performing
full table reflection using ``Table(..., autoload=True)``, the
:class:`.Index` construct is detected as being linked to the
:class:`.UniqueConstraint`, and is **not** present within the
:attr:`.Table.indexes` collection; only the :class:`.UniqueConstraint`
will be present in the :attr:`.Table.constraints` collection.
This deduplication logic works by joining to the ``pg_constraint`` table
when querying ``pg_index`` to see if the two constructs are linked.

MySQL
^^^^^

MySQL does not have separate concepts for a UNIQUE INDEX and a UNIQUE
constraint.  While it supports both syntaxes when creating tables and
indexes, it does not store them any differently.  The
:meth:`.Inspector.get_indexes` and the
:meth:`.Inspector.get_unique_constraints` methods will continue to
**both** return an entry for a UNIQUE index in MySQL,
where :meth:`.Inspector.get_unique_constraints` features a new token
``duplicates_index`` within the constraint entry indicating that this is a
dupe entry corresponding to that index.  However, when performing full table
reflection using ``Table(..., autoload=True)``,
the :class:`.UniqueConstraint` construct is
**not** part of the fully reflected :class:`.Table` construct under any
circumstances; this construct is always represented by a :class:`.Index`
with the ``unique=True`` setting present in the :attr:`.Table.indexes`
collection.

.. seealso::

    :ref:`postgresql_index_reflection`

    :ref:`mysql_unique_constraints`

:ticket:`3184`

.. _feature_3178:

New systems to safely emit parameterized warnings
-------------------------------------------------

For a long time, there has been a restriction that warning messages could not
refer to data elements, such that a particular function might emit an
infinite number of unique warnings.  The key place this occurs is in the
``Unicode type received non-unicode bind param value`` warning.  Placing
the data value in this message would mean that the Python
``__warningregistry__`` for that module, or in some cases the Python-global
``warnings.onceregistry``, would grow unbounded, as in most warning scenarios,
one of these two collections is populated with every distinct warning message.

The change here is that by using a special ``string`` type that purposely
changes how the string is hashed, we can control that a large number of
parameterized messages are hashed only on a small set of possible hash
values, such that a warning such as ``Unicode type received non-unicode
bind param value`` can be tailored to be emitted only a specific number of
times; beyond that, the Python warnings registry will begin recording them
as duplicates.

To illustrate, the following test script will show only ten warnings being
emitted for ten of the parameter sets, out of a total of 1000::

    from sqlalchemy import create_engine, Unicode, select, cast
    import random
    import warnings

    e = create_engine("sqlite://")

    # Use the "once" filter (which is also the default for Python
    # warnings).  Exactly ten of these warnings will
    # be emitted; beyond that, the Python warnings registry will accumulate
    # new values as dupes of one of the ten existing.
    warnings.filterwarnings("once")

    for i in range(1000):
        e.execute(select([cast(
            ('foo_%d' % random.randint(0, 1000000)).encode('ascii'), Unicode)]))

The format of the warning here is::

    /path/lib/sqlalchemy/sql/sqltypes.py:186: SAWarning: Unicode type received
      non-unicode bind param value 'foo_4852'. (this warning may be
      suppressed after 10 occurrences)

:ticket:`3178`

Key Behavioral Changes - ORM
============================

.. _bug_3228:

query.update() now resolves string names into mapped attribute names
--------------------------------------------------------------------

The documentation for :meth:`.Query.update` states that the given
``values`` dictionary is "a dictionary with attributes names as keys",
implying that these are mapped attribute names.
Unfortunately, the function was designed more with attributes and SQL
expressions in mind, and not so much strings; when strings
were passed, these strings would be passed through straight to the core
update statement without any resolution as far as how these names are
represented on the mapped class, meaning the name would have to match that
of a table column exactly, not how an attribute of that name was mapped
onto the class.

The string names are now resolved as attribute names in earnest::

    class User(Base):
        __tablename__ = 'user'

        id = Column(Integer, primary_key=True)
        name = Column('user_name', String(50))

Above, the column ``user_name`` is mapped as ``name``.   Previously,
a call to :meth:`.Query.update` that was passed strings would have to
have been called as follows::

    session.query(User).update({'user_name': 'moonbeam'})

The given string is now resolved against the entity::

    session.query(User).update({'name': 'moonbeam'})

It is typically preferable to use the attribute directly, to avoid any
ambiguity::

    session.query(User).update({User.name: 'moonbeam'})

The change also indicates that synonyms and hybrid attributes can be referred
to by string name as well::

    class User(Base):
        __tablename__ = 'user'

        id = Column(Integer, primary_key=True)
        name = Column('user_name', String(50))

        @hybrid_property
        def fullname(self):
            return self.name

    session.query(User).update({'fullname': 'moonbeam'})

:ticket:`3228`

.. _bug_3371:

Warnings emitted when comparing objects with None values to relationships
---------------------------------------------------------------------------

This change is new as of 1.0.1.  Some users are performing
queries that are essentially of this form::

    session.query(Address).filter(Address.user == User(id=None))

This pattern is not currently supported in SQLAlchemy.  For all versions,
it emits SQL resembling::

    SELECT address.id AS address_id, address.user_id AS address_user_id,
    address.email_address AS address_email_address
    FROM address WHERE ? = address.user_id
    (None,)

Note above, there is a comparison ``WHERE ? = address.user_id`` where the
bound value ``?`` is receiving ``None``, or ``NULL`` in SQL.  **This will
always return False in SQL**.  The comparison here would in theory
generate SQL as follows::

    SELECT address.id AS address_id, address.user_id AS address_user_id,
    address.email_address AS address_email_address
    FROM address WHERE address.user_id IS NULL

But right now, **it does not**.   Applications which are relying upon the
fact that "NULL = NULL" produces False in all cases run the risk that
someday, SQLAlchemy might fix this issue to generate "IS NULL", and the
queries will then produce different results.  Therefore with this kind of
operation, you will see a warning::

    SAWarning: Got None for value of column user.id; this is unsupported
    for a relationship comparison and will not currently produce an
    IS comparison (but may in a future release)

Note that this pattern was broken in most cases for release 1.0.0 including
all of the betas; a value like ``SYMBOL('NEVER_SET')`` would be generated.
This issue has been fixed, but as a result of identifying this pattern,
the warning is now there so that we can more safely repair this broken
behavior (now captured in :ticket:`3373`) in a future release.

:ticket:`3371`
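If an IS NULL comparison against the foreign key column is what's actually
desired, comparing the relationship to ``None`` directly remains the
supported spelling; this is long-standing SQLAlchemy behavior, shown here
only as a reminder rather than as part of this change::

    # renders "address.user_id IS NULL" for a simple many-to-one
    session.query(Address).filter(Address.user == None)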
.. _bug_3374:

A "negated contains or equals" relationship comparison will use the current value of attributes, not the database value
-------------------------------------------------------------------------------------------------------------------------

This change is new as of 1.0.1; while we would have preferred for this to be in
1.0.0, it only became apparent as a result of :ticket:`3371`.

Given a mapping::

    class A(Base):
        __tablename__ = 'a'
        id = Column(Integer, primary_key=True)

    class B(Base):
        __tablename__ = 'b'
        id = Column(Integer, primary_key=True)
        a_id = Column(ForeignKey('a.id'))
        a = relationship("A")

Given an ``A`` with primary key of 7, which we change to 10 without
flushing::

    s = Session(autoflush=False)
    a1 = A(id=7)
    s.add(a1)
    s.commit()

    a1.id = 10

A query against a many-to-one relationship with this object as the target
will use the value 10 in the bound parameters::

    s.query(B).filter(B.a == a1)

Produces::

    SELECT b.id AS b_id, b.a_id AS b_a_id
    FROM b
    WHERE ? = b.a_id
    (10,)

However, before this change, the negation of this criteria would **not** use
10, it would use 7, unless the object were flushed first::

    s.query(B).filter(B.a != a1)

Produces (in 0.9 and all versions prior to 1.0.1)::

    SELECT b.id AS b_id, b.a_id AS b_a_id
    FROM b
    WHERE b.a_id != ? OR b.a_id IS NULL
    (7,)

For a transient object, it would produce a broken query::

    SELECT b.id, b.a_id
    FROM b
    WHERE b.a_id != :a_id_1 OR b.a_id IS NULL
    {u'a_id_1': symbol('NEVER_SET')}

This inconsistency has been repaired, and in all queries the current attribute
value, in this example ``10``, will now be used.

:ticket:`3374`

.. _migration_3061:

Changes to attribute events and other operations regarding attributes that have no pre-existing value
------------------------------------------------------------------------------------------------------

In this change, the default return value of ``None`` when accessing an object
is now returned dynamically on each access, rather than implicitly setting the
attribute's state with a special "set" operation when it is first accessed.
The visible result of this change is that ``obj.__dict__`` is not implicitly
modified on get, and there are also some minor behavioral changes for
:func:`.attributes.get_history` and related functions.

Given an object with no state::

    >>> obj = Foo()

It has always been SQLAlchemy's behavior such that if we access a scalar
or many-to-one attribute that was never set, it is returned as ``None``::

    >>> obj.someattr
    None

This value of ``None`` is in fact now part of the state of ``obj``, and is
not unlike as though we had set the attribute explicitly, e.g.
``obj.someattr = None``.  However, the "set on get" here would behave
differently as far as history and events.   It would not emit any attribute
event, and additionally if we view history, we see this::

    >>> inspect(obj).attrs.someattr.history
    History(added=(), unchanged=[None], deleted=())   # 0.9 and below

That is, it's as though the attribute were always ``None`` and were
never changed.  This is explicitly different from if we had set the
attribute first instead::

    >>> obj = Foo()
    >>> obj.someattr = None
    >>> inspect(obj).attrs.someattr.history
    History(added=[None], unchanged=(), deleted=())  # all versions

The above means that the behavior of our "set" operation can be corrupted
by the fact that the value was accessed via "get" earlier.  In 1.0, this
inconsistency has been resolved, by no longer actually setting anything
when the default "getter" is used::
    >>> obj = Foo()
    >>> obj.someattr
    None
    >>> inspect(obj).attrs.someattr.history
    History(added=(), unchanged=(), deleted=())  # 1.0
    >>> obj.someattr = None
    >>> inspect(obj).attrs.someattr.history
    History(added=[None], unchanged=(), deleted=())

The reason the above behavior hasn't had much impact is because the
INSERT statement in relational databases considers a missing value to be
the same as NULL in most cases.   Whether SQLAlchemy received a history
event for a particular attribute set to None or not would usually not matter;
as the difference between sending None/NULL or not wouldn't have an impact.
However, as :ticket:`3060` (described here in :ref:`migration_3060`)
illustrates, there are some seldom edge cases
where we do in fact want to positively have ``None`` set.  Also, allowing
the attribute event here means it's now possible to create "default value"
functions for ORM mapped attributes.

As part of this change, the generation of the implicit "None" is now disabled
for other situations where this used to occur; this includes when an
attribute set operation on a many-to-one is received; previously, the "old"
value would be "None" if it had been not set otherwise; it now will send the
value :data:`.orm.attributes.NEVER_SET`, which is a value that may be sent
to an attribute listener now.   This symbol may also be received when
calling on mapper utility functions such as
:meth:`.Mapper.primary_key_from_instance`; if the primary key attributes
have no setting at all, whereas the value would be ``None`` before, it will
now be the :data:`.orm.attributes.NEVER_SET` symbol, and no change to the
object's state occurs.

:ticket:`3061`

.. _migration_3060:

Priority of attribute changes on relationship-bound attributes vs. FK-bound may appear to change
------------------------------------------------------------------------------------------------

As a side effect of :ticket:`3060`, setting a relationship-bound attribute to
``None`` is now a tracked history event which refers to the intention of
persisting ``None`` to that attribute.   As it has always been the case that
setting a relationship-bound attribute will trump direct assignment to the
foreign key attributes, a change in behavior can be seen here when assigning
None.  Given a mapping::

    class A(Base):
        __tablename__ = 'table_a'

        id = Column(Integer, primary_key=True)

    class B(Base):
        __tablename__ = 'table_b'

        id = Column(Integer, primary_key=True)
        a_id = Column(ForeignKey('table_a.id'))
        a = relationship(A)

In 1.0, the relationship-bound attribute takes precedence over the FK-bound
attribute in all cases, whether or not
the value we assign is a reference to an ``A`` object or is ``None``.
In 0.9, the behavior is inconsistent and only takes effect if a value is
assigned; the None is not considered::

    a1 = A(id=1)
    a2 = A(id=2)
    session.add_all([a1, a2])
    session.flush()

    b1 = B()
    b1.a = a1   # we expect a_id to be '1'; takes precedence in 0.9 and 1.0

    b2 = B()
    b2.a = None  # we expect a_id to be None; takes precedence only in 1.0

    b1.a_id = 2
    b2.a_id = 2

    session.add_all([b1, b2])
    session.commit()

    assert b1.a is a1  # passes in both 0.9 and 1.0
    assert b2.a is None  # passes in 1.0, in 0.9 it's a2

:ticket:`3060`

.. _bug_3139:

session.expunge() will fully detach an object that's been deleted
-----------------------------------------------------------------

The behavior of :meth:`.Session.expunge` had a bug that caused an
inconsistency in behavior regarding deleted objects.
The :func:`.object_session` function as well as the
:attr:`.InstanceState.session` attribute would still report the object as
belonging to the :class:`.Session` subsequent to the expunge::

    u1 = sess.query(User).first()
    sess.delete(u1)

    sess.flush()

    assert u1 not in sess
    assert inspect(u1).session is sess  # this is normal before commit

    sess.expunge(u1)

    assert u1 not in sess
    assert inspect(u1).session is None  # would fail

Note that it is normal for ``u1 not in sess`` to be True while
``inspect(u1).session`` still refers to the session, while the transaction
is ongoing subsequent to the delete operation and :meth:`.Session.expunge`
has not been called; the full detachment normally completes once the
transaction is committed.  This issue would also impact functions
that rely on :meth:`.Session.expunge` such as :func:`.make_transient`.

:ticket:`3139`

.. _migration_yield_per_eager_loading:

Joined/Subquery eager loading explicitly disallowed with yield_per
------------------------------------------------------------------

In order to make the :meth:`.Query.yield_per` method easier to use,
an exception is raised if any subquery eager loaders, or joined
eager loaders that would use collections, are
to take effect when yield_per is used, as these are currently not compatible
with yield-per (subquery loading could be in theory, however).
When this error is raised, the :func:`.lazyload` option can be sent with
an asterisk::

    q = sess.query(Object).options(lazyload('*')).yield_per(100)

or use :meth:`.Query.enable_eagerloads`::

    q = sess.query(Object).enable_eagerloads(False).yield_per(100)

The :func:`.lazyload` option has the advantage that additional many-to-one
joined loader options can still be used::

    q = sess.query(Object).options(
        lazyload('*'), joinedload("some_manytoone")).yield_per(100)

.. _bug_3233:

Changes and fixes in handling of duplicate join targets
--------------------------------------------------------

Changes here encompass bugs where an unexpected and inconsistent
behavior would occur in some scenarios when joining to an entity twice, or
to multiple single-table entities against the same table, without using a
relationship-based ON clause, as well as when joining multiple times to the
same target relationship.

Starting with a mapping as::

    from sqlalchemy import Integer, Column, String, ForeignKey
    from sqlalchemy.orm import Session, relationship
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class A(Base):
        __tablename__ = 'a'
        id = Column(Integer, primary_key=True)
        bs = relationship("B")

    class B(Base):
        __tablename__ = 'b'
        id = Column(Integer, primary_key=True)
        a_id = Column(ForeignKey('a.id'))

A query that joins to ``A.bs`` twice::

    print s.query(A).join(A.bs).join(A.bs)

Will render::

    SELECT a.id AS a_id
    FROM a JOIN b ON a.id = b.a_id

The query deduplicates the redundant ``A.bs`` because it is attempting
to support a case like the following::

    s.query(A).join(A.bs).\
        filter(B.foo == 'bar').\
        reset_joinpoint().join(A.bs, B.cs).filter(C.bar == 'bat')

That is, the ``A.bs`` is part of a "path".  As part of :ticket:`3367`,
arriving at the same endpoint twice without it being part of a
larger path will now emit a warning::

    SAWarning: Pathed join target A.bs has already been joined to; skipping

The bigger change involves when joining to an entity without using a
relationship-bound path.
If we join to ``B`` twice::

    print s.query(A).join(B, B.a_id == A.id).join(B, B.a_id == A.id)

In 0.9, this would render as follows::

    SELECT a.id AS a_id
    FROM a JOIN b ON b.a_id = a.id JOIN b AS b_1 ON b_1.a_id = a.id

This is problematic since the aliasing is implicit and in the case of
different ON clauses can lead to unpredictable results.

In 1.0, no automatic aliasing is applied and we get::

    SELECT a.id AS a_id
    FROM a JOIN b ON b.a_id = a.id JOIN b ON b.a_id = a.id

This will raise an error from the database.  While it might be nice if
the "duplicate join target" acted identically whether we joined from
redundant relationships vs. redundant non-relationship based targets,
for now we are only changing the behavior in the more serious case where
implicit aliasing would have occurred previously, and only emitting a warning
in the relationship case.  Ultimately, joining to the same thing twice without
any aliasing to disambiguate should raise an error in all cases.

The change also has an impact on single-table inheritance targets.  Using
a mapping as follows::

    from sqlalchemy import Integer, Column, String, ForeignKey
    from sqlalchemy.orm import Session, relationship
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class A(Base):
        __tablename__ = "a"

        id = Column(Integer, primary_key=True)
        type = Column(String)

        __mapper_args__ = {'polymorphic_on': type, 'polymorphic_identity': 'a'}


    class ASub1(A):
        __mapper_args__ = {'polymorphic_identity': 'asub1'}


    class ASub2(A):
        __mapper_args__ = {'polymorphic_identity': 'asub2'}


    class B(Base):
        __tablename__ = 'b'

        id = Column(Integer, primary_key=True)

        a_id = Column(Integer, ForeignKey("a.id"))

        a = relationship("A", primaryjoin="B.a_id == A.id", backref='b')

    s = Session()

    print s.query(ASub1).join(B, ASub1.b).join(ASub2, B.a)

    print s.query(ASub1).join(B, ASub1.b).join(ASub2, ASub2.id == B.a_id)

The two queries at the bottom are equivalent, and should both render
the identical SQL::

    SELECT a.id AS a_id, a.type AS a_type
    FROM a JOIN b ON b.a_id = a.id JOIN a ON b.a_id = a.id AND a.type IN (:type_1)
    WHERE a.type IN (:type_2)

The above SQL is invalid, as it renders "a" within the FROM list twice.
However, the implicit aliasing bug would occur with the second query only
and render this instead::

    SELECT a.id AS a_id, a.type AS a_type
    FROM a JOIN b ON b.a_id = a.id JOIN a AS a_1
    ON a_1.id = b.a_id AND a_1.type IN (:type_1)
    WHERE a_1.type IN (:type_2)

Where above, the second join to "a" is aliased.  While this seems convenient,
it's not how single-inheritance queries work in general and is misleading
and inconsistent.

The net effect is that applications which were relying on this bug will now
have an error raised by the database.   The solution is to use the expected
form.  When referring to multiple subclasses of a single-inheritance
entity in a query, you must manually use aliases to disambiguate the table,
as all the subclasses normally refer to the same table::

    asub2_alias = aliased(ASub2)

    print s.query(ASub1).join(B, ASub1.b).join(asub2_alias, B.a.of_type(asub2_alias))

:ticket:`3233` :ticket:`3367`

Deferred Columns No Longer Implicitly Undefer
---------------------------------------------

Mapped attributes marked as deferred without explicit undeferral
will now remain "deferred" even if their column is otherwise
present in the result set in some way.   This is a performance
enhancement in that an ORM load no longer spends time searching
for each deferred column when the result set is obtained.
However, for an application that has been relying upon this, an explicit
:func:`.undefer` or similar option should now be used, in order to prevent
a SELECT from being emitted when the attribute is accessed.

.. _migration_deprecated_orm_events:

Deprecated ORM Event Hooks Removed
----------------------------------

The following ORM event hooks, some of which have been deprecated since
0.5, have been removed:   ``translate_row``, ``populate_instance``,
``append_result``, ``create_instance``.  The use cases for these hooks
originated in the very early 0.1 / 0.2 series of SQLAlchemy and have long
since been unnecessary.  In particular, the hooks were largely unusable
as the behavioral contracts within these events was strongly linked to
the surrounding internals, such as how an instance needs to be created
and initialized as well as how columns are located within an ORM-generated
row.   The removal of these hooks greatly simplifies the mechanics of ORM
object loading.

.. _bundle_api_change:

API Change for new Bundle feature when custom row loaders are used
-------------------------------------------------------------------

The new :class:`.Bundle` object of 0.9 has a small change in API,
when the ``create_row_processor()`` method is overridden on a custom class.
Previously, the sample code looked like::

    from sqlalchemy.orm import Bundle

    class DictBundle(Bundle):
        def create_row_processor(self, query, procs, labels):
            """Override create_row_processor to return values as dictionaries"""
            def proc(row, result):
                return dict(
                    zip(labels, (proc(row, result) for proc in procs))
                )
            return proc

The unused ``result`` member is now removed::

    from sqlalchemy.orm import Bundle

    class DictBundle(Bundle):
        def create_row_processor(self, query, procs, labels):
            """Override create_row_processor to return values as dictionaries"""
            def proc(row):
                return dict(
                    zip(labels, (proc(row) for proc in procs))
                )
            return proc

.. seealso::

    :ref:`bundles`

.. _migration_3008:

Right inner join nesting now the default for joinedload with innerjoin=True
----------------------------------------------------------------------------

The behavior of :paramref:`.joinedload.innerjoin` as well as
:paramref:`.relationship.innerjoin` is now to use "nested"
inner joins, that is, right-nested, as the default behavior when an inner
join joined eager load is chained to an outer join eager load.  In order
to get the old behavior of chaining all joined eager loads as outer
join when an outer join is present, use ``innerjoin="unnested"``.

As introduced in :ref:`feature_2976` from version 0.9, the behavior of
``innerjoin="nested"`` is that an inner join eager load chained to an outer
join eager load will use a right-nested join.  ``"nested"`` is now implied
when using ``innerjoin=True``::

    query(User).options(
        joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True))

With the new default, this will render the FROM clause in the form::

    FROM users LEFT OUTER JOIN (orders JOIN items ON <onclause>) ON <onclause>

That is, using a right-nested join for the INNER join so that the full
result of ``users`` can be returned.   The use of an INNER join is more
efficient than using an OUTER join, and allows the
:paramref:`.joinedload.innerjoin` optimization parameter to take effect
in all cases.
To get the older behavior, use ``innerjoin="unnested"``::

    query(User).options(
        joinedload("orders", innerjoin=False).joinedload("items", innerjoin="unnested"))

This will avoid right-nested joins and chain the joins together using all
OUTER joins despite the innerjoin directive::

    FROM users LEFT OUTER JOIN orders ON <onclause> LEFT OUTER JOIN items ON <onclause>

As noted in the 0.9 notes, the only database backend that has difficulty
with right-nested joins is SQLite; SQLAlchemy as of 0.9 converts a
right-nested join into a subquery as a join target on SQLite.

.. seealso::

    :ref:`feature_2976` - description of the feature as introduced in 0.9.4.

:ticket:`3008`

.. _change_3249:

Subqueries no longer applied to uselist=False joined eager loads
----------------------------------------------------------------

Given a joined eager load like the following::

    class A(Base):
        __tablename__ = 'a'
        id = Column(Integer, primary_key=True)
        b = relationship("B", uselist=False)


    class B(Base):
        __tablename__ = 'b'
        id = Column(Integer, primary_key=True)
        a_id = Column(ForeignKey('a.id'))

    s = Session()
    print(s.query(A).options(joinedload(A.b)).limit(5))

SQLAlchemy considers the relationship ``A.b`` to be a "one to many,
loaded as a single value", which is essentially a "one to one"
relationship.  However, joined eager loading has always treated the
above as a situation where the main query needs to be inside a
subquery, as would normally be needed for a collection of B objects
where the main query has a LIMIT applied::

    SELECT anon_1.a_id AS anon_1_a_id, b_1.id AS b_1_id, b_1.a_id AS b_1_a_id
    FROM (SELECT a.id AS a_id
    FROM a LIMIT :param_1) AS anon_1
    LEFT OUTER JOIN b AS b_1 ON anon_1.a_id = b_1.a_id

However, since the relationship of the inner query to the outer one is
that at most only one row is shared in the case of ``uselist=False``
(in the same way as a many-to-one), the "subquery" used with LIMIT +
joined eager loading is now dropped in this case::

    SELECT a.id AS a_id, b_1.id AS b_1_id, b_1.a_id AS b_1_a_id
    FROM a LEFT OUTER JOIN b AS b_1 ON a.id = b_1.a_id
    LIMIT :param_1

In the case that the LEFT OUTER JOIN returns more than one row, the ORM
has always emitted a warning here and ignored additional results for
``uselist=False``, so the results in that error situation should not
change.

:ticket:`3249`

query.update() / query.delete() raises if used with join(), select_from(), from_self()
---------------------------------------------------------------------------------------

A warning is emitted in SQLAlchemy 0.9.10 (not yet released as of
June 9, 2015) when the :meth:`.Query.update` or :meth:`.Query.delete` methods
are invoked against a query which has also called upon :meth:`.Query.join`,
:meth:`.Query.outerjoin`, :meth:`.Query.select_from` or
:meth:`.Query.from_self`.  These are unsupported use cases which silently
fail in the 0.9 series up until 0.9.10 where it emits a warning.  In 1.0,
these cases raise an exception.

:ticket:`3349`

query.update() with ``synchronize_session='evaluate'`` raises on multi-table update
------------------------------------------------------------------------------------

The "evaluator" for :meth:`.Query.update` won't work with multi-table
updates, and needs to be set to ``synchronize_session=False`` or
``synchronize_session='fetch'`` when multiple tables are present.
The new behavior is that an explicit exception is now raised, with a message
to change the synchronize setting.   This is upgraded from a warning emitted
as of 0.9.7.

:ticket:`3117`
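As a minimal sketch of the scenario (hypothetical ``User`` / ``Address``
mappings assumed), an UPDATE whose criteria bring in a second table must
now state a compatible strategy explicitly::

    # multi-table criteria: the 'evaluate' default now raises here, so
    # 'fetch' (or False) must be named explicitly
    session.query(User).\
        filter(User.id == Address.user_id).\
        filter(Address.email_address == 'ed@ed.com').\
        update({"name": "ed"}, synchronize_session='fetch')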
Resurrect Event has been Removed
--------------------------------

The "resurrect" ORM event has been removed entirely.  This event ceased to
have any function since version 0.8 removed the older "mutable" system
from the unit of work.

.. _migration_3177:

Change to single-table-inheritance criteria when using from_self(), count()
----------------------------------------------------------------------------

Given a single-table inheritance mapping, such as::

    class Widget(Base):
        __tablename__ = 'widget_table'

    class FooWidget(Widget):
        pass

Using :meth:`.Query.from_self` or :meth:`.Query.count` against a subclass
would produce a subquery, but then add the "WHERE" criteria for subtypes
to the outside::

    sess.query(FooWidget).from_self().all()

rendering::

    SELECT
        anon_1.widgets_id AS anon_1_widgets_id,
        anon_1.widgets_type AS anon_1_widgets_type
    FROM (SELECT widgets.id AS widgets_id, widgets.type AS widgets_type
    FROM widgets) AS anon_1
    WHERE anon_1.widgets_type IN (?)

The issue with this is that if the inner query does not specify all
columns, then we can't add the WHERE clause on the outside (it actually tries,
and produces a bad query).  This decision
apparently goes way back to 0.6.5 with the note "may need to make more
adjustments to this".   Well, those adjustments have arrived!  So now the
above query will render::

    SELECT
        anon_1.widgets_id AS anon_1_widgets_id,
        anon_1.widgets_type AS anon_1_widgets_type
    FROM (SELECT widgets.id AS widgets_id, widgets.type AS widgets_type
    FROM widgets
    WHERE widgets.type IN (?)) AS anon_1

So that queries that don't include "type" will still work!::

    sess.query(FooWidget.id).count()

Renders::

    SELECT count(*) AS count_1
    FROM (SELECT widgets.id AS widgets_id
    FROM widgets
    WHERE widgets.type IN (?)) AS anon_1

:ticket:`3177`

.. _migration_3222:

single-table-inheritance criteria added to all ON clauses unconditionally
--------------------------------------------------------------------------

When joining to a single-table inheritance subclass target, the ORM always adds
the "single table criteria" when joining on a relationship.  Given a
mapping as::

    class Widget(Base):
        __tablename__ = 'widget'
        id = Column(Integer, primary_key=True)
        type = Column(String)
        related_id = Column(ForeignKey('related.id'))
        related = relationship("Related", backref="widget")
        __mapper_args__ = {'polymorphic_on': type}


    class FooWidget(Widget):
        __mapper_args__ = {'polymorphic_identity': 'foo'}


    class Related(Base):
        __tablename__ = 'related'
        id = Column(Integer, primary_key=True)

It's been the behavior for quite some time that a JOIN on the relationship
will render a "single inheritance" clause for the type::

    s.query(Related).join(FooWidget, Related.widget).all()

SQL output::

    SELECT related.id AS related_id
    FROM related JOIN widget ON related.id = widget.related_id
    AND widget.type IN (:type_1)

Above, because we joined to a subclass ``FooWidget``, :meth:`.Query.join`
knew to add the ``AND widget.type IN ('foo')`` criteria to the ON clause.
The change here is that the ``AND widget.type IN()`` criteria is now appended
to *any* ON clause, not just those generated from a relationship,
including one that is explicitly stated::

    # ON clause will now render as
    # related.id = widget.related_id AND widget.type IN (:type_1)
    s.query(Related).join(FooWidget, FooWidget.related_id == Related.id).all()

As well as the "implicit" join when no ON clause of any kind is stated::

    # ON clause will now render as
    # related.id = widget.related_id AND widget.type IN (:type_1)
    s.query(Related).join(FooWidget).all()

Previously, the ON clause for these would not include the single-inheritance
criteria.  Applications that are already adding this criteria to work around
this will want to remove its explicit use, though it should continue to work
fine if the criteria happens to be rendered twice in the meantime.

.. seealso::

    :ref:`bug_3233`

:ticket:`3222`

Key Behavioral Changes - Core
=============================

.. _migration_2992:

Warnings emitted when coercing full SQL fragments into text()
-------------------------------------------------------------

Since SQLAlchemy's inception, there has always been an emphasis on not getting
in the way of the usage of plain text.   The Core and ORM expression systems
were intended to allow any number of points at which the user can just
use plain text SQL expressions, not just in the sense that you can send a
full SQL string to :meth:`.Connection.execute`, but that you can send strings
with SQL expressions into many functions, such as :meth:`.Select.where`,
:meth:`.Query.filter`, and :meth:`.Select.order_by`.

Note that by "SQL expressions" we mean a **full fragment of a SQL string**,
such as::

    # the argument sent to where() is a full SQL expression
    stmt = select([sometable]).where("somecolumn = 'value'")

and we are **not talking about string arguments**, that is, the normal
behavior of passing string values that become parameterized::

    # This is a normal Core expression with a string argument -
    # we aren't talking about this!!
    stmt = select([sometable]).where(sometable.c.somecolumn == 'value')

The Core tutorial has long featured an example of the use of this technique,
using a :func:`.select` construct where virtually all components of it
are specified as straight strings.  However, despite this long-standing
behavior and example, users are apparently surprised that this behavior
exists, and when asking around the community, I was unable to find any user
that was in fact *not* surprised that you can send a full string into a method
like :meth:`.Query.filter`.

So the change here is to encourage the user to qualify textual strings when
composing SQL that is partially or fully composed from textual fragments.
When composing a select as below::

    stmt = select(["a", "b"]).where("a = b").select_from("sometable")

The statement is built up normally, with all the same coercions as before.
However, one will see the following warnings emitted::

    SAWarning: Textual column expression 'a' should be explicitly declared
    with text('a'), or use column('a') for more specificity
    (this warning may be suppressed after 10 occurrences)

    SAWarning: Textual column expression 'b' should be explicitly declared
    with text('b'), or use column('b') for more specificity
    (this warning may be suppressed after 10 occurrences)

    SAWarning: Textual SQL expression 'a = b' should be explicitly declared
    as text('a = b') (this warning may be suppressed after 10 occurrences)

    SAWarning: Textual SQL FROM expression 'sometable' should be explicitly
    declared as text('sometable'), or use table('sometable') for more
    specificity (this warning may be suppressed after 10 occurrences)

These warnings attempt to show exactly where the issue is by displaying
the parameters as well as where the string was received.
The warnings make use of the :ref:`feature_3178` so that parameterized
warnings can be emitted safely without running out of memory, and as always,
if one wishes the warnings to be exceptions, the
`Python Warnings Filter <https://docs.python.org/2/library/warnings.html>`_
should be used::

    import warnings
    warnings.simplefilter("error")   # all warnings raise an exception

Given the above warnings, our statement works just fine, but
to get rid of the warnings we would rewrite our statement as follows::

    from sqlalchemy import select, text
    stmt = select([
        text("a"),
        text("b")
    ]).where(text("a = b")).select_from(text("sometable"))

and as the warnings suggest, we can give our statement more specificity
about the text if we use :func:`.column` and :func:`.table`::

    from sqlalchemy import select, text, column, table

    stmt = select([column("a"), column("b")]).\
        where(text("a = b")).select_from(table("sometable"))

Note also that :func:`.table` and :func:`.column` can now
be imported from "sqlalchemy" without the "sql" part.

The behavior here applies to :func:`.select` as well as to key methods
on :class:`.Query`, including :meth:`.Query.filter`,
:meth:`.Query.from_statement` and :meth:`.Query.having`.

ORDER BY and GROUP BY are special cases
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

There is one case where usage of a string has special meaning, and as part
of this change we have enhanced its functionality.  When we have a
:func:`.select` or :class:`.Query` that refers to some column name or named
label, we might want to GROUP BY and/or ORDER BY known columns or labels::

    stmt = select([
        user.c.name,
        func.count(user.c.id).label("id_count")
    ]).group_by("name").order_by("id_count")

In the above statement we expect to see "ORDER BY id_count", as opposed to a
re-statement of the function.
The string argument given is actively
matched to an entry in the columns clause during compilation, so the above
statement would produce as we expect, without warnings (though note that
the ``"name"`` expression has been resolved to ``users.name``!)::

    SELECT users.name, count(users.id) AS id_count
    FROM users GROUP BY users.name ORDER BY id_count

However, if we refer to a name that cannot be located, then we get
the warning again, as below::

    stmt = select([
            user.c.name,
            func.count(user.c.id).label("id_count")
        ]).order_by("some_label")

The output does what we say, but again it warns us::

    SAWarning: Can't resolve label reference 'some_label'; converting to
    text() (this warning may be suppressed after 10 occurrences)

    SELECT users.name, count(users.id) AS id_count
    FROM users ORDER BY some_label

The above behavior applies to all those places where we might want to refer
to a so-called "label reference"; ORDER BY and GROUP BY, but also within an
OVER clause as well as a DISTINCT ON clause that refers to columns (e.g. the
Postgresql syntax).

We can still specify any arbitrary expression for ORDER BY or others using
:func:`.text`::

    stmt = select([users]).order_by(text("some special expression"))

The upshot of the whole change is that SQLAlchemy now would like us
to tell it when a string is sent that this string is explicitly
a :func:`.text` construct, or a column, table, etc., and if we use it as a
label name in an order by, group by, or other expression, SQLAlchemy expects
that the string resolves to something known, else it should again
be qualified with :func:`.text` or similar.

:ticket:`2992`

.. _bug_3288:

Python-side defaults invoked for each row individually when using a multivalued insert
----------------------------------------------------------------------------------------

Support for Python-side column defaults when using the multi-valued
version of :meth:`.Insert.values` was essentially not implemented, and
would only work "by accident" in specific situations, when the dialect in
use was using a non-positional (e.g. named) style of bound parameter, and
when it was not necessary that a Python-side callable be invoked for each
row.

The feature has been overhauled so that it works more similarly to
that of an "executemany" style of invocation::

    import itertools

    counter = itertools.count(1)
    t = Table(
        'my_table', metadata,
        Column('id', Integer, default=lambda: next(counter)),
        Column('data', String)
    )

    conn.execute(t.insert().values([
        {"data": "d1"},
        {"data": "d2"},
        {"data": "d3"},
    ]))

The above example will invoke ``next(counter)`` for each row individually
as would be expected::

    INSERT INTO my_table (id, data) VALUES (?, ?), (?, ?), (?, ?)
    (1, 'd1', 2, 'd2', 3, 'd3')

Previously, a positional dialect would fail as a bind would not be
generated for additional positions::

    Incorrect number of bindings supplied. The current statement uses 6,
    and there are 4 supplied.
    [SQL: u'INSERT INTO my_table (id, data) VALUES (?, ?), (?, ?), (?, ?)']
    [parameters: (1, 'd1', 'd2', 'd3')]

And with a "named" dialect, the same value for "id" would be re-used in
each row (hence this change is backwards-incompatible with a system that
relied on this)::

    INSERT INTO my_table (id, data) VALUES (:id, :data_0), (:id, :data_1), (:id, :data_2)
    {u'data_2': 'd3', u'data_1': 'd2', u'data_0': 'd1', 'id': 1}

The system will also refuse to invoke a "server side" default as
inline-rendered SQL, since it cannot be guaranteed that a server side
default is compatible with this.
If the VALUES clause renders for a specific column, then a Python-side value is required; if an omitted value only refers to a server-side default, an exception is raised:: t = Table( 'my_table', metadata, Column('id', Integer, primary_key=True), Column('data', String, server_default='some default') ) conn.execute(t.insert().values([ {"data": "d1"}, {"data": "d2"}, {}, ])) will raise:: sqlalchemy.exc.CompileError: INSERT value for column my_table.data is explicitly rendered as a boundparameter in the VALUES clause; a Python-side value or SQL expression is required Previously, the value "d1" would be copied into that of the third row (but again, only with named format!):: INSERT INTO my_table (data) VALUES (:data_0), (:data_1), (:data_0) {u'data_1': 'd2', u'data_0': 'd1'} :ticket:`3288` .. _change_3163: Event listeners can not be added or removed from within that event's runner --------------------------------------------------------------------------- Removal of an event listener from inside that same event itself would modify the elements of a list during iteration, which would cause still-attached event listeners to silently fail to fire. To prevent this while still maintaining performance, the lists have been replaced with ``collections.deque()``, which does not allow any additions or removals during iteration, and instead raises ``RuntimeError``. :ticket:`3163` .. _change_3169: The INSERT...FROM SELECT construct now implies ``inline=True`` -------------------------------------------------------------- Using :meth:`.Insert.from_select` now implies ``inline=True`` on :func:`.insert`. This helps to fix a bug where an INSERT...FROM SELECT construct would inadvertently be compiled as "implicit returning" on supporting backends, which would cause breakage in the case of an INSERT that inserts zero rows (as implicit returning expects a row), as well as arbitrary return data in the case of an INSERT that inserts multiple rows (e.g. only the first row of many). A similar change is also applied to an INSERT..VALUES with multiple parameter sets; implicit RETURNING will no longer emit for this statement either. As both of these constructs deal with variable numbers of rows, the :attr:`.ResultProxy.inserted_primary_key` accessor does not apply. Previously, there was a documentation note that one may prefer ``inline=True`` with INSERT..FROM SELECT as some databases don't support returning and therefore can't do "implicit" returning, but there's no reason an INSERT...FROM SELECT needs implicit returning in any case. Regular explicit :meth:`.Insert.returning` should be used to return variable numbers of result rows if inserted data is needed. :ticket:`3169` .. _change_3027: ``autoload_with`` now implies ``autoload=True`` ----------------------------------------------- A :class:`.Table` can be set up for reflection by passing :paramref:`.Table.autoload_with` alone:: my_table = Table('my_table', metadata, autoload_with=some_engine) :ticket:`3027` .. _change_3266: DBAPI exception wrapping and handle_error() event improvements -------------------------------------------------------------- SQLAlchemy's wrapping of DBAPI exceptions was not taking place in the case where a :class:`.Connection` object was invalidated, and then tried to reconnect and encountered an error; this has been resolved.
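By way of illustration, a listener for this event is attached with the standard event API; below is a minimal, hypothetical sketch in which the connection URL and logging setup are placeholders, and which also makes use of the :attr:`.ExceptionContext.engine` attribute described next::

    import logging

    from sqlalchemy import create_engine, event

    log = logging.getLogger(__name__)

    engine = create_engine("postgresql://scott:tiger@localhost/test")

    @event.listens_for(engine, "handle_error")
    def receive_handle_error(context):
        # context is an ExceptionContext; with this change, a listener
        # attached at the Engine level also fires for failures that
        # occur on initial connect and on reconnect
        log.warning(
            "DBAPI error on engine %s: %s",
            context.engine, context.original_exception)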
Additionally, the recently added :meth:`.ConnectionEvents.handle_error` event is now invoked for errors that occur upon initial connect, upon reconnect, and when :func:`.create_engine` is used given a custom connection function via :paramref:`.create_engine.creator`. The :class:`.ExceptionContext` object has a new data member :attr:`.ExceptionContext.engine` that will always refer to the :class:`.Engine` in use, in those cases when the :class:`.Connection` object is not available (e.g. on initial connect). :ticket:`3266` .. _change_3243: ForeignKeyConstraint.columns is now a ColumnCollection ------------------------------------------------------ :attr:`.ForeignKeyConstraint.columns` was previously a plain list containing either strings or :class:`.Column` objects, depending on how the :class:`.ForeignKeyConstraint` was constructed and whether it was associated with a table. The collection is now a :class:`.ColumnCollection`, and is only initialized after the :class:`.ForeignKeyConstraint` is associated with a :class:`.Table`. A new accessor :attr:`.ForeignKeyConstraint.column_keys` is added to unconditionally return string keys for the local set of columns regardless of how the object was constructed or its current state. .. _feature_3084: MetaData.sorted_tables accessor is "deterministic" ----------------------------------------------------- The sorting of tables resulting from the :attr:`.MetaData.sorted_tables` accessor is "deterministic"; the ordering should be the same in all cases regardless of Python hashing. This is done by first sorting the tables by name before passing them to the topological algorithm, which maintains that ordering as it iterates. Note that this change does **not** yet apply to the ordering applied when emitting :meth:`.MetaData.create_all` or :meth:`.MetaData.drop_all`. :ticket:`3084` .. _bug_3170: null(), false() and true() constants are no longer singletons ------------------------------------------------------------- These three constants were changed to return a "singleton" value in 0.9; unfortunately, that would lead a query like the following to not render as expected:: select([null(), null()]) rendering only ``SELECT NULL AS anon_1``, because the two :func:`.null` constructs would come out as the same ``NULL`` object, and SQLAlchemy's Core model is based on object identity in order to determine lexical significance. The change in 0.9 had no importance other than the desire to save on object overhead; in general, an unnamed construct needs to stay lexically unique so that it gets labeled uniquely. :ticket:`3170` .. _change_3204: SQLite/Oracle have distinct methods for temporary table/view name reporting --------------------------------------------------------------------------- The :meth:`.Inspector.get_table_names` and :meth:`.Inspector.get_view_names` methods in the case of SQLite/Oracle would also return the names of temporary tables and views, which is not provided by any other dialect (in the case of MySQL at least it is not even possible). This logic has been moved out to two new methods :meth:`.Inspector.get_temp_table_names` and :meth:`.Inspector.get_temp_view_names`. Note that reflection of a specific named temporary table or temporary view, either by ``Table('name', autoload=True)`` or via methods like :meth:`.Inspector.get_columns` continues to function for most if not all dialects. For SQLite specifically, there is a bug fix for UNIQUE constraint reflection from temp tables as well, which is :ticket:`3203`.
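For example, the new methods are available from the standard :func:`.inspect` interface; a minimal sketch follows, in which the database URL is a placeholder::

    from sqlalchemy import create_engine, inspect

    engine = create_engine("sqlite:///some.db")
    insp = inspect(engine)

    # temporary names are now reported by their own methods
    print(insp.get_temp_table_names())
    print(insp.get_temp_view_names())

    # for SQLite/Oracle, no longer includes temporary tables
    print(insp.get_table_names())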
:ticket:`3204` Dialect Improvements and Changes - Postgresql ============================================= .. _change_3319: Overhaul of ENUM type create/drop rules --------------------------------------- The rules for Postgresql :class:`.postgresql.ENUM` have been made more strict with regard to creating and dropping the TYPE. An :class:`.postgresql.ENUM` that is created **without** being explicitly associated with a :class:`.MetaData` object will be created *and* dropped corresponding to :meth:`.Table.create` and :meth:`.Table.drop`:: table = Table('sometable', metadata, Column('some_enum', ENUM('a', 'b', 'c', name='myenum')) ) table.create(engine) # will emit CREATE TYPE and CREATE TABLE table.drop(engine) # will emit DROP TABLE and DROP TYPE - new for 1.0 This means that if a second table also has an enum named 'myenum', the above DROP operation will now fail. In order to accommodate the use case of a common shared enumerated type, the behavior of a metadata-associated enumeration has been enhanced. An :class:`.postgresql.ENUM` that **is** explicitly associated with a :class:`.MetaData` object will *not* be created *or* dropped corresponding to :meth:`.Table.create` and :meth:`.Table.drop`, with the exception of :meth:`.Table.create` called with the ``checkfirst=True`` flag:: my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata) table = Table('sometable', metadata, Column('some_enum', my_enum) ) # will fail: ENUM 'myenum' does not exist table.create(engine) # will check for enum and emit CREATE TYPE table.create(engine, checkfirst=True) table.drop(engine) # will emit DROP TABLE, *not* DROP TYPE metadata.drop_all(engine) # will emit DROP TYPE metadata.create_all(engine) # will emit CREATE TYPE :ticket:`3319` New Postgresql Table options ----------------------------- Added support for PG table options TABLESPACE, ON COMMIT, WITH(OUT) OIDS, and INHERITS, when rendering DDL via the :class:`.Table` construct. .. seealso:: :ref:`postgresql_table_options` :ticket:`2051` .. _feature_get_enums: New get_enums() method with Postgresql Dialect ---------------------------------------------- The :func:`.inspect` method returns a :class:`.PGInspector` object in the case of Postgresql, which includes a new :meth:`.PGInspector.get_enums` method that returns information on all available ``ENUM`` types:: from sqlalchemy import inspect, create_engine engine = create_engine("postgresql+psycopg2://host/dbname") insp = inspect(engine) print(insp.get_enums()) .. seealso:: :meth:`.PGInspector.get_enums` .. _feature_2891: Postgresql Dialect reflects Materialized Views, Foreign Tables -------------------------------------------------------------- Changes are as follows: * the :class:`Table` construct with ``autoload=True`` will now match a name that exists in the database as a materialized view or foreign table. * :meth:`.Inspector.get_view_names` will return plain and materialized view names. * :meth:`.Inspector.get_table_names` does **not** change for Postgresql, it continues to return only the names of plain tables. * A new method :meth:`.PGInspector.get_foreign_table_names` is added which will return the names of tables that are specifically marked as "foreign" in the Postgresql schema tables. The change to reflection involves adding ``'m'`` and ``'f'`` to the list of qualifiers we use when querying ``pg_class.relkind``, but this change is new in 1.0.0 to avoid any backwards-incompatible surprises for those running 0.9 in production. :ticket:`2891`
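As a brief illustration of the new method alongside the existing ones (a sketch only; the connection URL is a placeholder)::

    from sqlalchemy import create_engine, inspect

    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
    insp = inspect(engine)

    # names of tables marked as FOREIGN in the Postgresql catalogs
    print(insp.get_foreign_table_names())

    # plain and materialized view names, returned together
    print(insp.get_view_names())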
.. _change_3264: Postgresql ``has_table()`` now works for temporary tables --------------------------------------------------------- This is a simple fix such that "has table" for temporary tables now works, so that code like the following may proceed:: from sqlalchemy import * metadata = MetaData() user_tmp = Table( "user_tmp", metadata, Column("id", INT, primary_key=True), Column('name', VARCHAR(50)), prefixes=['TEMPORARY'] ) e = create_engine("postgresql://scott:tiger@localhost/test", echo='debug') with e.begin() as conn: user_tmp.create(conn, checkfirst=True) # checkfirst will succeed user_tmp.create(conn, checkfirst=True) The very unlikely case in which this behavior causes a non-failing application to behave differently arises because Postgresql allows a non-temporary table to silently overwrite a temporary table. So code like the following will now act completely differently, no longer creating the real table following the temporary table:: from sqlalchemy import * metadata = MetaData() user_tmp = Table( "user_tmp", metadata, Column("id", INT, primary_key=True), Column('name', VARCHAR(50)), prefixes=['TEMPORARY'] ) e = create_engine("postgresql://scott:tiger@localhost/test", echo='debug') with e.begin() as conn: user_tmp.create(conn, checkfirst=True) m2 = MetaData() user = Table( "user_tmp", m2, Column("id", INT, primary_key=True), Column('name', VARCHAR(50)), ) # in 0.9, *will create* the new table, overwriting the old one. # in 1.0, *will not create* the new table user.create(conn, checkfirst=True) :ticket:`3264` .. _feature_gh134: Postgresql FILTER keyword ------------------------- The SQL standard FILTER keyword for aggregate functions is now supported by Postgresql as of 9.4. SQLAlchemy allows this using :meth:`.FunctionElement.filter`:: func.count(1).filter(True) .. seealso:: :meth:`.FunctionElement.filter` :class:`.FunctionFilter` PG8000 dialect supports client side encoding --------------------------------------------- The :paramref:`.create_engine.encoding` parameter is now honored by the pg8000 dialect, using an on-connect handler which emits ``SET CLIENT_ENCODING`` matching the selected encoding. PG8000 native JSONB support -------------------------------------- Support for PG8000 versions greater than 1.10.1 has been added, where JSONB is supported natively. Support for psycopg2cffi Dialect on Pypy ---------------------------------------- Support for the pypy psycopg2cffi dialect is added. .. seealso:: :mod:`sqlalchemy.dialects.postgresql.psycopg2cffi` Dialect Improvements and Changes - MySQL ============================================= .. _change_3155: MySQL TIMESTAMP Type now renders NULL / NOT NULL in all cases -------------------------------------------------------------- The MySQL dialect has always worked around MySQL's implicit NOT NULL default associated with TIMESTAMP columns by emitting NULL for such a type, if the column is set up with ``nullable=True``. However, MySQL 5.6.6 and above features a new flag ``explicit_defaults_for_timestamp`` which repairs MySQL's non-standard behavior to make it behave like any other type; to accommodate this, SQLAlchemy now emits NULL/NOT NULL unconditionally for all TIMESTAMP columns. .. seealso:: :ref:`mysql_timestamp_null` :ticket:`3155`
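To illustrate, rendering DDL for a nullable TIMESTAMP column now spells out the NULL keyword explicitly; a minimal sketch, in which the table and column names are illustrative::

    from sqlalchemy import Column, MetaData, Table
    from sqlalchemy.dialects import mysql
    from sqlalchemy.schema import CreateTable

    t = Table(
        "event", MetaData(),
        Column("created_at", mysql.TIMESTAMP, nullable=True),
    )

    # renders along the lines of:
    # CREATE TABLE event (created_at TIMESTAMP NULL)
    print(CreateTable(t).compile(dialect=mysql.dialect()))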
.. _change_3283: MySQL SET Type Overhauled to support empty sets, unicode, blank value handling ------------------------------------------------------------------------------- The :class:`.mysql.SET` type historically did not include a system of handling blank sets and empty values separately; as different drivers had different behaviors for treatment of empty strings and empty-string-set representations, the SET type tried only to hedge between these behaviors, opting to treat the empty set as ``set([''])`` as is still the current behavior for the MySQL-Connector-Python DBAPI. Part of the rationale here was that it was otherwise impossible to actually store a blank string within a MySQL SET, as the driver gives us back strings with no way to discern between ``set([''])`` and ``set()``. It was left to the user to determine if ``set([''])`` actually meant "empty set" or not. The new behavior moves the use case for the blank string, which is an unusual case that isn't even documented in MySQL's documentation, into a special case, and the default behavior of :class:`.mysql.SET` is now: * to convert the empty string ``''``, as returned by MySQL-python, into the empty set ``set()``; * to convert the single-blank value set ``set([''])`` returned by MySQL-Connector-Python into the empty set ``set()``; * to handle the case of a set type that actually wishes to include the blank value ``''`` in its list of possible values, a new feature (required in this use case) is implemented whereby the set value is persisted and loaded as a bitwise integer value; the flag :paramref:`.mysql.SET.retrieve_as_bitwise` is added in order to enable this. Using the :paramref:`.mysql.SET.retrieve_as_bitwise` flag allows the set to be persisted and retrieved with no ambiguity of values. Theoretically this flag can be turned on in all cases, as long as the given list of values to the type matches the ordering exactly as declared in the database; it only makes the SQL echo output a bit more unusual. The default behavior of :class:`.mysql.SET` otherwise remains the same, roundtripping values using strings. The string-based behavior now fully supports unicode, including MySQL-python with ``use_unicode=0``. :ticket:`3283` MySQL internal "no such table" exceptions not passed to event handlers ---------------------------------------------------------------------- The MySQL dialect will now disable :meth:`.ConnectionEvents.handle_error` events from firing for those statements which it uses internally to detect if a table exists or not. This is achieved using an execution option ``skip_user_error_events`` that disables the handle error event for the scope of that execution. In this way, user code that rewrites exceptions doesn't need to worry about the MySQL dialect or other dialects that occasionally need to catch SQLAlchemy specific exceptions. Changed the default value of ``raise_on_warnings`` for MySQL-Connector ---------------------------------------------------------------------- Changed the default value of "raise_on_warnings" to False for MySQL-Connector. This was set at True for some reason. The "buffered" flag unfortunately must stay at True as MySQL-Connector does not allow a cursor to be closed unless all results are fully fetched. :ticket:`2515`
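An application that depended upon the old default can opt back in explicitly; a minimal sketch, in which the connection URL is a placeholder::

    from sqlalchemy import create_engine

    # connect_args entries are passed through to
    # mysql.connector.connect()
    engine = create_engine(
        "mysql+mysqlconnector://scott:tiger@localhost/test",
        connect_args={"raise_on_warnings": True},
    )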
.. _bug_3186: MySQL boolean symbols "true", "false" work again ------------------------------------------------ 0.9's overhaul of the IS/IS NOT operators as well as boolean types in :ticket:`2682` disallowed the MySQL dialect from making use of the "true" and "false" symbols in the context of "IS" / "IS NOT". Apparently, even though MySQL has no "boolean" type, it supports IS / IS NOT when the special "true" and "false" symbols are used, even though these are otherwise synonymous with "1" and "0" (and IS/IS NOT don't work with the numerics). So the change here is that the MySQL dialect remains "non native boolean", but the :func:`.true` and :func:`.false` symbols again produce the keywords "true" and "false", so that an expression like ``column.is_(true())`` again works on MySQL. :ticket:`3186` .. _change_3263: The match() operator now returns an agnostic MatchType compatible with MySQL's floating point return value ---------------------------------------------------------------------------------------------------------- The return type of a :meth:`.ColumnOperators.match` expression is now a new type called :class:`.MatchType`. This is a subclass of :class:`.Boolean` that can be intercepted by the dialect in order to produce a different result type at SQL execution time. Code like the following will now function correctly and return floating points on MySQL:: >>> connection.execute( ... select([ ... matchtable.c.title.match('Agile Ruby Programming').label('ruby'), ... matchtable.c.title.match('Dive Python').label('python'), ... matchtable.c.title ... ]).order_by(matchtable.c.id) ... ) [ (2.0, 0.0, 'Agile Web Development with Ruby On Rails'), (0.0, 2.0, 'Dive Into Python'), (2.0, 0.0, "Programming Matz's Ruby"), (0.0, 0.0, 'The Definitive Guide to Django'), (0.0, 1.0, 'Python in a Nutshell') ] :ticket:`3263` .. _change_2984: Drizzle Dialect is now an External Dialect ------------------------------------------ The dialect for Drizzle is now an external dialect, available at https://bitbucket.org/zzzeek/sqlalchemy-drizzle. This dialect was added to SQLAlchemy right before SQLAlchemy was able to accommodate third party dialects well; going forward, all databases that aren't within the "ubiquitous use" category are third party dialects. The dialect's implementation hasn't changed and is still based on the MySQL + MySQLdb dialects within SQLAlchemy. The dialect is as yet unreleased and in "attic" status; however it passes the majority of tests and is generally in decent working order, if someone wants to pick up on polishing it. Dialect Improvements and Changes - SQLite ============================================= SQLite named and unnamed UNIQUE and FOREIGN KEY constraints will inspect and reflect ------------------------------------------------------------------------------------- UNIQUE and FOREIGN KEY constraints are now fully reflected on SQLite both with and without names. Previously, foreign key names were ignored and unnamed unique constraints were skipped. In particular this will help with Alembic's new SQLite migration features. To achieve this, for both foreign keys and unique constraints, the result of PRAGMA foreign_key_list, index_list, and index_info is combined with regular expression parsing of the CREATE TABLE statement overall to form a complete picture of the names of constraints, as well as differentiating UNIQUE constraints that were created as UNIQUE vs. unnamed INDEXes.
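The reflected constraints are available through the standard inspection interface; a minimal sketch, with a placeholder database path and table name::

    from sqlalchemy import create_engine, inspect

    engine = create_engine("sqlite:///some.db")
    insp = inspect(engine)

    # named as well as unnamed constraints are now represented
    print(insp.get_foreign_keys("child"))
    print(insp.get_unique_constraints("child"))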
:ticket:`3244` :ticket:`3261` Dialect Improvements and Changes - SQL Server ============================================= .. _change_3182: PyODBC driver name is required with hostname-based SQL Server connections ------------------------------------------------------------------------- Connecting to SQL Server with PyODBC using a DSN-less connection, e.g. with an explicit hostname, now requires a driver name - SQLAlchemy will no longer attempt to guess a default:: engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0") SQLAlchemy's previously hardcoded default of "SQL Server" is obsolete on Windows, and SQLAlchemy cannot be tasked with guessing the best driver based on operating system/driver detection. Using a DSN is always preferred when using ODBC to avoid this issue entirely. :ticket:`3182` SQL Server 2012 large text / binary types render as VARCHAR, NVARCHAR, VARBINARY -------------------------------------------------------------------------------- The rendering of the :class:`.Text`, :class:`.UnicodeText`, and :class:`.LargeBinary` types has been changed for SQL Server 2012 and greater, with options to control the behavior completely, based on deprecation guidelines from Microsoft. See :ref:`mssql_large_type_deprecation` for details. Dialect Improvements and Changes - Oracle ============================================= .. _change_3220: Improved support for CTEs in Oracle ----------------------------------- CTE support has been fixed up for Oracle, and there is also a new feature :meth:`.CTE.suffix_with` that can assist with Oracle's special directives:: included_parts = select([ part.c.sub_part, part.c.part, part.c.quantity ]).where(part.c.part == "p1").\ cte(name="included_parts", recursive=True).\ suffix_with( "search depth first by part set ord1", "cycle part set y_cycle to 1 default 0", dialect='oracle') :ticket:`3220` New Oracle Keywords for DDL ----------------------------- Keywords such as COMPRESS, ON COMMIT, BITMAP: :ref:`oracle_table_options` :ref:`oracle_index_options` SQLAlchemy-1.0.11/doc/build/changelog/changelog_06.rst0000664000175000017500000051415512636375552023424 0ustar classicclassic00000000000000 ============== 0.6 Changelog ============== .. changelog:: :version: 0.6.9 :released: Sat May 05 2012 .. change:: :tags: general :tickets: 2279 Adjusted the "importlater" mechanism, which is used internally to resolve import cycles, such that the usage of __import__ is completed when the import of sqlalchemy or sqlalchemy.orm is done, thereby avoiding any usage of __import__ after the application starts new threads. .. change:: :tags: orm :tickets: 2197 Fixed bug whereby the source clause used by query.join() would be inconsistent if against a column expression that combined multiple entities together. .. change:: :tags: orm, bug :tickets: 2310 fixed inappropriate evaluation of user-mapped object in a boolean context within query.get(). .. change:: :tags: orm :tickets: 2228 Fixed bug apparent only in Python 3 whereby sorting of persistent + pending objects during flush would produce an illegal comparison, if the persistent object primary key is not a single integer. .. change:: :tags: orm :tickets: 2234 Fixed bug where query.join() + aliased=True from a joined-inh structure to itself on relationship() with join condition on the child table would convert the lead entity into the joined one inappropriately. ..
change:: :tags: orm :tickets: 2287 Fixed bug whereby mapper.order_by attribute would be ignored in the "inner" query within a subquery eager load. .. change:: :tags: orm :tickets: 2215 Fixed bug whereby if a mapped class redefined __hash__() or __eq__() to something non-standard, which is a supported use case as SQLA should never consult these, the methods would be consulted if the class was part of a "composite" (i.e. non-single-entity) result set. .. change:: :tags: orm :tickets: 2188 Fixed subtle bug that caused SQL to blow up if: column_property() against subquery + joinedload + LIMIT + order by the column property() occurred. .. change:: :tags: orm :tickets: 2207 The join condition produced by with_parent as well as when using a "dynamic" relationship against a parent will generate unique bindparams, rather than incorrectly repeating the same bindparam. .. change:: :tags: orm :tickets: 2199 Repaired the "no statement condition" assertion in Query which would attempt to raise if a generative method were called after from_statement() were called. .. change:: :tags: orm :tickets: 1776 Cls.column.collate("some collation") now works. .. change:: :tags: orm, bug :tickets: 2297 Fixed the error formatting raised when a tuple is inadvertently passed to session.query(). .. change:: :tags: engine :tickets: 2317 Backported the fix introduced in 0.7.4, which ensures that the connection is in a valid state before attempting to call rollback()/prepare()/release() on savepoint and two-phase transactions. .. change:: :tags: sql :tickets: 2188 Fixed two subtle bugs involving column correspondence in a selectable, one with the same labeled subquery repeated, the other when the label has been "grouped" and loses itself. .. change:: :tags: sql :tickets: Fixed bug whereby "warn on unicode" flag would get set for the String type when used with certain dialects. This bug is not in 0.7. .. change:: :tags: sql :tickets: 2270 Fixed bug whereby with_only_columns() method of Select would fail if a selectable were passed. However, the FROM behavior is still incorrect here, so you need 0.7 in any case for this use case to be usable. .. change:: :tags: schema :tickets: Added an informative error message when ForeignKeyConstraint refers to a column name in the parent that is not found. .. change:: :tags: postgresql :tickets: 2291, 2141 Fixed bug whereby the same modified index behavior in PG 9 affected primary key reflection on a renamed column. .. change:: :tags: mysql :tickets: 2186 Fixed OurSQL dialect to use ansi-neutral quote symbol "'" for XA commands instead of '"'. .. change:: :tags: mysql :tickets: 2225 a CREATE TABLE will put the COLLATE option after CHARSET, which appears to be part of MySQL's arbitrary rules regarding if it will actually work or not. .. change:: :tags: mssql, bug :tickets: 2269 Decode incoming values when retrieving list of index names and the names of columns within those indexes. .. change:: :tags: oracle :tickets: 2200 Added ORA-00028 to disconnect codes, using cx_oracle _Error.code to get at the code. .. change:: :tags: oracle :tickets: 2220 repaired the oracle.RAW type which did not generate the correct DDL. .. change:: :tags: oracle :tickets: 2212 added CURRENT to reserved word list. .. change:: :tags: examples :tickets: 2266 Adjusted dictlike-polymorphic.py example to apply the CAST such that it works on PG, other databases. .. changelog:: :version: 0.6.8 :released: Sun Jun 05 2011 ..
change:: :tags: orm :tickets: 2144 Calling query.get() against a column-based entity is invalid; this condition now raises a deprecation warning. .. change:: :tags: orm :tickets: 2151 a non_primary mapper will inherit the _identity_class of the primary mapper. This so that a non_primary established against a class that's normally in an inheritance mapping will produce results that are identity-map compatible with that of the primary mapper. .. change:: :tags: orm :tickets: 2148 Backported 0.7's identity map implementation, which does not use a mutex around removal. This as some users were still getting deadlocks despite the adjustments in 0.6.7; the 0.7 approach that doesn't use a mutex does not appear to produce "dictionary changed size" issues, the original rationale for the mutex. .. change:: :tags: orm :tickets: 2163 Fixed the error message emitted for "can't execute syncrule for destination column 'q'; mapper 'X' does not map this column" to reference the correct mapper. .. change:: :tags: orm :tickets: 2149 Fixed bug where determination of "self referential" relationship would fail with no workaround for joined-inh subclass related to itself, or joined-inh subclass related to a subclass of that with no cols in the sub-sub class in the join condition. .. change:: :tags: orm :tickets: 2153 mapper() will ignore non-configured foreign keys to unrelated tables when determining inherit condition between parent and child class. This is equivalent to behavior already applied to declarative. Note that 0.7 has a more comprehensive solution to this, altering how join() itself determines an FK error. .. change:: :tags: orm :tickets: 2171 Fixed bug whereby mapper mapped to an anonymous alias would fail if logging were used, due to unescaped % sign in the alias name. .. change:: :tags: orm :tickets: 2170 Modify the text of the message which occurs when the "identity" key isn't detected on flush, to include the common cause that the Column isn't set up to detect auto-increment correctly. .. change:: :tags: orm :tickets: 2182 Fixed bug where transaction-level "deleted" collection wouldn't be cleared of expunged states, raising an error if they later became transient. .. change:: :tags: sql :tickets: 2147 Fixed bug whereby if FetchedValue was passed to column server_onupdate, it would not have its parent "column" assigned; added test coverage for all column default assignment patterns. .. change:: :tags: sql :tickets: 2167 Fixed bug whereby nesting a label of a select() with another label in it would produce incorrect exported columns. Among other things this would break an ORM column_property() mapping against another column_property(). .. change:: :tags: engine :tickets: 2178 Adjusted the __contains__() method of a RowProxy result row such that no exception throw is generated internally; NoSuchColumnError() also will generate its message regardless of whether or not the column construct can be coerced to a string. .. change:: :tags: postgresql :tickets: 2141 Fixed bug affecting PG 9 whereby index reflection would fail if against a column whose name had changed. .. change:: :tags: postgresql :tickets: 2175 Some unit test fixes regarding numeric arrays, MATCH operator. A potential floating-point inaccuracy issue was fixed, and certain tests of the MATCH operator only execute within an EN-oriented locale for now. .. change:: :tags: mssql :tickets: 2169 Fixed bug in MSSQL dialect whereby the aliasing applied to a schema-qualified table would leak into enclosing select statements. ..
change:: :tags: mssql :tickets: 2159 Fixed bug whereby DATETIME2 type would fail on the "adapt" step when used in result sets or bound parameters. This issue is not in 0.7. .. changelog:: :version: 0.6.7 :released: Wed Apr 13 2011 .. change:: :tags: orm :tickets: 2087 Tightened the iterate vs. remove mutex around the identity map iteration, attempting to reduce the chance of an (extremely rare) reentrant gc operation causing a deadlock. Might remove the mutex in 0.7. .. change:: :tags: orm :tickets: 2030 Added a `name` argument to `Query.subquery()`, to allow a fixed name to be assigned to the alias object. .. change:: :tags: orm :tickets: 2019 A warning is emitted when a joined-table inheriting mapper has no primary keys on the locally mapped table (but has pks on the superclass table). .. change:: :tags: orm :tickets: 2038 Fixed bug where "middle" class in a polymorphic hierarchy would have no 'polymorphic_on' column if it didn't also specify a 'polymorphic_identity', leading to strange errors upon refresh, wrong class loaded when querying from that target. Also emits the correct WHERE criterion when using single table inheritance. .. change:: :tags: orm :tickets: 1995 Fixed bug where a column with a SQL or server side default that was excluded from a mapping with include_properties or exclude_properties would result in UnmappedColumnError. .. change:: :tags: orm :tickets: 2046 A warning is emitted in the unusual case that an append or similar event on a collection occurs after the parent object has been dereferenced, which prevents the parent from being marked as "dirty" in the session. This will be an exception in 0.7. .. change:: :tags: orm :tickets: 2098 Fixed bug in query.options() whereby a path applied to a lazyload using string keys could overlap a same named attribute on the wrong entity. Note 0.7 has an updated version of this fix. .. change:: :tags: orm :tickets: 2063 Reworded the exception raised when a flush is attempted of a subclass that is not polymorphic against the supertype. .. change:: :tags: orm :tickets: 2123 Some fixes to the state handling regarding backrefs, typically when autoflush=False, where the back-referenced collection wouldn't properly handle add/removes with no net change. Thanks to Richard Murri for the test case + patch. .. change:: :tags: orm :tickets: 2130 a "having" clause would be copied from the inside to the outside query if from_self() were used.. .. change:: :tags: sql :tickets: 2028 Column.copy(), as used in table.tometadata(), copies the 'doc' attribute. .. change:: :tags: sql :tickets: 2023 Added some defs to the resultproxy.c extension so that the extension compiles and runs on Python 2.4. .. change:: :tags: sql :tickets: 2042 The compiler extension now supports overriding the default compilation of expression._BindParamClause including that the auto-generated binds within the VALUES/SET clause of an insert()/update() statement will also use the new compilation rules. .. change:: :tags: sql :tickets: 2089 Added accessors to ResultProxy "returns_rows", "is_insert" .. change:: :tags: sql :tickets: 2116 The limit/offset keywords to select() as well as the value passed to select.limit()/offset() will be coerced to integer. .. change:: :tags: engine :tickets: 2102 Fixed bug in QueuePool, SingletonThreadPool whereby connections that were discarded via overflow or periodic cleanup() were not explicitly closed, leaving garbage collection to the task instead. This generally only affects non-reference-counting backends like Jython and Pypy. 
Thanks to Jaimy Azle for spotting this. .. change:: :tags: sqlite :tickets: 2115 Fixed bug where reflection of foreign key created as "REFERENCES <tablename>" without col name would fail. .. change:: :tags: postgresql :tickets: 1083 The name of the auto-generated sequence of a SERIAL column, as derived during explicit sequence execution (which currently only occurs if implicit_returning=False), now accommodates the case where the table + column name is greater than 63 characters, using the same logic Postgresql uses. .. change:: :tags: postgresql :tickets: 2044 Added an additional libpq message to the list of "disconnect" exceptions, "could not receive data from server" .. change:: :tags: postgresql :tickets: 2092 Added RESERVED_WORDS for postgresql dialect. .. change:: :tags: postgresql :tickets: 2073 Fixed the BIT type to allow a "length" parameter, "varying" parameter. Reflection also fixed. .. change:: :tags: informix :tickets: 2092 Added RESERVED_WORDS for informix dialect. .. change:: :tags: mssql :tickets: 2071 Rewrote the query used to get the definition of a view, typically when using the Inspector interface, to use sys.sql_modules instead of the information schema, thereby allowing view definitions longer than 4000 characters to be fully returned. .. change:: :tags: mysql :tickets: 2047 oursql dialect accepts the same "ssl" arguments in create_engine() as that of MySQLdb. .. change:: :tags: firebird :tickets: 2083 The "implicit_returning" flag on create_engine() is honored if set to False. .. change:: :tags: oracle :tickets: 2100 Column names that would require quotes for the column itself or for a name-generated bind parameter, such as names with special characters, underscores, or non-ascii characters, now properly translate bind parameter keys when talking to cx_oracle. .. change:: :tags: oracle :tickets: 2116 Oracle dialect adds use_binds_for_limits=False create_engine() flag, will render the LIMIT/OFFSET values inline instead of as binds, reported to modify the execution plan used by Oracle. .. change:: :tags: ext :tickets: 2090 The horizontal_shard ShardedSession class accepts the common Session argument "query_cls" as a constructor argument, to enable further subclassing of ShardedQuery. .. change:: :tags: declarative :tickets: 2050 Added an explicit check for the case that the name 'metadata' is used for a column attribute on a declarative class. .. change:: :tags: declarative :tickets: 2061 Fix error message referencing old @classproperty name to reference @declared_attr .. change:: :tags: declarative :tickets: 2091 Arguments in __mapper_args__ that aren't "hashable" aren't mistaken for always-hashable, possibly-column arguments. .. change:: :tags: documentation :tickets: 2029 Documented SQLite DATE/TIME/DATETIME types. .. change:: :tags: examples :tickets: 2090 The Beaker caching example allows a "query_cls" argument to the query_callable() function. .. changelog:: :version: 0.6.6 :released: Sat Jan 08 2011 .. change:: :tags: orm :tickets: Fixed bug whereby a non-"mutable" attribute modified event which occurred on an object that was clean except for preceding mutable attribute changes would fail to strongly reference itself in the identity map. This would cause the object to be garbage collected, losing track of any changes that weren't previously saved in the "mutable changes" dictionary. .. change:: :tags: orm :tickets: 2013 Fixed bug whereby "passive_deletes='all'" wasn't passing the correct symbols to lazy loaders during flush, thereby causing an unwarranted load. ..
change:: :tags: orm :tickets: 1997 Fixed bug which prevented composite mapped attributes from being used on a mapped select statement.. Note the workings of composite are slated to change significantly in 0.7. .. change:: :tags: orm :tickets: 1976 active_history flag also added to composite(). The flag has no effect in 0.6, but is instead a placeholder flag for forwards compatibility, as it applies in 0.7 for composites. .. change:: :tags: orm :tickets: 2002 Fixed uow bug whereby expired objects passed to Session.delete() would not have unloaded references or collections taken into account when deleting objects, despite passive_deletes remaining at its default of False. .. change:: :tags: orm :tickets: 1987 A warning is emitted when version_id_col is specified on an inheriting mapper when the inherited mapper already has one, if those column expressions are not the same. .. change:: :tags: orm :tickets: 1954 "innerjoin" flag doesn't take effect along the chain of joinedload() joins if a previous join in that chain is an outer join, thus allowing primary rows without a referenced child row to be correctly returned in results. .. change:: :tags: orm :tickets: 1964 Fixed bug regarding "subqueryload" strategy whereby strategy would fail if the entity was an aliased() construct. .. change:: :tags: orm :tickets: 2014 Fixed bug regarding "subqueryload" strategy whereby the join would fail if using a multi-level load of the form from A->joined-subclass->C .. change:: :tags: orm :tickets: 1968 Fixed indexing of Query objects by -1. It was erroneously transformed to the empty slice -1:0 that resulted in IndexError. .. change:: :tags: orm :tickets: 1971 The mapper argument "primary_key" can be passed as a single column as well as a list or tuple. The documentation examples that illustrated it as a scalar value have been changed to lists. .. change:: :tags: orm :tickets: 1961 Added active_history flag to relationship() and column_property(), forces attribute events to always load the "old" value, so that it's available to attributes.get_history(). .. change:: :tags: orm :tickets: 1977 Query.get() will raise if the number of params in a composite key is too large, as well as too small. .. change:: :tags: orm :tickets: 1992 Backport of "optimized get" fix from 0.7, improves the generation of joined-inheritance "load expired row" behavior. .. change:: :tags: orm :tickets: A little more verbiage to the "primaryjoin" error, in an unusual condition that the join condition "works" for viewonly but doesn't work for non-viewonly, and foreign_keys wasn't used - adds "foreign_keys" to the suggestion. Also add "foreign_keys" to the suggestion for the generic "direction" error. .. change:: :tags: sql :tickets: 1984 Fixed operator precedence rules for multiple chains of a single non-associative operator. I.e. "x - (y - z)" will compile as "x - (y - z)" and not "x - y - z". Also works with labels, i.e. "x - (y - z).label('foo')" .. change:: :tags: sql :tickets: 1967 The 'info' attribute of Column is copied during Column.copy(), i.e. as occurs when using columns in declarative mixins. .. change:: :tags: sql :tickets: Added a bind processor for booleans which coerces to int, for DBAPIs such as pymssql that naively call str() on values. .. change:: :tags: sql :tickets: 2000 CheckConstraint will copy its 'initially', 'deferrable', and '_create_rule' attributes within a copy()/tometadata() .. 
change:: :tags: engine :tickets: The "unicode warning" against non-unicode bind data is now raised only when the Unicode type is used explicitly; not when convert_unicode=True is used on the engine or String type. .. change:: :tags: engine :tickets: 1978 Fixed memory leak in C version of Decimal result processor. .. change:: :tags: engine :tickets: 1871 Implemented sequence check capability for the C version of RowProxy, as well as 2.7 style "collections.Sequence" registration for RowProxy. .. change:: :tags: engine :tickets: 1998 Threadlocal engine methods rollback(), commit(), prepare() won't raise if no transaction is in progress; this was a regression introduced in 0.6. .. change:: :tags: engine :tickets: 2004 Threadlocal engine returns itself upon begin(), begin_nested(); engine then implements contextmanager methods to allow the "with" statement. .. change:: :tags: postgresql :tickets: 1984 Single element tuple expressions inside an IN clause parenthesize correctly. .. change:: :tags: postgresql :tickets: 1955 Ensured every numeric, float, int code, scalar + array, are recognized by psycopg2 and pg8000's "numeric" base type. .. change:: :tags: postgresql :tickets: 1956 Added as_uuid=True flag to the UUID type, will receive and return values as Python UUID() objects rather than strings. Currently, the UUID type is only known to work with psycopg2. .. change:: :tags: postgresql :tickets: 1989 Fixed bug whereby KeyError would occur with non-ENUM supported PG versions after a pool dispose+recreate would occur. .. change:: :tags: mysql :tickets: 1960 Fixed error handling for Jython + zxjdbc, such that has_table() property works again. Regression from 0.6.3 (we don't have a Jython buildbot, sorry) .. change:: :tags: sqlite :tickets: 1851 The REFERENCES clause in a CREATE TABLE that includes a remote schema to another table with the same schema name now renders the remote name without the schema clause, as required by SQLite. .. change:: :tags: sqlite :tickets: On the same theme, the REFERENCES clause in a CREATE TABLE that includes a remote schema to a *different* schema than that of the parent table doesn't render at all, as cross-schema references do not appear to be supported. .. change:: :tags: mssql :tickets: 1770 The rewrite of index reflection was unfortunately not tested correctly, and returned incorrect results. This regression is now fixed. .. change:: :tags: oracle :tickets: 1953 The cx_oracle "decimal detection" logic, which takes place for result set columns with ambiguous numeric characteristics, now uses the decimal point character determined by the locale/NLS_LANG setting, using an on-first-connect detection of this character. cx_oracle 5.0.3 or greater is also required when using a non-period-decimal-point NLS_LANG setting. .. change:: :tags: firebird :tickets: 2012 Firebird numeric type now checks for Decimal explicitly, lets float() pass right through, thereby allowing special values such as float('inf'). .. change:: :tags: declarative :tickets: 1972 An error is raised if __table_args__ is not in tuple or dict format, and is not None. .. change:: :tags: sqlsoup :tickets: 1975 Added "map_to()" method to SqlSoup, which is a "master" method which accepts explicit arguments for each aspect of the selectable and mapping, including a base class per mapping. .. change:: :tags: sqlsoup :tickets: Mapped selectables used with the map(), with_labels(), join() methods no longer put the given argument into the internal "cache" dictionary.
Particularly since the join() and select() objects are created in the method itself this was pretty much a pure memory leaking behavior. .. change:: :tags: examples :tickets: The versioning example now supports detection of changes in an associated relationship(). .. changelog:: :version: 0.6.5 :released: Sun Oct 24 2010 .. change:: :tags: orm :tickets: 1914 Added a new "lazyload" option "immediateload". Issues the usual "lazy" load operation automatically as the object is populated. The use case here is when loading objects to be placed in an offline cache, or otherwise used after the session isn't available, and straight 'select' loading, not 'joined' or 'subquery', is desired. .. change:: :tags: orm :tickets: 1920 New Query methods: query.label(name), query.as_scalar(), return the query's statement as a scalar subquery with /without label; query.with_entities(\*ent), replaces the SELECT list of the query with new entities. Roughly equivalent to a generative form of query.values() which accepts mapped entities as well as column expressions. .. change:: :tags: orm :tickets: Fixed recursion bug which could occur when moving an object from one reference to another, with backrefs involved, where the initiating parent was a subclass (with its own mapper) of the previous parent. .. change:: :tags: orm :tickets: 1918 Fixed a regression in 0.6.4 which occurred if you passed an empty list to "include_properties" on mapper() .. change:: :tags: orm :tickets: Fixed labeling bug in Query whereby the NamedTuple would mis-apply labels if any of the column expressions were un-labeled. .. change:: :tags: orm :tickets: 1925 Patched a case where query.join() would adapt the right side to the right side of the left's join inappropriately .. change:: :tags: orm :tickets: Query.select_from() has been beefed up to help ensure that a subsequent call to query.join() will use the select_from() entity, assuming it's a mapped entity and not a plain selectable, as the default "left" side, not the first entity in the Query object's list of entities. .. change:: :tags: orm :tickets: The exception raised by Session when it is used subsequent to a subtransaction rollback (which is what happens when a flush fails in autocommit=False mode) has now been reworded (this is the "inactive due to a rollback in a subtransaction" message). In particular, if the rollback was due to an exception during flush(), the message states this is the case, and reiterates the string form of the original exception that occurred during flush. If the session is closed due to explicit usage of subtransactions (not very common), the message just states this is the case. .. change:: :tags: orm :tickets: The exception raised by Mapper when repeated requests to its initialization are made after initialization already failed no longer assumes the "hasattr" case, since there's other scenarios in which this message gets emitted, and the message also does not compound onto itself multiple times - you get the same message for each attempt at usage. The misnomer "compiles" is being traded out for "initialize". .. change:: :tags: orm :tickets: 1935 Fixed bug in query.update() where 'evaluate' or 'fetch' expiration would fail if the column expression key was a class attribute with a different keyname as the actual column name. .. change:: :tags: orm :tickets: Added an assertion during flush which ensures that no NULL-holding identity keys were generated on "newly persistent" objects. 
This can occur when user defined code inadvertently triggers flushes on not-fully-loaded objects. .. change:: :tags: orm :tickets: 1910 lazy loads for relationship attributes now use the current state, not the "committed" state, of foreign and primary key attributes when issuing SQL, if a flush is not in process. Previously, only the database-committed state would be used. In particular, this would cause a many-to-one get()-on-lazyload operation to fail, as autoflush is not triggered on these loads when the attributes are determined and the "committed" state may not be available. .. change:: :tags: orm :tickets: A new flag on relationship(), load_on_pending, allows the lazy loader to fire off on pending objects without a flush taking place, as well as a transient object that's been manually "attached" to the session. Note that this flag blocks attribute events from taking place when an object is loaded, so backrefs aren't available until after a flush. The flag is only intended for very specific use cases. .. change:: :tags: orm :tickets: Another new flag on relationship(), cascade_backrefs, disables the "save-update" cascade when the event was initiated on the "reverse" side of a bidirectional relationship. This is a cleaner behavior so that many-to-ones can be set on a transient object without it getting sucked into the child object's session, while still allowing the forward collection to cascade. We *might* default this to False in 0.7. .. change:: :tags: orm :tickets: Slight improvement to the behavior of "passive_updates=False" when placed only on the many-to-one side of a relationship; documentation has been clarified that passive_updates=False should really be on the one-to-many side. .. change:: :tags: orm :tickets: Placing passive_deletes=True on a many-to-one emits a warning, since you probably intended to put it on the one-to-many side. .. change:: :tags: orm :tickets: Fixed bug that would prevent "subqueryload" from working correctly with single table inheritance for a relationship from a subclass - the "where type in (x, y, z)" only gets placed on the inside, instead of repeatedly. .. change:: :tags: orm :tickets: When using from_self() with single table inheritance, the "where type in (x, y, z)" is placed on the outside of the query only, instead of repeatedly. May make some more adjustments to this. .. change:: :tags: orm :tickets: 1924 scoped_session emits a warning when configure() is called if a Session is already present (checks only the current thread) .. change:: :tags: orm :tickets: 1932 reworked the internals of mapper.cascade_iterator() to cut down method calls by about 9% in some circumstances. .. change:: :tags: sql :tickets: Fixed bug in TypeDecorator whereby the dialect-specific type was getting pulled in to generate the DDL for a given type, which didn't always return the correct result. .. change:: :tags: sql :tickets: TypeDecorator can now have a fully constructed type specified as its "impl", in addition to a type class. .. change:: :tags: sql :tickets: TypeDecorator will now place itself as the resulting type for a binary expression where the type coercion rules would normally return its impl type - previously, a copy of the impl type would be returned which would have the TypeDecorator embedded into it as the "dialect" impl, this was probably an unintentional way of achieving the desired effect. .. change:: :tags: sql :tickets: TypeDecorator.load_dialect_impl() returns "self.impl" by default, i.e. not the dialect implementation type of "self.impl". 
This to support compilation correctly. Behavior can be user-overridden in exactly the same way as before to the same effect. .. change:: :tags: sql :tickets: Added type_coerce(expr, type\_) expression element. Treats the given expression as the given type when evaluating expressions and processing result rows, but does not affect the generation of SQL, other than an anonymous label. .. change:: :tags: sql :tickets: Table.tometadata() now copies Index objects associated with the Table as well. .. change:: :tags: sql :tickets: Table.tometadata() issues a warning if the given Table is already present in the target MetaData - the existing Table object is returned. .. change:: :tags: sql :tickets: An informative error message is raised if a Column which has not yet been assigned a name, i.e. as in declarative, is used in a context where it is exported to the columns collection of an enclosing select() construct, or if any construct involving that column is compiled before its name is assigned. .. change:: :tags: sql :tickets: 1862 as_scalar(), label() can be called on a selectable which contains a Column that is not yet named. .. change:: :tags: sql :tickets: 1907 Fixed recursion overflow which could occur when operating with two expressions both of type "NullType", but not the singleton NULLTYPE instance. .. change:: :tags: declarative :tickets: 1922 @classproperty (soon/now @declared_attr) takes effect for __mapper_args__, __table_args__, __tablename__ on a base class that is not a mixin, as well as mixins. .. change:: :tags: declarative :tickets: 1915 @classproperty's official name/location for usage with declarative is sqlalchemy.ext.declarative.declared_attr. Same thing, but moving there since it is more of a "marker" that's specific to declarative, not just an attribute technique. .. change:: :tags: declarative :tickets: 1931, 1930 Fixed bug whereby columns on a mixin wouldn't propagate correctly to a single-table, or joined-table, inheritance scheme where the attribute name is different than that of the column. .. change:: :tags: declarative :tickets: A mixin can now specify a column that overrides a column of the same name associated with a superclass. Thanks to Oystein Haaland. .. change:: :tags: engine :tickets: Fixed a regression in 0.6.4 whereby the change that allowed cursor errors to be raised consistently broke the result.lastrowid accessor. Test coverage has been added for result.lastrowid. Note that lastrowid is only supported by Pysqlite and some MySQL drivers, so isn't super-useful in the general case. .. change:: :tags: engine :tickets: the logging message emitted by the engine when a connection is first used is now "BEGIN (implicit)" to emphasize that DBAPI has no explicit begin(). .. change:: :tags: engine :tickets: 1936 added "views=True" option to metadata.reflect(), will add the list of available views to those being reflected. .. change:: :tags: engine :tickets: 1899 engine_from_config() now accepts 'debug' for 'echo', 'echo_pool', 'force' for 'convert_unicode', boolean values for 'use_native_unicode'. .. change:: :tags: postgresql :tickets: Added "as_tuple" flag to ARRAY type, returns results as tuples instead of lists to allow hashing. .. change:: :tags: postgresql :tickets: 1933 Fixed bug which prevented "domain" built from a custom type such as "enum" from being reflected. .. change:: :tags: mysql :tickets: 1940 Fixed bug involving reflection of CURRENT_TIMESTAMP default used with ON UPDATE clause, thanks to Taavi Burns ..
change:: :tags: oracle :tickets: 1878 The implicit_returning argument to create_engine() is now honored regardless of detected version of Oracle. Previously, the flag would be forced to False if server version info was < 10. .. change:: :tags: mssql :tickets: 1946 Fixed reflection bug which did not properly handle reflection of unknown types. .. change:: :tags: mssql :tickets: 1943 Fixed bug where aliasing of tables with "schema" would fail to compile properly. .. change:: :tags: mssql :tickets: 1770 Rewrote the reflection of indexes to use the sys catalogs, so that column names of any configuration (spaces, embedded commas, etc.) can be reflected. Note that reflection of indexes requires SQL Server 2005 or greater. .. change:: :tags: mssql :tickets: 1952 mssql+pymssql dialect now honors the "port" portion of the URL instead of discarding it. .. change:: :tags: informix :tickets: 1906 *Major* cleanup / modernization of the Informix dialect for 0.6, courtesy Florian Apolloner. .. change:: :tags: tests :tickets: the NoseSQLAlchemyPlugin has been moved to a new package "sqlalchemy_nose" which installs along with "sqlalchemy". This so that the "nosetests" script works as always but also allows the --with-coverage option to turn on coverage before SQLAlchemy modules are imported, allowing coverage to work correctly. .. change:: :tags: misc :tickets: 1890 CircularDependencyError now has .cycles and .edges members, which are the set of elements involved in one or more cycles, and the set of edges as 2-tuples. .. changelog:: :version: 0.6.4 :released: Tue Sep 07 2010 .. change:: :tags: orm :tickets: The name ConcurrentModificationError has been changed to StaleDataError, and descriptive error messages have been revised to reflect exactly what the issue is. Both names will remain available for the foreseeable future for schemes that may be specifying ConcurrentModificationError in an "except:" clause. .. change:: :tags: orm :tickets: 1891 Added a mutex to the identity map which mutexes remove operations against iteration methods, which now pre-buffer before returning an iterable. This because asynchronous gc can remove items via the gc thread at any time. .. change:: :tags: orm :tickets: The Session class is now present in sqlalchemy.orm.*. We're moving away from the usage of create_session(), which has non-standard defaults, for those situations where a one-step Session constructor is desired. Most users should stick with sessionmaker() for general use, however. .. change:: :tags: orm :tickets: query.with_parent() now accepts transient objects and will use the non-persistent values of their pk/fk attributes in order to formulate the criterion. Docs are also clarified as to the purpose of with_parent(). .. change:: :tags: orm :tickets: The include_properties and exclude_properties arguments to mapper() now accept Column objects as members in addition to strings. This so that same-named Column objects, such as those within a join(), can be disambiguated. .. change:: :tags: orm :tickets: 1896 A warning is now emitted if a mapper is created against a join or other single selectable that includes multiple columns with the same name in its .c. collection, and those columns aren't explicitly named as part of the same or separate attributes (or excluded). In 0.7 this warning will be an exception. Note that this warning is not emitted when the combination occurs as a result of inheritance, so that attributes still allow being overridden naturally. In 0.7 this will be improved further. ..
change:: :tags: orm :tickets: 1896 The primary_key argument to mapper() can now specify a series of columns that are only a subset of the calculated "primary key" columns of the mapped selectable, without an error being raised. This helps for situations where a selectable's effective primary key is simpler than the number of columns in the selectable that are actually marked as "primary_key", such as a join against two tables on their primary key columns. .. change:: :tags: orm :tickets: An object that's been deleted now gets a flag 'deleted', which prohibits the object from being re-add()ed to the session, as previously the object would live in the identity map silently until its attributes were accessed. The make_transient() function now resets this flag along with the "key" flag. .. change:: :tags: orm :tickets: make_transient() can be safely called on an already transient instance. .. change:: :tags: orm :tickets: a warning is emitted in mapper() if the polymorphic_on column is not present either in direct or derived form in the mapped selectable or in the with_polymorphic selectable, instead of silently ignoring it. Look for this to become an exception in 0.7. .. change:: :tags: orm :tickets: Another pass through the series of error messages emitted when relationship() is configured with ambiguous arguments. The "foreign_keys" setting is no longer mentioned, as it is almost never needed and it is preferable users set up correct ForeignKey metadata, which is now the recommendation. If 'foreign_keys' is used and is incorrect, the message suggests the attribute is probably unnecessary. Docs for the attribute are beefed up. This because all confused relationship() users on the ML appear to be attempting to use foreign_keys due to the message, which only confuses them further since Table metadata is much clearer. .. change:: :tags: orm :tickets: 1877 If the "secondary" table has no ForeignKey metadata and no foreign_keys is set, even though the user is passing screwed up information, it is assumed that primary/secondaryjoin expressions should consider only and all cols in "secondary" to be foreign. It's not possible with "secondary" for the foreign keys to be elsewhere in any case. A warning is now emitted instead of an error, and the mapping succeeds. .. change:: :tags: orm :tickets: 1856 Moving an o2m object from one collection to another, or vice versa changing the referenced object by an m2o, where the foreign key is also a member of the primary key, will now be more carefully checked during flush if the change in value of the foreign key on the "many" side is the result of a change in the primary key of the "one" side, or if the "one" is just a different object. In one case, a cascade-capable DB would have cascaded the value already and we need to look at the "new" PK value to do an UPDATE, in the other we need to continue looking at the "old". We now look at the "old", assuming passive_updates=True, unless we know it was a PK switch that triggered the change. .. change:: :tags: orm :tickets: 1857 The value of version_id_col can be changed manually, and this will result in an UPDATE of the row. Versioned UPDATEs and DELETEs now use the "committed" value of the version_id_col in the WHERE clause and not the pending changed value. The version generator is also bypassed if manual changes are present on the attribute. .. change:: :tags: orm :tickets: Repaired the usage of merge() when used with concrete inheriting mappers. 
Such mappers frequently have so-called "concrete" attributes, which are subclass attributes that "disable" propagation from the parent - these needed to allow a merge() operation to pass through without effect. .. change:: :tags: orm :tickets: 1863 Specifying a non-column based argument for column_mapped_collection, including string, text() etc., will raise an error message that specifically asks for a column element, no longer misleads with incorrect information about text() or literal(). .. change:: :tags: orm :tickets: Similarly, for relationship(), foreign_keys, remote_side, order_by - all column-based expressions are enforced - lists of strings are explicitly disallowed since this is a very common error. .. change:: :tags: orm :tickets: 1864 Dynamic attributes don't support collection population - added an assertion for when set_committed_value() is called, as well as when joinedload() or subqueryload() options are applied to a dynamic attribute, instead of failure / silent failure. .. change:: :tags: orm :tickets: 1852 Fixed bug whereby generating a Query derived from one which had the same column repeated with different label names, typically in some UNION situations, would fail to propagate the inner columns completely to the outer query. .. change:: :tags: orm :tickets: 1881 object_session() raises the proper UnmappedInstanceError when presented with an unmapped instance. .. change:: :tags: orm :tickets: Applied further memoizations to calculated Mapper properties, with significant (~90%) runtime mapper.py call count reduction in heavily polymorphic mapping configurations. .. change:: :tags: orm :tickets: mapper _get_col_to_prop private method used by the versioning example is deprecated; now use mapper.get_property_by_column() which will remain the public method for this. .. change:: :tags: orm :tickets: the versioning example works correctly now if versioning on a col that was formerly NULL. .. change:: :tags: sql :tickets: Calling execute() on an alias() construct is pending deprecation for 0.7, as it is not itself an "executable" construct. It currently "proxies" its inner element and is conditionally "executable" but this is not the kind of ambiguity we like these days. .. change:: :tags: sql :tickets: The execute() and scalar() methods of ClauseElement are now moved appropriately to the Executable subclass. ClauseElement.execute()/ scalar() are still present and are pending deprecation in 0.7, but note these would always raise an error anyway if you were not an Executable (unless you were an alias(), see previous note). .. change:: :tags: sql :tickets: Added basic math expression coercion for Numeric->Integer, so that resulting type is Numeric regardless of the direction of the expression. .. change:: :tags: sql :tickets: 1855 Changed the scheme used to generate truncated "auto" index names when using the "index=True" flag on Column. The truncation only takes place with the auto-generated name, not one that is user-defined (an error would be raised instead), and the truncation scheme itself is now based on a fragment of an md5 hash of the identifier name, so that multiple indexes on columns with similar names still have unique names. .. change:: :tags: sql :tickets: 1412 The generated index name also is based on a "max index name length" attribute which is separate from the "max identifier length" - this to appease MySQL, which has a max length of 64 for index names, separate from its overall max length of 255. .. 
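The truncated "auto" index name entries above, in a small hedged sketch with a hypothetical table; only the auto-generated ``ix_...`` name is subject to the md5-based truncation, while an over-long user-defined name raises an error instead::

    from sqlalchemy import MetaData, Table, Column, Integer, String

    metadata = MetaData()
    # index=True generates an index named 'ix_widgets_<column>'; when
    # that exceeds the dialect's max index name length, it is truncated
    # using an md5 fragment so similar names stay unique
    widgets = Table('widgets', metadata,
                    Column('id', Integer, primary_key=True),
                    Column('a_very_long_descriptive_column_name',
                           String(100), index=True))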
change:: :tags: sql :tickets: the text() construct, if placed in a column oriented situation, will at least return NULLTYPE for its type instead of None, allowing it to be used a little more freely for ad-hoc column expressions than before. literal_column() is still the better choice, however. .. change:: :tags: sql :tickets: Added full description of parent table/column, target table/column in error message raised when ForeignKey can't resolve target. .. change:: :tags: sql :tickets: 1865 Fixed bug whereby replacing composite foreign key columns in a reflected table would cause an attempt to remove the reflected constraint from the table a second time, raising a KeyError. .. change:: :tags: sql :tickets: the _Label construct, i.e. the one that is produced whenever you say somecol.label(), now counts itself in its "proxy_set" unioned with that of its contained column's proxy set, instead of directly returning that of the contained column. This allows column correspondence operations which depend on the identity of the _Labels themselves to return the correct result .. change:: :tags: sql :tickets: 1852 fixes ORM bug. .. change:: :tags: engine :tickets: Calling fetchone() or similar on a result that has already been exhausted, has been closed, or is not a result-returning result now raises ResourceClosedError, a subclass of InvalidRequestError, in all cases, regardless of backend. Previously, some DBAPIs would raise ProgrammingError (i.e. pysqlite), others would return None leading to downstream breakages (i.e. MySQL-python). .. change:: :tags: engine :tickets: 1894 Fixed bug in Connection whereby if a "disconnect" event occurred in the "initialize" phase of the first connection pool connect, an AttributeError would be raised when the Connection would attempt to invalidate the DBAPI connection. .. change:: :tags: engine :tickets: Connection, ResultProxy, as well as Session use ResourceClosedError for all "this connection/transaction/result is closed" types of errors. .. change:: :tags: engine :tickets: Connection.invalidate() can be called more than once and subsequent calls do nothing. .. change:: :tags: declarative :tickets: if @classproperty is used with a regular class-bound mapper property attribute, it will be called to get the actual attribute value during initialization. Currently, there's no advantage to using @classproperty on a column or relationship attribute of a declarative class that isn't a mixin - evaluation is at the same time as if @classproperty weren't used. But here we at least allow it to function as expected. .. change:: :tags: declarative :tickets: Fixed bug where "Can't add additional column" message would display the wrong name. .. change:: :tags: postgresql :tickets: Fixed the psycopg2 dialect to use its set_isolation_level() method instead of relying upon the base "SET SESSION ISOLATION" command, as psycopg2 resets the isolation level on each new transaction otherwise. .. change:: :tags: mssql :tickets: Fixed "default schema" query to work with pymssql backend. .. change:: :tags: firebird :tickets: Fixed bug whereby a column default would fail to reflect if the "default" keyword were lower case. .. change:: :tags: oracle :tickets: 1879 Added ROWID type to the Oracle dialect, for those cases where an explicit CAST might be needed. .. change:: :tags: oracle :tickets: 1867 Oracle reflection of indexes has been tuned so that indexes which include some or all primary key columns, but not the same set of columns as that of the primary key, are reflected. 
Indexes which contain the identical columns as that of the primary key are skipped within reflection, as the index in that case is assumed to be the auto-generated primary key index. Previously, any index with PK columns present would be skipped. Thanks to Kent Bower for the patch. .. change:: :tags: oracle :tickets: 1868 Oracle now reflects the names of primary key constraints - also thanks to Kent Bower. .. change:: :tags: informix :tickets: 1904 Applied patches from to get basic Informix functionality up again. We rely upon end-user testing to ensure that Informix is working to some degree. .. change:: :tags: documentation :tickets: The docs have been reorganized such that the "API Reference" section is gone - all the docstrings from there which were public API are moved into the context of the main doc section that talks about it. Main docs divided into "SQLAlchemy Core" and "SQLAlchemy ORM" sections, mapper/relationship docs have been broken out. Lots of sections rewritten and/or reorganized. .. change:: :tags: examples :tickets: The beaker_caching example has been reorganized such that the Session, cache manager, declarative_base are part of environment, and custom cache code is portable and now within "caching_query.py". This allows the example to be easier to "drop in" to existing projects. .. change:: :tags: examples :tickets: 1887 the history_meta versioning recipe sets "unique=False" when copying columns, so that the versioning table handles multiple rows with repeating values. .. changelog:: :version: 0.6.3 :released: Thu Jul 15 2010 .. change:: :tags: orm :tickets: 1845 Removed errant many-to-many load in unitofwork which triggered unnecessarily on expired/unloaded collections. This load now takes place only if passive_updates is False and the parent primary key has changed, or if passive_deletes is False and a delete of the parent has occurred. .. change:: :tags: orm :tickets: 1853 Column-entities (i.e. query(Foo.id)) copy their state more fully when queries are derived from themselves + a selectable (i.e. from_self(), union(), etc.), so that join() and such have the correct state to work from. .. change:: :tags: orm :tickets: 1853 Fixed bug where Query.join() would fail if querying a non-ORM column then joining without an on clause when a FROM clause is already present, now raises a checked exception the same way it does when the clause is not present. .. change:: :tags: orm :tickets: 1142 Improved the check for an "unmapped class", including the case where the superclass is mapped but the subclass is not. Any attempts to access cls._sa_class_manager.mapper now raise UnmappedClassError(). .. change:: :tags: orm :tickets: Added "column_descriptions" accessor to Query, returns a list of dictionaries containing naming/typing information about the entities the Query will return. Can be helpful for building GUIs on top of ORM queries. .. change:: :tags: mysql :tickets: 1848 The _extract_error_code() method now works correctly with each MySQL dialect ( MySQL-python, OurSQL, MySQL-Connector-Python, PyODBC). Previously, the reconnect logic would fail for OperationalError conditions, however since MySQLdb and OurSQL have their own reconnect feature, there was no symptom for these drivers here unless one watched the logs. .. change:: :tags: oracle :tickets: 1840 More tweaks to cx_oracle Decimal handling. "Ambiguous" numerics with no decimal place are coerced to int at the connection handler level. 
The advantage here is that ints come back as ints without SQLA type objects being involved and without needless conversion to Decimal first. Unfortunately, some exotic subquery cases can even see different types between individual result rows, so the Numeric handler, when instructed to return Decimal, can't take full advantage of "native decimal" mode and must run isinstance() on every value to check if it's Decimal already. Reopen of .. changelog:: :version: 0.6.2 :released: Tue Jul 06 2010 .. change:: :tags: orm :tickets: Query.join() will check for a call of the form query.join(target, clause_expression), i.e. missing the tuple, and raise an informative error message that this is the wrong calling form. .. change:: :tags: orm :tickets: 1824 Fixed bug regarding flushes on self-referential bi-directional many-to-many relationships, where two objects made to mutually reference each other in one flush would fail to insert a row for both sides. Regression from 0.5. .. change:: :tags: orm :tickets: the post_update feature of relationship() has been reworked architecturally to integrate more closely with the new 0.6 unit of work. The motivation for the change is so that multiple "post update" calls, each affecting different foreign key columns of the same row, are executed in a single UPDATE statement, rather than one UPDATE statement per column per row. Multiple row updates are also batched into executemany()s where possible, while maintaining consistent row ordering. .. change:: :tags: orm :tickets: Query.statement, Query.subquery(), etc. now transfer the values of bind parameters, i.e. those specified by query.params(), into the resulting SQL expression. Previously the values would not be transferred and bind parameters would come out as None. .. change:: :tags: orm :tickets: Subquery-eager-loading now works with Query objects which include params(), as well as get() Queries. .. change:: :tags: orm :tickets: Can now call make_transient() on an instance that is referenced by parent objects via many-to-one, without the parent's foreign key value getting temporarily set to None - this was a function of the "detect primary key switch" flush handler. It now ignores objects that are no longer in the "persistent" state, and the parent's foreign key identifier is left unaffected. .. change:: :tags: orm :tickets: query.order_by() now accepts False, which cancels any existing order_by() state on the Query, allowing subsequent generative methods to be called which do not support ORDER BY. This is not the same as the already existing feature of passing None, which suppresses any existing order_by() settings, including those configured on the mapper. False will make it as though order_by() was never called, while None is an active setting. .. change:: :tags: orm :tickets: An instance which is moved to "transient", has an incomplete or missing set of primary key attributes, and contains expired attributes, will raise an InvalidRequestError if an expired attribute is accessed, instead of getting a recursion overflow. .. change:: :tags: orm :tickets: The make_transient() function is now in the generated documentation. .. change:: :tags: orm :tickets: make_transient() removes all "loader" callables from the state being made transient, removing any "expired" state - all unloaded attributes reset back to undefined, None/empty on access. .. change:: :tags: sql :tickets: 1822 The warning emitted by the Unicode and String types with convert_unicode=True no longer embeds the actual value passed.
This so that the Python warning registry does not continue to grow in size, the warning is emitted once as per the warning filter settings, and large string values don't pollute the output. .. change:: :tags: sql :tickets: Fixed bug that would prevent overridden clause compilation from working for "annotated" expression elements, which are often generated by the ORM. .. change:: :tags: sql :tickets: 1400 The argument to "ESCAPE" of a LIKE operator or similar is passed through render_literal_value(), which may implement escaping of backslashes. .. change:: :tags: sql :tickets: Fixed bug in Enum type which blew away native_enum flag when used with TypeDecorators or other adaption scenarios. .. change:: :tags: sql :tickets: Inspector hits bind.connect() when invoked to ensure initialize has been called. The internal name ".conn" is changed to ".bind", since that's what it is. .. change:: :tags: sql :tickets: Modified the internals of "column annotation" such that a custom Column subclass can safely override _constructor to return Column, for the purposes of making "configurational" column classes that aren't involved in proxying, etc. .. change:: :tags: sql :tickets: 1829 Column.copy() takes along the "unique" attribute among others, fixes regarding declarative mixins. .. change:: :tags: postgresql :tickets: 1400 render_literal_value() is overridden which escapes backslashes, currently applies to the ESCAPE clause of LIKE and similar expressions. Ultimately this will have to detect the value of "standard_conforming_strings" for full behavior. .. change:: :tags: postgresql :tickets: 1836 Won't generate "CREATE TYPE" / "DROP TYPE" if using types.Enum on a PG version prior to 8.3 - the supports_native_enum flag is fully honored. .. change:: :tags: mysql :tickets: 1826 MySQL dialect doesn't emit CAST() for MySQL version detected < 4.0.2. This allows the unicode check on connect to proceed. .. change:: :tags: mysql :tickets: MySQL dialect now detects NO_BACKSLASH_ESCAPES sql mode, in addition to ANSI_QUOTES. .. change:: :tags: mysql :tickets: 1400 render_literal_value() is overridden which escapes backslashes, currently applies to the ESCAPE clause of LIKE and similar expressions. This behavior is derived from detecting the value of NO_BACKSLASH_ESCAPES. .. change:: :tags: oracle :tickets: 1819 Fixed ora-8 compatibility flags such that they don't cache a stale value from before the first database connection actually occurs. .. change:: :tags: oracle :tickets: 1840 Oracle's "native decimal" metadata begins to return ambiguous typing information about numerics when columns are embedded in subqueries as well as when ROWNUM is consulted with subqueries, as we do for limit/offset. We've added these ambiguous conditions to the cx_oracle "convert to Decimal()" handler, so that we receive numerics as Decimal in more cases instead of as floats. These are then converted, if requested, into Integer or Float, or otherwise kept as the lossless Decimal. .. change:: :tags: mssql :tickets: 1825 If server_version_info is outside the usual range of (8, ), (9, ), (10, ), a warning is emitted which suggests checking that the FreeTDS version configuration is using 7.0 or 8.0, not 4.2. .. change:: :tags: firebird :tickets: 1823 Fixed incorrect signature in do_execute(), error introduced in 0.6.1. .. change:: :tags: firebird :tickets: 1813 Firebird dialect adds CHAR, VARCHAR types which accept a "charset" flag, to support Firebird "CHARACTER SET" clause. .. 
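The ESCAPE/render_literal_value() entries above in a minimal sketch, with a hypothetical ``docs`` table; the escape character given to like() passes through the dialect's literal renderer, which may add backslash escaping::

    from sqlalchemy import MetaData, Table, Column, Integer, String, select

    metadata = MetaData()
    docs = Table('docs', metadata,
                 Column('id', Integer, primary_key=True),
                 Column('body', String))

    # match a literal percent sign; the backslash is rendered via
    # the dialect's render_literal_value() as the ESCAPE character
    stmt = select([docs]).where(docs.c.body.like('100\\%', escape='\\'))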
change:: :tags: declarative :tickets: 1805, 1796, 1751 Added support for @classproperty to provide any kind of schema/mapping construct from a declarative mixin, including columns with foreign keys, relationships, column_property, deferred. This solves all such issues on declarative mixins. An error is raised if any MapperProperty subclass is specified on a mixin without using @classproperty. .. change:: :tags: declarative :tickets: 1821 a mixin class can now define a column that matches one which is present on a __table__ defined on a subclass. It cannot, however, define one that is not present in the __table__, and the error message here now works. .. change:: :tags: extension, compiler :tickets: 1838 The 'default' compiler is automatically copied over when overriding the compilation of a built in clause construct, so no KeyError is raised if the user-defined compiler is specific to certain backends and compilation for a different backend is invoked. .. change:: :tags: documentation :tickets: 1820 Added documentation for the Inspector. .. change:: :tags: documentation :tickets: 1830 Fixed @memoized_property and @memoized_instancemethod decorators so that Sphinx documentation picks up these attributes and methods, such as ResultProxy.inserted_primary_key. .. changelog:: :version: 0.6.1 :released: Mon May 31 2010 .. change:: :tags: orm :tickets: 1782 Fixed regression introduced in 0.6.0 involving improper history accounting on mutable attributes. .. change:: :tags: orm :tickets: 1807 Fixed regression introduced in 0.6.0 unit of work refactor that broke updates for bi-directional relationship() with post_update=True. .. change:: :tags: orm :tickets: 1789 session.merge() will not expire attributes on the returned instance if that instance is "pending". .. change:: :tags: orm :tickets: 1802 fixed __setstate__ method of CollectionAdapter to not fail during deserialize where parent InstanceState not yet unserialized. .. change:: :tags: orm :tickets: 1797 Added internal warning in case an instance without a full PK happened to be expired and then was asked to refresh. .. change:: :tags: orm :tickets: Added more aggressive caching to the mapper's usage of UPDATE, INSERT, and DELETE expressions. Assuming the statement has no per-object SQL expressions attached, the expression objects are cached by the mapper after the first create, and their compiled form is stored persistently in a cache dictionary for the duration of the related Engine. The cache is an LRUCache for the rare case that a mapper receives an extremely high number of different column patterns as UPDATEs. .. change:: :tags: sql :tickets: 1793 expr.in_() now accepts a text() construct as the argument. Grouping parenthesis are added automatically, i.e. usage is like `col.in_(text("select id from table"))`. .. change:: :tags: sql :tickets: Columns of _Binary type (i.e. LargeBinary, BLOB, etc.) will coerce a "basestring" on the right side into a _Binary as well so that required DBAPI processing takes place. .. change:: :tags: sql :tickets: 1801 Added table.add_is_dependent_on(othertable), allows manual placement of dependency rules between two Table objects for use within create_all(), drop_all(), sorted_tables. .. change:: :tags: sql :tickets: 1778 Fixed bug that prevented implicit RETURNING from functioning properly with composite primary key that contained zeroes. .. change:: :tags: sql :tickets: Fixed errant space character when generating ADD CONSTRAINT for a named UNIQUE constraint. .. 
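The add_is_dependent_on() entry above, sketched with two hypothetical tables; the manual rule feeds the ordering used by create_all(), drop_all() and sorted_tables::

    from sqlalchemy import MetaData, Table, Column, Integer

    metadata = MetaData()
    parent = Table('parent', metadata,
                   Column('id', Integer, primary_key=True))
    child = Table('child', metadata,
                  Column('id', Integer, primary_key=True))

    # no ForeignKey links these tables (perhaps a trigger does), so
    # declare the dependency by hand: create 'parent' before 'child'
    child.add_is_dependent_on(parent)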
change:: :tags: sql :tickets: 1571 Fixed "table" argument on constructor of ForeignKeyConstraint. .. change:: :tags: sql :tickets: 1786 Fixed bug in connection pool cursor wrapper whereby if a cursor threw an exception on close(), the logging of the message would fail. .. change:: :tags: sql :tickets: the _make_proxy() method of ColumnClause and Column now uses self.__class__ to determine the class of object to be returned instead of hardcoding to ColumnClause/Column, making it slightly easier to produce specific subclasses of these which work in alias/subquery situations. .. change:: :tags: sql :tickets: 1798 func.XXX() doesn't inadvertently resolve to non-Function classes (e.g. fixes func.text()). .. change:: :tags: engines :tickets: 1781 Fixed building the C extensions on Python 2.4. .. change:: :tags: engines :tickets: Pool classes will reuse the same "pool_logging_name" setting after a dispose() occurs. .. change:: :tags: engines :tickets: Engine gains an "execution_options" argument and update_execution_options() method, which will apply to all connections generated by this engine. .. change:: :tags: mysql :tickets: 1794 func.sysdate() emits "SYSDATE()", i.e. with the ending parenthesis, on MySQL. .. change:: :tags: sqlite :tickets: 1812 Fixed concatenation of constraints when "PRIMARY KEY" constraint gets moved to column level due to SQLite AUTOINCREMENT keyword being rendered. .. change:: :tags: oracle :tickets: 1775 Added a check for cx_oracle versions lower than version 5, in which case the incompatible "output type handler" won't be used. This will impact decimal accuracy and some unicode handling issues. .. change:: :tags: oracle :tickets: 1790 Fixed use_ansi=False mode, which was producing broken WHERE clauses in pretty much all cases. .. change:: :tags: oracle :tickets: 1808 Re-established support for Oracle 8 with cx_oracle, including that use_ansi is set to False automatically, NVARCHAR2 and NCLOB are not rendered for Unicode, "native unicode" check doesn't fail, cx_oracle "native unicode" mode is disabled, VARCHAR() is emitted with bytes count instead of char count. .. change:: :tags: oracle :tickets: 1670 oracle_xe 5 doesn't accept a Python unicode object in its connect string in normal Python 2.x mode - so we coerce to str() directly. non-ascii characters aren't supported in connect strings here since we don't know what encoding we could use. .. change:: :tags: oracle :tickets: 1815 FOR UPDATE is emitted in the syntactically correct position when limit/offset is used, i.e. the ROWNUM subquery. However, Oracle can't really handle FOR UPDATE with ORDER BY or with subqueries, so it's still not very usable, but at least SQLA gets the SQL past the Oracle parser. .. change:: :tags: firebird :tickets: 1521 Added a label to the query used within has_table() and has_sequence() to work with older versions of Firebird that don't provide labels for result columns. .. change:: :tags: firebird :tickets: 1779 Added integer coercion to the "type_conv" attribute when passed via query string, so that it is properly interpreted by Kinterbasdb. .. change:: :tags: firebird :tickets: 1646 Added 'connection shutdown' to the list of exception strings which indicate a dropped connection. .. change:: :tags: sqlsoup :tickets: 1783 the SqlSoup constructor accepts a `base` argument which specifies the base class to use for mapped classes, the default being `object`. .. changelog:: :version: 0.6.0 :released: Sun Apr 18 2010 .. 
change:: :tags: orm :tickets: 1742, 1081 Unit of work internals have been rewritten. Units of work with large numbers of interdependent objects can now be flushed without recursion overflows as there is no longer reliance upon recursive calls. The number of internal structures now stays constant for a particular session state, regardless of how many relationships are present on mappings. The flow of events now corresponds to a linear list of steps, generated by the mappers and relationships based on actual work to be done, filtered through a single topological sort for correct ordering. Flush actions are assembled using far fewer steps and less memory. .. change:: :tags: orm :tickets: Along with the UOW rewrite, this also removes an issue introduced in 0.6beta3 regarding topological cycle detection for units of work with long dependency cycles. We now use an algorithm written by Guido (thanks Guido!). .. change:: :tags: orm :tickets: 1764 one-to-many relationships now maintain a list of positive parent-child associations within the flush, preventing previous parents marked as deleted from cascading a delete or NULL foreign key set on those child objects, despite the end-user not removing the child from the old association. .. change:: :tags: orm :tickets: 1495 A collection lazy load will switch off default eagerloading on the reverse many-to-one side, since that loading is by definition unnecessary. .. change:: :tags: orm :tickets: Session.refresh() now does an equivalent expire() on the given instance first, so that the "refresh-expire" cascade is propagated. Previously, refresh() was not affected in any way by the presence of "refresh-expire" cascade. This is a change in behavior versus that of 0.6beta2, where the "lockmode" flag passed to refresh() would cause a version check to occur. Since the instance is first expired, refresh() always upgrades the object to the most recent version. .. change:: :tags: orm :tickets: 1754 The 'refresh-expire' cascade, when reaching a pending object, will expunge the object if the cascade also includes "delete-orphan", or will simply detach it otherwise. .. change:: :tags: orm :tickets: 1756 id(obj) is no longer used internally within topological.py, as the sorting functions now require hashable objects only. .. change:: :tags: orm :tickets: The ORM will set the docstring of all generated descriptors to None by default. This can be overridden using 'doc' (or if using Sphinx, attribute docstrings work too). .. change:: :tags: orm :tickets: Added kw argument 'doc' to all mapper property callables as well as Column(). Will assemble the string 'doc' as the '__doc__' attribute on the descriptor. .. change:: :tags: orm :tickets: 1761 Usage of version_id_col on a backend that supports cursor.rowcount for execute() but not executemany() now works when a delete is issued (already worked for saves, since those don't use executemany()). For a backend that doesn't support cursor.rowcount at all, a warning is emitted the same as with saves. .. change:: :tags: orm :tickets: The ORM now short-term caches the "compiled" form of insert() and update() constructs when flushing lists of objects of all the same class, thereby avoiding redundant compilation per individual INSERT/UPDATE within an individual flush() call. .. change:: :tags: orm :tickets: internal getattr(), setattr(), getcommitted() methods on ColumnProperty, CompositeProperty, RelationshipProperty have been underscored (i.e. are private), signature has changed. .. 
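For the new 'doc' keyword above, a short sketch using a hypothetical declarative class; the string is assembled as the __doc__ of the generated descriptor::

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Account(Base):
        __tablename__ = 'accounts'
        id = Column(Integer, primary_key=True, doc="Surrogate primary key")
        name = Column(String(50), doc="Display name of the account")

    # Account.name.__doc__ now reads "Display name of the account"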
change:: :tags: engines :tickets: 1757 The C extension now also works with DBAPIs which use custom sequences as rows (and not only tuples). .. change:: :tags: sql :tickets: 1755 Restored some bind-labeling logic from 0.5 which ensures that tables with column names that overlap another column of the form "<tablename>_<columnname>" won't produce errors if column._label is used as a bind name during an UPDATE. Test coverage which wasn't present in 0.5 has been added. .. change:: :tags: sql :tickets: 1729 somejoin.select(fold_equivalents=True) is no longer deprecated, and will eventually be rolled into a more comprehensive version of the feature for. .. change:: :tags: sql :tickets: 1759 the Numeric type raises an *enormous* warning when expected to convert floats to Decimal from a DBAPI that returns floats. This includes SQLite, Sybase, MS-SQL. .. change:: :tags: sql :tickets: Fixed an error in expression typing which caused an endless loop for expressions with two NULL types. .. change:: :tags: sql :tickets: Fixed bug in execution_options() feature whereby the existing Transaction and other state information from the parent connection would not be propagated to the sub-connection. .. change:: :tags: sql :tickets: Added new 'compiled_cache' execution option. A dictionary where Compiled objects will be cached when the Connection compiles a clause expression into a dialect- and parameter- specific Compiled object. It is the user's responsibility to manage the size of this dictionary, which will have keys corresponding to the dialect, clause element, the column names within the VALUES or SET clause of an INSERT or UPDATE, as well as the "batch" mode for an INSERT or UPDATE statement. .. change:: :tags: sql :tickets: 1769 Added get_pk_constraint() to reflection.Inspector, similar to get_primary_keys() except returns a dict that includes the name of the constraint, for supported backends (PG so far). .. change:: :tags: sql :tickets: 1771 Table.create() and Table.drop() no longer apply metadata- level create/drop events. .. change:: :tags: ext :tickets: the compiler extension now allows @compiles decorators on base classes that extend to child classes, @compiles decorators on child classes that aren't broken by a @compiles decorator on the base class. .. change:: :tags: ext :tickets: Declarative will raise an informative error message if a non-mapped class attribute is referenced in the string-based relationship() arguments. .. change:: :tags: ext :tickets: Further reworked the "mixin" logic in declarative to additionally allow __mapper_args__ as a @classproperty on a mixin, such as to dynamically assign polymorphic_identity. .. change:: :tags: postgresql :tickets: 1071 Postgresql now reflects sequence names associated with SERIAL columns correctly, after the name of the sequence has been changed. Thanks to Kumar McMillan for the patch. .. change:: :tags: postgresql :tickets: Repaired missing import in psycopg2._PGNumeric type when unknown numeric is received. .. change:: :tags: postgresql :tickets: psycopg2/pg8000 dialects now aware of REAL[], FLOAT[], DOUBLE_PRECISION[], NUMERIC[] return types without raising an exception. .. change:: :tags: postgresql :tickets: 1769 Postgresql reflects the name of primary key constraints, if one exists. .. change:: :tags: oracle :tickets: Now using cx_oracle output converters so that the DBAPI returns natively the kinds of values we prefer: .. change:: :tags: oracle :tickets: 1759 NUMBER values with positive precision + scale convert to cx_oracle.STRING and then to Decimal.
This allows perfect precision for the Numeric type when using cx_oracle. .. change:: :tags: oracle :tickets: STRING/FIXED_CHAR now convert to unicode natively. SQLAlchemy's String types then don't need to apply any kind of conversions. .. change:: :tags: firebird :tickets: The functionality of result.rowcount can be disabled on a per-engine basis by setting 'enable_rowcount=False' on create_engine(). Normally, cursor.rowcount is called after any UPDATE or DELETE statement unconditionally, because the cursor is then closed and Firebird requires an open cursor in order to get a rowcount. This call is slightly expensive however so it can be disabled. To re-enable on a per-execution basis, the 'enable_rowcount=True' execution option may be used. .. change:: :tags: examples :tickets: Updated attribute_shard.py example to use a more robust method of searching a Query for binary expressions which compare columns against literal values. .. changelog:: :version: 0.6beta3 :released: Sun Mar 28 2010 .. change:: :tags: orm :tickets: 1675 Major feature: Added new "subquery" loading capability to relationship(). This is an eager loading option which generates a second SELECT for each collection represented in a query, across all parents at once. The query re-issues the original end-user query wrapped in a subquery, applies joins out to the target collection, and loads all those collections fully in one result, similar to "joined" eager loading but using all inner joins and not re-fetching full parent rows repeatedly (as most DBAPIs seem to do, even if columns are skipped). Subquery loading is available at mapper config level using "lazy='subquery'" and at the query options level using "subqueryload(props..)", "subqueryload_all(props...)". .. change:: :tags: orm :tickets: To accommodate the fact that there are now two kinds of eager loading available, the new names for eagerload() and eagerload_all() are joinedload() and joinedload_all(). The old names will remain as synonyms for the foreseeable future. .. change:: :tags: orm :tickets: The "lazy" flag on the relationship() function now accepts a string argument for all kinds of loading: "select", "joined", "subquery", "noload" and "dynamic", where the default is now "select". The old values of True/ False/None still retain their usual meanings and will remain as synonyms for the foreseeable future. .. change:: :tags: orm :tickets: 921 Added with_hint() method to Query() construct. This calls directly down to select().with_hint() and also accepts entities as well as tables and aliases. See with_hint() in the SQL section below. .. change:: :tags: orm :tickets: Fixed bug in Query whereby calling q.join(prop).from_self(...). join(prop) would fail to render the second join outside the subquery, when joining on the same criterion as was on the inside. .. change:: :tags: orm :tickets: Fixed bug in Query whereby the usage of aliased() constructs would fail if the underlying table (but not the actual alias) were referenced inside the subquery generated by q.from_self() or q.select_from(). .. change:: :tags: orm :tickets: Fixed bug which affected all eagerload() and similar options such that "remote" eager loads, i.e. eagerloads off of a lazy load such as query(A).options(eagerload(A.b, B.c)) wouldn't eagerload anything, but using eagerload("b.c") would work fine. .. change:: :tags: orm :tickets: Query gains an add_columns(\*columns) method which is a multi- version of add_column(col). add_column(col) is future deprecated. .. 
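The add_columns() entry above in a hedged sketch, assuming a mapped User class and a configured session (both hypothetical here)::

    # one generative call in place of repeated add_column() calls;
    # each result row is a (User, name, email) tuple
    rows = (session.query(User)
                   .add_columns(User.name, User.email)
                   .all())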
change:: :tags: orm :tickets: Query.join() will detect if the end result will be "FROM A JOIN A", and will raise an error if so. .. change:: :tags: orm :tickets: Query.join(Cls.propname, from_joinpoint=True) will check more carefully that "Cls" is compatible with the current joinpoint, and act the same way as Query.join("propname", from_joinpoint=True) in that regard. .. change:: :tags: sql :tickets: 921 Added with_hint() method to select() construct. Specify a table/alias, hint text, and optional dialect name, and "hints" will be rendered in the appropriate place in the statement. Works for Oracle, Sybase, MySQL. .. change:: :tags: sql :tickets: 1747 Fixed bug introduced in 0.6beta2 where column labels would render inside of column expressions already assigned a label. .. change:: :tags: postgresql :tickets: 877 The psycopg2 dialect will log NOTICE messages via the "sqlalchemy.dialects.postgresql" logger name. .. change:: :tags: postgresql :tickets: 997 the TIME and TIMESTAMP types are now available from the postgresql dialect directly, which add the PG-specific argument 'precision' to both. 'precision' and 'timezone' are correctly reflected for both TIME and TIMESTAMP types. .. change:: :tags: mysql :tickets: 1752 No longer guessing that TINYINT(1) should be BOOLEAN when reflecting - TINYINT(1) is returned. Use Boolean/BOOLEAN in table definition to get boolean conversion behavior. .. change:: :tags: oracle :tickets: 1744 The Oracle dialect will issue VARCHAR type definitions using character counts, i.e. VARCHAR2(50 CHAR), so that the column is sized in terms of characters and not bytes. Column reflection of character types will also use ALL_TAB_COLUMNS.CHAR_LENGTH instead of ALL_TAB_COLUMNS.DATA_LENGTH. Both of these behaviors take effect when the server version is 9 or higher - for version 8, the old behaviors are used. .. change:: :tags: declarative :tickets: 1746 Using a mixin won't break if the mixin implements an unpredictable __getattribute__(), i.e. Zope interfaces. .. change:: :tags: declarative :tickets: 1749 Using @classdecorator and similar on mixins to define __tablename__, __table_args__, etc. now works if the method references attributes on the ultimate subclass. .. change:: :tags: declarative :tickets: 1751 relationships and columns with foreign keys aren't allowed on declarative mixins, sorry. .. change:: :tags: ext :tickets: The sqlalchemy.orm.shard module now becomes an extension, sqlalchemy.ext.horizontal_shard. The old import works with a deprecation warning. .. changelog:: :version: 0.6beta2 :released: Sat Mar 20 2010 .. change:: :tags: py3k :tickets: Improved the installation/test setup regarding Python 3, now that Distribute runs on Py3k. distribute_setup.py is now included. See README.py3k for Python 3 installation/testing instructions. .. change:: :tags: orm :tickets: 1740 The official name for the relation() function is now relationship(), to eliminate confusion over the relational algebra term. relation() however will remain available in equal capacity for the foreseeable future. .. change:: :tags: orm :tickets: 1692 Added "version_id_generator" argument to Mapper; this is a callable that, given the current value of the "version_id_col", returns the next version number. Can be used for alternate versioning schemes such as uuid, timestamps. .. 
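A sketch of the new "version_id_generator" hook just described, with a hypothetical mapped table carrying a String version column; the callable receives the current version value and returns the next one::

    import uuid
    from sqlalchemy import MetaData, Table, Column, Integer, String
    from sqlalchemy.orm import mapper

    metadata = MetaData()
    widgets = Table('widgets', metadata,
                    Column('id', Integer, primary_key=True),
                    Column('version_uuid', String(32)))

    class Widget(object):
        pass

    # a fresh uuid per UPDATE instead of an incrementing counter
    mapper(Widget, widgets,
           version_id_col=widgets.c.version_uuid,
           version_id_generator=lambda version: uuid.uuid4().hex)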
change:: :tags: orm :tickets: added "lockmode" kw argument to Session.refresh(), will pass through the string value to Query the same as in with_lockmode(), will also do version check for a version_id_col-enabled mapping. .. change:: :tags: orm :tickets: 1188 Fixed bug whereby calling query(A).join(A.bs).add_entity(B) in a joined inheritance scenario would double-add B as a target and produce an invalid query. .. change:: :tags: orm :tickets: 1674 Fixed bug in session.rollback() which involved not removing formerly "pending" objects from the session before re-integrating "deleted" objects, typically occurred with natural primary keys. If there was a primary key conflict between them, the attach of the deleted would fail internally. The formerly "pending" objects are now expunged first. .. change:: :tags: orm :tickets: 1719 Removed a lot of logging that nobody really cares about, logging that remains will respond to live changes in the log level. No significant overhead is added. .. change:: :tags: orm :tickets: Fixed bug in session.merge() which prevented dict-like collections from merging. .. change:: :tags: orm :tickets: session.merge() works with relations that specifically don't include "merge" in their cascade options - the target is ignored completely. .. change:: :tags: orm :tickets: 1681 session.merge() will not expire existing scalar attributes on an existing target if the target has a value for that attribute, even if the incoming merged doesn't have a value for the attribute. This prevents unnecessary loads on existing items. Will still mark the attr as expired if the destination doesn't have the attr, though, which fulfills some contracts of deferred cols. .. change:: :tags: orm :tickets: 1680 The "allow_null_pks" flag is now called "allow_partial_pks", defaults to True, acts like it did in 0.5 again. Except, it also is implemented within merge() such that a SELECT won't be issued for an incoming instance with partially NULL primary key if the flag is False. .. change:: :tags: orm :tickets: 1737 Fixed bug in 0.6-reworked "many-to-one" optimizations such that a many-to-one that is against a non-primary key column on the remote table (i.e. foreign key against a UNIQUE column) will pull the "old" value in from the database during a change, since if it's in the session we will need it for proper history/backref accounting, and we can't pull from the local identity map on a non-primary key column. .. change:: :tags: orm :tickets: 1731 fixed internal error which would occur if calling has() or similar complex expression on a single-table inheritance relation(). .. change:: :tags: orm :tickets: 1688 query.one() no longer applies LIMIT to the query, this to ensure that it fully counts all object identities present in the result, even in the case where joins may conceal multiple identities for two or more rows. As a bonus, one() can now also be called with a query that issued from_statement() to start with since it no longer modifies the query. .. change:: :tags: orm :tickets: 1727 query.get() now returns None if queried for an identifier that is present in the identity map with a different class than the one requested, i.e. when using polymorphic loading. .. change:: :tags: orm :tickets: 1706 A major fix in query.join(), when the "on" clause is an attribute of an aliased() construct, but there is already an existing join made out to a compatible target, query properly joins to the right aliased() construct instead of sticking onto the right side of the existing join. .. 
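The aliased()-join fix above, sketched with hypothetical User/Address mappings and a configured session; the tuple calling form pairs the join target with its "on" clause, and the fix ensures the join is rendered against the alias itself::

    from sqlalchemy.orm import aliased

    a1 = aliased(Address)
    q = (session.query(User)
                .join((a1, User.addresses))
                .filter(a1.email_address.like('%@example.com')))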
change:: :tags: orm :tickets: 1362 Slight improvement to the fix for to not issue needless updates of the primary key column during a so-called "row switch" operation, i.e. add + delete of two objects with the same PK. .. change:: :tags: orm :tickets: Now uses sqlalchemy.orm.exc.DetachedInstanceError when an attribute load or refresh action fails due to object being detached from any Session. UnboundExecutionError is specific to engines bound to sessions and statements. .. change:: :tags: orm :tickets: Query called in the context of an expression will render disambiguating labels in all cases. Note that this does not apply to the existing .statement and .subquery() accessor/method, which still honors the .with_labels() setting that defaults to False. .. change:: :tags: orm :tickets: 1676 Query.union() retains disambiguating labels within the returned statement, thus avoiding various SQL composition errors which can result from column name conflicts. .. change:: :tags: orm :tickets: Fixed bug in attribute history that inadvertently invoked __eq__ on mapped instances. .. change:: :tags: orm :tickets: Some internal streamlining of object loading grants a small speedup for large results; estimates are around 10-15%. Gave the "state" internals a good solid cleanup with less complexity, datamembers, method calls, blank dictionary creates. .. change:: :tags: orm :tickets: 1689 Documentation clarification for query.delete(). .. change:: :tags: orm :tickets: Fixed cascade bug in many-to-one relation() when attribute was set to None, introduced in r6711 (cascade deleted items into session during add()). .. change:: :tags: orm :tickets: 1736 Calling query.order_by() or query.distinct() before calling query.select_from(), query.with_polymorphic(), or query.from_statement() raises an exception now instead of silently dropping those criteria. .. change:: :tags: orm :tickets: 1735 query.scalar() now raises an exception if more than one row is returned. All other behavior remains the same. .. change:: :tags: orm :tickets: 1692 Fixed bug which caused "row switch" logic, that is an INSERT and DELETE replaced by an UPDATE, to fail when version_id_col was in use. .. change:: :tags: sql :tickets: 1714 join() will now simulate a NATURAL JOIN by default. Meaning, if the left side is a join, it will attempt to join the right side to the rightmost side of the left first, and not raise any exceptions about ambiguous join conditions if successful even if there are further join targets across the rest of the left. .. change:: :tags: sql :tickets: The most common result processor conversion functions were moved to the new "processors" module. Dialect authors are encouraged to use those functions whenever they correspond to their needs instead of implementing custom ones. .. change:: :tags: sql :tickets: 1694, 1698 SchemaType and subclasses Boolean, Enum are now serializable, including their ddl listener and other event callables. .. change:: :tags: sql :tickets: Some platforms will now interpret certain literal values as non-bind parameters, rendered literally into the SQL statement. This to support strict SQL-92 rules that are enforced by some platforms including MS-SQL and Sybase. In this model, bind parameters aren't allowed in the columns clause of a SELECT, nor are certain ambiguous expressions like "?=?". When this mode is enabled, the base compiler will render the binds as inline literals, but only across strings and numeric values.
Other types such as dates will raise an error, unless the dialect subclass defines a literal rendering function for those. The bind parameter must have an embedded literal value already or an error is raised (i.e. won't work with straight bindparam('x')). Dialects can also expand upon the areas where binds are not accepted, such as within argument lists of functions (which don't work on MS-SQL when native SQL binding is used). .. change:: :tags: sql :tickets: Added "unicode_errors" parameter to String, Unicode, etc. Behaves like the 'errors' keyword argument to the standard library's string.decode() functions. This flag requires that `convert_unicode` is set to `"force"` - otherwise, SQLAlchemy is not guaranteed to handle the task of unicode conversion. Note that this flag adds significant performance overhead to row-fetching operations for backends that already return unicode objects natively (which most DBAPIs do). This flag should only be used as an absolute last resort for reading strings from a column with varied or corrupted encodings, which only applies to databases that accept invalid encodings in the first place (i.e. MySQL, *not* PG, SQLite, etc.) .. change:: :tags: sql :tickets: Added math negation operator support, -x. .. change:: :tags: sql :tickets: FunctionElement subclasses are now directly executable the same way any func.foo() construct is, with automatic SELECT being applied when passed to execute(). .. change:: :tags: sql :tickets: The "type" and "bind" keyword arguments of a func.foo() construct are now local to "func." constructs and are not part of the FunctionElement base class, allowing a "type" to be handled in a custom constructor or class-level variable. .. change:: :tags: sql :tickets: Restored the keys() method to ResultProxy. .. change:: :tags: sql :tickets: 1647, 1683 The type/expression system now does a more complete job of determining the return type from an expression as well as the adaptation of the Python operator into a SQL operator, based on the full left/right/operator of the given expression. In particular the date/time/interval system created for Postgresql EXTRACT in has now been generalized into the type system. The previous behavior which often occurred of an expression "column + literal" forcing the type of "literal" to be the same as that of "column" will now usually not occur - the type of "literal" is first derived from the Python type of the literal, assuming standard native Python types + date types, before falling back to that of the known type on the other side of the expression. If the "fallback" type is compatible (i.e. CHAR from String), the literal side will use that. TypeDecorator types override this by default to coerce the "literal" side unconditionally, which can be changed by implementing the coerce_compared_value() method. Also part of. .. change:: :tags: sql :tickets: Made sqlalchemy.sql.expression.Executable part of public API, used for any expression construct that can be sent to execute(). FunctionElement now inherits Executable so that it gains execution_options(), which are also propagated to the select() that's generated within execute(). Executable in turn subclasses _Generative which marks any ClauseElement that supports the @_generative decorator - these may also become "public" for the benefit of the compiler extension at some point. .. 
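For the Executable/FunctionElement entries above, a minimal sketch; a bare func construct handed to execute() has a SELECT applied automatically::

    from sqlalchemy import create_engine, func

    engine = create_engine('sqlite://')  # placeholder in-memory URL
    # renders roughly "SELECT lower(?)" and returns 'abc'
    value = engine.execute(func.lower('ABC')).scalar()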
change:: :tags: sql :tickets: 1579 A change to the solution for - an end-user defined bind parameter name that directly conflicts with a column-named bind generated directly from the SET or VALUES clause of an update/insert generates a compile error. This reduces call counts and eliminates some cases where undesirable name conflicts could still occur. .. change:: :tags: sql :tickets: 1705 Column() requires a type if it has no foreign keys (this is not new). An error is now raised if a Column() has no type and no foreign keys. .. change:: :tags: sql :tickets: 1717 the "scale" argument of the Numeric() type is honored when coercing a returned floating point value into a string on its way to Decimal - this allows accuracy to function on SQLite, MySQL. .. change:: :tags: sql :tickets: the copy() method of Column now copies over uninitialized "on table attach" events. Helps with the new declarative "mixin" capability. .. change:: :tags: engines :tickets: Added an optional C extension to speed up the sql layer by reimplementing RowProxy and the most common result processors. The actual speedups will depend heavily on your DBAPI and the mix of datatypes used in your tables, and can vary from a 30% improvement to more than 200%. It also provides a modest (~15-20%) indirect improvement to ORM speed for large queries. Note that it is *not* built/installed by default. See README for installation instructions. .. change:: :tags: engines :tickets: the execution sequence pulls all rowcount/last inserted ID info from the cursor before commit() is called on the DBAPI connection in an "autocommit" scenario. This helps mxodbc with rowcount and is probably a good idea overall. .. change:: :tags: engines :tickets: 1719 Opened up logging a bit such that isEnabledFor() is called more often, so that changes to the log level for engine/pool will be reflected on next connect. This adds a small amount of method call overhead. It's negligible and will make life a lot easier for all those situations when logging just happens to be configured after create_engine() is called. .. change:: :tags: engines :tickets: The assert_unicode flag is deprecated. SQLAlchemy will raise a warning in all cases where it is asked to encode a non-unicode Python string, as well as when a Unicode or UnicodeType type is explicitly passed a bytestring. The String type will do nothing for DBAPIs that already accept Python unicode objects. .. change:: :tags: engines :tickets: Bind parameters are sent as a tuple instead of a list. Some backend drivers will not accept bind parameters as a list. .. change:: :tags: engines :tickets: threadlocal engine wasn't properly closing the connection upon close() - fixed that. .. change:: :tags: engines :tickets: Transaction object doesn't rollback or commit if it isn't "active", allows more accurate nesting of begin/rollback/commit. .. change:: :tags: engines :tickets: Python unicode objects as binds result in the Unicode type, not string, thus eliminating a certain class of unicode errors on drivers that don't support unicode binds. .. change:: :tags: engines :tickets: 1555 Added "logging_name" argument to create_engine(), Pool() constructor as well as "pool_logging_name" argument to create_engine() which filters down to that of Pool. Issues the given string name within the "name" field of logging messages instead of the default hex identifier string. .. change:: :tags: engines :tickets: The visit_pool() method of Dialect is removed, and replaced with on_connect(). 
This method returns a callable which receives the raw DBAPI connection after each one is created. The callable is assembled into a first_connect/connect pool listener by the connection strategy if non-None. Provides a simpler interface for dialects. .. change:: :tags: engines :tickets: 1728 StaticPool now initializes, disposes and recreates without opening a new connection - the connection is only opened when first requested. dispose() also works on AssertionPool now. .. change:: :tags: metadata :tickets: 1673 Added the ability to strip schema information when using "tometadata" by passing "schema=None" as an argument. If schema is not specified then the table's schema is retained. .. change:: :tags: declarative :tickets: DeclarativeMeta exclusively uses cls.__dict__ (not dict\_) as the source of class information; _as_declarative exclusively uses the dict\_ passed to it as the source of class information (which when using DeclarativeMeta is cls.__dict__). This should in theory make it easier for custom metaclasses to modify the state passed into _as_declarative. .. change:: :tags: declarative :tickets: 1707 declarative now accepts mixin classes directly, as a means to provide common functional and column-based elements on all subclasses, as well as a means to propagate a fixed set of __table_args__ or __mapper_args__ to subclasses. For custom combinations of __table_args__/__mapper_args__ from an inherited mixin to local, descriptors can now be used. New details are all up in the Declarative documentation. Thanks to Chris Withers for putting up with my strife on this. .. change:: :tags: declarative :tickets: 1393 the __mapper_args__ dict is copied when propagating to a subclass, and is taken straight off the class __dict__ to avoid any propagation from the parent. mapper inheritance already propagates the things you want from the parent mapper. .. change:: :tags: declarative :tickets: 1732 An exception is raised when a single-table subclass specifies a column that is already present on the base class. .. change:: :tags: mysql :tickets: 1655 Fixed reflection bug whereby when COLLATE was present, nullable flag and server defaults would not be reflected. .. change:: :tags: mysql :tickets: Fixed reflection of TINYINT(1) "boolean" columns defined with integer flags like UNSIGNED. .. change:: :tags: mysql :tickets: 1668 Further fixes for the mysql-connector dialect. .. change:: :tags: mysql :tickets: 1496 Composite PK table on InnoDB where the "autoincrement" column isn't first will emit an explicit "KEY" phrase within CREATE TABLE thereby avoiding errors. .. change:: :tags: mysql :tickets: 1634 Added reflection/create table support for a wide range of MySQL keywords. .. change:: :tags: mysql :tickets: 1580 Fixed import error which could occur reflecting tables on a Windows host. .. change:: :tags: mssql :tickets: Re-established support for the pymssql dialect. .. change:: :tags: mssql :tickets: Various fixes for implicit returning, reflection, etc. - the MS-SQL dialects aren't quite complete in 0.6 yet (but are close). .. change:: :tags: mssql :tickets: 1710 Added basic support for mxODBC. .. change:: :tags: mssql :tickets: Removed the text_as_varchar option. .. change:: :tags: oracle :tickets: "out" parameters require a type that is supported by cx_oracle. An error will be raised if no cx_oracle type can be found. .. 
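A hedged sketch of the declarative mixin support described above, with hypothetical classes; plain columns and a fixed __table_args__ propagate from the mixin to each subclass (columns with foreign keys and relationships still require @classproperty at this point)::

    from datetime import datetime
    from sqlalchemy import Column, Integer, DateTime
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class TimestampMixin(object):
        # copied onto each subclass's Table
        created_at = Column(DateTime, default=datetime.utcnow)
        __table_args__ = {'mysql_engine': 'InnoDB'}

    class Widget(TimestampMixin, Base):
        __tablename__ = 'widgets'
        id = Column(Integer, primary_key=True)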
.. change:: :tags: oracle :tickets: Oracle 'DATE' now does not perform any result processing, as the DATE type in Oracle stores full date+time objects, so that's what you'll get. Note that the generic types.Date type *will* still call value.date() on incoming values, however. When reflecting a table, the reflected type will be 'DATE'.
.. change:: :tags: oracle :tickets: 1670 Added preliminary support for Oracle's WITH_UNICODE mode. At the very least this establishes initial support for cx_Oracle with Python 3. When WITH_UNICODE mode is used in Python 2.x, a large and scary warning is emitted asking that the user seriously consider the usage of this difficult mode of operation.
.. change:: :tags: oracle :tickets: 1712 The except_() method now renders as MINUS on Oracle, which is more or less equivalent on that platform.
.. change:: :tags: oracle :tickets: 651 Added support for rendering and reflecting TIMESTAMP WITH TIME ZONE, i.e. TIMESTAMP(timezone=True).
.. change:: :tags: oracle :tickets: Oracle INTERVAL type can now be reflected.
.. change:: :tags: sqlite :tickets: 1685 Added "native_datetime=True" flag to create_engine(). This will cause the DATE and TIMESTAMP types to skip all bind parameter and result row processing, under the assumption that PARSE_DECLTYPES has been enabled on the connection. Note that this is not entirely compatible with "func.current_date()", which will be returned as a string.
.. change:: :tags: sybase :tickets: Implemented a preliminary working dialect for Sybase, with sub-implementations for Python-Sybase as well as Pyodbc. Handles table creates/drops and basic round trip functionality. Does not yet include reflection or comprehensive support of unicode/special expressions/etc.
.. change:: :tags: examples :tickets: Changed the beaker cache example a bit to have a separate RelationCache option for lazyload caching. This object does a lookup among any number of potential attributes more efficiently by grouping several into a common structure. Both FromCache and RelationCache are simpler individually.
.. change:: :tags: documentation :tickets: 1700 Major cleanup work in the docs to link class, function, and method names into the API docs.
.. changelog:: :version: 0.6beta1 :released: Wed Feb 03 2010
.. change:: :tags: release, major :tickets: For the full set of feature descriptions, see http://www.sqlalchemy.org/trac/wiki/06Migration . This document is a work in progress.
.. change:: :tags: release, major :tickets: All bug fixes and feature enhancements from the most recent 0.5 version and below are also included within 0.6.
.. change:: :tags: release, major :tickets: Platforms targeted now include Python 2.4/2.5/2.6, Python 3.1, and Jython 2.5.
.. change:: :tags: orm :tickets: Changes to query.update() and query.delete(): - the 'expire' option on query.update() has been renamed to 'fetch', thus matching that of query.delete(). 'expire' is deprecated and issues a warning. - query.update() and query.delete() both default to 'evaluate' for the synchronize strategy. - the 'synchronize' strategy for update() and delete() raises an error on failure. There is no implicit fallback onto "fetch". Failure of evaluation is based on the structure of criteria, so success/failure is deterministic based on code structure.
.. change:: :tags: orm :tickets: 1186, 1492, 1544 Enhancements on many-to-one relations: - many-to-one relations now fire off a lazyload in fewer cases, including that in most cases they will not fetch the "old" value when a new one is replaced.
- many-to-one relation to a joined-table subclass now uses get() for a simple load (known as the "use_get" condition), i.e. Related->Sub(Base), without the need to redefine the primaryjoin condition in terms of the base table. - specifying a foreign key with a declarative column, i.e. ForeignKey(MyRelatedClass.id), doesn't break the "use_get" condition from taking place. - relation(), eagerload(), and eagerload_all() now feature an option called "innerjoin". Specify `True` or `False` to control whether an eager join is constructed as an INNER or OUTER join. Default is `False` as always. The mapper options will override whichever setting is specified on relation(). Should generally be set for many-to-one, not-nullable foreign key relations to allow improved join performance; see the sketch after this list. - the behavior of eagerloading such that the main query is wrapped in a subquery when LIMIT/OFFSET are present now makes an exception for the case when all eager loads are many-to-one joins. In those cases, the eager joins are against the parent table directly along with the limit/offset, without the extra overhead of a subquery, since a many-to-one join does not add rows to the result.
.. change:: :tags: orm :tickets: Enhancements / Changes on Session.merge():
.. change:: :tags: orm :tickets: the "dont_load=True" flag on Session.merge() is deprecated and is now "load=False".
.. change:: :tags: orm :tickets: Session.merge() is performance optimized, using half the call counts for "load=False" mode compared to 0.5 and significantly fewer SQL queries in the case of collections for "load=True" mode.
.. change:: :tags: orm :tickets: merge() will not issue a needless merge of attributes if the given instance is the same instance which is already present.
.. change:: :tags: orm :tickets: merge() now also merges the "options" associated with a given state, i.e. those passed through query.options() which follow along with an instance, such as options to eagerly or lazily load various attributes. This is essential for the construction of highly integrated caching schemes. This is a subtle behavioral change vs. 0.5.
.. change:: :tags: orm :tickets: A bug was fixed regarding the serialization of the "loader path" present on an instance's state, which is also necessary when combining the usage of merge() with serialized state and associated options that should be preserved.
.. change:: :tags: orm :tickets: The all new merge() is showcased in a new comprehensive example of how to integrate Beaker with SQLAlchemy. See the notes in the "examples" note below.
.. change:: :tags: orm :tickets: 1362 Primary key values can now be changed on a joined-table inheritance object, and ON UPDATE CASCADE will be taken into account when the flush happens. Set the new "passive_updates" flag to False on mapper() when using SQLite or MySQL/MyISAM.
.. change:: :tags: orm :tickets: 1671 flush() now detects when a primary key column was updated by an ON UPDATE CASCADE operation from another primary key, and can then locate the row for a subsequent UPDATE on the new PK value. This occurs when a relation() is there to establish the relationship as well as passive_updates=True.
.. change:: :tags: orm :tickets: the "save-update" cascade will now cascade the pending *removed* values from a scalar or collection attribute into the new session during an add() operation. This is so that the flush() operation will also delete or modify rows of those disconnected items.
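A minimal sketch of the "innerjoin" flag described in the list above, using hypothetical tables and classes::

    from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table
    from sqlalchemy.orm import mapper, relation

    metadata = MetaData()
    customers = Table('customer', metadata,
                      Column('id', Integer, primary_key=True))
    orders = Table('order', metadata,
                   Column('id', Integer, primary_key=True),
                   Column('customer_id', Integer,
                          ForeignKey('customer.id'), nullable=False))

    class Customer(object):
        pass

    class Order(object):
        pass

    mapper(Customer, customers)
    mapper(Order, orders, properties={
        # the foreign key is not nullable, so an INNER JOIN is safe and
        # typically faster than the default LEFT OUTER JOIN eager load
        'customer': relation(Customer, lazy=False, innerjoin=True)
    })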
.. change:: :tags: orm :tickets: 1531 Using a "dynamic" loader with a "secondary" table now produces a query where the "secondary" table is *not* aliased. This allows the secondary Table object to be used in the "order_by" attribute of the relation(), and also allows it to be used in filter criterion against the dynamic relation.
.. change:: :tags: orm :tickets: 1643 relation() with uselist=False will emit a warning when an eager or lazy load locates more than one valid value for the row. This may be due to primaryjoin/secondaryjoin conditions which aren't appropriate for an eager LEFT OUTER JOIN or for other conditions.
.. change:: :tags: orm :tickets: 1633 an explicit check occurs when a synonym() is used with map_column=True, when a ColumnProperty (deferred or otherwise) exists separately in the properties dictionary sent to mapper with the same keyname. Instead of silently replacing the existing property (and possible options on that property), an error is raised.
.. change:: :tags: orm :tickets: a "dynamic" loader sets up its query criterion at construction time so that the actual query is returned from non-cloning accessors like "statement".
.. change:: :tags: orm :tickets: the "named tuple" objects returned when iterating a Query() are now pickleable.
.. change:: :tags: orm :tickets: 1542 mapping to a select() construct now requires that you make an alias() out of it distinctly. This is to eliminate confusion over such issues as :ticket:`1542`.
.. change:: :tags: orm :tickets: 1537 query.join() has been reworked to provide more consistent behavior and more flexibility (includes :ticket:`1537`).
.. change:: :tags: orm :tickets: query.select_from() accepts multiple clauses to produce multiple comma separated entries within the FROM clause. Useful when selecting from multiple-homed join() clauses.
.. change:: :tags: orm :tickets: query.select_from() also accepts mapped classes, aliased() constructs, and mappers as arguments. In particular this helps when querying from multiple joined-table classes to ensure the full join gets rendered.
.. change:: :tags: orm :tickets: 1135 query.get() can be used with a mapping to an outer join where one or more of the primary key values are None.
.. change:: :tags: orm :tickets: 1568 query.from_self(), query.union(), and others which do a "SELECT * from (SELECT...)" type of nesting will do a better job translating column expressions within the subquery to the columns clause of the outer query. This is potentially backwards incompatible with 0.5, in that this may break queries with literal expressions that do not have labels applied (i.e. literal('foo'), etc.).
.. change:: :tags: orm :tickets: 1622 relation primaryjoin and secondaryjoin now check that they are column-expressions, not just clause elements. This prohibits things like FROM expressions being placed there directly.
.. change:: :tags: orm :tickets: 1415 `expression.null()` is fully understood the same way None is when comparing an object/collection-referencing attribute within query.filter(), filter_by(), etc.
.. change:: :tags: orm :tickets: 1052 added "make_transient()" helper function which transforms a persistent/detached instance into a transient one (i.e. deletes the instance_key and removes it from any session).
.. change:: :tags: orm :tickets: 1339 the allow_null_pks flag on mapper() is deprecated, and the feature is turned "on" by default. This means that a row which has a non-null value for any of its primary key columns will be considered an identity.
The need for this scenario typically only occurs when mapping to an outer join.
.. change:: :tags: orm :tickets: the mechanics of "backref" have been fully merged into the finer-grained "back_populates" system, and take place entirely within the _generate_backref() method of RelationProperty. This makes the initialization procedure of RelationProperty simpler and allows easier propagation of settings (such as from subclasses of RelationProperty) into the reverse reference. The internal BackRef() is gone and backref() returns a plain tuple that is understood by RelationProperty.
.. change:: :tags: orm :tickets: 1569 The version_id_col feature on mapper() will raise a warning when used with dialects that don't support "rowcount" adequately.
.. change:: :tags: orm :tickets: added "execution_options()" to Query, so options can be passed to the resulting statement. Currently only Select-statements have these options, and the only option used is "stream_results", and the only dialect which knows "stream_results" is psycopg2.
.. change:: :tags: orm :tickets: Query.yield_per() will set the "stream_results" statement option automatically.
.. change:: :tags: orm :tickets: Deprecated or removed: * 'allow_null_pks' flag on mapper() is deprecated. It does nothing now and the setting is "on" in all cases. * 'transactional' flag on sessionmaker() and others is removed. Use 'autocommit=True' to indicate 'transactional=False'. * 'polymorphic_fetch' argument on mapper() is removed. Loading can be controlled using the 'with_polymorphic' option. * 'select_table' argument on mapper() is removed. Use 'with_polymorphic=("*", )' for this functionality. * 'proxy' argument on synonym() is removed. This flag did nothing throughout 0.5, as the "proxy generation" behavior is now automatic. * Passing a single list of elements to eagerload(), eagerload_all(), contains_eager(), lazyload(), defer(), and undefer() instead of multiple positional \*args is deprecated. * Passing a single list of elements to query.order_by(), query.group_by(), query.join(), or query.outerjoin() instead of multiple positional \*args is deprecated. * query.iterate_instances() is removed. Use query.instances(). * Query.query_from_parent() is removed. Use the sqlalchemy.orm.with_parent() function to produce a "parent" clause, or alternatively query.with_parent(). * query._from_self() is removed, use query.from_self() instead. * the "comparator" argument to composite() is removed. Use "comparator_factory". * RelationProperty._get_join() is removed. * the 'echo_uow' flag on Session is removed. Use logging on the "sqlalchemy.orm.unitofwork" name. * session.clear() is removed. Use session.expunge_all(). * session.save(), session.update(), session.save_or_update() are removed. Use session.add() and session.add_all(). * the "objects" flag on session.flush() remains deprecated. * the "dont_load=True" flag on session.merge() is deprecated in favor of "load=False". * ScopedSession.mapper remains deprecated. See the usage recipe at http://www.sqlalchemy.org/trac/wiki/UsageRecipes/SessionAwareMapper * passing an InstanceState (internal SQLAlchemy state object) to attributes.init_collection() or attributes.get_history() is deprecated. These functions are public API and normally expect a regular mapped object instance. * the 'engine' parameter to declarative_base() is removed. Use the 'bind' keyword argument.
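A quick before-and-after sketch of the removed Session methods from the list above (the mapped instances are hypothetical)::

    from sqlalchemy.orm import sessionmaker

    # 'transactional=True' is now expressed as autocommit=False (the default)
    Session = sessionmaker(autocommit=False)
    session = Session()

    # session.save() / session.update() / session.save_or_update()
    # are replaced by add() and add_all():
    session.add(some_object)             # hypothetical mapped instance
    session.add_all([obj_one, obj_two])  # hypothetical mapped instances

    # session.clear() is replaced by expunge_all():
    session.expunge_all()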
.. change:: :tags: sql :tickets: the "autocommit" flag on select() and text() as well as select().autocommit() are deprecated - now call .execution_options(autocommit=True) on either of those constructs, also available directly on Connection and orm.Query.
.. change:: :tags: sql :tickets: the autoincrement flag on column now indicates the column which should be linked to cursor.lastrowid, if that method is used. See the API docs for details.
.. change:: :tags: sql :tickets: 1566 an executemany() now requires that all bound parameter sets contain all of the keys that are present in the first bound parameter set. The structure and behavior of an insert/update statement is very much determined by the first parameter set, including which defaults are going to fire off, and a minimum of guesswork is performed with all the rest so that performance is not impacted. For this reason defaults would otherwise silently "fail" for missing parameters, so this is now guarded against.
.. change:: :tags: sql :tickets: returning() support is native to insert(), update(), delete(). Implementations of varying levels of functionality exist for Postgresql, Firebird, MSSQL and Oracle. returning() can be called explicitly with column expressions which are then returned in the resultset, usually via fetchone() or first(). insert() constructs will also use RETURNING implicitly to get newly generated primary key values, if the database version in use supports it (a version number check is performed). This occurs if no end-user returning() was specified.
.. change:: :tags: sql :tickets: 1665 union(), intersect(), except() and other "compound" types of statements have more consistent behavior w.r.t. parenthesizing. Each compound element embedded within another will now be grouped with parentheses - previously, the first compound element in the list would not be grouped, as SQLite doesn't like a statement to start with parenthesis. However, Postgresql in particular has precedence rules regarding INTERSECT, and it is more consistent for parentheses to be applied equally to all sub-elements. So now, the workaround for SQLite is also what the workaround for PG was previously - when nesting compound elements, the first one usually needs ".alias().select()" called on it to wrap it inside of a subquery.
.. change:: :tags: sql :tickets: 1579 insert() and update() constructs can now embed bindparam() objects using names that match the keys of columns. These bind parameters will circumvent the usual route to those keys showing up in the VALUES or SET clause of the generated SQL.
.. change:: :tags: sql :tickets: 1524 the Binary type now returns data as a Python string (or a "bytes" type in Python 3), instead of the built-in "buffer" type. This allows symmetric round trips of binary data.
.. change:: :tags: sql :tickets: Added a tuple_() construct, which allows sets of expressions to be compared to another set, typically with IN against composite primary keys or similar. Also accepts an IN with multiple columns. The "scalar select can have only one column" error message is removed - will rely upon the database to report problems with col mismatch.
.. change:: :tags: sql :tickets: User-defined "default" and "onupdate" callables which accept a context should now call upon "context.current_parameters" to get at the dictionary of bind parameters currently being processed. This dict is available in the same way regardless of single-execute or executemany-style statement execution.
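A minimal sketch of a context-sensitive default using "current_parameters" as described above (table and helper names hypothetical)::

    from sqlalchemy import Column, Integer, MetaData, String, Table

    def slug_default(context):
        # context.current_parameters is the dict of bind parameters
        # currently being processed for this row
        return context.current_parameters['title'].lower().replace(' ', '-')

    pages = Table('page', MetaData(),
                  Column('id', Integer, primary_key=True),
                  Column('title', String(100)),
                  Column('slug', String(100), default=slug_default))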
.. change:: :tags: sql :tickets: 1428 multi-part schema names, i.e. with dots such as "dbo.master", are now rendered in select() labels with underscores for dots, i.e. "dbo_master_table_column". This is a "friendly" label that behaves better in result sets.
.. change:: :tags: sql :tickets: removed needless "counter" behavior with select() labelnames that match a column name in the table, i.e. generates "tablename_id" for "id", instead of "tablename_id_1" in an attempt to avoid naming conflicts, when the table has a column actually named "tablename_id" - this is because the labeling logic is always applied to all columns so a naming conflict will never occur.
.. change:: :tags: sql :tickets: 1628 calling expr.in_([]), i.e. with an empty list, emits a warning before issuing the usual "expr != expr" clause. The "expr != expr" can be very expensive, and it's preferred that the user not issue in_() if the list is empty, instead simply not querying, or modifying the criterion as appropriate for more complex situations.
.. change:: :tags: sql :tickets: Added "execution_options()" to select()/text(), which set the default options for the Connection. See the note in "engines".
.. change:: :tags: sql :tickets: 1131 Deprecated or removed: * "scalar" flag on select() is removed, use select.as_scalar(). * "shortname" attribute on bindparam() is removed. * postgres_returning, firebird_returning flags on insert(), update(), delete() are deprecated, use the new returning() method. * fold_equivalents flag on join is deprecated (will remain until :ticket:`1131` is implemented).
.. change:: :tags: engines :tickets: 443 transaction isolation level may be specified with create_engine(... isolation_level="..."); available on postgresql and sqlite.
.. change:: :tags: engines :tickets: Connection has execution_options(), a generative method which accepts keywords that affect how the statement is executed w.r.t. the DBAPI. Currently supports "stream_results", which causes psycopg2 to use a server side cursor for that statement, as well as "autocommit", which is the new location for the "autocommit" option from select() and text(). select() and text() also have .execution_options(), as does ORM Query().
.. change:: :tags: engines :tickets: 1630 fixed the import for entrypoint-driven dialects to not rely upon a silly tb_info trick to determine import error status.
.. change:: :tags: engines :tickets: added first() method to ResultProxy, returns first row and closes result set immediately.
.. change:: :tags: engines :tickets: RowProxy objects are now pickleable, i.e. the object returned by result.fetchone(), result.fetchall() etc.
.. change:: :tags: engines :tickets: RowProxy no longer has a close() method, as the row no longer maintains a reference to the parent. Call close() on the parent ResultProxy instead, or use autoclose.
.. change:: :tags: engines :tickets: 1586 ResultProxy internals have been overhauled to greatly reduce method call counts when fetching columns. Can provide a large speed improvement (up to more than 100%) when fetching large result sets. The improvement is larger when fetching columns that have no type-level processing applied and when using results as tuples (instead of as dictionaries). Many thanks to Elixir's Gaëtan de Menten for this dramatic improvement!
.. change:: :tags: engines :tickets: Databases which rely upon postfetch of "last inserted id" to get at a generated sequence value (i.e. MySQL, MS-SQL) now work correctly when there is a composite primary key where the "autoincrement" column is not the first primary key column in the table.
.. change:: :tags: engines :tickets: the last_inserted_ids() method has been renamed to the descriptor "inserted_primary_key".
.. change:: :tags: engines :tickets: 1554 setting echo=False on create_engine() now sets the loglevel to WARN instead of NOTSET. This is so that logging can be disabled for a particular engine even if logging for "sqlalchemy.engine" is enabled overall. Note that the default setting of "echo" is `None`.
.. change:: :tags: engines :tickets: ConnectionProxy now has wrapper methods for all transaction lifecycle events, including begin(), rollback(), commit(), begin_nested(), begin_prepared(), prepare(), release_savepoint(), etc.
.. change:: :tags: engines :tickets: Connection pool logging now uses both INFO and DEBUG log levels for logging. INFO is for major events such as invalidated connections, DEBUG for all the acquire/return logging. `echo_pool` can be False, None, True or "debug" the same way as `echo` works.
.. change:: :tags: engines :tickets: 1621 All pyodbc-dialects now support extra pyodbc-specific kw arguments 'ansi', 'unicode_results', 'autocommit'.
.. change:: :tags: engines :tickets: the "threadlocal" engine has been rewritten and simplified and now supports SAVEPOINT operations.
.. change:: :tags: engines :tickets: deprecated or removed: * result.last_inserted_ids() is deprecated. Use result.inserted_primary_key. * dialect.get_default_schema_name(connection) is now public via dialect.default_schema_name. * the "connection" argument from engine.transaction() and engine.run_callable() is removed - Connection itself now has those methods. All four methods accept \*args and \**kwargs which are passed to the given callable, as well as the operating connection.
.. change:: :tags: schema :tickets: 1541 the `__contains__()` method of `MetaData` now accepts strings or `Table` objects as arguments. If given a `Table`, the argument is converted to `table.key` first, i.e. "[schemaname.]"
.. change:: :tags: schema :tickets: deprecated MetaData.connect() and ThreadLocalMetaData.connect() have been removed - send the "bind" attribute to bind a metadata.
.. change:: :tags: schema :tickets: deprecated metadata.table_iterator() method removed (use sorted_tables).
.. change:: :tags: schema :tickets: deprecated PassiveDefault - use DefaultClause.
.. change:: :tags: schema :tickets: the "metadata" argument is removed from DefaultGenerator and subclasses, but remains locally present on Sequence, which is a standalone construct in DDL.
.. change:: :tags: schema :tickets: Removed public mutability from Index and Constraint objects: * ForeignKeyConstraint.append_element() * Index.append_column() * UniqueConstraint.append_column() * PrimaryKeyConstraint.add() * PrimaryKeyConstraint.remove() These should be constructed declaratively (i.e. in one construction).
.. change:: :tags: schema :tickets: 1545 The "start" and "increment" attributes on Sequence now generate "START WITH" and "INCREMENT BY" by default, on Oracle and Postgresql. Firebird doesn't support these keywords right now.
.. change:: :tags: schema :tickets: UniqueConstraint, Index, PrimaryKeyConstraint all accept lists of column names or column objects as arguments.
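A short sketch of the declarative, construct-at-once style now required for constraints (table and constraint names hypothetical)::

    from sqlalchemy import (Column, Index, Integer, MetaData, String,
                            Table, UniqueConstraint)

    employees = Table('employee', MetaData(),
                      Column('id', Integer, primary_key=True),
                      Column('first', String(50)),
                      Column('last', String(50)),
                      # column names or Column objects, all in one construction
                      UniqueConstraint('first', 'last', name='uq_emp_name'))

    Index('ix_emp_last', employees.c.last)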
.. change:: :tags: schema :tickets: Other removed things: - Table.key (no idea what this was for) - Table.primary_key is not assignable - use table.append_constraint(PrimaryKeyConstraint(...)) - Column.bind (get via column.table.bind) - Column.metadata (get via column.table.metadata) - Column.sequence (use column.default) - ForeignKey(constraint=some_parent) (is now private _constraint)
.. change:: :tags: schema :tickets: The use_alter flag on ForeignKey is now a shortcut option for operations that can be hand-constructed using the DDL() event system. A side effect of this refactor is that ForeignKeyConstraint objects with use_alter=True will *not* be emitted on SQLite, which does not support ALTER for foreign keys.
.. change:: :tags: schema :tickets: 1605 ForeignKey and ForeignKeyConstraint objects now correctly copy() all their public keyword arguments.
.. change:: :tags: reflection/inspection :tickets: Table reflection has been expanded and generalized into a new API called "sqlalchemy.engine.reflection.Inspector". The Inspector object provides fine-grained information about a wide variety of schema information, with room for expansion, including table names, column names, view definitions, sequences, indexes, etc.
.. change:: :tags: reflection/inspection :tickets: Views are now reflectable as ordinary Table objects. The same Table constructor is used, with the caveat that "effective" primary and foreign key constraints aren't part of the reflection results; these have to be specified explicitly if desired.
.. change:: :tags: reflection/inspection :tickets: The existing autoload=True system now uses Inspector underneath so that each dialect need only return "raw" data about tables and other objects - Inspector is the single place that information is compiled into Table objects so that consistency is at a maximum.
.. change:: :tags: ddl :tickets: The DDL system has been greatly expanded. The DDL() class now extends the more generic DDLElement(), which forms the basis of many new constructs: - CreateTable() - DropTable() - AddConstraint() - DropConstraint() - CreateIndex() - DropIndex() - CreateSequence() - DropSequence() These support "on" and "execute_at()" just like plain DDL() does. User-defined DDLElement subclasses can be created and linked to a compiler using the sqlalchemy.ext.compiler extension. A sketch follows this entry's section.
.. change:: :tags: ddl :tickets: The signature of the "on" callable passed to DDL() and DDLElement() is revised as follows: "ddl" - the DDLElement object itself; "event" - the string event name; "target" - previously "schema_item", the Table or MetaData object triggering the event; "connection" - the Connection object in use for the operation; "\**kw" - keyword arguments. In the case of MetaData before/after create/drop, the list of Table objects for which CREATE/DROP DDL is to be issued is passed as the kw argument "tables". This is necessary for metadata-level DDL that is dependent on the presence of specific tables. The "schema_item" attribute of DDL has been renamed to "target".
.. change:: :tags: dialect, refactor :tickets: Dialect modules are now broken into database dialects plus DBAPI implementations. Connect URLs are now preferred to be specified using dialect+driver://..., i.e. "mysql+mysqldb://scott:tiger@localhost/test". See the 0.6 documentation for examples.
.. change:: :tags: dialect, refactor :tickets: the setuptools entrypoint for external dialects is now called "sqlalchemy.dialects".
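As a small illustration of the new DDL constructs named above, a minimal sketch (table name hypothetical)::

    from sqlalchemy import Column, Integer, MetaData, String, Table
    from sqlalchemy.schema import CreateTable

    t = Table('widget', MetaData(),
              Column('id', Integer, primary_key=True),
              Column('name', String(50)))

    # CreateTable is a DDLElement; str() compiles it with the default dialect
    print(CreateTable(t))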
Use "schema" to represent any namespaces to be prepended to the table name. .. change:: :tags: dialect, refactor :tickets: server_version_info becomes a static attribute. .. change:: :tags: dialect, refactor :tickets: dialects receive an initialize() event on initial connection to determine connection properties. .. change:: :tags: dialect, refactor :tickets: dialects receive a visit_pool event have an opportunity to establish pool listeners. .. change:: :tags: dialect, refactor :tickets: cached TypeEngine classes are cached per-dialect class instead of per-dialect. .. change:: :tags: dialect, refactor :tickets: new UserDefinedType should be used as a base class for new types, which preserves the 0.5 behavior of get_col_spec(). .. change:: :tags: dialect, refactor :tickets: The result_processor() method of all type classes now accepts a second argument "coltype", which is the DBAPI type argument from cursor.description. This argument can help some types decide on the most efficient processing of result values. .. change:: :tags: dialect, refactor :tickets: Deprecated Dialect.get_params() removed. .. change:: :tags: dialect, refactor :tickets: Dialect.get_rowcount() has been renamed to a descriptor "rowcount", and calls cursor.rowcount directly. Dialects which need to hardwire a rowcount in for certain calls should override the method to provide different behavior. .. change:: :tags: dialect, refactor :tickets: 1566 DefaultRunner and subclasses have been removed. The job of this object has been simplified and moved into ExecutionContext. Dialects which support sequences should add a `fire_sequence()` method to their execution context implementation. .. change:: :tags: dialect, refactor :tickets: Functions and operators generated by the compiler now use (almost) regular dispatch functions of the form "visit_" and "visit__fn" to provide customed processing. This replaces the need to copy the "functions" and "operators" dictionaries in compiler subclasses with straightforward visitor methods, and also allows compiler subclasses complete control over rendering, as the full _Function or _BinaryExpression object is passed in. .. change:: :tags: postgresql :tickets: New dialects: pg8000, zxjdbc, and pypostgresql on py3k. .. change:: :tags: postgresql :tickets: The "postgres" dialect is now named "postgresql" ! Connection strings look like: postgresql://scott:tiger@localhost/test postgresql+pg8000://scott:tiger@localhost/test The "postgres" name remains for backwards compatibility in the following ways: - There is a "postgres.py" dummy dialect which allows old URLs to work, i.e. postgres://scott:tiger@localhost/test - The "postgres" name can be imported from the old "databases" module, i.e. "from sqlalchemy.databases import postgres" as well as "dialects", "from sqlalchemy.dialects.postgres import base as pg", will send a deprecation warning. - Special expression arguments are now named "postgresql_returning" and "postgresql_where", but the older "postgres_returning" and "postgres_where" names still work with a deprecation warning. .. change:: :tags: postgresql :tickets: "postgresql_where" now accepts SQL expressions which can also include literals, which will be quoted as needed. .. change:: :tags: postgresql :tickets: The psycopg2 dialect now uses psycopg2's "unicode extension" on all new connections, which allows all String/Text/etc. types to skip the need to post-process bytestrings into unicode (an expensive step due to its volume). 
.. change:: :tags: postgresql :tickets: The psycopg2 dialect now uses psycopg2's "unicode extension" on all new connections, which allows all String/Text/etc. types to skip the need to post-process bytestrings into unicode (an expensive step due to its volume). Other dialects which return unicode natively (pg8000, zxjdbc) also skip unicode post-processing.
.. change:: :tags: postgresql :tickets: 1511 Added new ENUM type, which exists as a schema-level construct and extends the generic Enum type. Automatically associates itself with tables and their parent metadata to issue the appropriate CREATE TYPE/DROP TYPE commands as needed, supports unicode labels, supports reflection.
.. change:: :tags: postgresql :tickets: INTERVAL supports an optional "precision" argument corresponding to the argument that PG accepts.
.. change:: :tags: postgresql :tickets: using new dialect.initialize() feature to set up version-dependent behavior.
.. change:: :tags: postgresql :tickets: 1279 somewhat better support for % signs in table/column names; psycopg2 can't handle a bind parameter name of %(foobar)s however and SQLA doesn't want to add overhead just to treat that one non-existent use case.
.. change:: :tags: postgresql :tickets: 1516 Inserting NULL into a primary key + foreign key column will allow the "not null constraint" error to raise, not an attempt to execute a nonexistent "col_id_seq" sequence.
.. change:: :tags: postgresql :tickets: autocommit SELECT statements, i.e. those which select from a procedure that modifies rows, now work with server-side cursor mode (the named cursor isn't used for such statements.)
.. change:: :tags: postgresql :tickets: 1636 postgresql dialect can properly detect pg "devel" version strings, i.e. "8.5devel".
.. change:: :tags: postgresql :tickets: 1619 The psycopg2 dialect now respects the statement option "stream_results". This option overrides the connection setting "server_side_cursors". If true, server side cursors will be used for the statement. If false, they will not be used, even if "server_side_cursors" is true on the connection.
.. change:: :tags: mysql :tickets: New dialects: oursql, a new native dialect, MySQL Connector/Python, a native Python port of MySQLdb, and of course zxjdbc on Jython.
.. change:: :tags: mysql :tickets: VARCHAR/NVARCHAR will not render without a length; an error is raised before passing to MySQL. Doesn't impact CAST since VARCHAR is not allowed in MySQL CAST anyway; the dialect renders CHAR/NCHAR in those cases.
.. change:: :tags: mysql :tickets: all the _detect_XXX() functions now run once underneath dialect.initialize().
.. change:: :tags: mysql :tickets: 1279 somewhat better support for % signs in table/column names; MySQLdb can't handle % signs in SQL when executemany() is used, and SQLA doesn't want to add overhead just to treat that one non-existent use case.
.. change:: :tags: mysql :tickets: the BINARY and MSBinary types now generate "BINARY" in all cases. Omitting the "length" parameter will generate "BINARY" with no length. Use BLOB to generate an unlengthed binary column.
.. change:: :tags: mysql :tickets: the "quoting='quoted'" argument to MSEnum/ENUM is deprecated. It's best to rely upon the automatic quoting.
.. change:: :tags: mysql :tickets: ENUM now subclasses the new generic Enum type, and also handles unicode values implicitly, if the given labelnames are unicode objects.
.. change:: :tags: mysql :tickets: 1539 a column of type TIMESTAMP now defaults to NULL if "nullable=False" is not passed to Column(), and no default is present. This is now consistent with all other types, and in the case of TIMESTAMP explicitly renders "NULL" due to MySQL's "switching" of default nullability for TIMESTAMP columns.
.. change:: :tags: oracle :tickets: unit tests pass 100% with cx_oracle!
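A minimal sketch of the "stream_results" option described above (the URL and table are hypothetical)::

    from sqlalchemy import create_engine, text

    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
    stmt = text("SELECT * FROM bigtable").execution_options(stream_results=True)

    conn = engine.connect()
    for row in conn.execute(stmt):
        pass  # rows are fetched via a server-side (named) cursor
    conn.close()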
.. change:: :tags: oracle :tickets: support for cx_Oracle's "native unicode" mode which does not require NLS_LANG to be set. Use version 5.0.2 or later of cx_oracle.
.. change:: :tags: oracle :tickets: an NCLOB type is added to the base types.
.. change:: :tags: oracle :tickets: use_ansi=False won't leak into the FROM/WHERE clause of a statement that's selecting from a subquery that also uses JOIN/OUTERJOIN.
.. change:: :tags: oracle :tickets: 1467 added native INTERVAL type to the dialect. This supports only the DAY TO SECOND interval type so far due to lack of support in cx_oracle for YEAR TO MONTH.
.. change:: :tags: oracle :tickets: usage of the CHAR type results in cx_oracle's FIXED_CHAR dbapi type being bound to statements.
.. change:: :tags: oracle :tickets: 885 the Oracle dialect now features NUMBER which intends to act just like Oracle's NUMBER type. It is the primary numeric type returned by table reflection and attempts to return Decimal()/float/int based on the precision/scale parameters.
.. change:: :tags: oracle :tickets: func.char_length is a generic function for LENGTH.
.. change:: :tags: oracle :tickets: ForeignKey() which includes onupdate= will emit a warning, rather than emitting ON UPDATE CASCADE, which is unsupported by Oracle.
.. change:: :tags: oracle :tickets: the keys() method of RowProxy() now returns the result column names *normalized* to be SQLAlchemy case insensitive names. This means they will be lower case for case insensitive names, whereas the DBAPI would normally return them as UPPERCASE names. This allows row keys() to be compatible with further SQLAlchemy operations.
.. change:: :tags: oracle :tickets: using new dialect.initialize() feature to set up version-dependent behavior.
.. change:: :tags: oracle :tickets: 1125 using types.BigInteger with Oracle will generate NUMBER(19).
.. change:: :tags: oracle :tickets: "case sensitivity" feature will detect an all-lowercase case-sensitive column name during reflect and add "quote=True" to the generated Column, so that proper quoting is maintained.
.. change:: :tags: firebird :tickets: the keys() method of RowProxy() now returns the result column names *normalized* to be SQLAlchemy case insensitive names. This means they will be lower case for case insensitive names, whereas the DBAPI would normally return them as UPPERCASE names. This allows row keys() to be compatible with further SQLAlchemy operations.
.. change:: :tags: firebird :tickets: using new dialect.initialize() feature to set up version-dependent behavior.
.. change:: :tags: firebird :tickets: "case sensitivity" feature will detect an all-lowercase case-sensitive column name during reflect and add "quote=True" to the generated Column, so that proper quoting is maintained.
.. change:: :tags: mssql :tickets: MSSQL + Pyodbc + FreeTDS now works for the most part, with possible exceptions regarding binary data as well as unicode schema identifiers.
.. change:: :tags: mssql :tickets: the "has_window_funcs" flag is removed. LIMIT/OFFSET usage will use ROW NUMBER as always, and if on an older version of SQL Server, the operation fails. The behavior is exactly the same except the error is raised by SQL server instead of the dialect, and no flag setting is required to enable it.
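A quick sketch of the generic func.char_length function noted above (table hypothetical)::

    from sqlalchemy import Column, Integer, MetaData, String, Table, func, select

    users = Table('user', MetaData(),
                  Column('id', Integer, primary_key=True),
                  Column('name', String(50)))

    # compiles to LENGTH(...) on Oracle, char_length(...) on most other backends
    stmt = select([func.char_length(users.c.name)])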
As with "has_window_funcs", if the underlying driver doesn't support this, then you can't do this operation in any case, so there's no point in having a flag. .. change:: :tags: mssql :tickets: using new dialect.initialize() feature to set up version-dependent behavior. .. change:: :tags: mssql :tickets: removed references to sequence which is no longer used. implicit identities in mssql work the same as implicit sequences on any other dialects. Explicit sequences are enabled through the use of "default=Sequence()". See the MSSQL dialect documentation for more information. .. change:: :tags: sqlite :tickets: DATE, TIME and DATETIME types can now take optional storage_format and regexp argument. storage_format can be used to store those types using a custom string format. regexp allows to use a custom regular expression to match string values from the database. .. change:: :tags: sqlite :tickets: Time and DateTime types now use by a default a stricter regular expression to match strings from the database. Use the regexp argument if you are using data stored in a legacy format. .. change:: :tags: sqlite :tickets: __legacy_microseconds__ on SQLite Time and DateTime types is not supported anymore. You should use the storage_format argument instead. .. change:: :tags: sqlite :tickets: Date, Time and DateTime types are now stricter in what they accept as bind parameters: Date type only accepts date objects (and datetime ones, because they inherit from date), Time only accepts time objects, and DateTime only accepts date and datetime objects. .. change:: :tags: sqlite :tickets: 1016 Table() supports a keyword argument "sqlite_autoincrement", which applies the SQLite keyword "AUTOINCREMENT" to the single integer primary key column when generating DDL. Will prevent generation of a separate PRIMARY KEY constraint. .. change:: :tags: types :tickets: The construction of types within dialects has been totally overhauled. Dialects now define publically available types as UPPERCASE names exclusively, and internal implementation types using underscore identifiers (i.e. are private). The system by which types are expressed in SQL and DDL has been moved to the compiler system. This has the effect that there are much fewer type objects within most dialects. A detailed document on this architecture for dialect authors is in lib/sqlalchemy/dialects/type_migration_guidelines.txt . .. change:: :tags: types :tickets: Types no longer make any guesses as to default parameters. In particular, Numeric, Float, NUMERIC, FLOAT, DECIMAL don't generate any length or scale unless specified. .. change:: :tags: types :tickets: 1664 types.Binary is renamed to types.LargeBinary, it only produces BLOB, BYTEA, or a similar "long binary" type. New base BINARY and VARBINARY types have been added to access these MySQL/MS-SQL specific types in an agnostic way. .. change:: :tags: types :tickets: String/Text/Unicode types now skip the unicode() check on each result column value if the dialect has detected the DBAPI as returning Python unicode objects natively. This check is issued on first connect using "SELECT CAST 'some text' AS VARCHAR(10)" or equivalent, then checking if the returned object is a Python unicode. This allows vast performance increases for native-unicode DBAPIs, including pysqlite/sqlite3, psycopg2, and pg8000. .. change:: :tags: types :tickets: Most types result processors have been checked for possible speed improvements. 
.. change:: :tags: types :tickets: Most types result processors have been checked for possible speed improvements. Specifically, the following generic types have been optimized, resulting in varying speed improvements: Unicode, PickleType, Interval, TypeDecorator, Binary. Also the following dbapi-specific implementations have been improved: Time, Date and DateTime on Sqlite, ARRAY on Postgresql, Time on MySQL, Numeric(as_decimal=False) on MySQL, oursql and pypostgresql, DateTime on cx_oracle and LOB-based types on cx_oracle.
.. change:: :tags: types :tickets: Reflection of types now returns the exact UPPERCASE type within types.py, or the UPPERCASE type within the dialect itself if the type is not a standard SQL type. This means reflection now returns more accurate information about reflected types.
.. change:: :tags: types :tickets: 1511, 1109 Added a new Enum generic type. Enum is a schema-aware object to support databases which require specific DDL in order to use enum or equivalent; in the case of PG it handles the details of `CREATE TYPE`, and on other databases without native enum support will generate VARCHAR + an inline CHECK constraint to enforce the enum.
.. change:: :tags: types :tickets: 1467 The Interval type includes a "native" flag which controls if native INTERVAL types (postgresql + oracle) are selected if available, or not. "day_precision" and "second_precision" arguments are also added which propagate as appropriately to these native types. Related to :ticket:`1467`.
.. change:: :tags: types :tickets: 1589 The Boolean type, when used on a backend that doesn't have native boolean support, will generate a CHECK constraint "col IN (0, 1)" along with the int/smallint-based column type. This can be switched off if desired with create_constraint=False. Note that MySQL has no native boolean *or* CHECK constraint support so this feature isn't available on that platform.
.. change:: :tags: types :tickets: PickleType now uses == for comparison of values when mutable=True, unless the "comparator" argument with a comparison function is specified to the type. Objects being pickled will be compared based on identity (which defeats the purpose of mutable=True) if __eq__() is not overridden or a comparison function is not provided.
.. change:: :tags: types :tickets: The default "precision" and "scale" arguments of Numeric and Float have been removed and now default to None. NUMERIC and FLOAT will be rendered with no numeric arguments by default unless these values are provided.
.. change:: :tags: types :tickets: AbstractType.get_search_list() is removed - the games it was used for are no longer necessary.
.. change:: :tags: types :tickets: 1125 Added a generic BigInteger type, compiles to BIGINT or NUMBER(19).
.. change:: :tags: types :tickets: sqlsoup has been overhauled to explicitly support a 0.5-style session, using autocommit=False, autoflush=True. Default behavior of SQLSoup now requires the usual usage of commit() and rollback(), which have been added to its interface. An explicit Session or scoped_session can be passed to the constructor, allowing these arguments to be overridden.
.. change:: :tags: types :tickets: sqlsoup db..update() and delete() now call query(cls).update() and delete(), respectively.
.. change:: :tags: types :tickets: sqlsoup now has execute() and connection(), which call upon the Session methods of those names, ensuring that the bind is in terms of the SqlSoup object's bind.
.. change:: :tags: types :tickets: sqlsoup objects no longer have the 'query' attribute - it's not needed for sqlsoup's usage paradigm and it gets in the way of a column that is actually named 'query'.
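A short sketch of the new generic Enum type described above (table and type names hypothetical)::

    from sqlalchemy import Column, Enum, Integer, MetaData, Table

    shirts = Table('shirt', MetaData(),
                   Column('id', Integer, primary_key=True),
                   # CREATE TYPE on Postgresql; VARCHAR + CHECK elsewhere
                   Column('size', Enum('small', 'medium', 'large',
                                       name='shirt_sizes')))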
.. change:: :tags: types :tickets: 1259 The signature of the proxy_factory callable passed to association_proxy is now (lazy_collection, creator, value_attr, association_proxy), adding a fourth argument that is the parent AssociationProxy argument. Allows serializability and subclassing of the built-in collections.
.. change:: :tags: types :tickets: 1372 association_proxy now has basic comparator methods .any(), .has(), .contains(), ==, !=, thanks to Scott Torborg.
SQLAlchemy-1.0.11/doc/build/changelog/migration_07.rst0000664000175000017500000014100112636375552023461 0ustar classicclassic00000000000000==============================
What's New in SQLAlchemy 0.7?
==============================

.. admonition:: About this Document

    This document describes changes between SQLAlchemy version 0.6, last released May 5, 2012, and SQLAlchemy version 0.7, undergoing maintenance releases as of October, 2012. Document date: July 27, 2011

Introduction
============

This guide introduces what's new in SQLAlchemy version 0.7, and also documents changes which affect users migrating their applications from the 0.6 series of SQLAlchemy to 0.7. To as great a degree as possible, changes are made in such a way as to not break compatibility with applications built for 0.6. The changes that are necessarily not backwards compatible are very few, and all but one, the change to mutable attribute defaults, should affect an exceedingly small portion of applications - many of the changes regard non-public APIs and undocumented hacks some users may have been attempting to use.

A second, even smaller class of non-backwards-compatible changes is also documented. This class of change regards those features and behaviors that have been deprecated at least since version 0.5 and have been raising warnings since their deprecation. These changes would only affect applications that are still using 0.4- or early 0.5-style APIs. As the project matures, we have fewer and fewer of these kinds of changes with 0.x level releases, which is a product of our API having ever fewer features that are less than ideal for the use cases they were meant to solve.

An array of existing functionalities have been superseded in SQLAlchemy 0.7. There's not much difference between the terms "superseded" and "deprecated", except that the former has a much weaker suggestion that the old feature will ever be removed. In 0.7, features like ``synonym`` and ``comparable_property``, as well as all the ``Extension`` and other event classes, have been superseded. But these "superseded" features have been re-implemented such that their implementations live mostly outside of core ORM code, so their continued "hanging around" doesn't impact SQLAlchemy's ability to further streamline and refine its internals, and we expect them to remain within the API for the foreseeable future.

New Features
============

New Event System
----------------

SQLAlchemy started early with the ``MapperExtension`` class, which provided hooks into the persistence cycle of mappers. As SQLAlchemy quickly became more componentized, pushing mappers into a more focused configurational role, many more "extension", "listener", and "proxy" classes popped up to solve various activity-interception use cases in an ad-hoc fashion.
Part of this was driven by the divergence of activities; ``ConnectionProxy`` objects wanted to provide a system of rewriting statements and parameters; ``AttributeExtension`` provided a system of replacing incoming values, and ``DDL`` objects had events that could be switched off of dialect-sensitive callables.

0.7 re-implements virtually all of these plugin points with a new, unified approach, which retains all the functionalities of the different systems, provides more flexibility and less boilerplate, performs better, and eliminates the need to learn radically different APIs for each event subsystem. The pre-existing classes ``MapperExtension``, ``SessionExtension``, ``AttributeExtension``, ``ConnectionProxy``, ``PoolListener`` as well as the ``DDLElement.execute_at`` method are deprecated and now implemented in terms of the new system - these APIs remain fully functional and are expected to remain in place for the foreseeable future.

The new approach uses named events and user-defined callables to associate activities with events. The API's look and feel was driven by such diverse sources as JQuery, Blinker, and Hibernate, and was also modified further on several occasions during conferences with dozens of users on Twitter, which appears to have a much higher response rate than the mailing list for such questions.

It also features an open-ended system of target specification that allows events to be associated with API classes, such as for all ``Session`` or ``Engine`` objects, with specific instances of API classes, such as for a specific ``Pool`` or ``Mapper``, as well as for related objects like a user-defined class that's mapped, or something as specific as a certain attribute on instances of a particular subclass of a mapped parent class. Individual listener subsystems can apply wrappers to incoming user-defined listener functions which modify how they are called - a mapper event can receive either the instance of the object being operated upon, or its underlying ``InstanceState`` object. An attribute event can opt whether or not to have the responsibility of returning a new value.

Several systems now build upon the new event API, including the new "mutable attributes" API as well as composite attributes. The greater emphasis on events has also led to the introduction of a handful of new events, including attribute expiration and refresh operations, pickle loads/dumps operations, and completed mapper construction operations.

.. seealso:: :ref:`event_toplevel`

:ticket:`1902`
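A minimal sketch of the new API, assuming a pool-level "connect" hook is desired (the listener name is hypothetical)::

    from sqlalchemy import event
    from sqlalchemy.pool import Pool

    def my_on_connect(dbapi_connection, connection_record):
        # called each time the pool creates a new DBAPI connection
        print("new DBAPI connection established")

    # associate the listener with all Pool objects
    event.listen(Pool, 'connect', my_on_connect)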
Hybrid Attributes, implements/supersedes synonym(), comparable_property()
--------------------------------------------------------------------------

The "derived attributes" example has now been turned into an official extension. The typical use case for ``synonym()`` is to provide descriptor access to a mapped column; the use case for ``comparable_property()`` is to be able to return a ``PropComparator`` from any descriptor. In practice, the approach of "derived" is easier to use, more extensible, is implemented in a few dozen lines of pure Python with almost no imports, and doesn't require the ORM core to even be aware of it. The feature is now known as the "Hybrid Attributes" extension.

``synonym()`` and ``comparable_property()`` are still part of the ORM, though their implementations have been moved outwards, building on an approach that is similar to that of the hybrid extension, so that the core ORM mapper/query/property modules aren't really aware of them otherwise.

.. seealso:: :ref:`hybrids_toplevel`

:ticket:`1903`

Speed Enhancements
------------------

As is customary with all major SQLA releases, a wide pass through the internals to reduce overhead and callcounts has been made which further reduces the work needed in common scenarios. Highlights of this release include:

* The flush process will now bundle INSERT statements into batches fed to ``cursor.executemany()``, for rows where the primary key is already present. In particular this usually applies to the "child" table on a joined table inheritance configuration, meaning the number of calls to ``cursor.execute`` for a large bulk insert of joined-table objects can be cut in half, allowing native DBAPI optimizations to take place for those statements passed to ``cursor.executemany()`` (such as re-using a prepared statement).

* The codepath invoked when accessing a many-to-one reference to a related object that's already loaded has been greatly simplified. The identity map is checked directly without the need to generate a new ``Query`` object first, which is expensive in the context of thousands of in-memory many-to-ones being accessed. The usage of constructed-per-call "loader" objects is also no longer used for the majority of lazy attribute loads.

* The rewrite of composites allows a shorter codepath when mapper internals access mapped attributes within a flush.

* New inlined attribute access functions replace the previous usage of "history" when the "save-update" and other cascade operations need to cascade among the full scope of datamembers associated with an attribute. This reduces the overhead of generating a new ``History`` object for this speed-critical operation.

* The internals of the ``ExecutionContext``, the object corresponding to a statement execution, have been inlined and simplified.

* The ``bind_processor()`` and ``result_processor()`` callables generated by types for each statement execution are now cached (carefully, so as to avoid memory leaks for ad-hoc types and dialects) for the lifespan of that type, further reducing per-statement call overhead.

* The collection of "bind processors" for a particular ``Compiled`` instance of a statement is also cached on the ``Compiled`` object, taking further advantage of the "compiled cache" used by the flush process to re-use the same compiled form of INSERT, UPDATE, DELETE statements.

A demonstration of callcount reduction including a sample benchmark script is at http://techspot.zzzeek.org/2010/12/12/a-tale-of-three-profiles/

Composites Rewritten
--------------------

The "composite" feature has been rewritten, like ``synonym()`` and ``comparable_property()``, to use a lighter-weight implementation based on descriptors and events, rather than building into the ORM internals. This allowed the removal of some latency from the mapper/unit of work internals, and simplifies the workings of composite. The composite attribute now no longer conceals the underlying columns it builds upon, which now remain as regular attributes. Composites can also act as a proxy for ``relationship()`` as well as ``Column()`` attributes.

The major backwards-incompatible change of composites is that they no longer use the ``mutable=True`` system to detect in-place mutations. Please use the `Mutation Tracking `_ extension to establish in-place change events to existing composite usage.
.. seealso:: :ref:`mapper_composite` :ref:`mutable_toplevel`

:ticket:`2008` :ticket:`2024`

More succinct form of query.join(target, onclause)
--------------------------------------------------

The default method of issuing ``query.join()`` to a target with an explicit onclause is now::

    query.join(SomeClass, SomeClass.id==ParentClass.some_id)

In 0.6, this usage was considered to be an error, because ``join()`` accepts multiple arguments corresponding to multiple JOIN clauses - the two-argument form needed to be in a tuple to disambiguate between single-argument and two-argument join targets. In the middle of 0.6 we added detection and an error message for this specific calling style, since it was so common. In 0.7, since we are detecting the exact pattern anyway, and since having to type out a tuple for no reason is extremely annoying, the non-tuple method now becomes the "normal" way to do it. The "multiple JOIN" use case is exceedingly rare compared to the single join case, and multiple joins these days are more clearly represented by multiple calls to ``join()``.

The tuple form will remain for backwards compatibility.

Note that all the other forms of ``query.join()`` remain unchanged::

    query.join(MyClass.somerelation)
    query.join("somerelation")
    query.join(MyTarget)
    # ... etc

`Querying with Joins `_

:ticket:`1923`

.. _07_migration_mutation_extension:

Mutation event extension, supersedes "mutable=True"
---------------------------------------------------

A new extension, :ref:`mutable_toplevel`, provides a mechanism by which user-defined datatypes can provide change events back to the owning parent or parents. The extension includes an approach for scalar database values, such as those managed by :class:`.PickleType`, ``postgresql.ARRAY``, or other custom ``MutableType`` classes, as well as an approach for ORM "composites", those configured using :func:`~.sqlalchemy.orm.composite`.

.. seealso:: :ref:`mutable_toplevel`

NULLS FIRST / NULLS LAST operators
----------------------------------

These are implemented as an extension to the ``asc()`` and ``desc()`` operators, called ``nullsfirst()`` and ``nullslast()``.

.. seealso:: :func:`.nullsfirst` :func:`.nullslast`

:ticket:`723`

select.distinct(), query.distinct() accepts \*args for Postgresql DISTINCT ON
-------------------------------------------------------------------------------

This was already available by passing a list of expressions to the ``distinct`` keyword argument of ``select()``; the ``distinct()`` method of ``select()`` and ``Query`` now accepts positional arguments which are rendered as DISTINCT ON when a Postgresql backend is used.

`distinct() `_ `Query.distinct() `_

:ticket:`1069`
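A quick sketch of the new NULLS FIRST / NULLS LAST operators (table hypothetical)::

    from sqlalchemy import (Column, DateTime, Integer, MetaData, Table,
                            desc, nullslast, select)

    posts = Table('post', MetaData(),
                  Column('id', Integer, primary_key=True),
                  Column('published', DateTime))

    # renders: ORDER BY post.published DESC NULLS LAST
    stmt = select([posts]).order_by(nullslast(desc(posts.c.published)))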
That is: :: Table('mytable', metadata, Column('id', Integer, primary_key=True), Column('name', String(50), nullable=False), Index('idx_name', 'name') ) The primary rationale here is for the benefit of declarative ``__table_args__``, particularly when used with mixins: :: class HasNameMixin(object): name = Column('name', String(50), nullable=False) @declared_attr def __table_args__(cls): return (Index('idx_name', 'name'), {}) class User(HasNameMixin, Base): __tablename__ = 'user' id = Column('id', Integer, primary_key=True) `Indexes `_ Window Function SQL Construct ----------------------------- A "window function" provides information to a statement about the result set as it's produced. This allows criteria against various things like "row number", "rank" and so forth. They are known to be supported at least by Postgresql, SQL Server and Oracle, possibly others. The best introduction to window functions is on Postgresql's site, where window functions have been supported since version 8.4: http://www.postgresql.org/docs/9.0/static/tutorial-window.html SQLAlchemy provides a simple construct typically invoked via an existing function clause, using the ``over()`` method, which accepts ``order_by`` and ``partition_by`` keyword arguments. Below we replicate the first example in PG's tutorial: :: from sqlalchemy.sql import table, column, select, func empsalary = table('empsalary', column('depname'), column('empno'), column('salary')) s = select([ empsalary, func.avg(empsalary.c.salary). over(partition_by=empsalary.c.depname). label('avg') ]) print s SQL: :: SELECT empsalary.depname, empsalary.empno, empsalary.salary, avg(empsalary.salary) OVER (PARTITION BY empsalary.depname) AS avg FROM empsalary `sqlalchemy.sql.expression.over `_ :ticket:`1844` execution_options() on Connection accepts "isolation_level" argument -------------------------------------------------------------------- This sets the transaction isolation level for a single ``Connection``, until that ``Connection`` is closed and its underlying DBAPI resource returned to the connection pool, upon which the isolation level is reset back to the default. The default isolation level is set using the ``isolation_level`` argument to ``create_engine()``. Transaction isolation support is currently only supported by the Postgresql and SQLite backends. `execution_options() `_ :ticket:`2001` ``TypeDecorator`` works with integer primary key columns -------------------------------------------------------- A ``TypeDecorator`` which extends the behavior of ``Integer`` can be used with a primary key column. The "autoincrement" feature of ``Column`` will now recognize that the underlying database column is still an integer so that lastrowid mechanisms continue to function. The ``TypeDecorator`` itself will have its result value processor applied to newly generated primary keys, including those received by the DBAPI ``cursor.lastrowid`` accessor. :ticket:`2005` :ticket:`2006` ``TypeDecorator`` is present in the "sqlalchemy" import space ------------------------------------------------------------- There is no longer a need to import this from ``sqlalchemy.types``; it's now mirrored in ``sqlalchemy``. New Dialects ------------ Dialects have been added: * a MySQLdb driver for the Drizzle database: `Drizzle `_ * support for the pymysql DBAPI: `pymysql Notes `_ * psycopg2 now works with Python 3 Behavioral Changes (Backwards Compatible) ========================================= C Extensions Build by Default ----------------------------- This is as of 0.7b4.
The C extensions will build if CPython 2.x is detected. If the build fails, such as on a Windows install, that condition is caught and the non-C install proceeds. The C extensions won't build if Python 3 or PyPy is used. Query.count() simplified, should work virtually always ------------------------------------------------------ The very old guesswork which occurred within ``Query.count()`` has been modernized to use ``.from_self()``. That is, ``query.count()`` is now equivalent to: :: query.from_self(func.count(literal_column('1'))).scalar() Previously, internal logic attempted to rewrite the columns clause of the query itself, and upon detection of a "subquery" condition, such as a column-based query that might have aggregates in it, or a query with DISTINCT, would go through a convoluted process of rewriting the columns clause. This logic failed in complex conditions, particularly those involving joined table inheritance, and had long been made obsolete by the more comprehensive ``.from_self()`` call. The SQL emitted by ``query.count()`` is now always of the form: :: SELECT count(1) AS count_1 FROM ( SELECT user.id AS user_id, user.name AS user_name FROM user ) AS anon_1 that is, the original query is preserved entirely inside of a subquery, with no more guessing as to how count should be applied. :ticket:`2093` To emit a non-subquery form of count() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ MySQL users have already reported that the MyISAM engine, not surprisingly, falls over completely with this simple change. Note that for a simple ``count()`` that optimizes for DBs that can't handle simple subqueries, ``func.count()`` should be used: :: from sqlalchemy import func session.query(func.count(MyClass.id)).scalar() or for ``count(*)``: :: from sqlalchemy import func, literal_column session.query(func.count(literal_column('*'))).select_from(MyClass).scalar() LIMIT/OFFSET clauses now use bind parameters -------------------------------------------- The LIMIT and OFFSET clauses, or their backend equivalents (i.e. TOP, ROW NUMBER OVER, etc.), use bind parameters for the actual values, for all backends which support it (most except for Sybase). This allows better query optimizer performance, as the textual strings for multiple statements with differing LIMIT/OFFSET are now identical. :ticket:`805` Logging enhancements -------------------- Vinay Sajip has provided a patch to our logging system such that the "hex string" embedded in logging statements for engines and pools is no longer needed to allow the ``echo`` flag to work correctly. A new system that uses filtered logging objects allows us to maintain our current behavior of ``echo`` being local to individual engines without the need for additional identifying strings local to those engines. :ticket:`1926` Simplified polymorphic_on assignment ------------------------------------ The population of the ``polymorphic_on`` column-mapped attribute, when used in an inheritance scenario, now occurs when the object is constructed, i.e. its ``__init__`` method is called, using the init event. The attribute then behaves the same as any other column-mapped attribute. Previously, special logic would fire off during flush to populate this column, which prevented any user code from modifying its behavior. The new approach improves upon this in three ways: 1. the polymorphic identity is now present on the object as soon as it's constructed; 2. the polymorphic identity can be changed by user code without any difference in behavior from any other column-mapped attribute; 3.
the internals of the mapper during flush are simplified and no longer need to make special checks for this column. :ticket:`1895` contains_eager() chains across multiple paths (i.e. "all()") ------------------------------------------------------------ The ``contains_eager()`` modifier will now chain itself for a longer path without the need to emit individual ``contains_eager()`` calls. Instead of: :: session.query(A).options(contains_eager(A.b), contains_eager(A.b, B.c)) you can say: :: session.query(A).options(contains_eager(A.b, B.c)) :ticket:`2032` Flushing of orphans that have no parent is allowed -------------------------------------------------- We've had a long-standing behavior that checks for a so-called "orphan" during flush, that is, an object which is associated with a ``relationship()`` that specifies "delete-orphan" cascade, has been newly added to the session for an INSERT, and no parent relationship has been established. This check was added years ago to accommodate some test cases which tested the orphan behavior for consistency. In modern SQLA, this check is no longer needed on the Python side. The equivalent behavior of the "orphan check" is accomplished by making the foreign key reference to the object's parent row NOT NULL, where the database does its job of establishing data consistency in the same way SQLA allows most other operations to do. If the object's parent foreign key is nullable, then the row can be inserted. The "orphan" behavior runs when the object was persisted with a particular parent, and is then disassociated from that parent, leading to a DELETE statement emitted for it. :ticket:`1912` Warnings generated when collection members, scalar referents not part of the flush ---------------------------------------------------------------------------------- Warnings are now emitted when related objects referenced via a loaded ``relationship()`` on a parent object marked as "dirty" are not present in the current ``Session``. The ``save-update`` cascade takes effect when objects are added to the ``Session``, or when objects are first associated with a parent, so that an object and everything related to it are usually all present in the same ``Session``. However, if ``save-update`` cascade is disabled for a particular ``relationship()``, then this behavior does not occur, and the flush process does not try to correct for it, instead staying consistent with the configured cascade behavior. Previously, when such objects were detected during the flush, they were silently skipped. The new behavior is that a warning is emitted, for the purposes of alerting to a situation that more often than not is the source of unexpected behavior. :ticket:`1973` Setup no longer installs a Nose plugin -------------------------------------- Since we moved to nose we've used a plugin that installs via setuptools, so that the ``nosetests`` script would automatically run SQLA's plugin code, necessary for our tests to have a full environment. In the middle of 0.6, we realized that the import pattern here meant that Nose's "coverage" plugin would break, since "coverage" requires that it be started before any modules to be covered are imported; so in the middle of 0.6 we made the situation worse by adding a separate ``sqlalchemy-nose`` package to the build to overcome this.
In 0.7 we've done away with trying to get ``nosetests`` to work automatically, since the SQLAlchemy module would produce a large number of nose configuration options for all usages of ``nosetests``, not just the SQLAlchemy unit tests themselves, and the additional ``sqlalchemy-nose`` install was an even worse idea, producing an extra package in Python environments. The ``sqla_nose.py`` script in 0.7 is now the only way to run the tests with nose. :ticket:`1949` Non-``Table``-derived constructs can be mapped ---------------------------------------------- A construct that isn't against any ``Table`` at all, like a function, can be mapped. :: from sqlalchemy import select, func from sqlalchemy.orm import mapper class Subset(object): pass selectable = select(["x", "y", "z"]).select_from(func.some_db_function()).alias() mapper(Subset, selectable, primary_key=[selectable.c.x]) :ticket:`1876` aliased() accepts ``FromClause`` elements ----------------------------------------- This is a convenience helper such that in the case a plain ``FromClause``, such as a ``select``, ``Table`` or ``join`` is passed to the ``orm.aliased()`` construct, it passes through to the ``.alias()`` method of that from construct rather than constructing an ORM level ``AliasedClass``. :ticket:`2018` Session.connection(), Session.execute() accept 'bind' ----------------------------------------------------- This is to allow execute/connection operations to participate in the open transaction of an engine explicitly. It also allows custom subclasses of ``Session`` that implement their own ``get_bind()`` method and arguments to use those custom arguments with both the ``execute()`` and ``connection()`` methods equally. `Session.connection `_ `Session.execute `_ :ticket:`1996` Standalone bind parameters in columns clause auto-labeled. ---------------------------------------------------------- Bind parameters present in the "columns clause" of a select are now auto-labeled like other "anonymous" clauses, which among other things allows their "type" to be meaningful when the row is fetched, as in result row processors. SQLite - relative file paths are normalized through os.path.abspath() --------------------------------------------------------------------- This so that a script that changes the current directory will continue to target the same location as subsequent SQLite connections are established. :ticket:`2036` MS-SQL - ``String``/``Unicode``/``VARCHAR``/``NVARCHAR``/``VARBINARY`` emit "max" for no length ----------------------------------------------------------------------------------------------- On the MS-SQL backend, the String/Unicode types, and their counterparts VARCHAR/ NVARCHAR, as well as VARBINARY (:ticket:`1833`) emit "max" as the length when no length is specified. This makes it more compatible with Postgresql's VARCHAR type which is similarly unbounded when no length specified. SQL Server defaults the length on these types to '1' when no length is specified. Behavioral Changes (Backwards Incompatible) =========================================== Note again, aside from the default mutability change, most of these changes are \*extremely minor* and will not affect most users. ``PickleType`` and ARRAY mutability turned off by default --------------------------------------------------------- This change refers to the default behavior of the ORM when mapping columns that have either the ``PickleType`` or ``postgresql.ARRAY`` datatypes. The ``mutable`` flag is now set to ``False`` by default. 
If an existing application uses these types and depends upon detection of in-place mutations, the type object must be constructed with ``mutable=True`` to restore the 0.6 behavior: :: Table('mytable', metadata, # .... Column('pickled_data', PickleType(mutable=True)) ) The ``mutable=True`` flag is being phased out, in favor of the new `Mutation Tracking `_ extension. This extension provides a mechanism by which user-defined datatypes can provide change events back to the owning parent or parents. The previous approach of using ``mutable=True`` does not provide for change events - instead, the ORM must scan through all mutable values present in a session and compare them against their original value for changes every time ``flush()`` is called, which is a very time-consuming event. This is a holdover from the very early days of SQLAlchemy when ``flush()`` was not automatic and the history tracking system was not nearly as sophisticated as it is now. Existing applications which use ``PickleType``, ``postgresql.ARRAY`` or other ``MutableType`` subclasses, and require in-place mutation detection, should migrate to the new mutation tracking system, as ``mutable=True`` is likely to be deprecated in the future. :ticket:`1980` Mutability detection of ``composite()`` requires the Mutation Tracking Extension -------------------------------------------------------------------------------- So-called "composite" mapped attributes, those configured using the technique described at `Composite Column Types `_, have been re-implemented such that the ORM internals are no longer aware of them (leading to shorter and more efficient codepaths in critical sections). While composite types are generally intended to be treated as immutable value objects, this was never enforced. For applications that use composites with mutability, the `Mutation Tracking `_ extension offers a base class which establishes a mechanism for user-defined composite types to send change event messages back to the owning parent or parents of each object. Applications which use composite types and rely upon in-place mutation detection of these objects should either migrate to the "mutation tracking" extension, or change the usage of the composite types such that in-place changes are no longer needed (i.e., treat them as immutable value objects). SQLite - the SQLite dialect now uses ``NullPool`` for file-based databases -------------------------------------------------------------------------- This change is **99.999% backwards compatible**, unless you are using temporary tables across connection pool connections. A file-based SQLite connection is blazingly fast, and using ``NullPool`` means that each call to ``Engine.connect`` creates a new pysqlite connection. Previously, the ``SingletonThreadPool`` was used, which meant that all connections to a certain engine in a thread would be the same connection. The new approach is intended to be more intuitive, particularly when multiple connections are used. ``SingletonThreadPool`` is still the default pool when a ``:memory:`` database is used. Note that this change **breaks temporary tables used across Session commits**, due to the way SQLite handles temp tables. See the note at http://www.sqlalchemy.org/docs/dialects/sqlite.html#using-temporary-tables-with-sqlite if temporary tables beyond the scope of one pool connection are desired.
:ticket:`1921` ``Session.merge()`` checks version ids for versioned mappers ------------------------------------------------------------ Session.merge() will check the version id of the incoming state against that of the database, assuming the mapping uses version ids and incoming state has a version_id assigned, and raise StaleDataError if they don't match. This is the correct behavior, in that if incoming state contains a stale version id, it should be assumed the state is stale. If merging data into a versioned state, the version id attribute can be left undefined, and no version check will take place. This check was confirmed by examining what Hibernate does - both the ``merge()`` and the versioning features were originally adapted from Hibernate. :ticket:`2027` Tuple label names in Query Improved ----------------------------------- This improvement is potentially slightly backwards incompatible for an application that relied upon the old behavior. Given two mapped classes ``Foo`` and ``Bar`` each with a column ``spam``: :: qa = session.query(Foo.spam) qb = session.query(Bar.spam) qu = qa.union(qb) The name given to the single column yielded by ``qu`` will be ``spam``. Previously it would be something like ``foo_spam`` due to the way the ``union`` would combine things, which is inconsistent with the name ``spam`` in the case of a non-unioned query. :ticket:`1942` Mapped column attributes reference the most specific column first ----------------------------------------------------------------- This is a change to the behavior involved when a mapped column attribute references multiple columns, specifically when dealing with an attribute on a joined-table subclass that has the same name as that of an attribute on the superclass. Using declarative, the scenario is this: :: class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) class Child(Parent): __tablename__ = 'child' id = Column(Integer, ForeignKey('parent.id'), primary_key=True) Above, the attribute ``Child.id`` refers to both the ``child.id`` column as well as ``parent.id`` - this due to the name of the attribute. If it were named differently on the class, such as ``Child.child_id``, it then maps distinctly to ``child.id``, with ``Child.id`` being the same attribute as ``Parent.id``. When the ``id`` attribute is made to reference both ``parent.id`` and ``child.id``, it stores them in an ordered list. An expression such as ``Child.id`` then refers to just *one* of those columns when rendered. Up until 0.6, this column would be ``parent.id``. In 0.7, it is the less surprising ``child.id``. The legacy of this behavior deals with behaviors and restrictions of the ORM that don't really apply anymore; all that was needed was to reverse the order. A primary advantage of this approach is that it's now easier to construct ``primaryjoin`` expressions that refer to the local column: :: class Child(Parent): __tablename__ = 'child' id = Column(Integer, ForeignKey('parent.id'), primary_key=True) some_related = relationship("SomeRelated", primaryjoin="Child.id==SomeRelated.child_id") class SomeRelated(Base): __tablename__ = 'some_related' id = Column(Integer, primary_key=True) child_id = Column(Integer, ForeignKey('child.id')) Prior to 0.7 the ``Child.id`` expression would reference ``Parent.id``, and it would be necessary to map ``child.id`` to a distinct attribute. 
It also means that a query like this one changes its behavior: :: session.query(Parent).filter(Child.id > 7) In 0.6, this would render: :: SELECT parent.id AS parent_id FROM parent WHERE parent.id > :id_1 in 0.7, you get: :: SELECT parent.id AS parent_id FROM parent, child WHERE child.id > :id_1 which you'll note is a cartesian product - this behavior is now equivalent to that of any other attribute that is local to ``Child``. The ``with_polymorphic()`` method, or a similar strategy of explicitly joining the underlying ``Table`` objects, is used to render a query against all ``Parent`` objects with criteria against ``Child``, in the same manner as that of 0.5 and 0.6: :: print s.query(Parent).with_polymorphic([Child]).filter(Child.id > 7) Which on both 0.6 and 0.7 renders: :: SELECT parent.id AS parent_id, child.id AS child_id FROM parent LEFT OUTER JOIN child ON parent.id = child.id WHERE child.id > :id_1 Another effect of this change is that a joined-inheritance load across two tables will populate from the child table's value, not that of the parent table. An unusual case is that a query against "Parent" using ``with_polymorphic="*"`` issues a query against "parent", with a LEFT OUTER JOIN to "child". The row is located in "Parent", sees the polymorphic identity corresponds to "Child", but suppose the actual row in "child" has been *deleted*. Due to this corruption, the row comes in with all the columns corresponding to "child" set to NULL - this is now the value that gets populated, not the one in the parent table. :ticket:`1892` Mapping to joins with two or more same-named columns requires explicit declaration ---------------------------------------------------------------------------------- This is somewhat related to the previous change in :ticket:`1892`. When mapping to a join, same-named columns must be explicitly linked to mapped attributes, i.e. as described in `Mapping a Class Against Multiple Tables `_. Given two tables ``foo`` and ``bar``, each with a primary key column ``id``, the following now produces an error: :: foobar = foo.join(bar, foo.c.id==bar.c.foo_id) mapper(FooBar, foobar) This because the ``mapper()`` refuses to guess what column is the primary representation of ``FooBar.id`` - is it ``foo.c.id`` or is it ``bar.c.id`` ? The attribute must be explicit: :: foobar = foo.join(bar, foo.c.id==bar.c.foo_id) mapper(FooBar, foobar, properties={ 'id':[foo.c.id, bar.c.id] }) :ticket:`1896` Mapper requires that polymorphic_on column be present in the mapped selectable ------------------------------------------------------------------------------ This is a warning in 0.6, now an error in 0.7. The column given for ``polymorphic_on`` must be in the mapped selectable. This to prevent some occasional user errors such as: :: mapper(SomeClass, sometable, polymorphic_on=some_lookup_table.c.id) where above the polymorphic_on needs to be on a ``sometable`` column, in this case perhaps ``sometable.c.some_lookup_id``. There are also some "polymorphic union" scenarios where similar mistakes sometimes occur. Such a configuration error has always been "wrong", and the above mapping doesn't work as specified - the column would be ignored. It is however potentially backwards incompatible in the rare case that an application has been unknowingly relying upon this behavior. :ticket:`1875` ``DDL()`` constructs now escape percent signs --------------------------------------------- Previously, percent signs in ``DDL()`` strings would have to be escaped, i.e. 
``%%`` depending on DBAPI, for those DBAPIs that accept ``pyformat`` or ``format`` binds (i.e. psycopg2, mysql-python), which was inconsistent versus ``text()`` constructs which did this automatically. The same escaping now occurs for ``DDL()`` as for ``text()``. :ticket:`1897` ``Table.c`` / ``MetaData.tables`` refined a bit, don't allow direct mutation ---------------------------------------------------------------------------- Another area where some users were tinkering around in such a way that doesn't actually work as expected, but still left an exceedingly small chance that some application was relying upon this behavior, the construct returned by the ``.c`` attribute on ``Table`` and the ``.tables`` attribute on ``MetaData`` is explicitly non-mutable. The "mutable" version of the construct is now private. Adding columns to ``.c`` involves using the ``append_column()`` method of ``Table``, which ensures things are associated with the parent ``Table`` in the appropriate way; similarly, ``MetaData.tables`` has a contract with the ``Table`` objects stored in this dictionary, as well as a little bit of new bookkeeping in that a ``set()`` of all schema names is tracked, which is satisfied only by using the public ``Table`` constructor as well as ``Table.tometadata()``. It is of course possible that the ``ColumnCollection`` and ``dict`` collections consulted by these attributes could someday implement events on all of their mutational methods such that the appropriate bookkeeping occurred upon direct mutation of the collections, but until someone has the motivation to implement all that along with dozens of new unit tests, narrowing the paths to mutation of these collections will ensure no application is attempting to rely upon usages that are currently not supported. :ticket:`1893` :ticket:`1917` server_default consistently returns None for all inserted_primary_key values ---------------------------------------------------------------------------- Established consistency when server_default is present on an Integer PK column. SQLA doesn't pre-fetch these, nor do they come back in cursor.lastrowid (DBAPI). Ensured all backends consistently return None in result.inserted_primary_key for these - some backends may have returned a value previously. Using a server_default on a primary key column is extremely unusual. If a special function or SQL expression is used to generate primary key defaults, this should be established as a Python-side "default" instead of server_default. Regarding reflection for this case, reflection of an int PK col with a server_default sets the "autoincrement" flag to False, except in the case of a PG SERIAL col where we detected a sequence default. :ticket:`2020` :ticket:`2021` The ``sqlalchemy.exceptions`` alias in sys.modules is removed ------------------------------------------------------------- For a few years we've added the string ``sqlalchemy.exceptions`` to ``sys.modules``, so that a statement like "``import sqlalchemy.exceptions``" would work. The name of the core exceptions module has been ``exc`` for a long time now, so the recommended import for this module is: :: from sqlalchemy import exc The ``exceptions`` name is still present in "``sqlalchemy``" for applications which might have said ``from sqlalchemy import exceptions``, but they should also start using the ``exc`` name. 
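For instance, error handling written against the old alias would now be expressed in terms of ``exc``. Below is a minimal sketch of that usage; ``mytable`` and ``conn`` stand in for an application's own table and connection, and the duplicate insert is contrived just to force the error: :: from sqlalchemy import exc try: conn.execute(mytable.insert(), id=1) conn.execute(mytable.insert(), id=1) # duplicate primary key except exc.IntegrityError: print "caught constraint violation"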
Query Timing Recipe Changes --------------------------- While not part of SQLAlchemy itself, it's worth mentioning that the rework of the ``ConnectionProxy`` into the new event system means it is no longer appropriate for the "Timing all Queries" recipe. Please adjust query-timers to use the ``before_cursor_execute()`` and ``after_cursor_execute()`` events, demonstrated in the updated recipe UsageRecipes/Profiling. Deprecated API ============== Default constructor on types will not accept arguments ------------------------------------------------------ Simple types like ``Integer``, ``Date`` etc. in the core types module don't accept arguments. The default constructor that accepts/ignores a catchall ``\*args, \**kwargs`` is restored as of 0.7b4/0.7.0, but emits a deprecation warning. If arguments are being used with a core type like ``Integer``, it may be that you intended to use a dialect specific type, such as ``sqlalchemy.dialects.mysql.INTEGER`` which does accept a "display_width" argument for example. compile_mappers() renamed configure_mappers(), simplified configuration internals --------------------------------------------------------------------------------- This system slowly morphed from something small, implemented local to an individual mapper, and poorly named into something that's more of a global "registry-" level function and poorly named, so we've fixed both by moving the implementation out of ``Mapper`` altogether and renaming it to ``configure_mappers()``. It is of course normally not needed for an application to call ``configure_mappers()`` as this process occurs on an as-needed basis, as soon as the mappings are needed via attribute or query access. :ticket:`1966` Core listener/proxy superseded by event listeners ------------------------------------------------- ``PoolListener``, ``ConnectionProxy``, ``DDLElement.execute_at`` are superseded by ``event.listen()``, using the ``PoolEvents``, ``EngineEvents``, ``DDLEvents`` dispatch targets, respectively. ORM extensions superseded by event listeners -------------------------------------------- ``MapperExtension``, ``AttributeExtension``, ``SessionExtension`` are superseded by ``event.listen()``, using the ``MapperEvents``/``InstanceEvents``, ``AttributeEvents``, ``SessionEvents``, dispatch targets, respectively. Sending a string to 'distinct' in select() for MySQL should be done via prefixes -------------------------------------------------------------------------------- This obscure feature allows this pattern with the MySQL backend: :: select([mytable], distinct='ALL', prefixes=['HIGH_PRIORITY']) The ``prefixes`` keyword or ``prefix_with()`` method should be used for non-standard or unusual prefixes: :: select([mytable]).prefix_with('HIGH_PRIORITY', 'ALL') ``useexisting`` superseded by ``extend_existing`` and ``keep_existing`` ----------------------------------------------------------------------- The ``useexisting`` flag on Table has been superseded by a new pair of flags ``keep_existing`` and ``extend_existing``. ``extend_existing`` is equivalent to ``useexisting`` - the existing Table is returned, and additional constructor elements are added. With ``keep_existing``, the existing Table is returned, but additional constructor elements are not added - these elements are only applied when the Table is newly created. 
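As a brief illustration of the two flags (a hedged sketch; the table and column names are hypothetical, and ``metadata`` is assumed to already contain a ``Table`` named ``'mytable'``): :: from sqlalchemy import Table, Column, DateTime # extend_existing: returns the Table already present in 'metadata', # with the new column appended to its definition mytable = Table('mytable', metadata, Column('timestamp', DateTime), extend_existing=True) # keep_existing: returns the Table already present in 'metadata'; # the extra column is only applied if the Table is newly created mytable = Table('mytable', metadata, Column('timestamp', DateTime), keep_existing=True)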
Backwards Incompatible API Changes ================================== Callables passed to ``bindparam()`` don't get evaluated - affects the Beaker example ------------------------------------------------------------------------------------ :ticket:`1950` Note this affects the Beaker caching example, where the workings of the ``_params_from_query()`` function needed a slight adjustment. If you're using code from the Beaker example, this change should be applied. types.type_map is now private, types._type_map ---------------------------------------------- We noticed some users tapping into this dictionary inside of ``sqlalchemy.types`` as a shortcut to associating Python types with SQL types. We can't guarantee the contents or format of this dictionary, and additionally the business of associating Python types in a one-to-one fashion has some grey areas that are best decided by individual applications, so we've underscored this attribute. :ticket:`1870` Renamed the ``alias`` keyword arg of standalone ``alias()`` function to ``name`` -------------------------------------------------------------------------------- This is so that the keyword argument ``name`` matches that of the ``alias()`` methods on all ``FromClause`` objects as well as the ``name`` argument on ``Query.subquery()``. Only code that uses the standalone ``alias()`` function, and not the bound methods, and passes the alias name using the explicit keyword name ``alias``, and not positionally, would need modification here. Non-public ``Pool`` methods underscored --------------------------------------- All methods of ``Pool`` and subclasses which are not intended for public use have been renamed with underscores. That they were not named this way previously was a bug. Pooling methods now underscored or removed: * ``Pool.create_connection()`` -> ``Pool._create_connection()`` * ``Pool.do_get()`` -> ``Pool._do_get()`` * ``Pool.do_return_conn()`` -> ``Pool._do_return_conn()`` * ``Pool.do_return_invalid()`` -> removed, was not used * ``Pool.return_conn()`` -> ``Pool._return_conn()`` * ``Pool.get()`` -> ``Pool._get()``; the public API is ``Pool.connect()`` * ``SingletonThreadPool.cleanup()`` -> ``_cleanup()`` * ``SingletonThreadPool.dispose_local()`` -> removed, use ``conn.invalidate()`` :ticket:`1982` Previously Deprecated, Now Removed ================================== Query.join(), Query.outerjoin(), eagerload(), eagerload_all(), others no longer allow lists of attributes as arguments ---------------------------------------------------------------------------------------------------------------------- Passing a list of attributes or attribute names to ``Query.join``, ``eagerload()``, and similar has been deprecated since 0.5: :: # old way, deprecated since 0.5 session.query(Houses).join([Houses.rooms, Room.closets]) session.query(Houses).options(eagerload_all([Houses.rooms, Room.closets])) These methods all accept \*args as of the 0.5 series: :: # current way, in place since 0.5 session.query(Houses).join(Houses.rooms, Room.closets) session.query(Houses).options(eagerload_all(Houses.rooms, Room.closets)) ``ScopedSession.mapper`` is removed ----------------------------------- This feature provided a mapper extension which linked class-based functionality with a particular ``ScopedSession``, in particular providing the behavior such that new object instances would be automatically associated with that session.
The feature was overused by tutorials and frameworks which led to great user confusion due to its implicit behavior, and was deprecated in 0.5.5. Techniques for replicating its functionality are at [wiki:UsageRecipes/SessionAwareMapper] SQLAlchemy-1.0.11/doc/build/changelog/migration_04.rst0000664000175000017500000006237712636375552023500 0ustar classicclassic00000000000000============================= What's new in SQLAlchemy 0.4? ============================= .. admonition:: About this Document This document describes changes between SQLAlchemy version 0.3, last released October 14, 2007, and SQLAlchemy version 0.4, last released October 12, 2008. Document date: March 21, 2008 First Things First ================== If you're using any ORM features, make sure you import from ``sqlalchemy.orm``: :: from sqlalchemy import * from sqlalchemy.orm import * Secondly, anywhere you used to say ``engine=``, ``connectable=``, ``bind_to=``, ``something.engine``, ``metadata.connect()``, use ``bind``: :: myengine = create_engine('sqlite://') meta = MetaData(myengine) meta2 = MetaData() meta2.bind = myengine session = create_session(bind=myengine) statement = select([table], bind=myengine) Got those ? Good! You're now (95%) 0.4 compatible. If you're using 0.3.10, you can make these changes immediately; they'll work there too. Module Imports ============== In 0.3, "``from sqlachemy import *``" would import all of sqlachemy's sub-modules into your namespace. Version 0.4 no longer imports sub-modules into the namespace. This may mean you need to add extra imports into your code. In 0.3, this code worked: :: from sqlalchemy import * class UTCDateTime(types.TypeDecorator): pass In 0.4, one must do: :: from sqlalchemy import * from sqlalchemy import types class UTCDateTime(types.TypeDecorator): pass Object Relational Mapping ========================= Querying -------- New Query API ^^^^^^^^^^^^^ Query is standardized on the generative interface (old interface is still there, just deprecated). While most of the generative interface is available in 0.3, the 0.4 Query has the inner guts to match the generative outside, and has a lot more tricks. All result narrowing is via ``filter()`` and ``filter_by()``, limiting/offset is either through array slices or ``limit()``/``offset()``, joining is via ``join()`` and ``outerjoin()`` (or more manually, through ``select_from()`` as well as manually-formed criteria). To avoid deprecation warnings, you must make some changes to your 03 code User.query.get_by( \**kwargs ) :: User.query.filter_by(**kwargs).first() User.query.select_by( \**kwargs ) :: User.query.filter_by(**kwargs).all() User.query.select() :: User.query.filter(xxx).all() New Property-Based Expression Constructs ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ By far the most palpable difference within the ORM is that you can now construct your query criterion using class-based attributes directly. The ".c." 
prefix is no longer needed when working with mapped classes: :: session.query(User).filter(and_(User.name == 'fred', User.id > 17)) While simple column-based comparisons are no big deal, the class attributes have some new "higher level" constructs available, including what was previously only available in ``filter_by()``: :: # comparison of scalar relations to an instance filter(Address.user == user) # return all users who contain a particular address filter(User.addresses.contains(address)) # return all users who *don't* contain the address filter(~User.addresses.contains(address)) # return all users who contain a particular address with # the email_address like '%foo%' filter(User.addresses.any(Address.email_address.like('%foo%'))) # same, email address equals 'foo@bar.com'. can fall back to keyword # args for simple comparisons filter(User.addresses.any(email_address='foo@bar.com')) # return all Addresses whose user attribute has the username 'ed' filter(Address.user.has(name='ed')) # return all Addresses whose user attribute has the username 'ed' # and an id > 5 (mixing clauses with kwargs) filter(Address.user.has(User.id > 5, name='ed')) The ``Column`` collection remains available on mapped classes in the ``.c`` attribute. Note that property-based expressions are only available with mapped properties of mapped classes. ``.c`` is still used to access columns in regular tables and selectable objects produced from SQL Expressions. Automatic Join Aliasing ^^^^^^^^^^^^^^^^^^^^^^^ We've had join() and outerjoin() for a while now: :: session.query(Order).join('items')... Now you can alias them: :: session.query(Order).join('items', aliased=True). filter(Item.name == 'item 1').join('items', aliased=True).filter(Item.name == 'item 3') The above will create two joins from orders->items using aliases. The ``filter()`` call subsequent to each will adjust its table criterion to that of the alias. To get at the ``Item`` objects, use ``add_entity()`` and target each join with an ``id``: :: session.query(Order).join('items', id='j1', aliased=True). filter(Item.name == 'item 1').join('items', aliased=True, id='j2'). filter(Item.name == 'item 3').add_entity(Item, id='j1').add_entity(Item, id='j2') Returns tuples in the form: ``(Order, Item, Item)``. Self-referential Queries ^^^^^^^^^^^^^^^^^^^^^^^^ So query.join() can make aliases now. What does that give us? Self-referential queries! Joins can be done without any ``Alias`` objects: :: # standard self-referential TreeNode mapper with backref mapper(TreeNode, tree_nodes, properties={ 'children':relation(TreeNode, backref=backref('parent', remote_side=tree_nodes.c.id)) }) # query for node with child containing "bar" two levels deep session.query(TreeNode).join(["children", "children"], aliased=True).filter_by(name='bar') To add criterion for each table along the way in an aliased join, you can use ``from_joinpoint`` to keep joining against the same line of aliases: :: # search for the treenode along the path "n1/n12/n122" # first find a Node with name="n122" q = sess.query(Node).filter_by(name='n122') # then join to parent with "n12" q = q.join('parent', aliased=True).filter_by(name='n12') # join again to the next parent with 'n1'. use 'from_joinpoint' # so we join from the previous point, instead of joining off the # root table q = q.join('parent', aliased=True, from_joinpoint=True).filter_by(name='n1') node = q.first() ``query.populate_existing()`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The eager version of ``query.load()`` (or ``session.refresh()``).
Every instance loaded from the query, including all eagerly loaded items, get refreshed immediately if already present in the session: :: session.query(Blah).populate_existing().all() Relations --------- SQL Clauses Embedded in Updates/Inserts ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ For inline execution of SQL clauses, embedded right in the UPDATE or INSERT, during a ``flush()``: :: myobject.foo = mytable.c.value + 1 user.pwhash = func.md5(password) order.hash = text("select hash from hashing_table") The column-attribute is set up with a deferred loader after the operation, so that it issues the SQL to load the new value when you next access. Self-referential and Cyclical Eager Loading ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Since our alias-fu has improved, ``relation()`` can join along the same table \*any number of times*; you tell it how deep you want to go. Lets show the self-referential ``TreeNode`` more clearly: :: nodes = Table('nodes', metadata, Column('id', Integer, primary_key=True), Column('parent_id', Integer, ForeignKey('nodes.id')), Column('name', String(30))) class TreeNode(object): pass mapper(TreeNode, nodes, properties={ 'children':relation(TreeNode, lazy=False, join_depth=3) }) So what happens when we say: :: create_session().query(TreeNode).all() ? A join along aliases, three levels deep off the parent: :: SELECT nodes_3.id AS nodes_3_id, nodes_3.parent_id AS nodes_3_parent_id, nodes_3.name AS nodes_3_name, nodes_2.id AS nodes_2_id, nodes_2.parent_id AS nodes_2_parent_id, nodes_2.name AS nodes_2_name, nodes_1.id AS nodes_1_id, nodes_1.parent_id AS nodes_1_parent_id, nodes_1.name AS nodes_1_name, nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, nodes.name AS nodes_name FROM nodes LEFT OUTER JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id LEFT OUTER JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id LEFT OUTER JOIN nodes AS nodes_3 ON nodes_2.id = nodes_3.parent_id ORDER BY nodes.oid, nodes_1.oid, nodes_2.oid, nodes_3.oid Notice the nice clean alias names too. The joining doesn't care if it's against the same immediate table or some other object which then cycles back to the beginning. Any kind of chain of eager loads can cycle back onto itself when ``join_depth`` is specified. When not present, eager loading automatically stops when it hits a cycle. Composite Types ^^^^^^^^^^^^^^^ This is one from the Hibernate camp. Composite Types let you define a custom datatype that is composed of more than one column (or one column, if you wanted). Lets define a new type, ``Point``. Stores an x/y coordinate: :: class Point(object): def __init__(self, x, y): self.x = x self.y = y def __composite_values__(self): return self.x, self.y def __eq__(self, other): return other.x == self.x and other.y == self.y def __ne__(self, other): return not self.__eq__(other) The way the ``Point`` object is defined is specific to a custom type; constructor takes a list of arguments, and the ``__composite_values__()`` method produces a sequence of those arguments. The order will match up to our mapper, as we'll see in a moment. Let's create a table of vertices storing two points per row: :: vertices = Table('vertices', metadata, Column('id', Integer, primary_key=True), Column('x1', Integer), Column('y1', Integer), Column('x2', Integer), Column('y2', Integer), ) Then, map it ! 
We'll create a ``Vertex`` object which stores two ``Point`` objects: :: class Vertex(object): def __init__(self, start, end): self.start = start self.end = end mapper(Vertex, vertices, properties={ 'start':composite(Point, vertices.c.x1, vertices.c.y1), 'end':composite(Point, vertices.c.x2, vertices.c.y2) }) Once you've set up your composite type, it's usable just like any other type: :: v = Vertex(Point(3, 4), Point(26, 15)) session.save(v) session.flush() # works in queries too q = session.query(Vertex).filter(Vertex.start == Point(3, 4)) If you'd like to define the way the mapped attributes generate SQL clauses when used in expressions, create your own ``sqlalchemy.orm.PropComparator`` subclass, defining any of the common operators (like ``__eq__()``, ``__le__()``, etc.), and send it in to ``composite()``. Composite types work as primary keys too, and are usable in ``query.get()``: :: # a Document class which uses a composite Version # object as primary key document = query.get(Version(1, 'a')) ``dynamic_loader()`` relations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A ``relation()`` that returns a live ``Query`` object for all read operations. Write operations are limited to just ``append()`` and ``remove()``; changes to the collection are not visible until the session is flushed. This feature is particularly handy with an "autoflushing" session which will flush before each query. :: mapper(Foo, foo_table, properties={ 'bars':dynamic_loader(Bar, backref='foo') }) session = create_session(autoflush=True) foo = session.query(Foo).first() foo.bars.append(Bar(name='lala')) for bar in foo.bars.filter(Bar.name=='lala'): print bar session.commit() New Options: ``undefer_group()``, ``eagerload_all()`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A couple of query options which are handy. ``undefer_group()`` marks a whole group of "deferred" columns as undeferred: :: mapper(Class, table, properties={ 'foo' : deferred(table.c.foo, group='group1'), 'bar' : deferred(table.c.bar, group='group1'), 'bat' : deferred(table.c.bat, group='group1'), }) session.query(Class).options(undefer_group('group1')).filter(...).all() and ``eagerload_all()`` sets a chain of attributes to be eager in one pass: :: mapper(Foo, foo_table, properties={ 'bar':relation(Bar) }) mapper(Bar, bar_table, properties={ 'bat':relation(Bat) }) mapper(Bat, bat_table) # eager load bar and bat session.query(Foo).options(eagerload_all('bar.bat')).filter(...).all() New Collection API ^^^^^^^^^^^^^^^^^^ Collections are no longer proxied by an ``InstrumentedList`` proxy, and access to members, methods and attributes is direct. Decorators now intercept objects entering and leaving the collection, and it is now possible to easily write a custom collection class that manages its own membership. Flexible decorators also replace the named method interface of custom collections in 0.3, allowing any class to be easily adapted to use as a collection container. Dictionary-based collections are now much easier to use and fully ``dict``-like.
Changing ``__iter__`` is no longer needed for ``dict``s, and new built-in ``dict`` types cover many needs: :: # use a dictionary relation keyed by a column relation(Item, collection_class=column_mapped_collection(items.c.keyword)) # or named attribute relation(Item, collection_class=attribute_mapped_collection('keyword')) # or any function you like relation(Item, collection_class=mapped_collection(lambda entity: entity.a + entity.b)) Existing 0.3 ``dict``-like and freeform object derived collection classes will need to be updated for the new API. In most cases this is simply a matter of adding a couple decorators to the class definition. Mapped Relations from External Tables/Subqueries ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This feature quietly appeared in 0.3 but has been improved in 0.4 thanks to better ability to convert subqueries against a table into subqueries against an alias of that table; this is key for eager loading, aliased joins in queries, etc. It reduces the need to create mappers against select statements when you just need to add some extra columns or subqueries: :: mapper(User, users, properties={ 'fullname': column_property((users.c.firstname + users.c.lastname).label('fullname')), 'numposts': column_property( select([func.count(1)], users.c.id==posts.c.user_id).correlate(users).label('posts') ) }) a typical query looks like: :: SELECT (SELECT count(1) FROM posts WHERE users.id = posts.user_id) AS count, users.firstname || users.lastname AS fullname, users.id AS users_id, users.firstname AS users_firstname, users.lastname AS users_lastname FROM users ORDER BY users.oid Horizontal Scaling (Sharding) API --------------------------------- [browser:/sqlalchemy/trunk/examples/sharding/attribute_shard .py] Sessions -------- New Session Create Paradigm; SessionContext, assignmapper Deprecated ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ That's right, the whole shebang is being replaced with two configurational functions. Using both will produce the most 0.1-ish feel we've had since 0.1 (i.e., the least amount of typing). Configure your own ``Session`` class right where you define your ``engine`` (or anywhere): :: from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker engine = create_engine('myengine://') Session = sessionmaker(bind=engine, autoflush=True, transactional=True) # use the new Session() freely sess = Session() sess.save(someobject) sess.flush() If you need to post-configure your Session, say with an engine, add it later with ``configure()``: :: Session.configure(bind=create_engine(...)) All the behaviors of ``SessionContext`` and the ``query`` and ``__init__`` methods of ``assignmapper`` are moved into the new ``scoped_session()`` function, which is compatible with both ``sessionmaker`` as well as ``create_session()``: :: from sqlalchemy.orm import scoped_session, sessionmaker Session = scoped_session(sessionmaker(autoflush=True, transactional=True)) Session.configure(bind=engine) u = User(name='wendy') sess = Session() sess.save(u) sess.commit() # Session constructor is thread-locally scoped. Everyone gets the same # Session in the thread when scope="thread". sess2 = Session() assert sess is sess2 When using a thread-local ``Session``, the returned class has all of ``Session's`` interface implemented as classmethods, and "assignmapper"'s functionality is available using the ``mapper`` classmethod. Just like the old ``objectstore`` days.... 
:: # "assignmapper"-like functionality available via ScopedSession.mapper Session.mapper(User, users_table) u = User(name='wendy') Session.commit() Sessions are again Weak Referencing By Default ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The weak_identity_map flag is now set to ``True`` by default on Session. Instances which are externally dereferenced and fall out of scope are removed from the session automatically. However, items which have "dirty" changes present will remain strongly referenced until those changes are flushed, at which point the object reverts to being weakly referenced (this works for 'mutable' types, like picklable attributes, as well). Setting weak_identity_map to ``False`` restores the old strong-referencing behavior for those of you using the session like a cache. Auto-Transactional Sessions ^^^^^^^^^^^^^^^^^^^^^^^^^^^ As you might have noticed above, we are calling ``commit()`` on ``Session``. The flag ``transactional=True`` means the ``Session`` is always in a transaction; ``commit()`` persists changes permanently. Auto-Flushing Sessions ^^^^^^^^^^^^^^^^^^^^^^ Also, ``autoflush=True`` means the ``Session`` will ``flush()`` before each ``query`` as well as when you call ``flush()`` or ``commit()``. So now this will work: :: Session = sessionmaker(bind=engine, autoflush=True, transactional=True) u = User(name='wendy') sess = Session() sess.save(u) # wendy is flushed, comes right back from a query wendy = sess.query(User).filter_by(name='wendy').one() Transactional methods moved onto sessions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ``commit()`` and ``rollback()``, as well as ``begin()``, are now directly on ``Session``. No more need to use ``SessionTransaction`` for anything (it remains in the background). :: Session = sessionmaker(autoflush=True, transactional=False) sess = Session() sess.begin() # use the session sess.commit() # commit transaction Sharing a ``Session`` with an enclosing engine-level (i.e. non-ORM) transaction is easy: :: Session = sessionmaker(autoflush=True, transactional=False) conn = engine.connect() trans = conn.begin() sess = Session(bind=conn) # ... session is transactional # commit the outermost transaction trans.commit() Nested Session Transactions with SAVEPOINT ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Available at the Engine and ORM level. ORM docs so far: http://www.sqlalchemy.org/docs/04/session.html#unitofwork_managing Two-Phase Commit Sessions ^^^^^^^^^^^^^^^^^^^^^^^^^ Available at the Engine and ORM level. ORM docs so far: http://www.sqlalchemy.org/docs/04/session.html#unitofwork_managing Inheritance ----------- Polymorphic Inheritance with No Joins or Unions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ New docs for inheritance: http://www.sqlalchemy.org/docs/04/mappers.html#advdatamapping_mapper_inheritance_joined Better Polymorphic Behavior with ``get()`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ All classes within a joined-table inheritance hierarchy get an ``_instance_key`` using the base class, i.e. ``(BaseClass, (1, ), None)``. That way, when you call ``get()`` on a ``Query`` against the base class, it can locate subclass instances in the current identity map without querying the database. Types ----- Custom Subclasses of ``sqlalchemy.types.TypeDecorator`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ There is a `New API `_ for subclassing a TypeDecorator. Using the 0.3 API causes compilation errors in some cases.
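Under the new API, a subclass names an ``impl`` type and implements ``process_bind_param()`` / ``process_result_value()``. Below is a minimal sketch of that contract; the prefixing scheme itself is purely illustrative: :: from sqlalchemy import types class PrefixedString(types.TypeDecorator): # the underlying type this decorator rides on top of impl = types.String def process_bind_param(self, value, dialect): # applied to values on their way into the database if value is not None: value = "PREFIX:" + value return value def process_result_value(self, value, dialect): # applied to values on their way back out if value is not None: value = value[len("PREFIX:"):] return value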
SQL Expressions =============== All New, Deterministic Label/Alias Generation --------------------------------------------- All the "anonymous" labels and aliases now use a simple ``<name>_<position>`` format. SQL is much easier to read and is compatible with plan optimizer caches. Just check out some of the examples in the tutorials: http://www.sqlalchemy.org/docs/04/ormtutorial.html http://www.sqlalchemy.org/docs/04/sqlexpression.html Generative select() Constructs ------------------------------ This is definitely the way to go with ``select()``. See http://www.sqlalchemy.org/docs/04/sqlexpression.html#sql_transform . New Operator System ------------------- SQL operators, and more or less every SQL keyword there is, are now abstracted into the compiler layer. They now act intelligently and are type/backend aware, see: http://www.sqlalchemy.org/docs/04/sqlexpression.html#sql_operators All ``type`` Keyword Arguments Renamed to ``type_`` --------------------------------------------------- Just like it says: :: b = bindparam('foo', type_=String) in\_ Function Changed to Accept Sequence or Selectable ------------------------------------------------------ The in\_ function now takes a sequence of values or a selectable as its sole argument. The previous API of passing in values as positional arguments still works, but is now deprecated. This means that :: my_table.select(my_table.c.id.in_(1,2,3)) my_table.select(my_table.c.id.in_(*listOfIds)) should be changed to :: my_table.select(my_table.c.id.in_([1,2,3])) my_table.select(my_table.c.id.in_(listOfIds)) Schema and Reflection ===================== ``MetaData``, ``BoundMetaData``, ``DynamicMetaData``... ------------------------------------------------------- In the 0.3.x series, ``BoundMetaData`` and ``DynamicMetaData`` were deprecated in favor of ``MetaData`` and ``ThreadLocalMetaData``. The older names have been removed in 0.4. Updating is simple: ::
+-------------------------------------+-------------------------+
|If You Had                           | Now Use                 |
+=====================================+=========================+
| ``MetaData``                        | ``MetaData``            |
+-------------------------------------+-------------------------+
| ``BoundMetaData``                   | ``MetaData``            |
+-------------------------------------+-------------------------+
| ``DynamicMetaData`` (with one       | ``MetaData``            |
| engine or threadlocal=False)        |                         |
+-------------------------------------+-------------------------+
| ``DynamicMetaData``                 | ``ThreadLocalMetaData`` |
| (with different engines per thread) |                         |
+-------------------------------------+-------------------------+
The seldom-used ``name`` parameter to ``MetaData`` types has been removed. The ``ThreadLocalMetaData`` constructor now takes no arguments. Both types can now be bound to an ``Engine`` or a single ``Connection``. One Step Multi-Table Reflection ------------------------------- You can now load table definitions and automatically create ``Table`` objects from an entire database or schema in one pass: :: >>> metadata = MetaData(myengine, reflect=True) >>> metadata.tables.keys() ['table_a', 'table_b', 'table_c', '...'] ``MetaData`` also gains a ``.reflect()`` method enabling finer control over the loading process, including specification of a subset of available tables to load.
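The finer-grained form looks like the following sketch (``myengine`` and the table names are assumptions; the ``only`` parameter restricts which tables are reflected): :: metadata = MetaData(myengine) # reflect just the two named tables, not the whole schema metadata.reflect(only=['table_a', 'table_b']) table_a = metadata.tables['table_a']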
SQL Execution
=============

``engine``, ``connectable``, and ``bind_to`` are all now ``bind``
-----------------------------------------------------------------

``Transactions``, ``NestedTransactions`` and ``TwoPhaseTransactions``
---------------------------------------------------------------------

Connection Pool Events
----------------------

The connection pool now fires events when new DB-API
connections are created, checked out and checked back into
the pool.  You can use these to execute session-scoped SQL
setup statements on fresh connections, for example.

Oracle Engine Fixed
-------------------

In 0.3.11, there were bugs in the Oracle Engine regarding
how primary keys were handled.  These bugs could cause
programs that worked fine with other engines, such as
sqlite, to fail when using the Oracle Engine.  In 0.4, the
Oracle Engine has been reworked, fixing these primary key
problems.

Out Parameters for Oracle
-------------------------

::

    result = engine.execute(
        text("begin foo(:x, :y, :z); end;",
             bindparams=[bindparam('x', Numeric),
                         outparam('y', Numeric),
                         outparam('z', Numeric)]),
        x=5)
    assert result.out_parameters == {'y': 10, 'z': 75}

Connection-bound ``MetaData``, ``Sessions``
-------------------------------------------

``MetaData`` and ``Session`` can be explicitly bound to a
connection:

::

    conn = engine.connect()
    sess = create_session(bind=conn)

Faster, More Foolproof ``ResultProxy`` Objects
----------------------------------------------

SQLAlchemy-1.0.11/doc/build/copyright.rst0000664000175000017500000000240112636375552021243 0ustar classicclassic00000000000000:orphan:

====================
Appendix: Copyright
====================

This is the MIT license: ``_

Copyright (c) 2005-2015 Michael Bayer and contributors.
SQLAlchemy is a trademark of Michael Bayer.

Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

SQLAlchemy-1.0.11/doc/build/texinputs/0000775000175000017500000000000012636376632020547 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/doc/build/texinputs/sphinx.sty0000664000175000017500000003314712636375552022621 0ustar classicclassic00000000000000%
% sphinx.sty
%
% Adapted from the old python.sty, mostly written by Fred Drake,
% by Georg Brandl.
%

\NeedsTeXFormat{LaTeX2e}[1995/12/01]
\ProvidesPackage{sphinx}[2010/01/15 LaTeX package (Sphinx markup)]

\RequirePackage{textcomp}
\RequirePackage{fancyhdr}
\RequirePackage{fancybox}
\RequirePackage{titlesec}
\RequirePackage{tabulary}
\RequirePackage{amsmath} % for \text
\RequirePackage{makeidx}
\RequirePackage{framed}
\RequirePackage{color}
% For highlighted code.
\RequirePackage{fancyvrb} % For table captions. \RequirePackage{threeparttable} % Handle footnotes in tables. \RequirePackage{footnote} \makesavenoteenv{tabulary} % For floating figures in the text. \RequirePackage{wrapfig} % Separate paragraphs by space by default. \RequirePackage{parskip} % Redefine these colors to your liking in the preamble. \definecolor{TitleColor}{rgb}{0.126,0.263,0.361} \definecolor{InnerLinkColor}{rgb}{0.208,0.374,0.486} \definecolor{OuterLinkColor}{rgb}{0.216,0.439,0.388} % Redefine these colors to something not white if you want to have colored % background and border for code examples. \definecolor{VerbatimColor}{rgb}{1,1,1} \definecolor{VerbatimBorderColor}{rgb}{1,1,1} % Uncomment these two lines to ignore the paper size and make the page % size more like a typical published manual. %\renewcommand{\paperheight}{9in} %\renewcommand{\paperwidth}{8.5in} % typical squarish manual %\renewcommand{\paperwidth}{7in} % O'Reilly ``Programmming Python'' % For graphicx, check if we are compiling under latex or pdflatex. \ifx\pdftexversion\undefined \usepackage{graphicx} \else \usepackage[pdftex]{graphicx} \fi % for PDF output, use colors and maximal compression \newif\ifsphinxpdfoutput\sphinxpdfoutputfalse \ifx\pdfoutput\undefined\else\ifcase\pdfoutput \let\py@NormalColor\relax \let\py@TitleColor\relax \else \sphinxpdfoutputtrue \input{pdfcolor} \def\py@NormalColor{\color[rgb]{0.0,0.0,0.0}} \def\py@TitleColor{\color{TitleColor}} \pdfcompresslevel=9 \fi\fi % XeLaTeX can do colors, too \ifx\XeTeXrevision\undefined\else \def\py@NormalColor{\color[rgb]{0.0,0.0,0.0}} \def\py@TitleColor{\color{TitleColor}} \fi % Increase printable page size (copied from fullpage.sty) \topmargin 0pt \advance \topmargin by -\headheight \advance \topmargin by -\headsep % attempt to work a little better for A4 users \textheight \paperheight \advance\textheight by -2in \oddsidemargin 0pt \evensidemargin 0pt %\evensidemargin -.25in % for ``manual size'' documents \marginparwidth 0.5in \textwidth \paperwidth \advance\textwidth by -2in % Style parameters and macros used by most documents here \raggedbottom \sloppy \hbadness = 5000 % don't print trivial gripes \pagestyle{empty} % start this way; change for \pagenumbering{roman} % ToC & chapters % Use this to set the font family for headers and other decor: \newcommand{\py@HeaderFamily}{\sffamily\bfseries} % Redefine the 'normal' header/footer style when using "fancyhdr" package: \@ifundefined{fancyhf}{}{ % Use \pagestyle{normal} as the primary pagestyle for text. \fancypagestyle{normal}{ \fancyhf{} \fancyfoot[LE,RO]{{\py@HeaderFamily\thepage}} \fancyfoot[LO]{{\py@HeaderFamily\nouppercase{\rightmark}}} \fancyfoot[RE]{{\py@HeaderFamily\nouppercase{\leftmark}}} \fancyhead[LE,RO]{{\py@HeaderFamily \@title, \py@release}} \renewcommand{\headrulewidth}{0.4pt} \renewcommand{\footrulewidth}{0.4pt} } % Update the plain style so we get the page number & footer line, % but not a chapter or section title. This is to keep the first % page of a chapter and the blank page between chapters `clean.' \fancypagestyle{plain}{ \fancyhf{} \fancyfoot[LE,RO]{{\py@HeaderFamily\thepage}} \renewcommand{\headrulewidth}{0pt} \renewcommand{\footrulewidth}{0.4pt} } } % Some custom font markup commands. % \newcommand{\strong}[1]{{\bf #1}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\bfcode}[1]{\code{\bfseries#1}} \newcommand{\samp}[1]{`\code{#1}'} \newcommand{\email}[1]{\textsf{#1}} % Redefine the Verbatim environment to allow border and background colors. 
% The original environment is still used for verbatims within tables. \let\OriginalVerbatim=\Verbatim \let\endOriginalVerbatim=\endVerbatim % Play with vspace to be able to keep the indentation. \newlength\distancetoright \newlength\leftsidespace \def\mycolorbox#1{% \setlength\leftsidespace{\@totalleftmargin}% \setlength\distancetoright{\linewidth}% \advance\distancetoright -\@totalleftmargin % \noindent\hspace*{\@totalleftmargin}% \fcolorbox{VerbatimBorderColor}{VerbatimColor}{% \begin{minipage}{\distancetoright}% \noindent\hspace*{-\leftsidespace}% #1 \end{minipage}% }% } \def\FrameCommand{\mycolorbox} \renewcommand{\Verbatim}[1][1]{% \OriginalVerbatim[#1]% } \renewcommand{\endVerbatim}{% \endOriginalVerbatim% } % Index-entry generation support. % % Command to generate two index entries (using subentries) \newcommand{\indexii}[2]{\index{#1!#2}\index{#2!#1}} % And three entries (using only one level of subentries) \newcommand{\indexiii}[3]{\index{#1!#2 #3}\index{#2!#3, #1}\index{#3!#1 #2}} % And four (again, using only one level of subentries) \newcommand{\indexiv}[4]{ \index{#1!#2 #3 #4} \index{#2!#3 #4, #1} \index{#3!#4, #1 #2} \index{#4!#1 #2 #3} } % \moduleauthor{name}{email} \newcommand{\moduleauthor}[2]{} % \sectionauthor{name}{email} \newcommand{\sectionauthor}[2]{} % Augment the sectioning commands used to get our own font family in place, % and reset some internal data items: \titleformat{\section}{\Large\py@HeaderFamily}% {\py@TitleColor\thesection}{0.5em}{\py@TitleColor}{\py@NormalColor} \titleformat{\subsection}{\large\py@HeaderFamily}% {\py@TitleColor\thesubsection}{0.5em}{\py@TitleColor}{\py@NormalColor} \titleformat{\subsubsection}{\py@HeaderFamily}% {\py@TitleColor\thesubsubsection}{0.5em}{\py@TitleColor}{\py@NormalColor} \titleformat{\paragraph}{\large\py@HeaderFamily}% {\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor} % {fulllineitems} is the main environment for object descriptions. % \newcommand{\py@itemnewline}[1]{% \@tempdima\linewidth% \advance\@tempdima \leftmargin\makebox[\@tempdima][l]{#1}% } \newenvironment{fulllineitems}{ \begin{list}{}{\labelwidth \leftmargin \labelsep 0pt \rightmargin 0pt \topsep -\parskip \partopsep \parskip \itemsep -\parsep \let\makelabel=\py@itemnewline} }{\end{list}} % \optional is used for ``[, arg]``, i.e. desc_optional nodes. \newcommand{\optional}[1]{% {\textnormal{\Large[}}{#1}\hspace{0.5mm}{\textnormal{\Large]}}} \newlength{\py@argswidth} \newcommand{\py@sigparams}[2]{% \parbox[t]{\py@argswidth}{#1\code{)}#2}} \newcommand{\pysigline}[1]{\item[#1]\nopagebreak} \newcommand{\pysiglinewithargsret}[3]{% \settowidth{\py@argswidth}{#1\code{(}}% \addtolength{\py@argswidth}{-2\py@argswidth}% \addtolength{\py@argswidth}{\linewidth}% \item[#1\code{(}\py@sigparams{#2}{#3}]} % This version is being checked in for the historical record; it shows % how I've managed to get some aspects of this to work. It will not % be used in practice, so a subsequent revision will change things % again. This version has problems, but shows how to do something % that proved more tedious than I'd expected, so I don't want to lose % the example completely. 
% \newcommand{\grammartoken}[1]{\texttt{#1}} \newenvironment{productionlist}[1][\@undefined]{ \def\optional##1{{\Large[}##1{\Large]}} \def\production##1##2{\hypertarget{grammar-token-##1}{}% \code{##1}&::=&\code{##2}\\} \def\productioncont##1{& &\code{##1}\\} \def\token##1{##1} \let\grammartoken=\token \parindent=2em \indent \begin{tabular}{lcl} }{% \end{tabular} } % Notices / Admonitions % \newlength{\py@noticelength} \newcommand{\py@heavybox}{ \setlength{\fboxrule}{1pt} \setlength{\fboxsep}{7pt} \setlength{\py@noticelength}{\linewidth} \addtolength{\py@noticelength}{-2\fboxsep} \addtolength{\py@noticelength}{-2\fboxrule} \setlength{\shadowsize}{3pt} \Sbox \minipage{\py@noticelength} } \newcommand{\py@endheavybox}{ \endminipage \endSbox \fbox{\TheSbox} } % Some are quite plain: \newcommand{\py@noticestart@note}{} \newcommand{\py@noticeend@note}{} \newcommand{\py@noticestart@hint}{} \newcommand{\py@noticeend@hint}{} \newcommand{\py@noticestart@important}{} \newcommand{\py@noticeend@important}{} \newcommand{\py@noticestart@tip}{} \newcommand{\py@noticeend@tip}{} % Others gets more visible distinction: \newcommand{\py@noticestart@warning}{\py@heavybox} \newcommand{\py@noticeend@warning}{\py@endheavybox} \newcommand{\py@noticestart@caution}{\py@heavybox} \newcommand{\py@noticeend@caution}{\py@endheavybox} \newcommand{\py@noticestart@attention}{\py@heavybox} \newcommand{\py@noticeend@attention}{\py@endheavybox} \newcommand{\py@noticestart@danger}{\py@heavybox} \newcommand{\py@noticeend@danger}{\py@endheavybox} \newcommand{\py@noticestart@error}{\py@heavybox} \newcommand{\py@noticeend@error}{\py@endheavybox} \newenvironment{notice}[2]{ \def\py@noticetype{#1} \csname py@noticestart@#1\endcsname \par\strong{#2} }{\csname py@noticeend@\py@noticetype\endcsname} % Allow the release number to be specified independently of the % \date{}. This allows the date to reflect the document's date and % release to specify the release that is documented. % \newcommand{\py@release}{} \newcommand{\version}{} \newcommand{\shortversion}{} \newcommand{\releaseinfo}{} \newcommand{\releasename}{Release} \newcommand{\release}[1]{% \renewcommand{\py@release}{\releasename\space\version}% \renewcommand{\version}{#1}} \newcommand{\setshortversion}[1]{% \renewcommand{\shortversion}{#1}} \newcommand{\setreleaseinfo}[1]{% \renewcommand{\releaseinfo}{#1}} % Allow specification of the author's address separately from the % author's name. This can be used to format them differently, which % is a good thing. % \newcommand{\py@authoraddress}{} \newcommand{\authoraddress}[1]{\renewcommand{\py@authoraddress}{#1}} % This sets up the fancy chapter headings that make the documents look % at least a little better than the usual LaTeX output. % \@ifundefined{ChTitleVar}{}{ \ChNameVar{\raggedleft\normalsize\py@HeaderFamily} \ChNumVar{\raggedleft \bfseries\Large\py@HeaderFamily} \ChTitleVar{\raggedleft \rm\Huge\py@HeaderFamily} % This creates chapter heads without the leading \vspace*{}: \def\@makechapterhead#1{% {\parindent \z@ \raggedright \normalfont \ifnum \c@secnumdepth >\m@ne \DOCH \fi \interlinepenalty\@M \DOTI{#1} } } } % Redefine description environment so that it is usable inside fulllineitems. % \renewcommand{\description}{% \list{}{\labelwidth\z@% \itemindent-\leftmargin% \labelsep5pt% \let\makelabel=\descriptionlabel}} % Definition lists; requested by AMK for HOWTO documents. Probably useful % elsewhere as well, so keep in in the general style support. 
% \newenvironment{definitions}{% \begin{description}% \def\term##1{\item[##1]\mbox{}\\*[0mm]} }{% \end{description}% } % Tell TeX about pathological hyphenation cases: \hyphenation{Base-HTTP-Re-quest-Hand-ler} % The following is stuff copied from docutils' latex writer. % \newcommand{\optionlistlabel}[1]{\bf #1 \hfill} \newenvironment{optionlist}[1] {\begin{list}{} {\setlength{\labelwidth}{#1} \setlength{\rightmargin}{1cm} \setlength{\leftmargin}{\rightmargin} \addtolength{\leftmargin}{\labelwidth} \addtolength{\leftmargin}{\labelsep} \renewcommand{\makelabel}{\optionlistlabel}} }{\end{list}} \newlength{\lineblockindentation} \setlength{\lineblockindentation}{2.5em} \newenvironment{lineblock}[1] {\begin{list}{} {\setlength{\partopsep}{\parskip} \addtolength{\partopsep}{\baselineskip} \topsep0pt\itemsep0.15\baselineskip\parsep0pt \leftmargin#1} \raggedright} {\end{list}} % Redefine includgraphics for avoiding images larger than the screen size % If the size is not specified. \let\py@Oldincludegraphics\includegraphics \newbox\image@box% \newdimen\image@width% \renewcommand\includegraphics[2][\@empty]{% \ifx#1\@empty% \setbox\image@box=\hbox{\py@Oldincludegraphics{#2}}% \image@width\wd\image@box% \ifdim \image@width>\linewidth% \setbox\image@box=\hbox{\py@Oldincludegraphics[width=\linewidth]{#2}}% \box\image@box% \else% \py@Oldincludegraphics{#2}% \fi% \else% \py@Oldincludegraphics[#1]{#2}% \fi% } % Fix the index and bibliography environments to add an entry to the Table of % Contents; this is much nicer than just having to jump to the end of the book % and flip around, especially with multiple indexes. % \let\py@OldTheindex=\theindex \renewcommand{\theindex}{ \cleardoublepage \phantomsection \py@OldTheindex \addcontentsline{toc}{chapter}{\indexname} } \let\py@OldThebibliography=\thebibliography \renewcommand{\thebibliography}[1]{ \cleardoublepage \phantomsection \py@OldThebibliography{1} \addcontentsline{toc}{chapter}{\bibname} } % Include hyperref last. \RequirePackage[colorlinks,breaklinks, linkcolor=InnerLinkColor,filecolor=OuterLinkColor, menucolor=OuterLinkColor,urlcolor=OuterLinkColor, citecolor=InnerLinkColor]{hyperref} % Fix anchor placement for figures with captions. % (Note: we don't use a package option here; instead, we give an explicit % \capstart for figures that actually have a caption.) 
\RequirePackage{hypcap} % From docutils.writers.latex2e \providecommand{\DUspan}[2]{% {% group ("span") to limit the scope of styling commands \@for\node@class@name:=#1\do{% \ifcsname docutilsrole\node@class@name\endcsname% \csname docutilsrole\node@class@name\endcsname% \fi% }% {#2}% node content }% close "span" } SQLAlchemy-1.0.11/doc/build/texinputs/Makefile0000664000175000017500000000345112636375552022212 0ustar classicclassic00000000000000# Makefile for Sphinx LaTeX output ALLDOCS = $(basename $(wildcard *.tex)) ALLPDF = $(addsuffix .pdf,$(ALLDOCS)) ALLDVI = $(addsuffix .dvi,$(ALLDOCS)) # Prefix for archive names ARCHIVEPRREFIX = # Additional LaTeX options LATEXOPTS = -interaction=nonstopmode all: $(ALLPDF) all-pdf: $(ALLPDF) all-dvi: $(ALLDVI) all-ps: all-dvi for f in *.dvi; do dvips $$f; done all-pdf-ja: $(wildcard *.tex) ebb $(wildcard *.pdf *.png *.gif *.jpeg) platex -kanji=utf8 $(LATEXOPTS) '$<' platex -kanji=utf8 $(LATEXOPTS) '$<' platex -kanji=utf8 $(LATEXOPTS) '$<' -mendex -U -f -d '$(basename $<).dic' -s python.ist '$(basename $<).idx' platex -kanji=utf8 $(LATEXOPTS) '$<' platex -kanji=utf8 $(LATEXOPTS) '$<' dvipdfmx '$(basename $<).dvi' zip: all-$(FMT) mkdir $(ARCHIVEPREFIX)docs-$(FMT) cp $(ALLPDF) $(ARCHIVEPREFIX)docs-$(FMT) zip -q -r -9 $(ARCHIVEPREFIX)docs-$(FMT).zip $(ARCHIVEPREFIX)docs-$(FMT) rm -r $(ARCHIVEPREFIX)docs-$(FMT) tar: all-$(FMT) mkdir $(ARCHIVEPREFIX)docs-$(FMT) cp $(ALLPDF) $(ARCHIVEPREFIX)docs-$(FMT) tar cf $(ARCHIVEPREFIX)docs-$(FMT).tar $(ARCHIVEPREFIX)docs-$(FMT) rm -r $(ARCHIVEPREFIX)docs-$(FMT) bz2: tar bzip2 -9 -k $(ARCHIVEPREFIX)docs-$(FMT).tar # The number of LaTeX runs is quite conservative, but I don't expect it # to get run often, so the little extra time won't hurt. %.dvi: %.tex -latex $(LATEXOPTS) '$<' -latex $(LATEXOPTS) '$<' -latex $(LATEXOPTS) '$<' -makeindex -s python.ist '$(basename $<).idx' -latex $(LATEXOPTS) '$<' -latex $(LATEXOPTS) '$<' %.pdf: %.tex -pdflatex $(LATEXOPTS) '$<' -pdflatex $(LATEXOPTS) '$<' -pdflatex $(LATEXOPTS) '$<' -makeindex -s python.ist '$(basename $<).idx' -pdflatex $(LATEXOPTS) '$<' -pdflatex $(LATEXOPTS) '$<' clean: rm -f *.dvi *.log *.ind *.aux *.toc *.syn *.idx *.out *.ilg *.pla .PHONY: all all-pdf all-dvi all-ps clean SQLAlchemy-1.0.11/doc/build/requirements.txt0000664000175000017500000000016212636375552021767 0ustar classicclassic00000000000000changelog>=0.3.4 sphinx-paramlinks>=0.2.2 git+https://bitbucket.org/zzzeek/zzzeeksphinx.git@HEAD#egg=zzzeeksphinx SQLAlchemy-1.0.11/doc/build/intro.rst0000664000175000017500000001731012636375552020373 0ustar classicclassic00000000000000.. _overview_toplevel: .. _overview: ======== Overview ======== The SQLAlchemy SQL Toolkit and Object Relational Mapper is a comprehensive set of tools for working with databases and Python. It has several distinct areas of functionality which can be used individually or combined together. Its major components are illustrated in below, with component dependencies organized into layers: .. image:: sqla_arch_small.png Above, the two most significant front-facing portions of SQLAlchemy are the **Object Relational Mapper** and the **SQL Expression Language**. SQL Expressions can be used independently of the ORM. When using the ORM, the SQL Expression language remains part of the public facing API as it is used within object-relational configurations and queries. .. 
_doc_overview: Documentation Overview ====================== The documentation is separated into three sections: :ref:`orm_toplevel`, :ref:`core_toplevel`, and :ref:`dialect_toplevel`. In :ref:`orm_toplevel`, the Object Relational Mapper is introduced and fully described. New users should begin with the :ref:`ormtutorial_toplevel`. If you want to work with higher-level SQL which is constructed automatically for you, as well as management of Python objects, proceed to this tutorial. In :ref:`core_toplevel`, the breadth of SQLAlchemy's SQL and database integration and description services are documented, the core of which is the SQL Expression language. The SQL Expression Language is a toolkit all its own, independent of the ORM package, which can be used to construct manipulable SQL expressions which can be programmatically constructed, modified, and executed, returning cursor-like result sets. In contrast to the ORM's domain-centric mode of usage, the expression language provides a schema-centric usage paradigm. New users should begin here with :ref:`sqlexpression_toplevel`. SQLAlchemy engine, connection, and pooling services are also described in :ref:`core_toplevel`. In :ref:`dialect_toplevel`, reference documentation for all provided database and DBAPI backends is provided. Code Examples ============= Working code examples, mostly regarding the ORM, are included in the SQLAlchemy distribution. A description of all the included example applications is at :ref:`examples_toplevel`. There is also a wide variety of examples involving both core SQLAlchemy constructs as well as the ORM on the wiki. See `Theatrum Chemicum `_. .. _installation: Installation Guide ================== Supported Platforms ------------------- SQLAlchemy has been tested against the following platforms: * cPython since version 2.6, through the 2.xx series * cPython version 3, throughout all 3.xx series * `Pypy `_ 2.1 or greater .. versionchanged:: 0.9 Python 2.6 is now the minimum Python version supported. Platforms that don't currently have support include Jython, IronPython. Jython has been supported in the past and may be supported in future releases as well, depending on the state of Jython itself. Supported Installation Methods ------------------------------- SQLAlchemy supports installation using standard Python "distutils" or "setuptools" methodologies. An overview of potential setups is as follows: * **Plain Python Distutils** - SQLAlchemy can be installed with a clean Python install using the services provided via `Python Distutils `_, using the ``setup.py`` script. The C extensions as well as Python 3 builds are supported. * **Setuptools or Distribute** - When using `setuptools `_, SQLAlchemy can be installed via ``setup.py`` or ``easy_install``, and the C extensions are supported. * **pip** - `pip `_ is an installer that rides on top of ``setuptools`` or ``distribute``, replacing the usage of ``easy_install``. It is often preferred for its simpler mode of usage. Install via pip --------------- When ``pip`` is available, the distribution can be downloaded from Pypi and installed in one step:: pip install SQLAlchemy This command will download the latest **released** version of SQLAlchemy from the `Python Cheese Shop `_ and install it to your system. 
In order to install the latest **prerelease** version, such as
``1.0.0b1``, pip requires that the ``--pre`` flag be used::

    pip install --pre SQLAlchemy

Where above, if the most recent version is a prerelease, it will
be installed instead of the latest released version.

Installing using setup.py
----------------------------------

Otherwise, you can install from the distribution using the
``setup.py`` script::

    python setup.py install

Installing the C Extensions
----------------------------------

SQLAlchemy includes C extensions which provide an extra speed
boost for dealing with result sets.  The extensions are supported
on both the 2.xx and 3.xx series of cPython.

.. versionchanged:: 0.9.0 The C extensions now compile on Python 3
   as well as Python 2.

``setup.py`` will automatically build the extensions if an
appropriate platform is detected.  If the build of the C extensions
fails due to a missing compiler or other issue, the setup process
will output a warning message and re-run the build without the C
extensions, reporting final status upon completion.

To run the build/install without even attempting to compile the C
extensions, the ``DISABLE_SQLALCHEMY_CEXT`` environment variable
may be specified.  The use case for this is either for special
testing circumstances, or in the rare case of compatibility/build
issues not overcome by the usual "rebuild" mechanism::

  # *** only in SQLAlchemy 0.9.4 / 0.8.6 or greater ***
  export DISABLE_SQLALCHEMY_CEXT=1; python setup.py install

.. versionadded:: 0.9.4,0.8.6 Support for disabling the build of C
   extensions using the ``DISABLE_SQLALCHEMY_CEXT`` environment
   variable has been added.  This allows control of C extension
   building whether or not setuptools is available, and additionally
   works around the fact that setuptools will possibly be
   **removing support** for command-line switches such as
   ``--without-extensions`` in a future release.

For versions of SQLAlchemy prior to 0.9.4 or 0.8.6, the
``--without-cextensions`` option may be used to disable the attempt
to build C extensions, provided setuptools is in use, and provided
the ``Feature`` construct is supported by the installed version of
setuptools::

    python setup.py --without-cextensions install

Or with pip::

    pip install --global-option='--without-cextensions' SQLAlchemy

Installing on Python 3
----------------------------------

SQLAlchemy runs directly on Python 2 or Python 3, and can be
installed in either environment without any adjustments or code
conversion.

.. versionchanged:: 0.9.0 Python 3 is now supported in place with
   no 2to3 step required.

Installing a Database API
----------------------------------

SQLAlchemy is designed to operate with a :term:`DBAPI`
implementation built for a particular database, and includes
support for the most popular databases.  The individual database
sections in :doc:`/dialects/index` enumerate the available DBAPIs
for each database, including external links.  (A brief,
hypothetical example of selecting a DBAPI appears at the end of
this page.)

Checking the Installed SQLAlchemy Version
------------------------------------------

This documentation covers SQLAlchemy version 1.0.  If you're
working on a system that already has SQLAlchemy installed, check
the version from your Python prompt like this:

.. sourcecode:: python+sql

     >>> import sqlalchemy
     >>> sqlalchemy.__version__ # doctest: +SKIP
     1.0.0

.. _migration:

0.9 to 1.0 Migration
=====================

Notes on what's changed from 0.9 to 1.0 are available here at
:doc:`changelog/migration_10`.
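As the hedged illustration promised above — the URL, credentials,
and database name here are hypothetical — the DBAPI in use is
selected via the dialect portion of the database URL passed to
``create_engine()``::

    from sqlalchemy import create_engine

    # dialect "postgresql" with the DBAPI "psycopg2" named explicitly
    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")

    # omitting the DBAPI name selects the dialect's default DBAPI
    engine = create_engine("postgresql://scott:tiger@localhost/test")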
SQLAlchemy-1.0.11/doc/build/corrections.py0000664000175000017500000000224612636375552021414 0ustar classicclassic00000000000000targets = {} quit = False def missing_reference(app, env, node, contnode): global quit if quit: return reftarget = node.attributes['reftarget'] reftype = node.attributes['reftype'] refdoc = node.attributes['refdoc'] rawsource = node.rawsource if reftype == 'paramref': return target = rawsource if target in targets: return print "\n%s" % refdoc print "Reftarget: %s" % rawsource correction = raw_input("? ") correction = correction.strip() if correction == ".": correction = ":%s:`.%s`" % (reftype, reftarget) elif correction == 'q': quit = True else: targets[target] = correction def write_corrections(app, exception): print "#!/bin/sh\n\n" for targ, corr in targets.items(): if not corr: continue print """find lib/ -print -type f -name "*.py" -exec sed -i '' 's/%s/%s/g' {} \;""" % (targ, corr) print """find doc/build/ -print -type f -name "*.rst" -exec sed -i '' 's/%s/%s/g' {} \;""" % (targ, corr) def setup(app): app.connect('missing-reference', missing_reference) app.connect('build-finished', write_corrections) SQLAlchemy-1.0.11/doc/build/dialects/0000775000175000017500000000000012636376632020274 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/doc/build/dialects/mysql.rst0000664000175000017500000000566612636375552022210 0ustar classicclassic00000000000000.. _mysql_toplevel: MySQL ===== .. automodule:: sqlalchemy.dialects.mysql.base MySQL Data Types ------------------ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with MySQL are importable from the top level dialect:: from sqlalchemy.dialects.mysql import \ BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, \ DATETIME, DECIMAL, DECIMAL, DOUBLE, ENUM, FLOAT, INTEGER, \ LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, MEDIUMTEXT, NCHAR, \ NUMERIC, NVARCHAR, REAL, SET, SMALLINT, TEXT, TIME, TIMESTAMP, \ TINYBLOB, TINYINT, TINYTEXT, VARBINARY, VARCHAR, YEAR Types which are specific to MySQL, or have MySQL-specific construction arguments, are as follows: .. currentmodule:: sqlalchemy.dialects.mysql .. autoclass:: BIGINT :members: __init__ .. autoclass:: BINARY :members: __init__ .. autoclass:: BIT :members: __init__ .. autoclass:: BLOB :members: __init__ .. autoclass:: BOOLEAN :members: __init__ .. autoclass:: CHAR :members: __init__ .. autoclass:: DATE :members: __init__ .. autoclass:: DATETIME :members: __init__ .. autoclass:: DECIMAL :members: __init__ .. autoclass:: DOUBLE :members: __init__ .. autoclass:: ENUM :members: __init__ .. autoclass:: FLOAT :members: __init__ .. autoclass:: INTEGER :members: __init__ .. autoclass:: LONGBLOB :members: __init__ .. autoclass:: LONGTEXT :members: __init__ .. autoclass:: MEDIUMBLOB :members: __init__ .. autoclass:: MEDIUMINT :members: __init__ .. autoclass:: MEDIUMTEXT :members: __init__ .. autoclass:: NCHAR :members: __init__ .. autoclass:: NUMERIC :members: __init__ .. autoclass:: NVARCHAR :members: __init__ .. autoclass:: REAL :members: __init__ .. autoclass:: SET :members: __init__ .. autoclass:: SMALLINT :members: __init__ .. autoclass:: TEXT :members: __init__ .. autoclass:: TIME :members: __init__ .. autoclass:: TIMESTAMP :members: __init__ .. autoclass:: TINYBLOB :members: __init__ .. autoclass:: TINYINT :members: __init__ .. autoclass:: TINYTEXT :members: __init__ .. autoclass:: VARBINARY :members: __init__ .. autoclass:: VARCHAR :members: __init__ .. autoclass:: YEAR :members: __init__ MySQL-Python -------------------- .. 
automodule:: sqlalchemy.dialects.mysql.mysqldb pymysql ------------- .. automodule:: sqlalchemy.dialects.mysql.pymysql MySQL-Connector ---------------------- .. automodule:: sqlalchemy.dialects.mysql.mysqlconnector cymysql ------------ .. automodule:: sqlalchemy.dialects.mysql.cymysql OurSQL -------------- .. automodule:: sqlalchemy.dialects.mysql.oursql Google App Engine ----------------------- .. automodule:: sqlalchemy.dialects.mysql.gaerdbms pyodbc ------ .. automodule:: sqlalchemy.dialects.mysql.pyodbc zxjdbc -------------- .. automodule:: sqlalchemy.dialects.mysql.zxjdbc SQLAlchemy-1.0.11/doc/build/dialects/sybase.rst0000664000175000017500000000047612636375552022323 0ustar classicclassic00000000000000.. _sybase_toplevel: Sybase ====== .. automodule:: sqlalchemy.dialects.sybase.base python-sybase ------------------- .. automodule:: sqlalchemy.dialects.sybase.pysybase pyodbc ------------ .. automodule:: sqlalchemy.dialects.sybase.pyodbc mxodbc ------------ .. automodule:: sqlalchemy.dialects.sybase.mxodbc SQLAlchemy-1.0.11/doc/build/dialects/sqlite.rst0000664000175000017500000000145712636375552022336 0ustar classicclassic00000000000000.. _sqlite_toplevel: SQLite ====== .. automodule:: sqlalchemy.dialects.sqlite.base SQLite Data Types ------------------------ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with SQLite are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: from sqlalchemy.dialects.sqlite import \ BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, \ INTEGER, NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, \ VARCHAR .. module:: sqlalchemy.dialects.sqlite .. autoclass:: DATETIME .. autoclass:: DATE .. autoclass:: TIME Pysqlite -------- .. automodule:: sqlalchemy.dialects.sqlite.pysqlite Pysqlcipher ----------- .. automodule:: sqlalchemy.dialects.sqlite.pysqlcipher SQLAlchemy-1.0.11/doc/build/dialects/postgresql.rst0000664000175000017500000001024612636375552023234 0ustar classicclassic00000000000000.. _postgresql_toplevel: PostgreSQL ========== .. automodule:: sqlalchemy.dialects.postgresql.base PostgreSQL Data Types ------------------------ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with Postgresql are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: from sqlalchemy.dialects.postgresql import \ ARRAY, BIGINT, BIT, BOOLEAN, BYTEA, CHAR, CIDR, DATE, \ DOUBLE_PRECISION, ENUM, FLOAT, HSTORE, INET, INTEGER, \ INTERVAL, JSON, JSONB, MACADDR, NUMERIC, OID, REAL, SMALLINT, TEXT, \ TIME, TIMESTAMP, UUID, VARCHAR, INT4RANGE, INT8RANGE, NUMRANGE, \ DATERANGE, TSRANGE, TSTZRANGE, TSVECTOR Types which are specific to PostgreSQL, or have PostgreSQL-specific construction arguments, are as follows: .. currentmodule:: sqlalchemy.dialects.postgresql .. autoclass:: array .. autoclass:: ARRAY :members: __init__, Comparator .. autoclass:: Any .. autoclass:: All .. autoclass:: BIT :members: __init__ .. autoclass:: BYTEA :members: __init__ .. autoclass:: CIDR :members: __init__ .. autoclass:: DOUBLE_PRECISION :members: __init__ .. autoclass:: ENUM :members: __init__, create, drop .. autoclass:: HSTORE :members: .. autoclass:: hstore :members: .. autoclass:: INET :members: __init__ .. autoclass:: INTERVAL :members: __init__ .. autoclass:: JSON :members: .. autoclass:: JSONB :members: .. autoclass:: JSONElement :members: .. autoclass:: MACADDR :members: __init__ .. 
autoclass:: OID :members: __init__ .. autoclass:: REAL :members: __init__ .. autoclass:: TSVECTOR :members: __init__ .. autoclass:: UUID :members: __init__ Range Types ~~~~~~~~~~~ The new range column types found in PostgreSQL 9.2 onwards are catered for by the following types: .. autoclass:: INT4RANGE .. autoclass:: INT8RANGE .. autoclass:: NUMRANGE .. autoclass:: DATERANGE .. autoclass:: TSRANGE .. autoclass:: TSTZRANGE The types above get most of their functionality from the following mixin: .. autoclass:: sqlalchemy.dialects.postgresql.ranges.RangeOperators :members: .. warning:: The range type DDL support should work with any Postgres DBAPI driver, however the data types returned may vary. If you are using ``psycopg2``, it's recommended to upgrade to version 2.5 or later before using these column types. When instantiating models that use these column types, you should pass whatever data type is expected by the DBAPI driver you're using for the column type. For :mod:`psycopg2` these are :class:`~psycopg2.extras.NumericRange`, :class:`~psycopg2.extras.DateRange`, :class:`~psycopg2.extras.DateTimeRange` and :class:`~psycopg2.extras.DateTimeTZRange` or the class you've registered with :func:`~psycopg2.extras.register_range`. For example: .. code-block:: python from psycopg2.extras import DateTimeRange from sqlalchemy.dialects.postgresql import TSRANGE class RoomBooking(Base): __tablename__ = 'room_booking' room = Column(Integer(), primary_key=True) during = Column(TSRANGE()) booking = RoomBooking( room=101, during=DateTimeRange(datetime(2013, 3, 23), None) ) PostgreSQL Constraint Types --------------------------- SQLAlchemy supports Postgresql EXCLUDE constraints via the :class:`ExcludeConstraint` class: .. autoclass:: ExcludeConstraint :members: __init__ For example:: from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE class RoomBooking(Base): __tablename__ = 'room_booking' room = Column(Integer(), primary_key=True) during = Column(TSRANGE()) __table_args__ = ( ExcludeConstraint(('room', '='), ('during', '&&')), ) psycopg2 -------------- .. automodule:: sqlalchemy.dialects.postgresql.psycopg2 pg8000 -------------- .. automodule:: sqlalchemy.dialects.postgresql.pg8000 psycopg2cffi -------------- .. automodule:: sqlalchemy.dialects.postgresql.psycopg2cffi py-postgresql -------------------- .. automodule:: sqlalchemy.dialects.postgresql.pypostgresql zxjdbc -------------- .. automodule:: sqlalchemy.dialects.postgresql.zxjdbc SQLAlchemy-1.0.11/doc/build/dialects/firebird.rst0000664000175000017500000000035312636375552022615 0ustar classicclassic00000000000000.. _firebird_toplevel: Firebird ======== .. automodule:: sqlalchemy.dialects.firebird.base fdb --- .. automodule:: sqlalchemy.dialects.firebird.fdb kinterbasdb ----------- .. automodule:: sqlalchemy.dialects.firebird.kinterbasdb SQLAlchemy-1.0.11/doc/build/dialects/mssql.rst0000664000175000017500000000415712636375552022174 0ustar classicclassic00000000000000.. _mssql_toplevel: Microsoft SQL Server ==================== .. 
automodule:: sqlalchemy.dialects.mssql.base SQL Server Data Types ----------------------- As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with SQL server are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: from sqlalchemy.dialects.mssql import \ BIGINT, BINARY, BIT, CHAR, DATE, DATETIME, DATETIME2, \ DATETIMEOFFSET, DECIMAL, FLOAT, IMAGE, INTEGER, MONEY, \ NCHAR, NTEXT, NUMERIC, NVARCHAR, REAL, SMALLDATETIME, \ SMALLINT, SMALLMONEY, SQL_VARIANT, TEXT, TIME, \ TIMESTAMP, TINYINT, UNIQUEIDENTIFIER, VARBINARY, VARCHAR Types which are specific to SQL Server, or have SQL Server-specific construction arguments, are as follows: .. currentmodule:: sqlalchemy.dialects.mssql .. autoclass:: BIT :members: __init__ .. autoclass:: CHAR :members: __init__ .. autoclass:: DATETIME2 :members: __init__ .. autoclass:: DATETIMEOFFSET :members: __init__ .. autoclass:: IMAGE :members: __init__ .. autoclass:: MONEY :members: __init__ .. autoclass:: NCHAR :members: __init__ .. autoclass:: NTEXT :members: __init__ .. autoclass:: NVARCHAR :members: __init__ .. autoclass:: REAL :members: __init__ .. autoclass:: SMALLDATETIME :members: __init__ .. autoclass:: SMALLMONEY :members: __init__ .. autoclass:: SQL_VARIANT :members: __init__ .. autoclass:: TEXT :members: __init__ .. autoclass:: TIME :members: __init__ .. autoclass:: TINYINT :members: __init__ .. autoclass:: UNIQUEIDENTIFIER :members: __init__ .. autoclass:: VARCHAR :members: __init__ PyODBC ------ .. automodule:: sqlalchemy.dialects.mssql.pyodbc mxODBC ------ .. automodule:: sqlalchemy.dialects.mssql.mxodbc pymssql ------- .. automodule:: sqlalchemy.dialects.mssql.pymssql zxjdbc -------------- .. automodule:: sqlalchemy.dialects.mssql.zxjdbc AdoDBAPI -------- .. automodule:: sqlalchemy.dialects.mssql.adodbapi SQLAlchemy-1.0.11/doc/build/dialects/index.rst0000664000175000017500000000575412636375552022150 0ustar classicclassic00000000000000.. _dialect_toplevel: Dialects ======== The **dialect** is the system SQLAlchemy uses to communicate with various types of :term:`DBAPI` implementations and databases. The sections that follow contain reference documentation and notes specific to the usage of each backend, as well as notes for the various DBAPIs. All dialects require that an appropriate DBAPI driver is installed. Included Dialects ----------------- .. toctree:: :maxdepth: 1 :glob: firebird mssql mysql oracle postgresql sqlite sybase .. _external_toplevel: External Dialects ----------------- .. versionchanged:: 0.8 As of SQLAlchemy 0.8, several dialects have been moved to external projects, and dialects for new databases will also be published as external projects. The rationale here is to keep the base SQLAlchemy install and test suite from growing inordinately large. The "classic" dialects such as SQLite, MySQL, Postgresql, Oracle, SQL Server, and Firebird will remain in the Core for the time being. .. versionchanged:: 1.0 The Drizzle dialect has been moved into the third party system. Current external dialect projects for SQLAlchemy include: Production Ready ^^^^^^^^^^^^^^^^ * `ibm_db_sa `_ - driver for IBM DB2 and Informix, developed jointly by IBM and SQLAlchemy developers. * `sqlalchemy-redshift `_ - driver for Amazon Redshift, adapts the existing Postgresql/psycopg2 driver. * `sqlalchemy_exasol `_ - driver for EXASolution. * `sqlalchemy-sqlany `_ - driver for SAP Sybase SQL Anywhere, developed by SAP. * `sqlalchemy-monetdb `_ - driver for MonetDB. 
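As a hedged sketch of how an external dialect is typically used
once installed — the package name, registered URL scheme, and
connection details here follow the sqlalchemy-redshift project's
conventions as an assumption, not a guarantee::

    # install the external dialect alongside SQLAlchemy (shell):
    #   pip install sqlalchemy-redshift
    from sqlalchemy import create_engine

    # the external package registers its dialect name with SQLAlchemy,
    # so it is addressed through the usual URL mechanism
    engine = create_engine(
        "redshift+psycopg2://user:password@examplecluster.example.com:5439/dev")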
Experimental / Incomplete ^^^^^^^^^^^^^^^^^^^^^^^^^^ Dialects that are in an incomplete state or are considered somewhat experimental. * `CALCHIPAN `_ - Adapts `Pandas `_ dataframes to SQLAlchemy. * `sqlalchemy-cubrid `_ - driver for the CUBRID database. Attic ^^^^^ Dialects in the "attic" are those that were contributed for SQLAlchemy long ago but have received little attention or demand since then, and are now moved out to their own repositories in at best a semi-working state. Community members interested in these dialects should feel free to pick up on their current codebase and fork off into working libraries. * `sqlalchemy-access `_ - driver for Microsoft Access. * `sqlalchemy-drizzle `_ - driver for the Drizzle MySQL variant. * `sqlalchemy-informixdb `_ - driver for the informixdb DBAPI. * `sqlalchemy-maxdb `_ - driver for the MaxDB database SQLAlchemy-1.0.11/doc/build/dialects/oracle.rst0000664000175000017500000000234512636375552022277 0ustar classicclassic00000000000000.. _oracle_toplevel: Oracle ====== .. automodule:: sqlalchemy.dialects.oracle.base Oracle Data Types ------------------- As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with Oracle are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: from sqlalchemy.dialects.oracle import \ BFILE, BLOB, CHAR, CLOB, DATE, \ DOUBLE_PRECISION, FLOAT, INTERVAL, LONG, NCLOB, \ NUMBER, NVARCHAR, NVARCHAR2, RAW, TIMESTAMP, VARCHAR, \ VARCHAR2 Types which are specific to Oracle, or have Oracle-specific construction arguments, are as follows: .. currentmodule:: sqlalchemy.dialects.oracle .. autoclass:: BFILE :members: __init__ .. autoclass:: DATE :members: __init__ .. autoclass:: DOUBLE_PRECISION :members: __init__ .. autoclass:: INTERVAL :members: __init__ .. autoclass:: NCLOB :members: __init__ .. autoclass:: NUMBER :members: __init__ .. autoclass:: LONG :members: __init__ .. autoclass:: RAW :members: __init__ cx_Oracle ---------- .. automodule:: sqlalchemy.dialects.oracle.cx_oracle zxjdbc ------- .. automodule:: sqlalchemy.dialects.oracle.zxjdbc SQLAlchemy-1.0.11/doc/build/Makefile0000664000175000017500000001212212636375552020142 0ustar classicclassic00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = -v SPHINXBUILD = sphinx-build PAPER = BUILDDIR = output # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest dist-html site-mako gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " gettext to make PO message catalogs" @echo " dist-html same as html, but places files in /doc" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." dist-html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) .. @echo @echo "Build finished. The HTML pages are in ../." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/SQLAlchemy.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/SQLAlchemy.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/SQLAlchemy" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/SQLAlchemy" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex cp texinputs/* $(BUILDDIR)/latex/ @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." 
latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex cp texinputs/* $(BUILDDIR)/latex/ @echo "Running LaTeX files through pdflatex..." make -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) . @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." SQLAlchemy-1.0.11/doc/build/conf.py0000664000175000017500000002773712636375621020020 0ustar classicclassic00000000000000# -*- coding: utf-8 -*- # # SQLAlchemy documentation build configuration file, created by # sphinx-quickstart on Wed Nov 26 19:50:10 2008. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import traceback def force_install_reqs(): import logging log = logging.getLogger("pip") handler = logging.StreamHandler(sys.stderr) handler.setFormatter(logging.Formatter("[pip] %(message)s")) log.addHandler(handler) log.setLevel(logging.INFO) log.info("READTHEDOCS is set, force-installing requirements.txt") from pip.commands import install req = os.path.join(os.path.dirname(__file__), "requirements.txt") cmd = install.InstallCommand() options, args = cmd.parse_args(["-v", "-U", "-r", req]) cmd.run(options, args) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../lib')) sys.path.insert(0, os.path.abspath('../..')) # examples sys.path.insert(0, os.path.abspath('.')) import sqlalchemy # attempt to force pip to definitely get the latest # versions of libraries, see # https://github.com/rtfd/readthedocs.org/issues/1293 rtd = os.environ.get('READTHEDOCS', None) == 'True' if rtd: try: force_install_reqs() except: traceback.print_exc() # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'zzzeeksphinx', 'changelog', 'sphinx_paramlinks', #'corrections' ] # Add any paths that contain templates here, relative to this directory. # not sure why abspath() is needed here, some users # have reported this. templates_path = [os.path.abspath('templates')] nitpicky = True # The suffix of source filenames. source_suffix = '.rst' # section names used by the changelog extension. 
changelog_sections = ["general", "orm", "orm declarative", "orm querying", \ "orm configuration", "engine", "sql", \ "schema", \ "postgresql", "mysql", "sqlite", "mssql", \ "oracle", "firebird"] # tags to sort on inside of sections changelog_inner_tag_sort = ["feature", "changed", "removed", "bug", "moved"] # how to render changelog links changelog_render_ticket = "http://www.sqlalchemy.org/trac/ticket/%s" changelog_render_pullreq = { "bitbucket": "https://bitbucket.org/zzzeek/sqlalchemy/pull-request/%s", "default": "https://bitbucket.org/zzzeek/sqlalchemy/pull-request/%s", "github": "https://github.com/zzzeek/sqlalchemy/pull/%s", } changelog_render_changeset = "http://www.sqlalchemy.org/trac/changeset/%s" autodocmods_convert_modname = { "sqlalchemy.sql.sqltypes": "sqlalchemy.types", "sqlalchemy.sql.type_api": "sqlalchemy.types", "sqlalchemy.sql.schema": "sqlalchemy.schema", "sqlalchemy.sql.elements": "sqlalchemy.sql.expression", "sqlalchemy.sql.selectable": "sqlalchemy.sql.expression", "sqlalchemy.sql.dml": "sqlalchemy.sql.expression", "sqlalchemy.sql.ddl": "sqlalchemy.schema", "sqlalchemy.sql.base": "sqlalchemy.sql.expression", "sqlalchemy.engine.base": "sqlalchemy.engine", "sqlalchemy.engine.result": "sqlalchemy.engine", } autodocmods_convert_modname_w_class = { ("sqlalchemy.engine.interfaces", "Connectable"): "sqlalchemy.engine", ("sqlalchemy.sql.base", "DialectKWArgs"): "sqlalchemy.sql.base", } # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'contents' # General information about the project. project = u'SQLAlchemy' copyright = u'2007-2015, the SQLAlchemy authors and contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = "1.0" # The full version, including alpha/beta/rc tags. release = "1.0.11" release_date = "December 12, 2015" site_base = os.environ.get("RTD_SITE_BASE", "http://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" site_adapter_py = "docs_adapter.py" # arbitrary number recognized by builders.py, incrementing this # will force a rebuild build_number = 3 # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. 
#modindex_common_prefix = [] # have the "gettext" build generate .pot for each individual # .rst gettext_compact = False # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'zzzeeksphinx' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. html_style = 'default.css' # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = "%s %s Documentation" % (project, version) # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%m/%d/%Y %H:%M:%S' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_domain_indices = False # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, the reST sources are included in the HTML build as _sources/. #html_copy_source = True html_copy_source = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'SQLAlchemydoc' #autoclass_content = 'both' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('contents', 'sqlalchemy_%s.tex' % release.replace('.', '_'), ur'SQLAlchemy Documentation', ur'Mike Bayer', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. # sets TOC depth to 2. latex_preamble = '\setcounter{tocdepth}{3}' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True #latex_elements = { # 'papersize': 'letterpaper', # 'pointsize': '10pt', #} # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'sqlalchemy', u'SQLAlchemy Documentation', [u'SQLAlchemy authors'], 1) ] # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'SQLAlchemy' epub_author = u'SQLAlchemy authors' epub_publisher = u'SQLAlchemy authors' epub_copyright = u'2007-2015, SQLAlchemy authors' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files that should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True intersphinx_mapping = { 'alembic': ('http://alembic.readthedocs.org/en/latest/', None), 'psycopg2': ('http://pythonhosted.org/psycopg2', None), } SQLAlchemy-1.0.11/doc/build/index.rst0000664000175000017500000000615612636375552020355 0ustar classicclassic00000000000000:orphan: .. _index_toplevel: ======================== SQLAlchemy Documentation ======================== Getting Started =============== A high level view and getting set up. :doc:`Overview ` | :ref:`Installation Guide ` | :doc:`Frequently Asked Questions ` | :doc:`Migration from 0.9 ` | :doc:`Glossary ` | :doc:`Changelog catalog ` SQLAlchemy ORM ============== Here, the Object Relational Mapper is introduced and fully described. If you want to work with higher-level SQL which is constructed automatically for you, as well as automated persistence of Python objects, proceed first to the tutorial. 
* **Read this first:** :doc:`orm/tutorial`

* **ORM Configuration:** :doc:`Mapper Configuration ` | :doc:`Relationship Configuration `

* **Configuration Extensions:** :doc:`Declarative Extension ` | :doc:`Association Proxy ` | :doc:`Hybrid Attributes ` | :doc:`Automap ` | :doc:`Mutable Scalars `

* **ORM Usage:** :doc:`Session Usage and Guidelines ` | :doc:`Loading Objects ` | :doc:`Cached Query Extension `

* **Extending the ORM:** :doc:`ORM Events and Internals `

* **Other:** :doc:`Introduction to Examples `

SQLAlchemy Core
===============

The breadth of SQLAlchemy's SQL rendering engine, DBAPI integration, transaction integration, and schema description services are documented here. In contrast to the ORM's domain-centric mode of usage, the SQL Expression Language provides a schema-centric usage paradigm.

* **Read this first:** :doc:`core/tutorial`

* **All the Built In SQL:** :doc:`SQL Expression API `

* **Engines, Connections, Pools:** :doc:`Engine Configuration ` | :doc:`Connections, Transactions ` | :doc:`Connection Pooling `

* **Schema Definition:** :doc:`Overview ` | :ref:`Tables and Columns ` | :ref:`Database Introspection (Reflection) ` | :ref:`Insert/Update Defaults ` | :ref:`Constraints and Indexes ` | :ref:`Using Data Definition Language (DDL) `

* **Datatypes:** :ref:`Overview ` | :ref:`Building Custom Types ` | :ref:`API `

* **Core Basics:** :doc:`Overview ` | :doc:`Runtime Inspection API ` | :doc:`Event System ` | :doc:`Core Event Interfaces ` | :doc:`Creating Custom SQL Constructs ` |

Dialect Documentation
======================

The **dialect** is the system SQLAlchemy uses to communicate with various types of DBAPIs and databases. This section describes notes, options, and usage patterns regarding individual dialects.

:doc:`Index of all Dialects `
SQLAlchemy-1.0.11/doc/build/sqla_arch_small.png0000664000175000017500000012335312636375552022346 0ustar classicclassic00000000000000[binary PNG image data omitted]
SQLAlchemy-1.0.11/doc/build/faq/0000775000175000017500000000000012636376632017253 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/doc/build/faq/sessions.rst0000664000175000017500000005071712636375552021655 0ustar classicclassic00000000000000Sessions / Queries
===================

.. contents::
    :local:
    :class: faq
    :backlinks: none

I'm re-loading data with my Session but it isn't seeing changes that I committed elsewhere
------------------------------------------------------------------------------------------

The main issue regarding this behavior is that the session acts as though the transaction is in the *serializable* isolation state, even if it's not (and it usually is not). In practical terms, this means that the session does not alter any data that it's already read within the scope of a transaction.
If the term "isolation level" is unfamiliar, then you first need to read this link:

`Isolation Level `_

In short, serializable isolation level generally means that once you SELECT a series of rows in a transaction, you will get *the identical data* back each time you re-emit that SELECT. If you are in the next-lower isolation level, "repeatable read", you'll see newly added rows (and no longer see deleted rows), but for rows that you've *already* loaded, you won't see any change. Only if you are in a lower isolation level, e.g. "read committed", does it become possible to see a row of data change its value.

For information on controlling the isolation level when using the SQLAlchemy ORM, see :ref:`session_transaction_isolation`.

To simplify things dramatically, the :class:`.Session` itself works in terms of a completely isolated transaction, and doesn't overwrite any mapped attributes it's already read unless you tell it to. Trying to re-read data you've already loaded in an ongoing transaction is an *uncommon* use case that in many cases has no effect, so this is considered to be the exception, not the norm; to work within this exception, several methods are provided to allow specific data to be reloaded within the context of an ongoing transaction.

To understand what we mean by "the transaction" when we talk about the :class:`.Session`, your :class:`.Session` is intended to only work within a transaction. An overview of this is at :ref:`unitofwork_transaction`.

Once we've figured out what our isolation level is, and we think that our isolation level is set at a low enough level so that if we re-SELECT a row, we should see new data in our :class:`.Session`, how do we see it? Three ways, from most common to least:

1. We simply end our transaction and start a new one on next access with our :class:`.Session` by calling :meth:`.Session.commit` (note that if the :class:`.Session` is in the lesser-used "autocommit" mode, there would be a call to :meth:`.Session.begin` as well). The vast majority of applications and use cases do not have any issues with not being able to "see" data in other transactions because they stick to this pattern, which is at the core of the best practice of **short lived transactions**. See :ref:`session_faq_whentocreate` for some thoughts on this.

2. We tell our :class:`.Session` to re-read rows that it has already read, either when we next query for them using :meth:`.Session.expire_all` or :meth:`.Session.expire`, or immediately on an object using :meth:`.Session.refresh`. See :ref:`session_expire` for detail on this, and the sketch following this list.

3. We can run whole queries while setting them to definitely overwrite already-loaded objects as they read rows by using :meth:`.Query.populate_existing`.

But remember, **the ORM cannot see changes in rows if our isolation level is repeatable read or higher, unless we start a new transaction**.
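As a minimal sketch of techniques 1 and 2, assuming a typical ``sessionmaker`` factory named ``Session`` and a mapped class ``User`` - both hypothetical, neither is defined in this document::

    session = Session()

    user = session.query(User).get(5)   # row is loaded into the Session

    # ... some other transaction commits a change to this row ...

    session.expire(user)    # technique 2: mark the object's attributes stale
    user.name               # re-SELECTs the row within the current transaction

    session.commit()        # technique 1: end the transaction entirely;
    user.name               # the next access loads fresh data in a new one

Whether either re-SELECT actually sees the other transaction's change remains subject to the isolation level caveat above.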
"This Session's transaction has been rolled back due to a previous exception during flush." (or similar)
---------------------------------------------------------------------------------------------------------

This is an error that occurs when a :meth:`.Session.flush` raises an exception, rolls back the transaction, but further commands upon the ``Session`` are called without an explicit call to :meth:`.Session.rollback` or :meth:`.Session.close`.

It usually corresponds to an application that catches an exception upon :meth:`.Session.flush` or :meth:`.Session.commit` and does not properly handle the exception. For example::

    from sqlalchemy import create_engine, Column, Integer
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base(create_engine('sqlite://'))

    class Foo(Base):
        __tablename__ = 'foo'
        id = Column(Integer, primary_key=True)

    Base.metadata.create_all()

    session = sessionmaker()()

    # constraint violation
    session.add_all([Foo(id=1), Foo(id=1)])

    try:
        session.commit()
    except:
        # ignore error
        pass

    # continue using session without rolling back
    session.commit()

The usage of the :class:`.Session` should fit within a structure similar to this::

    try:
        session.commit()
    except:
        session.rollback()
        raise
    finally:
        session.close()  # optional, depends on use case

Many things can cause a failure within the try/except besides flushes. You should always have some kind of "framing" of your session operations so that connection and transaction resources have a definitive boundary, otherwise your application doesn't really have its usage of resources under control. This is not to say that you need to put try/except blocks all throughout your application - on the contrary, this would be a terrible idea. You should architect your application such that there is one (or few) point(s) of "framing" around session operations.

For a detailed discussion on how to organize usage of the :class:`.Session`, please see :ref:`session_faq_whentocreate`.

But why does flush() insist on issuing a ROLLBACK?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

It would be great if :meth:`.Session.flush` could partially complete and then not roll back; however, this is beyond its current capabilities, since its internal bookkeeping would have to be modified such that it can be halted at any time and be exactly consistent with what's been flushed to the database. While this is theoretically possible, the usefulness of the enhancement is greatly decreased by the fact that many database operations require a ROLLBACK in any case. Postgres in particular has operations which, once they fail, do not allow the transaction to continue::

    test=> create table foo(id integer primary key);
    NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "foo_pkey" for table "foo"
    CREATE TABLE
    test=> begin;
    BEGIN
    test=> insert into foo values(1);
    INSERT 0 1
    test=> commit;
    COMMIT
    test=> begin;
    BEGIN
    test=> insert into foo values(1);
    ERROR: duplicate key value violates unique constraint "foo_pkey"
    test=> insert into foo values(2);
    ERROR: current transaction is aborted, commands ignored until end of transaction block

What SQLAlchemy offers that solves both issues is support of SAVEPOINT, via :meth:`.Session.begin_nested`. Using :meth:`.Session.begin_nested`, you can frame an operation that may potentially fail within a transaction, and then "roll back" to the point before its failure while maintaining the enclosing transaction.
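As a minimal sketch of that pattern, reusing the ``Foo`` mapping from the example above with a fresh ``session`` (the specific exception class caught is an assumption about the backend's error)::

    from sqlalchemy.exc import IntegrityError

    session.begin_nested()          # establishes a SAVEPOINT
    try:
        session.add(Foo(id=1))      # collides with an existing primary key
        session.commit()            # resolves the most recent begin_nested()
    except IntegrityError:
        session.rollback()          # rolls back to the SAVEPOINT only

    session.commit()                # the enclosing transaction carries on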
But why isn't the one automatic call to ROLLBACK enough? Why must I ROLLBACK again?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

This is again a matter of the :class:`.Session` providing a consistent interface and refusing to guess about the context in which it's being used. For example, the :class:`.Session` supports "framing" above within multiple levels. Suppose, for example, you had a decorator ``@with_session()``, which did this::

    def with_session(fn):
        def go(*args, **kw):
            session.begin(subtransactions=True)
            try:
                ret = fn(*args, **kw)
                session.commit()
                return ret
            except:
                session.rollback()
                raise
        return go

The above decorator begins a transaction if one does not exist already, and then commits it, if it were the creator. The "subtransactions" flag means that if :meth:`.Session.begin` were already called by an enclosing function, nothing happens except a counter is incremented - this counter is decremented when :meth:`.Session.commit` is called and only when it goes back to zero does the actual COMMIT happen. It allows this usage pattern::

    @with_session
    def one():
        # do stuff
        two()

    @with_session
    def two():
        # etc.
        pass

    one()

    two()

``one()`` can call ``two()``, or ``two()`` can be called by itself, and the ``@with_session`` decorator ensures the appropriate "framing" - the transaction boundaries stay on the outermost call level. As you can see, if ``two()`` calls ``flush()`` which throws an exception and then issues a ``rollback()``, there will *always* be a second ``rollback()`` performed by the decorator, and possibly a third corresponding to two levels of decorator. If the ``flush()`` pushed the ``rollback()`` all the way out to the top of the stack, and then we said that all remaining ``rollback()`` calls are moot, there is some silent behavior going on there. A poorly written enclosing method might suppress the exception, and then call ``commit()`` assuming nothing is wrong, and then you have a silent failure condition. The main reason people get this error in fact is because they didn't write clean "framing" code and they would have had other problems down the road.

If you think the above use case is a little exotic, the same kind of thing comes into play if you want to SAVEPOINT - you might call ``begin_nested()`` several times, and the ``commit()``/``rollback()`` calls each resolve the most recent ``begin_nested()``. The meaning of ``rollback()`` or ``commit()`` is dependent upon the enclosing block in which it is called, and you might have any sequence of ``rollback()``/``commit()`` in any order, and it's the level of nesting that determines their behavior.

In both of the above cases, if ``flush()`` broke the nesting of transaction blocks, the behavior is, depending on scenario, anywhere from "magic" to silent failure to blatant interruption of code flow. ``flush()`` makes its own "subtransaction", so that a transaction is started up regardless of the external transactional state, and when complete it calls ``commit()``, or ``rollback()`` upon failure - but that ``rollback()`` corresponds to its own subtransaction - it doesn't want to guess how you'd like to handle the external "framing" of the transaction, which could be nested many levels with any combination of subtransactions and real SAVEPOINTs. The job of starting/ending the "frame" is kept consistently with the code external to the ``flush()``, and we made a decision that this was the most consistent approach.

How do I make a Query that always adds a certain filter to every query?
------------------------------------------------------------------------------------------------

See the recipe at `PreFilteredQuery `_.
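Short of the full recipe, a minimal sketch of one building block it relies on - installing a custom :class:`~.orm.query.Query` subclass via ``sessionmaker()`` - could look like this. The ``Address`` class and its ``deleted`` column are hypothetical, and unlike the recipe above, the filter here still has to be requested explicitly::

    from sqlalchemy.orm import Query, sessionmaker

    class FilteredQuery(Query):
        def undeleted(self):
            # apply a fixed criterion; "deleted" is an assumed column
            return self.filter_by(deleted=False)

    Session = sessionmaker(query_cls=FilteredQuery)
    session = Session()

    # session.query(Address).undeleted().all()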
I've created a mapping against an Outer Join, and while the query returns rows, no objects are returned. Why not?
------------------------------------------------------------------------------------------------------------------

Rows returned by an outer join may contain NULL for part of the primary key, as the primary key is the composite of both tables. The :class:`.Query` object ignores incoming rows that don't have an acceptable primary key. Based on the setting of the ``allow_partial_pks`` flag on :func:`.mapper`, a primary key is accepted if the value has at least one non-NULL value, or alternatively if the value has no NULL values. See ``allow_partial_pks`` at :func:`.mapper`.

I'm using ``joinedload()`` or ``lazy=False`` to create a JOIN/OUTER JOIN and SQLAlchemy is not constructing the correct query when I try to add a WHERE, ORDER BY, LIMIT, etc. (which relies upon the (OUTER) JOIN)
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

The joins generated by joined eager loading are only used to fully load related collections, and are designed to have no impact on the primary results of the query. Since they are anonymously aliased, they cannot be referenced directly.

For detail on this behavior, see :ref:`zen_of_eager_loading`.

Query has no ``__len__()``, why not?
------------------------------------

The Python ``__len__()`` magic method applied to an object allows the ``len()`` builtin to be used to determine the length of the collection. It's intuitive that a SQL query object would link ``__len__()`` to the :meth:`.Query.count` method, which emits a ``SELECT COUNT``. The reason this is not possible is that evaluating the query as a list would incur two SQL calls instead of one::

    class Iterates(object):
        def __len__(self):
            print("LEN!")
            return 5

        def __iter__(self):
            print("ITER!")
            return iter([1, 2, 3, 4, 5])

    list(Iterates())

output::

    ITER!
    LEN!

How Do I use Textual SQL with ORM Queries?
-------------------------------------------

See:

* :ref:`orm_tutorial_literal_sql` - Ad-hoc textual blocks with :class:`.Query`

* :ref:`session_sql_expressions` - Using :class:`.Session` with textual SQL directly.

I'm calling ``Session.delete(myobject)`` and it isn't removed from the parent collection!
------------------------------------------------------------------------------------------

See :ref:`session_deleting_from_collections` for a description of this behavior.

why isn't my ``__init__()`` called when I load objects?
-------------------------------------------------------

See :ref:`mapping_constructors` for a description of this behavior.

how do I use ON DELETE CASCADE with SA's ORM?
----------------------------------------------

SQLAlchemy will always issue UPDATE or DELETE statements for dependent rows which are currently loaded in the :class:`.Session`. For rows which are not loaded, it will by default issue SELECT statements to load those rows and update/delete those as well; in other words it assumes there is no ON DELETE CASCADE configured. To configure SQLAlchemy to cooperate with ON DELETE CASCADE, see :ref:`passive_deletes`.
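As a minimal sketch of what that cooperation looks like, here is a hypothetical parent/child mapping - an illustration of the technique described at :ref:`passive_deletes`, not code taken from elsewhere in this document::

    from sqlalchemy import Column, ForeignKey, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship

    Base = declarative_base()

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)

        # passive_deletes lets the database cascade the delete, rather
        # than having the ORM SELECT unloaded children to delete them
        children = relationship(
            "Child", cascade="all, delete-orphan", passive_deletes=True)

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(
            Integer, ForeignKey('parent.id', ondelete='CASCADE'))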
I set the "foo_id" attribute on my instance to "7", but the "foo" attribute is still ``None`` - shouldn't it have loaded Foo with id #7?
----------------------------------------------------------------------------------------------------------------------------------------------------

The ORM is not constructed in such a way as to support immediate population of relationships driven from foreign key attribute changes - instead, it is designed to work the other way around - foreign key attributes are handled by the ORM behind the scenes, while the end user sets up object relationships naturally. Therefore, the recommended way to set ``o.foo`` is to do just that - set it!::

    foo = Session.query(Foo).get(7)
    o.foo = foo
    Session.commit()

Manipulation of foreign key attributes is of course entirely legal. However, setting a foreign-key attribute to a new value currently does not trigger an "expire" event of the :func:`.relationship` in which it's involved. This means that for the following sequence::

    o = Session.query(SomeClass).first()
    assert o.foo is None  # accessing an un-set attribute sets it to None
    o.foo_id = 7

``o.foo`` is initialized to ``None`` when we first accessed it. Setting ``o.foo_id = 7`` will have the value of "7" as pending, but no flush has occurred - so ``o.foo`` is still ``None``::

    # attribute is already set to None, has not been
    # reconciled with o.foo_id = 7 yet
    assert o.foo is None

Loading of ``o.foo`` based on the foreign key mutation is usually achieved naturally after the commit, which both flushes the new foreign key value and expires all state::

    Session.commit()  # expires all attributes

    foo_7 = Session.query(Foo).get(7)

    assert o.foo is foo_7  # o.foo lazyloads on access

A more minimal operation is to expire the attribute individually - this can be performed for any :term:`persistent` object using :meth:`.Session.expire`::

    o = Session.query(SomeClass).first()
    o.foo_id = 7
    Session.expire(o, ['foo'])  # object must be persistent for this

    foo_7 = Session.query(Foo).get(7)

    assert o.foo is foo_7  # o.foo lazyloads on access

Note that if the object is not persistent but present in the :class:`.Session`, it's known as :term:`pending`. This means the row for the object has not been INSERTed into the database yet. For such an object, setting ``foo_id`` does not have meaning until the row is inserted; otherwise there is no row yet::

    new_obj = SomeClass()
    new_obj.foo_id = 7

    Session.add(new_obj)

    # accessing an un-set attribute sets it to None
    assert new_obj.foo is None

    Session.flush()  # emits INSERT

    # expire this attribute, because we already set .foo to None
    Session.expire(new_obj, ['foo'])

    assert new_obj.foo is foo_7  # now it loads

.. topic:: Attribute loading for non-persistent objects

    One variant on the "pending" behavior above is if we use the flag ``load_on_pending`` on :func:`.relationship`. When this flag is set, the lazy loader will emit for ``new_obj.foo`` before the INSERT proceeds; another variant of this is to use the :meth:`.Session.enable_relationship_loading` method, which can "attach" an object to a :class:`.Session` in such a way that many-to-one relationships load according to foreign key attributes regardless of the object being in any particular state. Both techniques are **not recommended for general use**; they were added to suit specific programming scenarios encountered by users which involve the repurposing of the ORM's usual object states.

The recipe `ExpireRelationshipOnFKChange `_ features an example using SQLAlchemy events in order to coordinate the setting of foreign key attributes with many-to-one relationships.
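As a minimal sketch of where the ``load_on_pending`` flag from the note above is placed - the mapping is hypothetical, reusing the declarative setup of the earlier examples, and this is not a recommendation to use the flag::

    class SomeClass(Base):
        __tablename__ = 'someclass'
        id = Column(Integer, primary_key=True)
        foo_id = Column(Integer, ForeignKey('foo.id'))

        # allows the lazy loader to emit for a pending object,
        # before its own row has been INSERTed
        foo = relationship("Foo", load_on_pending=True)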
.. _faq_walk_objects:

How do I walk all objects that are related to a given object?
-------------------------------------------------------------

An object that has other objects related to it will correspond to the :func:`.relationship` constructs set up between mappers. This code fragment will iterate all the objects, correcting for cycles as well::

    from sqlalchemy import inspect

    def walk(obj):
        deque = [obj]

        seen = set()

        while deque:
            obj = deque.pop(0)
            if obj in seen:
                continue
            else:
                seen.add(obj)
                yield obj
            insp = inspect(obj)
            for relationship in insp.mapper.relationships:
                related = getattr(obj, relationship.key)
                if relationship.uselist:
                    deque.extend(related)
                elif related is not None:
                    deque.append(related)

The function can be demonstrated as follows::

    Base = declarative_base()

    class A(Base):
        __tablename__ = 'a'
        id = Column(Integer, primary_key=True)
        bs = relationship("B", backref="a")

    class B(Base):
        __tablename__ = 'b'
        id = Column(Integer, primary_key=True)
        a_id = Column(ForeignKey('a.id'))
        c_id = Column(ForeignKey('c.id'))
        c = relationship("C", backref="bs")

    class C(Base):
        __tablename__ = 'c'
        id = Column(Integer, primary_key=True)

    a1 = A(bs=[B(), B(c=C())])

    for obj in walk(a1):
        print(obj)

Output::

    <__main__.A object at 0x10303b190>
    <__main__.B object at 0x103025210>
    <__main__.B object at 0x10303b0d0>
    <__main__.C object at 0x103025490>

Is there a way to automagically have only unique keywords (or other kinds of objects) without doing a query for the keyword and getting a reference to the row containing that keyword?
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

When people read the many-to-many example in the docs, they get hit with the fact that if you create the same ``Keyword`` twice, it gets put in the DB twice - which is somewhat inconvenient. This `UniqueObject `_ recipe was created to address this issue.
SQLAlchemy-1.0.11/doc/build/faq/sqlexpressions.rst0000664000175000017500000001134112636375552023107 0ustar classicclassic00000000000000SQL Expressions
=================

.. contents::
    :local:
    :class: faq
    :backlinks: none

.. _faq_sql_expression_string:

How do I render SQL expressions as strings, possibly with bound parameters inlined?
------------------------------------------------------------------------------------

The "stringification" of a SQLAlchemy statement or Query in the vast majority of cases is as simple as::

    print(str(statement))

This applies both to an ORM :class:`~.orm.query.Query` and to any :func:`.select` or other statement. Additionally, to get the statement as compiled to a specific dialect or engine, if the statement itself is not already bound to one you can pass this in to :meth:`.ClauseElement.compile`::

    print(statement.compile(someengine))

or without an :class:`.Engine`::

    from sqlalchemy.dialects import postgresql
    print(statement.compile(dialect=postgresql.dialect()))

When given an ORM :class:`~.orm.query.Query` object, in order to get at the :meth:`.ClauseElement.compile` method we only need to access the :attr:`~.orm.query.Query.statement` accessor first::

    statement = query.statement
    print(statement.compile(someengine))

The above forms will render the SQL statement as it is passed to the Python :term:`DBAPI`, which includes that bound parameters are not rendered inline.
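For instance, a quick sketch of that placeholder behavior, using throwaway table and column names::

    from sqlalchemy.sql import table, column, select

    t = table('t', column('x'))
    print(select([t]).where(t.c.x == 5))

prints the parameter as a placeholder rather than as its value::

    SELECT t.x
    FROM t
    WHERE t.x = :x_1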
SQLAlchemy normally does not stringify bound parameters, as this is handled appropriately by the Python DBAPI, not to mention bypassing bound parameters is probably the most widely exploited security hole in modern web applications. SQLAlchemy has limited ability to do this stringification in certain circumstances, such as that of emitting DDL. In order to access this functionality one can use the ``literal_binds`` flag, passed to ``compile_kwargs``::

    from sqlalchemy.sql import table, column, select

    t = table('t', column('x'))

    s = select([t]).where(t.c.x == 5)

    print(s.compile(compile_kwargs={"literal_binds": True}))

The above approach has the caveats that it is only supported for basic types, such as ints and strings, and furthermore if a :func:`.bindparam` without a pre-set value is used directly, it won't be able to stringify that either.

To support inline literal rendering for types not supported, implement a :class:`.TypeDecorator` for the target type which includes a :meth:`.TypeDecorator.process_literal_param` method::

    from sqlalchemy import TypeDecorator, Integer

    class MyFancyType(TypeDecorator):
        impl = Integer

        def process_literal_param(self, value, dialect):
            return "my_fancy_formatting(%s)" % value

    from sqlalchemy import Table, Column, MetaData

    tab = Table('mytable', MetaData(), Column('x', MyFancyType()))

    print(
        tab.select().where(tab.c.x > 5).compile(
            compile_kwargs={"literal_binds": True})
    )

producing output like::

    SELECT mytable.x
    FROM mytable
    WHERE mytable.x > my_fancy_formatting(5)

Why does ``.col.in_([])`` Produce ``col != col``? Why not ``1=0``?
-------------------------------------------------------------------

A little introduction to the issue: the IN operator in SQL, given a list of elements to compare against a column, generally does not accept an empty list. That is, while it is valid to say::

    column IN (1, 2, 3)

it's not valid to say::

    column IN ()

SQLAlchemy's :meth:`.Operators.in_` operator, when given an empty list, produces this expression::

    column != column

As of version 0.6, it also produces a warning stating that a less efficient comparison operation will be rendered. This expression is the only one that is both database agnostic and produces correct results.

For example, the naive approach of "just evaluate to false, by comparing 1=0 or 1!=1", does not handle nulls properly. An expression like::

    NOT column != column

will not return a row when "column" is null, but an expression which does not take the column into account::

    NOT 1=0

will. Closer to the mark is the following CASE expression::

    CASE WHEN column IS NOT NULL THEN 1=0 ELSE NULL END

We don't use this expression due to its verbosity, and it's also not typically accepted by Oracle within a WHERE clause - depending on how you phrase it, you'll either get "ORA-00905: missing keyword" or "ORA-00920: invalid relational operator". It's also still less efficient than just rendering SQL without the clause altogether (or not issuing the SQL at all, if the statement is just a simple search).

The best approach therefore is to avoid the usage of IN given an argument list of zero length. Instead, don't emit the Query in the first place, if no rows should be returned. The warning is best promoted to a full error condition using the Python warnings filter (see http://docs.python.org/library/warnings.html).
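A quick way to see the rendered expression for yourself; compiling it also triggers the warning mentioned above::

    from sqlalchemy.sql import column

    print(column('x').in_([]))

which renders::

    x != x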
.. contents::
    :local:
    :class: faq
    :backlinks: none

How do I configure logging?
---------------------------

See :ref:`dbengine_logging`.

How do I pool database connections?   Are my connections pooled?
----------------------------------------------------------------

SQLAlchemy performs application-level connection pooling automatically in most cases.  With the exception of SQLite, a :class:`.Engine` object refers to a :class:`.QueuePool` as a source of connectivity.

For more detail, see :ref:`engines_toplevel` and :ref:`pooling_toplevel`.

How do I pass custom connect arguments to my database API?
-----------------------------------------------------------

The :func:`.create_engine` call accepts additional arguments either directly via the ``connect_args`` keyword argument::

    e = create_engine("mysql://scott:tiger@localhost/test",
                      connect_args={"encoding": "utf8"})

Or for basic string and integer arguments, they can usually be specified in the query string of the URL::

    e = create_engine("mysql://scott:tiger@localhost/test?encoding=utf8")

.. seealso::

    :ref:`custom_dbapi_args`

"MySQL Server has gone away"
----------------------------

There are two major causes for this error:

1. The MySQL server closes connections which have been idle for a set period of time, defaulting to eight hours.   This can be avoided by using the ``pool_recycle`` setting with :func:`.create_engine`, described at :ref:`mysql_connection_timeouts`.

2. Usage of the MySQLdb :term:`DBAPI`, or a similar DBAPI, in a non-threadsafe manner, or in an otherwise inappropriate way.   The MySQLdb connection object is not threadsafe - this expands out to any SQLAlchemy system that links to a single connection, which includes the ORM :class:`.Session`.  For background on how :class:`.Session` should be used in a multithreaded environment, see :ref:`session_faq_threadsafe`.

Why does SQLAlchemy issue so many ROLLBACKs?
---------------------------------------------

SQLAlchemy currently assumes DBAPI connections are in "non-autocommit" mode - this is the default behavior of the Python database API, meaning it must be assumed that a transaction is always in progress.  The connection pool issues ``connection.rollback()`` when a connection is returned.  This is so that any transactional resources remaining on the connection are released.  On a database like Postgresql or MSSQL where table resources are aggressively locked, this is critical so that rows and tables don't remain locked within connections that are no longer in use.  An application can otherwise hang.  It's not just for locks, however, and is equally critical on any database that has any kind of transaction isolation, including MySQL with InnoDB.  Any connection that is still inside an old transaction will return stale data, if that data was already queried on that connection within isolation.  For background on why you might see stale data even on MySQL, see http://dev.mysql.com/doc/refman/5.1/en/innodb-transaction-model.html

I'm on MyISAM - how do I turn it off?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The connection pool's connection return behavior can be configured using ``reset_on_return``::

    from sqlalchemy import create_engine
    from sqlalchemy.pool import QueuePool

    engine = create_engine('mysql://scott:tiger@localhost/myisam_database',
                           pool=QueuePool(reset_on_return=False))

I'm on SQL Server - how do I turn those ROLLBACKs into COMMITs?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

``reset_on_return`` accepts the values ``commit``, ``rollback`` in addition to ``True``, ``False``, and ``None``.  Setting to ``commit`` will cause a COMMIT as any connection is returned to the pool::

    engine = create_engine('mssql://scott:tiger@mydsn',
                           pool=QueuePool(reset_on_return='commit'))

I am using multiple connections with a SQLite database (typically to test transaction operation), and my test program is not working!
----------------------------------------------------------------------------------------------------------------------------------------------------------

If using a SQLite ``:memory:`` database, or a version of SQLAlchemy prior to version 0.7, the default connection pool is the :class:`.SingletonThreadPool`, which maintains exactly one SQLite connection per thread.  So two connections in use in the same thread will actually be the same SQLite connection.  Make sure you're not using a ``:memory:`` database and use :class:`.NullPool`, which is the default for non-memory databases in current SQLAlchemy versions.

.. seealso::

    :ref:`pysqlite_threading_pooling` - info on PySQLite's behavior.

How do I get at the raw DBAPI connection when using an Engine?
--------------------------------------------------------------

With a regular SA engine-level Connection, you can get at a pool-proxied version of the DBAPI connection via the :attr:`.Connection.connection` attribute on :class:`.Connection`, and for the really-real DBAPI connection you can call the :attr:`.ConnectionFairy.connection` attribute on that - but there should never be any need to access the non-pool-proxied DBAPI connection, as all methods are proxied through::

    engine = create_engine(...)
    conn = engine.connect()
    conn.connection                      # the pool-proxied DBAPI connection
    cursor = conn.connection.cursor()    # DBAPI-specific arguments may be passed here

You must ensure that you revert any isolation level settings or other operation-specific settings on the connection back to normal before returning it to the pool.

As an alternative to reverting settings, you can call the :meth:`.Connection.detach` method on either :class:`.Connection` or the proxied connection, which will de-associate the connection from the pool such that it will be closed and discarded when :meth:`.Connection.close` is called::

    conn = engine.connect()
    conn.detach()      # detaches the DBAPI connection from the connection pool
    conn.connection    # the detached DBAPI connection may be used freely here
    conn.close()       # connection is closed for real, the pool replaces it with a new connection

How do I use engines / connections / sessions with Python multiprocessing, or os.fork()?
----------------------------------------------------------------------------------------

The key goal with multiple Python processes is to prevent any database connections from being shared across processes.  Depending on specifics of the driver and OS, the issues that arise here range from non-working connections to socket connections that are used by multiple processes concurrently, leading to broken messaging (the latter case is typically the most common).

The SQLAlchemy :class:`.Engine` object refers to a connection pool of existing database connections.  So when this object is replicated to a child process, the goal is to ensure that no database connections are carried over.  There are three general approaches to this:

1. Disable pooling using :class:`.NullPool`.  This is the most simplistic, one-shot system that prevents the :class:`.Engine` from using any connection more than once.
2. Call :meth:`.Engine.dispose` on any given :class:`.Engine` as soon as one is within the new process.  In Python multiprocessing, constructs such as ``multiprocessing.Pool`` include "initializer" hooks which are a place that this can be performed; otherwise, at the point where ``os.fork()`` is called or where the ``Process`` object begins the child fork, a single call to :meth:`.Engine.dispose` will ensure any remaining connections are flushed.

3. An event handler can be applied to the connection pool that tests for connections being shared across process boundaries, and invalidates them.  This looks like the following::

    import os
    import warnings

    from sqlalchemy import event
    from sqlalchemy import exc

    def add_engine_pidguard(engine):
        """Add multiprocessing guards.

        Forces a connection to be reconnected if it is detected
        as having been shared to a sub-process.

        """

        @event.listens_for(engine, "connect")
        def connect(dbapi_connection, connection_record):
            connection_record.info['pid'] = os.getpid()

        @event.listens_for(engine, "checkout")
        def checkout(dbapi_connection, connection_record, connection_proxy):
            pid = os.getpid()
            if connection_record.info['pid'] != pid:
                # substitute log.debug() or similar here as desired
                warnings.warn(
                    "Parent process %(orig)s forked (%(newproc)s) with an open "
                    "database connection, "
                    "which is being discarded and recreated." %
                    {"newproc": pid, "orig": connection_record.info['pid']})
                connection_record.connection = connection_proxy.connection = None
                raise exc.DisconnectionError(
                    "Connection record belongs to pid %s, "
                    "attempting to check out in pid %s" %
                    (connection_record.info['pid'], pid)
                )

These events are applied to an :class:`.Engine` as soon as it's created::

    engine = create_engine("...")

    add_engine_pidguard(engine)

The above strategies will accommodate the case of an :class:`.Engine` being shared among processes.  However, for the case of a transaction-active :class:`.Session` or :class:`.Connection` being shared, there's no automatic fix for this; an application needs to ensure that a new child process only initiates new :class:`.Connection` objects and transactions, as well as ORM :class:`.Session` objects.  For a :class:`.Session` object, technically this is only needed if the session is currently transaction-bound; however, the scope of a single :class:`.Session` is in any case intended to be kept within a single call stack (e.g. not a global object, not shared between processes or threads).

SQLAlchemy-1.0.11/doc/build/faq/metadata_schema.rst

==================
MetaData / Schema
==================

.. contents::
    :local:
    :class: faq
    :backlinks: none


My program is hanging when I say ``table.drop()`` / ``metadata.drop_all()``
===========================================================================

This usually corresponds to two conditions: 1. using PostgreSQL, which is really strict about table locks, and 2. you have a connection still open which contains locks on the table and is distinct from the connection being used for the DROP statement.  Here's the most minimal version of the pattern::

    connection = engine.connect()
    result = connection.execute(mytable.select())

    mytable.drop(engine)

Above, a connection pool connection is still checked out; furthermore, the result object above also maintains a link to this connection.  If "implicit execution" is used, the result will hold this connection open until the result object is closed or all rows are exhausted.
The call to ``mytable.drop(engine)`` attempts to emit DROP TABLE on a second connection procured from the :class:`.Engine`; this will block, waiting on the lock held by the first connection.

The solution is to close out all connections before emitting DROP TABLE::

    connection = engine.connect()
    result = connection.execute(mytable.select())

    # fully read result sets
    result.fetchall()

    # close connections
    connection.close()

    # now locks are removed
    mytable.drop(engine)

Does SQLAlchemy support ALTER TABLE, CREATE VIEW, CREATE TRIGGER, Schema Upgrade Functionality?
===============================================================================================

General ALTER support isn't present in SQLAlchemy directly.  For special DDL on an ad-hoc basis, the :class:`.DDL` and related constructs can be used.  See :doc:`core/ddl` for a discussion on this subject.

A more comprehensive option is to use schema migration tools, such as Alembic or SQLAlchemy-Migrate; see :ref:`schema_migrations` for discussion on this.

How can I sort Table objects in order of their dependency?
===========================================================================

This is available via the :attr:`.MetaData.sorted_tables` attribute::

    metadata = MetaData()
    # ... add Table objects to metadata

    for t in metadata.sorted_tables:
        print(t)

How can I get the CREATE TABLE/ DROP TABLE output as a string?
===========================================================================

Modern SQLAlchemy has clause constructs which represent DDL operations.  These can be rendered to strings like any other SQL expression::

    from sqlalchemy.schema import CreateTable

    print(CreateTable(mytable))

To get the string specific to a certain engine::

    print(CreateTable(mytable).compile(engine))

There's also a special form of :class:`.Engine` that can let you dump an entire metadata creation sequence, using this recipe::

    def dump(sql, *multiparams, **params):
        print(sql.compile(dialect=engine.dialect))

    engine = create_engine('postgresql://', strategy='mock', executor=dump)
    metadata.create_all(engine, checkfirst=False)

The `Alembic `_ tool also supports an "offline" SQL generation mode that renders database migrations as SQL scripts.

How can I subclass Table/Column to provide certain behaviors/configurations?
=============================================================================

:class:`.Table` and :class:`.Column` are not good targets for direct subclassing.  However, there are simple ways to get on-construction behaviors using creation functions, and behaviors related to the linkages between schema objects such as constraint conventions or naming conventions using attachment events.  An example of many of these techniques can be seen at `Naming Conventions `_.

SQLAlchemy-1.0.11/doc/build/faq/performance.rst

.. _faq_performance:

Performance
===========

.. contents::
    :local:
    :class: faq
    :backlinks: none

.. _faq_how_to_profile:

How can I profile a SQLAlchemy powered application?
---------------------------------------------------

Looking for performance issues typically involves two strategies.  One is query profiling, and the other is code profiling.

Query Profiling
^^^^^^^^^^^^^^^^

Sometimes just plain SQL logging (enabled via Python's logging module or via the ``echo=True`` argument on :func:`.create_engine`) can give an idea how long things are taking.
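As a concrete starting point, a minimal sketch of enabling SQL logging with timestamps via the ``logging`` module (the in-memory SQLite URL below is just a placeholder)::

    import logging
    from sqlalchemy import create_engine

    # "sqlalchemy.engine" is the logger name SQLAlchemy uses for statement logging
    logging.basicConfig(format="%(asctime)s %(message)s")
    logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)

    engine = create_engine("sqlite://")   # placeholder URL
    engine.execute("SELECT 1")            # each statement is now logged with a timestamp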
For example, if you log something right after a SQL operation, you'd see something like this in your log:: 17:37:48,325 INFO [sqlalchemy.engine.base.Engine.0x...048c] SELECT ... 17:37:48,326 INFO [sqlalchemy.engine.base.Engine.0x...048c] {} 17:37:48,660 DEBUG [myapp.somemessage] if you logged ``myapp.somemessage`` right after the operation, you know it took 334ms to complete the SQL part of things. Logging SQL will also illustrate if dozens/hundreds of queries are being issued which could be better organized into much fewer queries. When using the SQLAlchemy ORM, the "eager loading" feature is provided to partially (:func:`.contains_eager()`) or fully (:func:`.joinedload()`, :func:`.subqueryload()`) automate this activity, but without the ORM "eager loading" typically means to use joins so that results across multiple tables can be loaded in one result set instead of multiplying numbers of queries as more depth is added (i.e. ``r + r*r2 + r*r2*r3`` ...) For more long-term profiling of queries, or to implement an application-side "slow query" monitor, events can be used to intercept cursor executions, using a recipe like the following:: from sqlalchemy import event from sqlalchemy.engine import Engine import time import logging logging.basicConfig() logger = logging.getLogger("myapp.sqltime") logger.setLevel(logging.DEBUG) @event.listens_for(Engine, "before_cursor_execute") def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): conn.info.setdefault('query_start_time', []).append(time.time()) logger.debug("Start Query: %s", statement) @event.listens_for(Engine, "after_cursor_execute") def after_cursor_execute(conn, cursor, statement, parameters, context, executemany): total = time.time() - conn.info['query_start_time'].pop(-1) logger.debug("Query Complete!") logger.debug("Total Time: %f", total) Above, we use the :meth:`.ConnectionEvents.before_cursor_execute` and :meth:`.ConnectionEvents.after_cursor_execute` events to establish an interception point around when a statement is executed. We attach a timer onto the connection using the :class:`._ConnectionRecord.info` dictionary; we use a stack here for the occasional case where the cursor execute events may be nested. Code Profiling ^^^^^^^^^^^^^^ If logging reveals that individual queries are taking too long, you'd need a breakdown of how much time was spent within the database processing the query, sending results over the network, being handled by the :term:`DBAPI`, and finally being received by SQLAlchemy's result set and/or ORM layer. Each of these stages can present their own individual bottlenecks, depending on specifics. For that you need to use the `Python Profiling Module `_. Below is a simple recipe which works profiling into a context manager:: import cProfile import StringIO import pstats import contextlib @contextlib.contextmanager def profiled(): pr = cProfile.Profile() pr.enable() yield pr.disable() s = StringIO.StringIO() ps = pstats.Stats(pr, stream=s).sort_stats('cumulative') ps.print_stats() # uncomment this to see who's calling what # ps.print_callers() print s.getvalue() To profile a section of code:: with profiled(): Session.query(FooClass).filter(FooClass.somevalue==8).all() The output of profiling can be used to give an idea where time is being spent. 
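As a small variation on the above (a sketch; the code to be measured goes where indicated), the :mod:`pstats` output can also be narrowed down after the fact, so that only the heaviest calls are displayed::

    import cProfile
    import pstats

    pr = cProfile.Profile()
    pr.enable()
    # ... run the code under test here ...
    pr.disable()

    ps = pstats.Stats(pr).sort_stats('tottime')
    ps.print_stats(25)   # show only the 25 most expensive calls by total time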
A section of profiling output looks like this:: 13726 function calls (13042 primitive calls) in 0.014 seconds Ordered by: cumulative time ncalls tottime percall cumtime percall filename:lineno(function) 222/21 0.001 0.000 0.011 0.001 lib/sqlalchemy/orm/loading.py:26(instances) 220/20 0.002 0.000 0.010 0.001 lib/sqlalchemy/orm/loading.py:327(_instance) 220/20 0.000 0.000 0.010 0.000 lib/sqlalchemy/orm/loading.py:284(populate_state) 20 0.000 0.000 0.010 0.000 lib/sqlalchemy/orm/strategies.py:987(load_collection_from_subq) 20 0.000 0.000 0.009 0.000 lib/sqlalchemy/orm/strategies.py:935(get) 1 0.000 0.000 0.009 0.009 lib/sqlalchemy/orm/strategies.py:940(_load) 21 0.000 0.000 0.008 0.000 lib/sqlalchemy/orm/strategies.py:942() 2 0.000 0.000 0.004 0.002 lib/sqlalchemy/orm/query.py:2400(__iter__) 2 0.000 0.000 0.002 0.001 lib/sqlalchemy/orm/query.py:2414(_execute_and_instances) 2 0.000 0.000 0.002 0.001 lib/sqlalchemy/engine/base.py:659(execute) 2 0.000 0.000 0.002 0.001 lib/sqlalchemy/sql/elements.py:321(_execute_on_connection) 2 0.000 0.000 0.002 0.001 lib/sqlalchemy/engine/base.py:788(_execute_clauseelement) ... Above, we can see that the ``instances()`` SQLAlchemy function was called 222 times (recursively, and 21 times from the outside), taking a total of .011 seconds for all calls combined. Execution Slowness ^^^^^^^^^^^^^^^^^^ The specifics of these calls can tell us where the time is being spent. If for example, you see time being spent within ``cursor.execute()``, e.g. against the DBAPI:: 2 0.102 0.102 0.204 0.102 {method 'execute' of 'sqlite3.Cursor' objects} this would indicate that the database is taking a long time to start returning results, and it means your query should be optimized, either by adding indexes or restructuring the query and/or underlying schema. For that task, analysis of the query plan is warranted, using a system such as EXPLAIN, SHOW PLAN, etc. as is provided by the database backend. Result Fetching Slowness - Core ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If on the other hand you see many thousands of calls related to fetching rows, or very long calls to ``fetchall()``, it may mean your query is returning more rows than expected, or that the fetching of rows itself is slow. The ORM itself typically uses ``fetchall()`` to fetch rows (or ``fetchmany()`` if the :meth:`.Query.yield_per` option is used). An inordinately large number of rows would be indicated by a very slow call to ``fetchall()`` at the DBAPI level:: 2 0.300 0.600 0.300 0.600 {method 'fetchall' of 'sqlite3.Cursor' objects} An unexpectedly large number of rows, even if the ultimate result doesn't seem to have many rows, can be the result of a cartesian product - when multiple sets of rows are combined together without appropriately joining the tables together. It's often easy to produce this behavior with SQLAlchemy Core or ORM query if the wrong :class:`.Column` objects are used in a complex query, pulling in additional FROM clauses that are unexpected. On the other hand, a fast call to ``fetchall()`` at the DBAPI level, but then slowness when SQLAlchemy's :class:`.ResultProxy` is asked to do a ``fetchall()``, may indicate slowness in processing of datatypes, such as unicode conversions and similar:: # the DBAPI cursor is fast... 2 0.020 0.040 0.020 0.040 {method 'fetchall' of 'sqlite3.Cursor' objects} ... 
# but SQLAlchemy's result proxy is slow, this is type-level processing 2 0.100 0.200 0.100 0.200 lib/sqlalchemy/engine/result.py:778(fetchall) In some cases, a backend might be doing type-level processing that isn't needed. More specifically, seeing calls within the type API that are slow are better indicators - below is what it looks like when we use a type like this:: from sqlalchemy import TypeDecorator import time class Foo(TypeDecorator): impl = String def process_result_value(self, value, thing): # intentionally add slowness for illustration purposes time.sleep(.001) return value the profiling output of this intentionally slow operation can be seen like this:: 200 0.001 0.000 0.237 0.001 lib/sqlalchemy/sql/type_api.py:911(process) 200 0.001 0.000 0.236 0.001 test.py:28(process_result_value) 200 0.235 0.001 0.235 0.001 {time.sleep} that is, we see many expensive calls within the ``type_api`` system, and the actual time consuming thing is the ``time.sleep()`` call. Make sure to check the :doc:`Dialect documentation ` for notes on known performance tuning suggestions at this level, especially for databases like Oracle. There may be systems related to ensuring numeric accuracy or string processing that may not be needed in all cases. There also may be even more low-level points at which row-fetching performance is suffering; for example, if time spent seems to focus on a call like ``socket.receive()``, that could indicate that everything is fast except for the actual network connection, and too much time is spent with data moving over the network. Result Fetching Slowness - ORM ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To detect slowness in ORM fetching of rows (which is the most common area of performance concern), calls like ``populate_state()`` and ``_instance()`` will illustrate individual ORM object populations:: # the ORM calls _instance for each ORM-loaded row it sees, and # populate_state for each ORM-loaded row that results in the population # of an object's attributes 220/20 0.001 0.000 0.010 0.000 lib/sqlalchemy/orm/loading.py:327(_instance) 220/20 0.000 0.000 0.009 0.000 lib/sqlalchemy/orm/loading.py:284(populate_state) The ORM's slowness in turning rows into ORM-mapped objects is a product of the complexity of this operation combined with the overhead of cPython. Common strategies to mitigate this include: * fetch individual columns instead of full entities, that is:: session.query(User.id, User.name) instead of:: session.query(User) * Use :class:`.Bundle` objects to organize column-based results:: u_b = Bundle('user', User.id, User.name) a_b = Bundle('address', Address.id, Address.email) for user, address in session.query(u_b, a_b).join(User.addresses): # ... * Use result caching - see :ref:`examples_caching` for an in-depth example of this. * Consider a faster interpreter like that of Pypy. The output of a profile can be a little daunting but after some practice they are very easy to read. .. seealso:: :ref:`examples_performance` - a suite of performance demonstrations with bundled profiling capabilities. I'm inserting 400,000 rows with the ORM and it's really slow! -------------------------------------------------------------- The SQLAlchemy ORM uses the :term:`unit of work` pattern when synchronizing changes to the database. This pattern goes far beyond simple "inserts" of data. 
It includes that attributes which are assigned on objects are received using an attribute instrumentation system which tracks changes on objects as they are made, includes that all rows inserted are tracked in an identity map which has the effect that for each row SQLAlchemy must retrieve its "last inserted id" if not already given, and also involves that rows to be inserted are scanned and sorted for dependencies as needed. Objects are also subject to a fair degree of bookkeeping in order to keep all of this running, which for a very large number of rows at once can create an inordinate amount of time spent with large data structures, hence it's best to chunk these. Basically, unit of work is a large degree of automation in order to automate the task of persisting a complex object graph into a relational database with no explicit persistence code, and this automation has a price. ORMs are basically not intended for high-performance bulk inserts - this is the whole reason SQLAlchemy offers the Core in addition to the ORM as a first-class component. For the use case of fast bulk inserts, the SQL generation and execution system that the ORM builds on top of is part of the :doc:`Core `. Using this system directly, we can produce an INSERT that is competitive with using the raw database API directly. Alternatively, the SQLAlchemy ORM offers the :ref:`bulk_operations` suite of methods, which provide hooks into subsections of the unit of work process in order to emit Core-level INSERT and UPDATE constructs with a small degree of ORM-based automation. The example below illustrates time-based tests for several different methods of inserting rows, going from the most automated to the least. With cPython 2.7, runtimes observed:: classics-MacBook-Pro:sqlalchemy classic$ python test.py SQLAlchemy ORM: Total time for 100000 records 12.0471920967 secs SQLAlchemy ORM pk given: Total time for 100000 records 7.06283402443 secs SQLAlchemy ORM bulk_save_objects(): Total time for 100000 records 0.856323003769 secs SQLAlchemy Core: Total time for 100000 records 0.485800027847 secs sqlite3: Total time for 100000 records 0.487842082977 sec We can reduce the time by a factor of three using recent versions of `Pypy `_:: classics-MacBook-Pro:sqlalchemy classic$ /usr/local/src/pypy-2.1-beta2-osx64/bin/pypy test.py SQLAlchemy ORM: Total time for 100000 records 5.88369488716 secs SQLAlchemy ORM pk given: Total time for 100000 records 3.52294301987 secs SQLAlchemy Core: Total time for 100000 records 0.613556146622 secs sqlite3: Total time for 100000 records 0.442467927933 sec Script:: import time import sqlite3 from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, create_engine from sqlalchemy.orm import scoped_session, sessionmaker Base = declarative_base() DBSession = scoped_session(sessionmaker()) engine = None class Customer(Base): __tablename__ = "customer" id = Column(Integer, primary_key=True) name = Column(String(255)) def init_sqlalchemy(dbname='sqlite:///sqlalchemy.db'): global engine engine = create_engine(dbname, echo=False) DBSession.remove() DBSession.configure(bind=engine, autoflush=False, expire_on_commit=False) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) def test_sqlalchemy_orm(n=100000): init_sqlalchemy() t0 = time.time() for i in xrange(n): customer = Customer() customer.name = 'NAME ' + str(i) DBSession.add(customer) if i % 1000 == 0: DBSession.flush() DBSession.commit() print( "SQLAlchemy ORM: Total time for " + str(n) + " 
records " + str(time.time() - t0) + " secs") def test_sqlalchemy_orm_pk_given(n=100000): init_sqlalchemy() t0 = time.time() for i in xrange(n): customer = Customer(id=i+1, name="NAME " + str(i)) DBSession.add(customer) if i % 1000 == 0: DBSession.flush() DBSession.commit() print( "SQLAlchemy ORM pk given: Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") def test_sqlalchemy_orm_bulk_insert(n=100000): init_sqlalchemy() t0 = time.time() n1 = n while n1 > 0: n1 = n1 - 10000 DBSession.bulk_insert_mappings( Customer, [ dict(name="NAME " + str(i)) for i in xrange(min(10000, n1)) ] ) DBSession.commit() print( "SQLAlchemy ORM bulk_save_objects(): Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") def test_sqlalchemy_core(n=100000): init_sqlalchemy() t0 = time.time() engine.execute( Customer.__table__.insert(), [{"name": 'NAME ' + str(i)} for i in xrange(n)] ) print( "SQLAlchemy Core: Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") def init_sqlite3(dbname): conn = sqlite3.connect(dbname) c = conn.cursor() c.execute("DROP TABLE IF EXISTS customer") c.execute( "CREATE TABLE customer (id INTEGER NOT NULL, " "name VARCHAR(255), PRIMARY KEY(id))") conn.commit() return conn def test_sqlite3(n=100000, dbname='sqlite3.db'): conn = init_sqlite3(dbname) c = conn.cursor() t0 = time.time() for i in xrange(n): row = ('NAME ' + str(i),) c.execute("INSERT INTO customer (name) VALUES (?)", row) conn.commit() print( "sqlite3: Total time for " + str(n) + " records " + str(time.time() - t0) + " sec") if __name__ == '__main__': test_sqlalchemy_orm(100000) test_sqlalchemy_orm_pk_given(100000) test_sqlalchemy_orm_bulk_insert(100000) test_sqlalchemy_core(100000) test_sqlite3(100000) SQLAlchemy-1.0.11/doc/build/faq/ormconfiguration.rst0000664000175000017500000003310512636375552023374 0ustar classicclassic00000000000000ORM Configuration ================== .. contents:: :local: :class: faq :backlinks: none .. _faq_mapper_primary_key: How do I map a table that has no primary key? --------------------------------------------- The SQLAlchemy ORM, in order to map to a particular table, needs there to be at least one column denoted as a primary key column; multiple-column, i.e. composite, primary keys are of course entirely feasible as well. These columns do **not** need to be actually known to the database as primary key columns, though it's a good idea that they are. It's only necessary that the columns *behave* as a primary key does, e.g. as a unique and not nullable identifier for a row. Most ORMs require that objects have some kind of primary key defined because the object in memory must correspond to a uniquely identifiable row in the database table; at the very least, this allows the object can be targeted for UPDATE and DELETE statements which will affect only that object's row and no other. However, the importance of the primary key goes far beyond that. In SQLAlchemy, all ORM-mapped objects are at all times linked uniquely within a :class:`.Session` to their specific database row using a pattern called the :term:`identity map`, a pattern that's central to the unit of work system employed by SQLAlchemy, and is also key to the most common (and not-so-common) patterns of ORM usage. .. 
note:: It's important to note that we're only talking about the SQLAlchemy ORM; an application which builds on Core and deals only with :class:`.Table` objects, :func:`.select` constructs and the like, **does not** need any primary key to be present on or associated with a table in any way (though again, in SQL, all tables should really have some kind of primary key, lest you need to actually update or delete specific rows). In almost all cases, a table does have a so-called :term:`candidate key`, which is a column or series of columns that uniquely identify a row. If a table truly doesn't have this, and has actual fully duplicate rows, the table is not corresponding to `first normal form `_ and cannot be mapped. Otherwise, whatever columns comprise the best candidate key can be applied directly to the mapper:: class SomeClass(Base): __table__ = some_table_with_no_pk __mapper_args__ = { 'primary_key':[some_table_with_no_pk.c.uid, some_table_with_no_pk.c.bar] } Better yet is when using fully declared table metadata, use the ``primary_key=True`` flag on those columns:: class SomeClass(Base): __tablename__ = "some_table_with_no_pk" uid = Column(Integer, primary_key=True) bar = Column(String, primary_key=True) All tables in a relational database should have primary keys. Even a many-to-many association table - the primary key would be the composite of the two association columns:: CREATE TABLE my_association ( user_id INTEGER REFERENCES user(id), account_id INTEGER REFERENCES account(id), PRIMARY KEY (user_id, account_id) ) How do I configure a Column that is a Python reserved word or similar? ---------------------------------------------------------------------------- Column-based attributes can be given any name desired in the mapping. See :ref:`mapper_column_distinct_names`. How do I get a list of all columns, relationships, mapped attributes, etc. given a mapped class? ------------------------------------------------------------------------------------------------- This information is all available from the :class:`.Mapper` object. To get at the :class:`.Mapper` for a particular mapped class, call the :func:`.inspect` function on it:: from sqlalchemy import inspect mapper = inspect(MyClass) From there, all information about the class can be acquired using such methods as: * :attr:`.Mapper.attrs` - a namespace of all mapped attributes. The attributes themselves are instances of :class:`.MapperProperty`, which contain additional attributes that can lead to the mapped SQL expression or column, if applicable. * :attr:`.Mapper.column_attrs` - the mapped attribute namespace limited to column and SQL expression attributes. You might want to use :attr:`.Mapper.columns` to get at the :class:`.Column` objects directly. * :attr:`.Mapper.relationships` - namespace of all :class:`.RelationshipProperty` attributes. * :attr:`.Mapper.all_orm_descriptors` - namespace of all mapped attributes, plus user-defined attributes defined using systems such as :class:`.hybrid_property`, :class:`.AssociationProxy` and others. * :attr:`.Mapper.columns` - A namespace of :class:`.Column` objects and other named SQL expressions associated with the mapping. * :attr:`.Mapper.mapped_table` - The :class:`.Table` or other selectable to which this mapper is mapped. * :attr:`.Mapper.local_table` - The :class:`.Table` that is "local" to this mapper; this differs from :attr:`.Mapper.mapped_table` in the case of a mapper mapped using inheritance to a composed selectable. .. 
_faq_combining_columns: I'm getting a warning or error about "Implicitly combining column X under attribute Y" -------------------------------------------------------------------------------------- This condition refers to when a mapping contains two columns that are being mapped under the same attribute name due to their name, but there's no indication that this is intentional. A mapped class needs to have explicit names for every attribute that is to store an independent value; when two columns have the same name and aren't disambiguated, they fall under the same attribute and the effect is that the value from one column is **copied** into the other, based on which column was assigned to the attribute first. This behavior is often desirable and is allowed without warning in the case where the two columns are linked together via a foreign key relationship within an inheritance mapping. When the warning or exception occurs, the issue can be resolved by either assigning the columns to differently-named attributes, or if combining them together is desired, by using :func:`.column_property` to make this explicit. Given the example as follows:: from sqlalchemy import Integer, Column, ForeignKey from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) class B(A): __tablename__ = 'b' id = Column(Integer, primary_key=True) a_id = Column(Integer, ForeignKey('a.id')) As of SQLAlchemy version 0.9.5, the above condition is detected, and will warn that the ``id`` column of ``A`` and ``B`` is being combined under the same-named attribute ``id``, which above is a serious issue since it means that a ``B`` object's primary key will always mirror that of its ``A``. A mapping which resolves this is as follows:: class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) class B(A): __tablename__ = 'b' b_id = Column('id', Integer, primary_key=True) a_id = Column(Integer, ForeignKey('a.id')) Suppose we did want ``A.id`` and ``B.id`` to be mirrors of each other, despite the fact that ``B.a_id`` is where ``A.id`` is related. We could combine them together using :func:`.column_property`:: class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) class B(A): __tablename__ = 'b' # probably not what you want, but this is a demonstration id = column_property(Column(Integer, primary_key=True), A.id) a_id = Column(Integer, ForeignKey('a.id')) I'm using Declarative and setting primaryjoin/secondaryjoin using an ``and_()`` or ``or_()``, and I am getting an error message about foreign keys. ------------------------------------------------------------------------------------------------------------------------------------------------------------------ Are you doing this?:: class MyClass(Base): # .... foo = relationship("Dest", primaryjoin=and_("MyClass.id==Dest.foo_id", "MyClass.foo==Dest.bar")) That's an ``and_()`` of two string expressions, which SQLAlchemy cannot apply any mapping towards. Declarative allows :func:`.relationship` arguments to be specified as strings, which are converted into expression objects using ``eval()``. But this doesn't occur inside of an ``and_()`` expression - it's a special operation declarative applies only to the *entirety* of what's passed to primaryjoin or other arguments as a string:: class MyClass(Base): # .... 
foo = relationship("Dest", primaryjoin="and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)") Or if the objects you need are already available, skip the strings:: class MyClass(Base): # .... foo = relationship(Dest, primaryjoin=and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)) The same idea applies to all the other arguments, such as ``foreign_keys``:: # wrong ! foo = relationship(Dest, foreign_keys=["Dest.foo_id", "Dest.bar_id"]) # correct ! foo = relationship(Dest, foreign_keys="[Dest.foo_id, Dest.bar_id]") # also correct ! foo = relationship(Dest, foreign_keys=[Dest.foo_id, Dest.bar_id]) # if you're using columns from the class that you're inside of, just use the column objects ! class MyClass(Base): foo_id = Column(...) bar_id = Column(...) # ... foo = relationship(Dest, foreign_keys=[foo_id, bar_id]) .. _faq_subqueryload_limit_sort: Why is ``ORDER BY`` required with ``LIMIT`` (especially with ``subqueryload()``)? --------------------------------------------------------------------------------- A relational database can return rows in any arbitrary order, when an explicit ordering is not set. While this ordering very often corresponds to the natural order of rows within a table, this is not the case for all databases and all queries. The consequence of this is that any query that limits rows using ``LIMIT`` or ``OFFSET`` should **always** specify an ``ORDER BY``. Otherwise, it is not deterministic which rows will actually be returned. When we use a SQLAlchemy method like :meth:`.Query.first`, we are in fact applying a ``LIMIT`` of one to the query, so without an explicit ordering it is not deterministic what row we actually get back. While we may not notice this for simple queries on databases that usually returns rows in their natural order, it becomes much more of an issue if we also use :func:`.orm.subqueryload` to load related collections, and we may not be loading the collections as intended. SQLAlchemy implements :func:`.orm.subqueryload` by issuing a separate query, the results of which are matched up to the results from the first query. We see two queries emitted like this: .. sourcecode:: python+sql >>> session.query(User).options(subqueryload(User.addresses)).all() {opensql}-- the "main" query SELECT users.id AS users_id FROM users {stop} {opensql}-- the "load" query issued by subqueryload SELECT addresses.id AS addresses_id, addresses.user_id AS addresses_user_id, anon_1.users_id AS anon_1_users_id FROM (SELECT users.id AS users_id FROM users) AS anon_1 JOIN addresses ON anon_1.users_id = addresses.user_id ORDER BY anon_1.users_id The second query embeds the first query as a source of rows. When the inner query uses ``OFFSET`` and/or ``LIMIT`` without ordering, the two queries may not see the same results: .. 
sourcecode:: python+sql

    >>> user = session.query(User).options(subqueryload(User.addresses)).first()
    {opensql}-- the "main" query
    SELECT users.id AS users_id
    FROM users
     LIMIT 1
    {stop}
    {opensql}-- the "load" query issued by subqueryload
    SELECT addresses.id AS addresses_id, addresses.user_id AS addresses_user_id, anon_1.users_id AS anon_1_users_id
    FROM (SELECT users.id AS users_id FROM users LIMIT 1) AS anon_1
    JOIN addresses ON anon_1.users_id = addresses.user_id
    ORDER BY anon_1.users_id

Depending on database specifics, there is a chance we may get a result like the following for the two queries::

    -- query #1
    +--------+
    |users_id|
    +--------+
    |       1|
    +--------+

    -- query #2
    +------------+-----------------+---------------+
    |addresses_id|addresses_user_id|anon_1_users_id|
    +------------+-----------------+---------------+
    |           3|                2|              2|
    +------------+-----------------+---------------+
    |           4|                2|              2|
    +------------+-----------------+---------------+

Above, we receive two ``addresses`` rows for ``user.id`` of 2, and none for 1.  We've wasted two rows and failed to actually load the collection.  This is an insidious error because without looking at the SQL and the results, the ORM will not show that there's any issue; if we access the ``addresses`` for the ``User`` we have, it will emit a lazy load for the collection and we won't see that anything actually went wrong.

The solution to this problem is to always specify a deterministic sort order, so that the main query always returns the same set of rows.  This generally means that you should :meth:`.Query.order_by` on a unique column on the table.  The primary key is a good choice for this::

    session.query(User).options(subqueryload(User.addresses)).order_by(User.id).first()

Note that :func:`.joinedload` does not suffer from the same problem because only one query is ever issued, so the load query cannot be different from the main query.

.. seealso::

    :ref:`subqueryload_ordering`

SQLAlchemy-1.0.11/doc/build/faq/index.rst

.. _faq_toplevel:

============================
Frequently Asked Questions
============================

The Frequently Asked Questions section is a growing collection of commonly asked questions regarding well-known issues.

.. toctree::
    :maxdepth: 1

    connections
    metadata_schema
    sqlexpressions
    ormconfiguration
    performance
    sessions

SQLAlchemy-1.0.11/doc/build/orm/

SQLAlchemy-1.0.11/doc/build/orm/relationship_api.rst

.. automodule:: sqlalchemy.orm

Relationships API
-----------------

.. autofunction:: relationship

.. autofunction:: backref

.. autofunction:: relation

.. autofunction:: dynamic_loader

.. autofunction:: foreign

.. autofunction:: remote

SQLAlchemy-1.0.11/doc/build/orm/relationship_persistence.rst

Special Relationship Persistence Patterns
=========================================

.. _post_update:

Rows that point to themselves / Mutually Dependent Rows
-------------------------------------------------------

This is a very specific case where relationship() must perform an INSERT and a second UPDATE in order to properly populate a row (and vice versa an UPDATE and DELETE in order to delete without violating foreign key constraints).
The two use cases are: * A table contains a foreign key to itself, and a single row will have a foreign key value pointing to its own primary key. * Two tables each contain a foreign key referencing the other table, with a row in each table referencing the other. For example:: user --------------------------------- user_id name related_user_id 1 'ed' 1 Or:: widget entry ------------------------------------------- --------------------------------- widget_id name favorite_entry_id entry_id name widget_id 1 'somewidget' 5 5 'someentry' 1 In the first case, a row points to itself. Technically, a database that uses sequences such as PostgreSQL or Oracle can INSERT the row at once using a previously generated value, but databases which rely upon autoincrement-style primary key identifiers cannot. The :func:`~sqlalchemy.orm.relationship` always assumes a "parent/child" model of row population during flush, so unless you are populating the primary key/foreign key columns directly, :func:`~sqlalchemy.orm.relationship` needs to use two statements. In the second case, the "widget" row must be inserted before any referring "entry" rows, but then the "favorite_entry_id" column of that "widget" row cannot be set until the "entry" rows have been generated. In this case, it's typically impossible to insert the "widget" and "entry" rows using just two INSERT statements; an UPDATE must be performed in order to keep foreign key constraints fulfilled. The exception is if the foreign keys are configured as "deferred until commit" (a feature some databases support) and if the identifiers were populated manually (again essentially bypassing :func:`~sqlalchemy.orm.relationship`). To enable the usage of a supplementary UPDATE statement, we use the :paramref:`~.relationship.post_update` option of :func:`.relationship`. This specifies that the linkage between the two rows should be created using an UPDATE statement after both rows have been INSERTED; it also causes the rows to be de-associated with each other via UPDATE before a DELETE is emitted. The flag should be placed on just *one* of the relationships, preferably the many-to-one side. Below we illustrate a complete example, including two :class:`.ForeignKey` constructs:: from sqlalchemy import Integer, ForeignKey, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class Entry(Base): __tablename__ = 'entry' entry_id = Column(Integer, primary_key=True) widget_id = Column(Integer, ForeignKey('widget.widget_id')) name = Column(String(50)) class Widget(Base): __tablename__ = 'widget' widget_id = Column(Integer, primary_key=True) favorite_entry_id = Column(Integer, ForeignKey('entry.entry_id', name="fk_favorite_entry")) name = Column(String(50)) entries = relationship(Entry, primaryjoin= widget_id==Entry.widget_id) favorite_entry = relationship(Entry, primaryjoin= favorite_entry_id==Entry.entry_id, post_update=True) When a structure against the above configuration is flushed, the "widget" row will be INSERTed minus the "favorite_entry_id" value, then all the "entry" rows will be INSERTed referencing the parent "widget" row, and then an UPDATE statement will populate the "favorite_entry_id" column of the "widget" table (it's one row at a time for the time being): .. 
sourcecode:: pycon+sql

    >>> w1 = Widget(name='somewidget')
    >>> e1 = Entry(name='someentry')
    >>> w1.favorite_entry = e1
    >>> w1.entries = [e1]
    >>> session.add_all([w1, e1])
    {sql}>>> session.commit()
    BEGIN (implicit)
    INSERT INTO widget (favorite_entry_id, name) VALUES (?, ?)
    (None, 'somewidget')
    INSERT INTO entry (widget_id, name) VALUES (?, ?)
    (1, 'someentry')
    UPDATE widget SET favorite_entry_id=? WHERE widget.widget_id = ?
    (1, 1)
    COMMIT

An additional configuration we can specify is to supply a more comprehensive foreign key constraint on ``Widget``, such that it's guaranteed that ``favorite_entry_id`` refers to an ``Entry`` that also refers to this ``Widget``.  We can use a composite foreign key, as illustrated below::

    from sqlalchemy import Integer, ForeignKey, String, \
            Column, UniqueConstraint, ForeignKeyConstraint
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship

    Base = declarative_base()

    class Entry(Base):
        __tablename__ = 'entry'
        entry_id = Column(Integer, primary_key=True)
        widget_id = Column(Integer, ForeignKey('widget.widget_id'))
        name = Column(String(50))

        __table_args__ = (
            UniqueConstraint("entry_id", "widget_id"),
        )

    class Widget(Base):
        __tablename__ = 'widget'

        widget_id = Column(Integer, autoincrement='ignore_fk', primary_key=True)
        favorite_entry_id = Column(Integer)

        name = Column(String(50))

        __table_args__ = (
            ForeignKeyConstraint(
                ["widget_id", "favorite_entry_id"],
                ["entry.widget_id", "entry.entry_id"],
                name="fk_favorite_entry"
            ),
        )

        entries = relationship(Entry, primaryjoin=
                                        widget_id==Entry.widget_id,
                               foreign_keys=Entry.widget_id)
        favorite_entry = relationship(Entry, primaryjoin=
                                        favorite_entry_id==Entry.entry_id,
                                      foreign_keys=favorite_entry_id,
                                      post_update=True)

The above mapping features a composite :class:`.ForeignKeyConstraint` bridging the ``widget_id`` and ``favorite_entry_id`` columns.  To ensure that ``Widget.widget_id`` remains an "autoincrementing" column we specify :paramref:`~.Column.autoincrement` to the value ``"ignore_fk"`` on :class:`.Column`, and additionally on each :func:`.relationship` we must limit those columns considered as part of the foreign key for the purposes of joining and cross-population.

.. _passive_updates:

Mutable Primary Keys / Update Cascades
---------------------------------------

When the primary key of an entity changes, related items which reference the primary key must also be updated.  For databases which enforce referential integrity, the best strategy is to use the database's ON UPDATE CASCADE functionality in order to propagate primary key changes to referenced foreign keys - the values cannot be out of sync for any moment unless the constraints are marked as "deferrable", that is, not enforced until the transaction completes.

It is **highly recommended** that an application which seeks to employ natural primary keys with mutable values use the ``ON UPDATE CASCADE`` capabilities of the database.
An example mapping which illustrates this is:: class User(Base): __tablename__ = 'user' __table_args__ = {'mysql_engine': 'InnoDB'} username = Column(String(50), primary_key=True) fullname = Column(String(100)) addresses = relationship("Address") class Address(Base): __tablename__ = 'address' __table_args__ = {'mysql_engine': 'InnoDB'} email = Column(String(50), primary_key=True) username = Column(String(50), ForeignKey('user.username', onupdate="cascade") ) Above, we illustrate ``onupdate="cascade"`` on the :class:`.ForeignKey` object, and we also illustrate the ``mysql_engine='InnoDB'`` setting which, on a MySQL backend, ensures that the ``InnoDB`` engine supporting referential integrity is used. When using SQLite, referential integrity should be enabled, using the configuration described at :ref:`sqlite_foreign_keys`. Simulating limited ON UPDATE CASCADE without foreign key support ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In those cases when a database that does not support referential integrity is used, and natural primary keys with mutable values are in play, SQLAlchemy offers a feature in order to allow propagation of primary key values to already-referenced foreign keys to a **limited** extent, by emitting an UPDATE statement against foreign key columns that immediately reference a primary key column whose value has changed. The primary platforms without referential integrity features are MySQL when the ``MyISAM`` storage engine is used, and SQLite when the ``PRAGMA foreign_keys=ON`` pragma is not used. The Oracle database also has no support for ``ON UPDATE CASCADE``, but because it still enforces referential integrity, needs constraints to be marked as deferrable so that SQLAlchemy can emit UPDATE statements. The feature is enabled by setting the :paramref:`~.relationship.passive_updates` flag to ``False``, most preferably on a one-to-many or many-to-many :func:`.relationship`. When "updates" are no longer "passive" this indicates that SQLAlchemy will issue UPDATE statements individually for objects referenced in the collection referred to by the parent object with a changing primary key value. This also implies that collections will be fully loaded into memory if not already locally present. Our previous mapping using ``passive_updates=False`` looks like:: class User(Base): __tablename__ = 'user' username = Column(String(50), primary_key=True) fullname = Column(String(100)) # passive_updates=False *only* needed if the database # does not implement ON UPDATE CASCADE addresses = relationship("Address", passive_updates=False) class Address(Base): __tablename__ = 'address' email = Column(String(50), primary_key=True) username = Column(String(50), ForeignKey('user.username')) Key limitations of ``passive_updates=False`` include: * it performs much more poorly than direct database ON UPDATE CASCADE, because it needs to fully pre-load affected collections using SELECT and also must emit UPDATE statements against those values, which it will attempt to run in "batches" but still runs on a per-row basis at the DBAPI level. * the feature cannot "cascade" more than one level. That is, if mapping X has a foreign key which refers to the primary key of mapping Y, but then mapping Y's primary key is itself a foreign key to mapping Z, ``passive_updates=False`` cannot cascade a change in primary key value from ``Z`` to ``X``. 
* Configuring ``passive_updates=False`` only on the many-to-one side of a relationship will not have a full effect, as the unit of work searches only through the current identity map for objects that may be referencing the one with a mutating primary key, not throughout the database. As virtually all databases other than Oracle now support ``ON UPDATE CASCADE``, it is highly recommended that traditional ``ON UPDATE CASCADE`` support be used in the case that natural and mutable primary key values are in use. SQLAlchemy-1.0.11/doc/build/orm/tutorial.rst0000664000175000017500000024572212636375552021712 0ustar classicclassic00000000000000.. _ormtutorial_toplevel: ========================== Object Relational Tutorial ========================== The SQLAlchemy Object Relational Mapper presents a method of associating user-defined Python classes with database tables, and instances of those classes (objects) with rows in their corresponding tables. It includes a system that transparently synchronizes all changes in state between objects and their related rows, called a :term:`unit of work`, as well as a system for expressing database queries in terms of the user defined classes and their defined relationships between each other. The ORM is in contrast to the SQLAlchemy Expression Language, upon which the ORM is constructed. Whereas the SQL Expression Language, introduced in :ref:`sqlexpression_toplevel`, presents a system of representing the primitive constructs of the relational database directly without opinion, the ORM presents a high level and abstracted pattern of usage, which itself is an example of applied usage of the Expression Language. While there is overlap among the usage patterns of the ORM and the Expression Language, the similarities are more superficial than they may at first appear. One approaches the structure and content of data from the perspective of a user-defined :term:`domain model` which is transparently persisted and refreshed from its underlying storage model. The other approaches it from the perspective of literal schema and SQL expression representations which are explicitly composed into messages consumed individually by the database. A successful application may be constructed using the Object Relational Mapper exclusively. In advanced situations, an application constructed with the ORM may make occasional usage of the Expression Language directly in certain areas where specific database interactions are required. The following tutorial is in doctest format, meaning each ``>>>`` line represents something you can type at a Python command prompt, and the following text represents the expected return value. Version Check ============= A quick check to verify that we are on at least **version 1.0** of SQLAlchemy:: >>> import sqlalchemy >>> sqlalchemy.__version__ # doctest:+SKIP 1.0.0 Connecting ========== For this tutorial we will use an in-memory-only SQLite database. To connect we use :func:`~sqlalchemy.create_engine`:: >>> from sqlalchemy import create_engine >>> engine = create_engine('sqlite:///:memory:', echo=True) The ``echo`` flag is a shortcut to setting up SQLAlchemy logging, which is accomplished via Python's standard ``logging`` module. With it enabled, we'll see all the generated SQL produced. If you are working through this tutorial and want less output generated, set it to ``False``. This tutorial will format the SQL behind a popup window so it doesn't get in our way; just click the "SQL" links to see what's being generated. 
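As a side note, the ``echo`` flag can also be toggled on the :class:`.Engine` after the fact; a small sketch (an ordinary attribute assignment, taking effect for subsequent statements)::

    >>> engine.echo = False   # silence SQL logging
    >>> engine.echo = True    # turn it back on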
The return value of :func:`.create_engine` is an instance of
:class:`.Engine`, and it represents the core interface to the
database, adapted through a :term:`dialect` that handles the details
of the database and :term:`DBAPI` in use.  In this case the SQLite
dialect will interpret instructions to the Python built-in ``sqlite3``
module.

.. sidebar:: Lazy Connecting

    The :class:`.Engine`, when first returned by :func:`.create_engine`,
    has not actually tried to connect to the database yet; that happens
    only the first time it is asked to perform a task against the database.

The first time a method like :meth:`.Engine.execute` or :meth:`.Engine.connect`
is called, the :class:`.Engine` establishes a real :term:`DBAPI` connection to the
database, which is then used to emit the SQL.  When using the ORM, we typically
don't use the :class:`.Engine` directly once created; instead, it's used behind
the scenes by the ORM as we'll see shortly.

.. seealso::

    :ref:`database_urls` - includes examples of :func:`.create_engine`
    connecting to several kinds of databases with links to more information.

Declare a Mapping
=================

When using the ORM, the configurational process starts by describing the database
tables we'll be dealing with, and then by defining our own classes which will
be mapped to those tables.  In modern SQLAlchemy,
these two tasks are usually performed together,
using a system known as :ref:`declarative_toplevel`, which allows us to create
classes that include directives to describe the actual database table they will
be mapped to.

Classes mapped using the Declarative system are defined in terms of a base class which
maintains a catalog of classes and
tables relative to that base - this is known as the **declarative base class**.  Our
application will usually have just one instance of this base in a commonly
imported module.   We create the base class using the :func:`.declarative_base`
function, as follows::

    >>> from sqlalchemy.ext.declarative import declarative_base

    >>> Base = declarative_base()

Now that we have a "base", we can define any number of mapped classes in terms
of it.  We will start with just a single table called ``users``, which will store
records for the end-users using our application.
A new class called ``User`` will be the class to which we map this table.  Within
the class, we define details about the table to which we'll be mapping, primarily
the table name, and names and datatypes of columns::

    >>> from sqlalchemy import Column, Integer, String
    >>> class User(Base):
    ...     __tablename__ = 'users'
    ...
    ...     id = Column(Integer, primary_key=True)
    ...     name = Column(String)
    ...     fullname = Column(String)
    ...     password = Column(String)
    ...
    ...     def __repr__(self):
    ...        return "<User(name='%s', fullname='%s', password='%s')>" % (
    ...                             self.name, self.fullname, self.password)

.. sidebar:: Tip

    The ``User`` class defines a ``__repr__()`` method,
    but note that it is **optional**; we only implement it in
    this tutorial so that our examples show nicely
    formatted ``User`` objects.

A class using Declarative at a minimum
needs a ``__tablename__`` attribute, and at least one
:class:`.Column` which is part of a primary key [#]_.  SQLAlchemy never makes any
assumptions by itself about the table to which
a class refers, including that it has no built-in conventions for names,
datatypes, or constraints.   But this doesn't mean
boilerplate is required; instead, you're encouraged to create your
own automated conventions using helper functions and mixin classes, which
is described in detail at :ref:`declarative_mixins`.
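As a small taste of that approach, a mixin can centralize a table-naming
convention and a surrogate primary key; the following is only a sketch, and
the ``SurrogatePK`` and ``Account`` names are illustrative rather than
anything provided by SQLAlchemy::

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declared_attr

    class SurrogatePK(object):
        # derive __tablename__ from the class name automatically
        @declared_attr
        def __tablename__(cls):
            return cls.__name__.lower()

        # every class inheriting this mixin gets the same integer primary key
        id = Column(Integer, primary_key=True)

    class Account(SurrogatePK, Base):
        name = Column(String(50))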
When our class is constructed, Declarative replaces all the :class:`.Column`
objects with special Python accessors known as :term:`descriptors`; this is a
process known as :term:`instrumentation`.   The "instrumented" mapped class
will provide us with the means to refer to our table in a SQL context as well
as to persist and load the values of columns from the database.

Outside of what the mapping process does to our class, the class remains
otherwise mostly a normal Python class, to which we can define any number of
ordinary attributes and methods needed by our application.

.. [#] For information on why a primary key is required, see
   :ref:`faq_mapper_primary_key`.

Create a Schema
===============

With our ``User`` class constructed via the Declarative system, we have defined
information about our table, known as :term:`table metadata`.   The object used by
SQLAlchemy to represent this information for a specific table is called the
:class:`.Table` object, and here Declarative has made one for us.  We can see this
object by inspecting the ``__table__`` attribute::

    >>> User.__table__ # doctest: +NORMALIZE_WHITESPACE
    Table('users', MetaData(bind=None),
                Column('id', Integer(), table=<users>, primary_key=True, nullable=False),
                Column('name', String(), table=<users>),
                Column('fullname', String(), table=<users>),
                Column('password', String(), table=<users>), schema=None)

.. sidebar:: Classical Mappings

    The Declarative system, though highly recommended,
    is not required in order to use SQLAlchemy's ORM.
    Outside of Declarative, any
    plain Python class can be mapped to any :class:`.Table`
    using the :func:`.mapper` function directly; this
    less common usage is described at :ref:`classical_mapping`.

When we declared our class, Declarative used a Python metaclass in order to
perform additional activities once the class declaration was complete; within
this phase, it then created a :class:`.Table` object according to our
specifications, and associated it with the class by constructing
a :class:`.Mapper` object.  This object is a behind-the-scenes object we normally
don't need to deal with directly (though it can provide plenty of information
about our mapping when we need it).

The :class:`.Table` object is a member of a larger collection
known as :class:`.MetaData`.  When using Declarative,
this object is available using the ``.metadata``
attribute of our declarative base class.

The :class:`.MetaData`
is a :term:`registry` which includes the ability to emit a limited set
of schema generation commands to the database.  As our SQLite database
does not actually have a ``users`` table present, we can use :class:`.MetaData`
to issue CREATE TABLE statements to the database for all tables that don't yet
exist.   Below, we call the :meth:`.MetaData.create_all` method, passing in our
:class:`.Engine` as a source of database connectivity.  We will see that
special commands are first emitted to check for the presence of the ``users``
table, and following that the actual ``CREATE TABLE`` statement:

.. sourcecode:: python+sql

    >>> Base.metadata.create_all(engine)
    SELECT ...
    PRAGMA table_info("users")
    ()
    CREATE TABLE users (
        id INTEGER NOT NULL,
        name VARCHAR,
        fullname VARCHAR,
        password VARCHAR,
        PRIMARY KEY (id)
    )
    ()
    COMMIT

.. topic:: Minimal Table Descriptions vs. Full Descriptions

    Users familiar with the syntax of CREATE TABLE may notice that the
    VARCHAR columns were generated without a length; on SQLite and Postgresql,
    this is a valid datatype, but on others, it's not allowed.
    So if running this tutorial on one of those databases, and you wish to
    use SQLAlchemy to issue CREATE TABLE, a "length" may be provided to the
    :class:`~sqlalchemy.types.String` type as below::

        Column(String(50))

    The length field on :class:`~sqlalchemy.types.String`, as well as
    similar precision/scale fields available on
    :class:`~sqlalchemy.types.Integer`, :class:`~sqlalchemy.types.Numeric`,
    etc. are not referenced by SQLAlchemy other than when creating tables.

    Additionally, Firebird and Oracle require sequences to generate new
    primary key identifiers, and SQLAlchemy doesn't generate or assume these
    without being instructed.   For that, you use the
    :class:`~sqlalchemy.schema.Sequence` construct::

        from sqlalchemy import Sequence
        Column(Integer, Sequence('user_id_seq'), primary_key=True)

    A full, foolproof :class:`~sqlalchemy.schema.Table` generated via our
    declarative mapping is therefore::

        class User(Base):
            __tablename__ = 'users'
            id = Column(Integer, Sequence('user_id_seq'), primary_key=True)
            name = Column(String(50))
            fullname = Column(String(50))
            password = Column(String(12))

            def __repr__(self):
                return "<User(name='%s', fullname='%s', password='%s')>" % (
                                        self.name, self.fullname, self.password)

    We include this more verbose table definition separately
    to highlight the difference between a minimal construct geared primarily
    towards in-Python usage only, versus one that will be used to emit CREATE
    TABLE statements on a particular set of backends with more stringent
    requirements.

Create an Instance of the Mapped Class
======================================

With mappings complete, let's now create and inspect a ``User`` object::

    >>> ed_user = User(name='ed', fullname='Ed Jones', password='edspassword')
    >>> ed_user.name
    'ed'
    >>> ed_user.password
    'edspassword'
    >>> str(ed_user.id)
    'None'

.. sidebar:: the ``__init__()`` method

    Our ``User`` class, as defined using the Declarative system, has
    been provided with a constructor (e.g. ``__init__()`` method) which
    automatically accepts keyword names that match the columns we've mapped.
    We are free to define any explicit ``__init__()`` method we prefer on
    our class, which will override the default method provided by
    Declarative.

Even though we didn't specify it in the constructor, the ``id`` attribute
still produces a value of ``None`` when we access it (as opposed to Python's
usual behavior of raising ``AttributeError`` for an undefined attribute).
SQLAlchemy's :term:`instrumentation` normally produces this default value for
column-mapped attributes when first accessed.  For those attributes where
we've actually assigned a value, the instrumentation system is tracking those
assignments for use within an eventual INSERT statement to be emitted to the
database.

Creating a Session
==================

We're now ready to start talking to the database.  The ORM's "handle" to the
database is the :class:`~sqlalchemy.orm.session.Session`.
When we first set up the application, at the same level as our :func:`~sqlalchemy.create_engine` statement, we define a :class:`~sqlalchemy.orm.session.Session` class which will serve as a factory for new :class:`~sqlalchemy.orm.session.Session` objects:: >>> from sqlalchemy.orm import sessionmaker >>> Session = sessionmaker(bind=engine) In the case where your application does not yet have an :class:`~sqlalchemy.engine.Engine` when you define your module-level objects, just set it up like this:: >>> Session = sessionmaker() Later, when you create your engine with :func:`~sqlalchemy.create_engine`, connect it to the :class:`~sqlalchemy.orm.session.Session` using :meth:`~.sessionmaker.configure`:: >>> Session.configure(bind=engine) # once engine is available .. sidebar:: Session Lifecycle Patterns The question of when to make a :class:`.Session` depends a lot on what kind of application is being built. Keep in mind, the :class:`.Session` is just a workspace for your objects, local to a particular database connection - if you think of an application thread as a guest at a dinner party, the :class:`.Session` is the guest's plate and the objects it holds are the food (and the database...the kitchen?)! More on this topic available at :ref:`session_faq_whentocreate`. This custom-made :class:`~sqlalchemy.orm.session.Session` class will create new :class:`~sqlalchemy.orm.session.Session` objects which are bound to our database. Other transactional characteristics may be defined when calling :class:`~.sessionmaker` as well; these are described in a later chapter. Then, whenever you need to have a conversation with the database, you instantiate a :class:`~sqlalchemy.orm.session.Session`:: >>> session = Session() The above :class:`~sqlalchemy.orm.session.Session` is associated with our SQLite-enabled :class:`.Engine`, but it hasn't opened any connections yet. When it's first used, it retrieves a connection from a pool of connections maintained by the :class:`.Engine`, and holds onto it until we commit all changes and/or close the session object. Adding New Objects ================== To persist our ``User`` object, we :meth:`~.Session.add` it to our :class:`~sqlalchemy.orm.session.Session`:: >>> ed_user = User(name='ed', fullname='Ed Jones', password='edspassword') >>> session.add(ed_user) At this point, we say that the instance is **pending**; no SQL has yet been issued and the object is not yet represented by a row in the database. The :class:`~sqlalchemy.orm.session.Session` will issue the SQL to persist ``Ed Jones`` as soon as is needed, using a process known as a **flush**. If we query the database for ``Ed Jones``, all pending information will first be flushed, and the query is issued immediately thereafter. For example, below we create a new :class:`~sqlalchemy.orm.query.Query` object which loads instances of ``User``. We "filter by" the ``name`` attribute of ``ed``, and indicate that we'd like only the first result in the full list of rows. A ``User`` instance is returned which is equivalent to that which we've added: .. sourcecode:: python+sql {sql}>>> our_user = session.query(User).filter_by(name='ed').first() # doctest:+NORMALIZE_WHITESPACE BEGIN (implicit) INSERT INTO users (name, fullname, password) VALUES (?, ?, ?) ('ed', 'Ed Jones', 'edspassword') SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name = ? LIMIT ? OFFSET ? 
    ('ed', 1, 0)
    {stop}>>> our_user
    <User(name='ed', fullname='Ed Jones', password='edspassword')>

In fact, the :class:`~sqlalchemy.orm.session.Session` has identified that the
row returned is the **same** row as one already represented within its
internal map of objects, so we actually got back the identical instance as
that which we just added::

    >>> ed_user is our_user
    True

The ORM concept at work here is known as an :term:`identity map`
and ensures that
all operations upon a particular row within a
:class:`~sqlalchemy.orm.session.Session` operate upon the same set of data.
Once an object with a particular primary key is present in the
:class:`~sqlalchemy.orm.session.Session`, all SQL queries on that
:class:`~sqlalchemy.orm.session.Session` will always return the same Python
object for that particular primary key; it also will raise an error if an
attempt is made to place a second, already-persisted object with the same
primary key within the session.

We can add more ``User`` objects at once using
:func:`~sqlalchemy.orm.session.Session.add_all`:

.. sourcecode:: python+sql

    >>> session.add_all([
    ...     User(name='wendy', fullname='Wendy Williams', password='foobar'),
    ...     User(name='mary', fullname='Mary Contrary', password='xxg527'),
    ...     User(name='fred', fullname='Fred Flinstone', password='blah')])

Also, we've decided the password for Ed isn't too secure, so let's change it:

.. sourcecode:: python+sql

    >>> ed_user.password = 'f8s7ccs'

The :class:`~sqlalchemy.orm.session.Session` is paying attention.  It knows,
for example, that ``Ed Jones`` has been modified:

.. sourcecode:: python+sql

    >>> session.dirty
    IdentitySet([<User(name='ed', fullname='Ed Jones', password='f8s7ccs')>])

and that three new ``User`` objects are pending:

.. sourcecode:: python+sql

    >>> session.new  # doctest: +SKIP
    IdentitySet([<User(name='wendy', fullname='Wendy Williams', password='foobar')>,
    <User(name='mary', fullname='Mary Contrary', password='xxg527')>,
    <User(name='fred', fullname='Fred Flinstone', password='blah')>])

We tell the :class:`~sqlalchemy.orm.session.Session` that we'd like to issue
all remaining changes to the database and commit the transaction, which has
been in progress throughout.  We do this via :meth:`~.Session.commit`:

.. sourcecode:: python+sql

    {sql}>>> session.commit()
    UPDATE users SET password=? WHERE users.id = ?
    ('f8s7ccs', 1)
    INSERT INTO users (name, fullname, password) VALUES (?, ?, ?)
    ('wendy', 'Wendy Williams', 'foobar')
    INSERT INTO users (name, fullname, password) VALUES (?, ?, ?)
    ('mary', 'Mary Contrary', 'xxg527')
    INSERT INTO users (name, fullname, password) VALUES (?, ?, ?)
    ('fred', 'Fred Flinstone', 'blah')
    COMMIT

:meth:`~.Session.commit` flushes whatever remaining changes remain to the
database, and commits the transaction.  The connection resources referenced
by the session are now returned to the connection pool.  Subsequent
operations with this session will occur in a **new** transaction, which will
again re-acquire connection resources when first needed.

If we look at Ed's ``id`` attribute, which earlier was ``None``, it now has a value:

.. sourcecode:: python+sql

    {sql}>>> ed_user.id # doctest: +NORMALIZE_WHITESPACE
    BEGIN (implicit)
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users
    WHERE users.id = ?
    (1,)
    {stop}1

After the :class:`~sqlalchemy.orm.session.Session` inserts new rows in the
database, all newly generated identifiers and database-generated defaults
become available on the instance, either immediately or via
load-on-first-access.  In this case, the entire row was re-loaded on access
because a new transaction was begun after we issued :meth:`~.Session.commit`.
SQLAlchemy by default refreshes data from a previous transaction the first
time it's accessed within a new transaction, so that the most recent state is
available.  The level of reloading is configurable as is described in
:doc:`/orm/session`.

.. topic:: Session Object States

   As our ``User`` object moved from being outside the :class:`.Session`, to
   inside the :class:`.Session` without a primary key, to actually being
   inserted, it moved between three out of four
   available "object states" - **transient**, **pending**, and **persistent**.
   Being aware of these states and what they mean is always a good idea -
   be sure to read :ref:`session_object_states` for a quick overview.

Rolling Back
============

Since the :class:`~sqlalchemy.orm.session.Session` works within a transaction,
we can roll back changes made too.  Let's make two changes that we'll revert;
``ed_user``'s user name gets set to ``Edwardo``:

.. sourcecode:: python+sql

    >>> ed_user.name = 'Edwardo'

and we'll add another erroneous user, ``fake_user``:

.. sourcecode:: python+sql

    >>> fake_user = User(name='fakeuser', fullname='Invalid', password='12345')
    >>> session.add(fake_user)

Querying the session, we can see that they're flushed into the current transaction:

.. sourcecode:: python+sql

    {sql}>>> session.query(User).filter(User.name.in_(['Edwardo', 'fakeuser'])).all()
    UPDATE users SET name=? WHERE users.id = ?
    ('Edwardo', 1)
    INSERT INTO users (name, fullname, password) VALUES (?, ?, ?)
    ('fakeuser', 'Invalid', '12345')
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users
    WHERE users.name IN (?, ?)
    ('Edwardo', 'fakeuser')
    {stop}[<User(name='Edwardo', fullname='Ed Jones', password='f8s7ccs')>, <User(name='fakeuser', fullname='Invalid', password='12345')>]

Rolling back, we can see that ``ed_user``'s name is back to ``ed``, and
``fake_user`` has been kicked out of the session:

.. sourcecode:: python+sql

    {sql}>>> session.rollback()
    ROLLBACK
    {stop}

    {sql}>>> ed_user.name
    BEGIN (implicit)
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users
    WHERE users.id = ?
    (1,)
    {stop}u'ed'

    >>> fake_user in session
    False

issuing a SELECT illustrates the changes made to the database:

.. sourcecode:: python+sql

    {sql}>>> session.query(User).filter(User.name.in_(['ed', 'fakeuser'])).all()
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users
    WHERE users.name IN (?, ?)
    ('ed', 'fakeuser')
    {stop}[<User(name='ed', fullname='Ed Jones', password='f8s7ccs')>]

.. _ormtutorial_querying:

Querying
========

A :class:`~sqlalchemy.orm.query.Query` object is created using the
:class:`~sqlalchemy.orm.session.Session.query()` method on
:class:`~sqlalchemy.orm.session.Session`.  This function takes a variable
number of arguments, which can be any combination of classes and
class-instrumented descriptors.  Below, we indicate a
:class:`~sqlalchemy.orm.query.Query` which loads ``User`` instances.  When
evaluated in an iterative context, the list of ``User`` objects present is
returned:

.. sourcecode:: python+sql

    {sql}>>> for instance in session.query(User).order_by(User.id):
    ...     print(instance.name, instance.fullname)
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users ORDER BY users.id
    ()
    {stop}ed Ed Jones
    wendy Wendy Williams
    mary Mary Contrary
    fred Fred Flinstone

The :class:`~sqlalchemy.orm.query.Query` also accepts ORM-instrumented
descriptors as arguments.
Any time multiple class entities or column-based entities are expressed as
arguments to the :class:`~sqlalchemy.orm.session.Session.query()` function,
the return result is expressed as tuples:

.. sourcecode:: python+sql

    {sql}>>> for name, fullname in session.query(User.name, User.fullname):
    ...     print(name, fullname)
    SELECT users.name AS users_name,
            users.fullname AS users_fullname
    FROM users
    ()
    {stop}ed Ed Jones
    wendy Wendy Williams
    mary Mary Contrary
    fred Fred Flinstone

The tuples returned by :class:`~sqlalchemy.orm.query.Query` are *named*
tuples, supplied by the :class:`.KeyedTuple` class, and can be treated much
like an ordinary Python object. The names are
the same as the attribute's name for an attribute, and the class name for a
class:

.. sourcecode:: python+sql

    {sql}>>> for row in session.query(User, User.name).all():
    ...    print(row.User, row.name)
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users
    ()
    {stop}<User(name='ed', fullname='Ed Jones', password='f8s7ccs')> ed
    <User(name='wendy', fullname='Wendy Williams', password='foobar')> wendy
    <User(name='mary', fullname='Mary Contrary', password='xxg527')> mary
    <User(name='fred', fullname='Fred Flinstone', password='blah')> fred

You can control the names of individual column expressions using the
:meth:`~.ColumnElement.label` construct, which is available from
any :class:`.ColumnElement`-derived object, as well as any class attribute which
is mapped to one (such as ``User.name``):

.. sourcecode:: python+sql

    {sql}>>> for row in session.query(User.name.label('name_label')).all():
    ...    print(row.name_label)
    SELECT users.name AS name_label
    FROM users
    (){stop}ed
    wendy
    mary
    fred

The name given to a full entity such as ``User``, assuming that multiple
entities are present in the call to :meth:`~.Session.query`, can be controlled using
:func:`~.sqlalchemy.orm.aliased` :

.. sourcecode:: python+sql

    >>> from sqlalchemy.orm import aliased
    >>> user_alias = aliased(User, name='user_alias')

    {sql}>>> for row in session.query(user_alias, user_alias.name).all():
    ...    print(row.user_alias)
    SELECT user_alias.id AS user_alias_id,
            user_alias.name AS user_alias_name,
            user_alias.fullname AS user_alias_fullname,
            user_alias.password AS user_alias_password
    FROM users AS user_alias
    (){stop}<User(name='ed', fullname='Ed Jones', password='f8s7ccs')>
    <User(name='wendy', fullname='Wendy Williams', password='foobar')>
    <User(name='mary', fullname='Mary Contrary', password='xxg527')>
    <User(name='fred', fullname='Fred Flinstone', password='blah')>

Basic operations with :class:`~sqlalchemy.orm.query.Query` include issuing
LIMIT and OFFSET, most conveniently using Python array slices and typically in
conjunction with ORDER BY:

.. sourcecode:: python+sql

    {sql}>>> for u in session.query(User).order_by(User.id)[1:3]:
    ...    print(u)
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users ORDER BY users.id
    LIMIT ? OFFSET ?
    (2, 1)
    {stop}<User(name='wendy', fullname='Wendy Williams', password='foobar')>
    <User(name='mary', fullname='Mary Contrary', password='xxg527')>

and filtering results, which is accomplished either with
:func:`~sqlalchemy.orm.query.Query.filter_by`, which uses keyword arguments:

.. sourcecode:: python+sql

    {sql}>>> for name, in session.query(User.name).\
    ...             filter_by(fullname='Ed Jones'):
    ...    print(name)
    SELECT users.name AS users_name FROM users
    WHERE users.fullname = ?
    ('Ed Jones',)
    {stop}ed

...or :func:`~sqlalchemy.orm.query.Query.filter`, which uses more flexible SQL
expression language constructs.  These allow you to use regular Python
operators with the class-level attributes on your mapped class:

.. sourcecode:: python+sql

    {sql}>>> for name, in session.query(User.name).\
    ...             filter(User.fullname=='Ed Jones'):
    ...    print(name)
    SELECT users.name AS users_name FROM users
    WHERE users.fullname = ?
    ('Ed Jones',)
    {stop}ed

The :class:`~sqlalchemy.orm.query.Query` object is fully **generative**, meaning that
most method calls return a new :class:`~sqlalchemy.orm.query.Query`
object upon which further criteria may be added.
For example, to query for users named "ed" with a full name of "Ed Jones", you
can call :func:`~sqlalchemy.orm.query.Query.filter` twice, which joins
criteria using ``AND``:

.. sourcecode:: python+sql

    {sql}>>> for user in session.query(User).\
    ...          filter(User.name=='ed').\
    ...          filter(User.fullname=='Ed Jones'):
    ...    print(user)
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users
    WHERE users.name = ? AND users.fullname = ?
    ('ed', 'Ed Jones')
    {stop}<User(name='ed', fullname='Ed Jones', password='f8s7ccs')>

Common Filter Operators
-----------------------

Here's a rundown of some of the most common operators used in
:func:`~sqlalchemy.orm.query.Query.filter`:

* :meth:`equals <.ColumnOperators.__eq__>`::

    query.filter(User.name == 'ed')

* :meth:`not equals <.ColumnOperators.__ne__>`::

    query.filter(User.name != 'ed')

* :meth:`LIKE <.ColumnOperators.like>`::

    query.filter(User.name.like('%ed%'))

* :meth:`IN <.ColumnOperators.in_>`::

    query.filter(User.name.in_(['ed', 'wendy', 'jack']))

    # works with query objects too:
    query.filter(User.name.in_(
            session.query(User.name).filter(User.name.like('%ed%'))
    ))

* :meth:`NOT IN <.ColumnOperators.notin_>`::

    query.filter(~User.name.in_(['ed', 'wendy', 'jack']))

* :meth:`IS NULL <.ColumnOperators.is_>`::

    query.filter(User.name == None)

    # alternatively, if pep8/linters are a concern
    query.filter(User.name.is_(None))

* :meth:`IS NOT NULL <.ColumnOperators.isnot>`::

    query.filter(User.name != None)

    # alternatively, if pep8/linters are a concern
    query.filter(User.name.isnot(None))

* :func:`AND <.sql.expression.and_>`::

    # use and_()
    from sqlalchemy import and_
    query.filter(and_(User.name == 'ed', User.fullname == 'Ed Jones'))

    # or send multiple expressions to .filter()
    query.filter(User.name == 'ed', User.fullname == 'Ed Jones')

    # or chain multiple filter()/filter_by() calls
    query.filter(User.name == 'ed').filter(User.fullname == 'Ed Jones')

  .. note::  Make sure you use :func:`.and_` and **not** the
     Python ``and`` operator!

* :func:`OR <.sql.expression.or_>`::

    from sqlalchemy import or_
    query.filter(or_(User.name == 'ed', User.name == 'wendy'))

  .. note::  Make sure you use :func:`.or_` and **not** the
     Python ``or`` operator!

* :meth:`MATCH <.ColumnOperators.match>`::

    query.filter(User.name.match('wendy'))

  .. note::  :meth:`~.ColumnOperators.match` uses a database-specific ``MATCH``
     or ``CONTAINS`` function; its behavior will vary by backend and is not
     available on some backends such as SQLite.

Returning Lists and Scalars
---------------------------

A number of methods on :class:`.Query`
immediately issue SQL and return a value containing loaded
database results.  Here's a brief tour:

* :meth:`~.Query.all()` returns a list:

  .. sourcecode:: python+sql

      >>> query = session.query(User).filter(User.name.like('%ed')).order_by(User.id)

      {sql}>>> query.all()
      SELECT users.id AS users_id,
              users.name AS users_name,
              users.fullname AS users_fullname,
              users.password AS users_password
      FROM users
      WHERE users.name LIKE ? ORDER BY users.id
      ('%ed',)
      {stop}[<User(name='ed', fullname='Ed Jones', password='f8s7ccs')>,
            <User(name='fred', fullname='Fred Flinstone', password='blah')>]

* :meth:`~.Query.first()` applies a limit of one and returns
  the first result as a scalar:

  .. sourcecode:: python+sql

      {sql}>>> query.first()
      SELECT users.id AS users_id,
              users.name AS users_name,
              users.fullname AS users_fullname,
              users.password AS users_password
      FROM users
      WHERE users.name LIKE ? ORDER BY users.id
      LIMIT ? OFFSET ?
      ('%ed', 1, 0)
      {stop}<User(name='ed', fullname='Ed Jones', password='f8s7ccs')>

* :meth:`~.Query.one()` fully fetches all rows, and if not
  exactly one object identity or composite row is present in the result, raises
  an error.
  With multiple rows found:

  .. sourcecode:: python+sql

      {sql}>>> from sqlalchemy.orm.exc import MultipleResultsFound
      >>> try:
      ...     user = query.one()
      ... except MultipleResultsFound as e:
      ...     print(e)
      SELECT users.id AS users_id,
              users.name AS users_name,
              users.fullname AS users_fullname,
              users.password AS users_password
      FROM users
      WHERE users.name LIKE ? ORDER BY users.id
      ('%ed',)
      {stop}Multiple rows were found for one()

  With no rows found:

  .. sourcecode:: python+sql

      {sql}>>> from sqlalchemy.orm.exc import NoResultFound
      >>> try:
      ...     user = query.filter(User.id == 99).one()
      ... except NoResultFound as e:
      ...     print(e)
      SELECT users.id AS users_id,
              users.name AS users_name,
              users.fullname AS users_fullname,
              users.password AS users_password
      FROM users
      WHERE users.name LIKE ? AND users.id = ?
      ORDER BY users.id
      ('%ed', 99)
      {stop}No row was found for one()

  The :meth:`~.Query.one` method is great for systems that expect to handle
  "no items found" versus "multiple items found" differently; such as a RESTful
  web service, which may want to raise a "404 not found" when no results are found,
  but raise an application error when multiple results are found.

* :meth:`~.Query.one_or_none` is like :meth:`~.Query.one`, except that if no
  results are found, it doesn't raise an error; it just returns ``None``. Like
  :meth:`~.Query.one`, however, it does raise an error if multiple results are
  found.

* :meth:`~.Query.scalar` invokes the :meth:`~.Query.one` method, and upon
  success returns the first column of the row:

  .. sourcecode:: python+sql

      >>> query = session.query(User.id).filter(User.name == 'ed').\
      ...    order_by(User.id)

      {sql}>>> query.scalar()
      SELECT users.id AS users_id
      FROM users
      WHERE users.name = ? ORDER BY users.id
      ('ed',)
      {stop}1

.. _orm_tutorial_literal_sql:

Using Textual SQL
-----------------

Literal strings can be used flexibly with
:class:`~sqlalchemy.orm.query.Query`, by specifying their use
with the :func:`~.expression.text` construct, which is accepted
by most applicable methods.  For example,
:meth:`~sqlalchemy.orm.query.Query.filter()` and
:meth:`~sqlalchemy.orm.query.Query.order_by()`:

.. sourcecode:: python+sql

    >>> from sqlalchemy import text
    {sql}>>> for user in session.query(User).\
    ...             filter(text("id<224")).\
    ...             order_by(text("id")).all():
    ...     print(user.name)
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users
    WHERE id<224 ORDER BY id
    ()
    {stop}ed
    wendy
    mary
    fred

Bind parameters can be specified with string-based SQL, using a colon.  To
specify the values, use the :meth:`~sqlalchemy.orm.query.Query.params()`
method:

.. sourcecode:: python+sql

    {sql}>>> session.query(User).filter(text("id<:value and name=:name")).\
    ...     params(value=224, name='fred').order_by(User.id).one()
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users
    WHERE id<? and name=? ORDER BY users.id
    (224, 'fred')
    {stop}<User(name='fred', fullname='Fred Flinstone', password='blah')>

To use an entirely string-based statement, use
:meth:`~sqlalchemy.orm.query.Query.from_statement()`; just ensure that the
columns clause of the statement contains the column names normally used by the
mapper (below illustrated using an asterisk):

.. sourcecode:: python+sql

    {sql}>>> session.query(User).from_statement(
    ...                     text("SELECT * FROM users where name=:name")).\
    ...                     params(name='ed').all()
    SELECT * FROM users where name=?
    ('ed',)
    {stop}[<User(name='ed', fullname='Ed Jones', password='f8s7ccs')>]

.. seealso::

    :ref:`sqlexpression_text` - The :func:`.text` construct explained
    from the perspective of Core-only queries.

..
versionchanged:: 1.0.0 The :class:`.Query` construct emits warnings when string SQL fragments are coerced to :func:`.text`, and :func:`.text` should be used explicitly. See :ref:`migration_2992` for background. Counting -------- :class:`~sqlalchemy.orm.query.Query` includes a convenience method for counting called :meth:`~sqlalchemy.orm.query.Query.count()`: .. sourcecode:: python+sql {sql}>>> session.query(User).filter(User.name.like('%ed')).count() SELECT count(*) AS count_1 FROM (SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name LIKE ?) AS anon_1 ('%ed',) {stop}2 .. sidebar:: Counting on ``count()`` :meth:`.Query.count` used to be a very complicated method when it would try to guess whether or not a subquery was needed around the existing query, and in some exotic cases it wouldn't do the right thing. Now that it uses a simple subquery every time, it's only two lines long and always returns the right answer. Use ``func.count()`` if a particular statement absolutely cannot tolerate the subquery being present. The :meth:`~.Query.count()` method is used to determine how many rows the SQL statement would return. Looking at the generated SQL above, SQLAlchemy always places whatever it is we are querying into a subquery, then counts the rows from that. In some cases this can be reduced to a simpler ``SELECT count(*) FROM table``, however modern versions of SQLAlchemy don't try to guess when this is appropriate, as the exact SQL can be emitted using more explicit means. For situations where the "thing to be counted" needs to be indicated specifically, we can specify the "count" function directly using the expression ``func.count()``, available from the :attr:`~sqlalchemy.sql.expression.func` construct. Below we use it to return the count of each distinct user name: .. sourcecode:: python+sql >>> from sqlalchemy import func {sql}>>> session.query(func.count(User.name), User.name).group_by(User.name).all() SELECT count(users.name) AS count_1, users.name AS users_name FROM users GROUP BY users.name () {stop}[(1, u'ed'), (1, u'fred'), (1, u'mary'), (1, u'wendy')] To achieve our simple ``SELECT count(*) FROM table``, we can apply it as: .. sourcecode:: python+sql {sql}>>> session.query(func.count('*')).select_from(User).scalar() SELECT count(?) AS count_1 FROM users ('*',) {stop}4 The usage of :meth:`~.Query.select_from` can be removed if we express the count in terms of the ``User`` primary key directly: .. sourcecode:: python+sql {sql}>>> session.query(func.count(User.id)).scalar() SELECT count(users.id) AS count_1 FROM users () {stop}4 .. _orm_tutorial_relationship: Building a Relationship ======================= Let's consider how a second table, related to ``User``, can be mapped and queried. Users in our system can store any number of email addresses associated with their username. This implies a basic one to many association from the ``users`` to a new table which stores email addresses, which we will call ``addresses``. Using declarative, we define this table along with its mapped class, ``Address``: .. sourcecode:: python+sql >>> from sqlalchemy import ForeignKey >>> from sqlalchemy.orm import relationship >>> class Address(Base): ... __tablename__ = 'addresses' ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) ... user_id = Column(Integer, ForeignKey('users.id')) ... ... user = relationship("User", back_populates="addresses") ... ... 
def __repr__(self):
    ...         return "<Address(email_address='%s')>" % self.email_address

    >>> User.addresses = relationship(
    ...     "Address", order_by=Address.id, back_populates="user")

The above class introduces the :class:`.ForeignKey` construct, which is a
directive applied to :class:`.Column` that indicates that values in this
column should be :term:`constrained` to be values present in the named remote
column. This is a core feature of relational databases, and is the "glue" that
transforms an otherwise unconnected collection of tables to have rich
overlapping relationships. The :class:`.ForeignKey` above expresses that
values in the ``addresses.user_id`` column should be constrained to
those values in the ``users.id`` column, i.e. its primary key.

A second directive, known as :func:`.relationship`,
tells the ORM that the ``Address`` class itself should be linked
to the ``User`` class, using the attribute ``Address.user``.
:func:`.relationship` uses the foreign key
relationships between the two tables to determine the nature of
this linkage, determining that ``Address.user`` will be :term:`many to one`.
An additional :func:`.relationship` directive is placed on the
``User`` mapped class under the attribute ``User.addresses``.   In both
:func:`.relationship` directives, the parameter
:paramref:`.relationship.back_populates` is assigned to refer to the
complementary attribute names; by doing so, each :func:`.relationship`
can make intelligent decisions about the same relationship as expressed
in reverse; on one side, ``Address.user`` refers to a ``User`` instance,
and on the other side, ``User.addresses`` refers to a list of
``Address`` instances.

.. note::

    The :paramref:`.relationship.back_populates` parameter is a newer
    version of a very common SQLAlchemy feature called
    :paramref:`.relationship.backref`.  The :paramref:`.relationship.backref`
    parameter hasn't gone anywhere and will always remain available!
    The :paramref:`.relationship.back_populates` is the same thing, except
    a little more verbose and easier to manipulate.  For an overview of
    the entire topic, see the section :ref:`relationships_backref`.

The reverse side of a many-to-one relationship is always :term:`one to many`.
A full catalog of available :func:`.relationship` configurations
is at :ref:`relationship_patterns`.

The two complementing relationships ``Address.user`` and ``User.addresses``
are referred to as a :term:`bidirectional relationship`, and is a key
feature of the SQLAlchemy ORM.   The section :ref:`relationships_backref`
discusses the "backref" feature in detail.

Arguments to :func:`.relationship` which concern the remote class
can be specified using strings, assuming the Declarative system is in
use.   Once all mappings are complete, these strings are evaluated
as Python expressions in order to produce the actual argument, in the
above case the ``User`` class.   The names which are allowed during
this evaluation include, among other things, the names of all classes
which have been created in terms of the declared base.  See the docstring
for :func:`.relationship` for more detail on argument style.

.. topic:: Did you know ?

    * a FOREIGN KEY constraint in most (though not all) relational databases
      can only link to a primary key column, or a column that has a UNIQUE
      constraint.

    * a FOREIGN KEY constraint that refers to a multiple column primary key,
      and itself has multiple columns, is known as a "composite foreign key".
      It can also reference a subset of those columns.
    * FOREIGN KEY columns can automatically update themselves, in response to a change
      in the referenced column or row.  This is known as the CASCADE
      *referential action*, and is a built in function of the relational
      database.

    * FOREIGN KEY can refer to its own table.  This is referred to as a
      "self-referential" foreign key.

    * Read more about foreign keys at
      `Foreign Key - Wikipedia <http://en.wikipedia.org/wiki/Foreign_key>`_.

We'll need to create the ``addresses`` table in the database, so we will issue
another CREATE from our metadata, which will skip over tables which have
already been created:

.. sourcecode:: python+sql

    {sql}>>> Base.metadata.create_all(engine)
    PRAGMA...
    CREATE TABLE addresses (
        id INTEGER NOT NULL,
        email_address VARCHAR NOT NULL,
        user_id INTEGER,
        PRIMARY KEY (id),
        FOREIGN KEY(user_id) REFERENCES users (id)
    )
    ()
    COMMIT

Working with Related Objects
============================

Now when we create a ``User``, a blank ``addresses`` collection will be
present.  Various collection types, such as sets and dictionaries, are possible
here (see :ref:`custom_collections` for details), but by
default, the collection is a Python list.

.. sourcecode:: python+sql

    >>> jack = User(name='jack', fullname='Jack Bean', password='gjffdd')
    >>> jack.addresses
    []

We are free to add ``Address`` objects on our ``User`` object.  In this case we
just assign a full list directly:

.. sourcecode:: python+sql

    >>> jack.addresses = [
    ...                 Address(email_address='jack@google.com'),
    ...                 Address(email_address='j25@yahoo.com')]

When using a bidirectional relationship, elements added in one direction
automatically become visible in the other direction.  This behavior occurs
based on attribute on-change events and is evaluated in Python, without
using any SQL:

.. sourcecode:: python+sql

    >>> jack.addresses[1]
    <Address(email_address='j25@yahoo.com')>

    >>> jack.addresses[1].user
    <User(name='jack', fullname='Jack Bean', password='gjffdd')>

Let's add and commit ``Jack Bean`` to the database. ``jack`` as well
as the two ``Address`` members in the corresponding ``addresses``
collection are both added to the session at once, using a process
known as **cascading**:

.. sourcecode:: python+sql

    >>> session.add(jack)
    {sql}>>> session.commit()
    INSERT INTO users (name, fullname, password) VALUES (?, ?, ?)
    ('jack', 'Jack Bean', 'gjffdd')
    INSERT INTO addresses (email_address, user_id) VALUES (?, ?)
    ('jack@google.com', 5)
    INSERT INTO addresses (email_address, user_id) VALUES (?, ?)
    ('j25@yahoo.com', 5)
    COMMIT

Querying for Jack, we get just Jack back.  No SQL is yet issued for Jack's addresses:

.. sourcecode:: python+sql

    {sql}>>> jack = session.query(User).\
    ...                     filter_by(name='jack').one()
    BEGIN (implicit)
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users
    WHERE users.name = ?
    ('jack',)
    {stop}>>> jack
    <User(name='jack', fullname='Jack Bean', password='gjffdd')>

Let's look at the ``addresses`` collection.  Watch the SQL:

.. sourcecode:: python+sql

    {sql}>>> jack.addresses
    SELECT addresses.id AS addresses_id,
            addresses.email_address AS addresses_email_address,
            addresses.user_id AS addresses_user_id
    FROM addresses
    WHERE ? = addresses.user_id ORDER BY addresses.id
    (5,)
    {stop}[<Address(email_address='jack@google.com')>, <Address(email_address='j25@yahoo.com')>]

When we accessed the ``addresses`` collection, SQL was suddenly issued.  This
is an example of a :term:`lazy loading` relationship.  The ``addresses`` collection
is now loaded and behaves just like an ordinary list.  We'll cover ways
to optimize the loading of this collection in a bit.

..
_ormtutorial_joins:

Querying with Joins
====================

Now that we have two tables, we can show some more features of :class:`.Query`,
specifically how to create queries that deal with both tables at the same time.
The `Wikipedia page on SQL JOIN
<http://en.wikipedia.org/wiki/Join_%28SQL%29>`_ offers a good introduction to
join techniques, several of which we'll illustrate here.

To construct a simple implicit join between ``User`` and ``Address``,
we can use :meth:`.Query.filter()` to equate their related columns together.
Below we load the ``User`` and ``Address`` entities at once using this method:

.. sourcecode:: python+sql

    {sql}>>> for u, a in session.query(User, Address).\
    ...                     filter(User.id==Address.user_id).\
    ...                     filter(Address.email_address=='jack@google.com').\
    ...                     all():
    ...     print(u)
    ...     print(a)
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password,
            addresses.id AS addresses_id,
            addresses.email_address AS addresses_email_address,
            addresses.user_id AS addresses_user_id
    FROM users, addresses
    WHERE users.id = addresses.user_id
            AND addresses.email_address = ?
    ('jack@google.com',)
    {stop}<User(name='jack', fullname='Jack Bean', password='gjffdd')>
    <Address(email_address='jack@google.com')>

The actual SQL JOIN syntax, on the other hand, is most easily achieved
using the :meth:`.Query.join` method:

.. sourcecode:: python+sql

    {sql}>>> session.query(User).join(Address).\
    ...         filter(Address.email_address=='jack@google.com').\
    ...         all()
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users JOIN addresses
            ON users.id = addresses.user_id
    WHERE addresses.email_address = ?
    ('jack@google.com',)
    {stop}[<User(name='jack', fullname='Jack Bean', password='gjffdd')>]

:meth:`.Query.join` knows how to join between ``User``
and ``Address`` because there's only one foreign key between them.  If there
were no foreign keys, or several, :meth:`.Query.join`
works better when one of the following forms are used::

    query.join(Address, User.id==Address.user_id)    # explicit condition
    query.join(User.addresses)                       # specify relationship from left to right
    query.join(Address, User.addresses)              # same, with explicit target
    query.join('addresses')                          # same, using a string

As you would expect, the same idea is used for "outer" joins, using the
:meth:`~.Query.outerjoin` function::

    query.outerjoin(User.addresses)   # LEFT OUTER JOIN

The reference documentation for :meth:`~.Query.join` contains detailed information
and examples of the calling styles accepted by this method; :meth:`~.Query.join`
is an important method at the center of usage for any SQL-fluent application.

.. topic:: What does :class:`.Query` select from if there's multiple entities?

    The :meth:`.Query.join` method will **typically join from the leftmost
    item** in the list of entities, when the ON clause is omitted, or if the
    ON clause is a plain SQL expression. To control the first entity in the list
    of JOINs, use the :meth:`.Query.select_from` method::

        query = Session.query(User, Address).select_from(Address).join(User)

.. _ormtutorial_aliases:

Using Aliases
-------------

When querying across multiple tables, if the same table needs to be referenced
more than once, SQL typically requires that the table be *aliased* with
another name, so that it can be distinguished against other occurrences of
that table. The :class:`~sqlalchemy.orm.query.Query` supports this most
explicitly using the :attr:`~sqlalchemy.orm.aliased` construct.  Below we join to the ``Address``
entity twice, to locate a user who has two distinct email addresses at the
same time:

..
sourcecode:: python+sql

    >>> from sqlalchemy.orm import aliased
    >>> adalias1 = aliased(Address)
    >>> adalias2 = aliased(Address)
    {sql}>>> for username, email1, email2 in \
    ...     session.query(User.name, adalias1.email_address, adalias2.email_address).\
    ...     join(adalias1, User.addresses).\
    ...     join(adalias2, User.addresses).\
    ...     filter(adalias1.email_address=='jack@google.com').\
    ...     filter(adalias2.email_address=='j25@yahoo.com'):
    ...     print(username, email1, email2)
    SELECT users.name AS users_name,
            addresses_1.email_address AS addresses_1_email_address,
            addresses_2.email_address AS addresses_2_email_address
    FROM users JOIN addresses AS addresses_1
            ON users.id = addresses_1.user_id
    JOIN addresses AS addresses_2
            ON users.id = addresses_2.user_id
    WHERE addresses_1.email_address = ?
            AND addresses_2.email_address = ?
    ('jack@google.com', 'j25@yahoo.com')
    {stop}jack jack@google.com j25@yahoo.com

Using Subqueries
----------------

The :class:`~sqlalchemy.orm.query.Query` is suitable for generating statements which can be used as
subqueries.  Suppose we wanted to load ``User`` objects along with a count of how many
``Address`` records each user has.  The best way to generate SQL like this is to get the count of
addresses grouped by user ids, and JOIN to the parent.   In this case we use a
LEFT OUTER JOIN so that we get rows back for those users who don't have any
addresses, e.g.::

    SELECT users.*, adr_count.address_count FROM users LEFT OUTER JOIN
        (SELECT user_id, count(*) AS address_count
            FROM addresses GROUP BY user_id) AS adr_count
        ON users.id=adr_count.user_id

Using the :class:`~sqlalchemy.orm.query.Query`, we build a statement like this from the inside out.
The ``statement`` accessor returns a SQL expression representing the
statement generated by a particular :class:`~sqlalchemy.orm.query.Query` -
this is an instance of a :func:`~.expression.select` construct, which are
described in :ref:`sqlexpression_toplevel`::

    >>> from sqlalchemy.sql import func
    >>> stmt = session.query(Address.user_id, func.count('*').\
    ...         label('address_count')).\
    ...         group_by(Address.user_id).subquery()

The ``func`` keyword generates SQL functions, and the ``subquery()`` method on
:class:`~sqlalchemy.orm.query.Query` produces a SQL expression construct
representing a SELECT statement embedded within an alias (it's actually
shorthand for ``query.statement.alias()``).

Once we have our statement, it behaves like a
:class:`~sqlalchemy.schema.Table` construct, such as the one we created for
``users`` at the start of this tutorial.  The columns on the statement are
accessible through an attribute called ``c``:

.. sourcecode:: python+sql

    {sql}>>> for u, count in session.query(User, stmt.c.address_count).\
    ...     outerjoin(stmt, User.id==stmt.c.user_id).order_by(User.id):
    ...     print(u, count)
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password,
            anon_1.address_count AS anon_1_address_count
    FROM users
        LEFT OUTER JOIN
            (SELECT addresses.user_id AS user_id, count(?) AS address_count
            FROM addresses GROUP BY addresses.user_id) AS anon_1
            ON users.id = anon_1.user_id
    ORDER BY users.id
    ('*',)
    {stop}<User(name='ed', fullname='Ed Jones', password='f8s7ccs')> None
    <User(name='wendy', fullname='Wendy Williams', password='foobar')> None
    <User(name='mary', fullname='Mary Contrary', password='xxg527')> None
    <User(name='fred', fullname='Fred Flinstone', password='blah')> None
    <User(name='jack', fullname='Jack Bean', password='gjffdd')> 2

Selecting Entities from Subqueries
----------------------------------

Above, we just selected a result that included a column from a subquery.
What if we wanted our subquery to map to an entity ?   For this we use ``aliased()``
to associate an "alias" of a mapped class to a subquery:

.. sourcecode:: python+sql

    {sql}>>> stmt = session.query(Address).\
    ...                 filter(Address.email_address != 'j25@yahoo.com').\
    ...                 subquery()

    >>> adalias = aliased(Address, stmt)

    {sql}>>> for user, address in session.query(User, adalias).\
    ...         join(adalias, User.addresses):
    ...     print(user)
    ...     print(address)
    SELECT users.id AS users_id,
                users.name AS users_name,
                users.fullname AS users_fullname,
                users.password AS users_password,
                anon_1.id AS anon_1_id,
                anon_1.email_address AS anon_1_email_address,
                anon_1.user_id AS anon_1_user_id
    FROM users JOIN
        (SELECT addresses.id AS id,
                addresses.email_address AS email_address,
                addresses.user_id AS user_id
        FROM addresses
        WHERE addresses.email_address != ?) AS anon_1
        ON users.id = anon_1.user_id
    ('j25@yahoo.com',)
    {stop}<User(name='jack', fullname='Jack Bean', password='gjffdd')>
    <Address(email_address='jack@google.com')>

Using EXISTS
------------

The EXISTS keyword in SQL is a boolean operator which returns True if the
given expression contains any rows.  It may be used in many scenarios in place
of joins, and is also useful for locating rows which do not have a
corresponding row in a related table.

There is an explicit EXISTS construct, which looks like this:

.. sourcecode:: python+sql

    >>> from sqlalchemy.sql import exists
    >>> stmt = exists().where(Address.user_id==User.id)
    {sql}>>> for name, in session.query(User.name).filter(stmt):
    ...     print(name)
    SELECT users.name AS users_name
    FROM users
    WHERE EXISTS (SELECT *
    FROM addresses
    WHERE addresses.user_id = users.id)
    ()
    {stop}jack

The :class:`~sqlalchemy.orm.query.Query` features several operators which make
usage of EXISTS automatically.  Above, the statement can be expressed along the
``User.addresses`` relationship using :meth:`~.RelationshipProperty.Comparator.any`:

.. sourcecode:: python+sql

    {sql}>>> for name, in session.query(User.name).\
    ...         filter(User.addresses.any()):
    ...     print(name)
    SELECT users.name AS users_name
    FROM users
    WHERE EXISTS (SELECT 1
    FROM addresses
    WHERE users.id = addresses.user_id)
    ()
    {stop}jack

:meth:`~.RelationshipProperty.Comparator.any` takes criterion as well,
to limit the rows matched:

.. sourcecode:: python+sql

    {sql}>>> for name, in session.query(User.name).\
    ...     filter(User.addresses.any(Address.email_address.like('%google%'))):
    ...     print(name)
    SELECT users.name AS users_name
    FROM users
    WHERE EXISTS (SELECT 1
    FROM addresses
    WHERE users.id = addresses.user_id
    AND addresses.email_address LIKE ?)
    ('%google%',)
    {stop}jack

:meth:`~.RelationshipProperty.Comparator.has` is the same operator as
:meth:`~.RelationshipProperty.Comparator.any` for many-to-one relationships
(note the ``~`` operator here too, which means "NOT"):

.. sourcecode:: python+sql

    {sql}>>> session.query(Address).\
    ...             filter(~Address.user.has(User.name=='jack')).all()
    SELECT addresses.id AS addresses_id,
            addresses.email_address AS addresses_email_address,
            addresses.user_id AS addresses_user_id
    FROM addresses
    WHERE NOT (EXISTS (SELECT 1
    FROM users
    WHERE users.id = addresses.user_id AND users.name = ?))
    ('jack',)
    {stop}[]

Common Relationship Operators
-----------------------------

Here's all the operators which build on relationships - each one
is linked to its API documentation which includes full details on usage
and behavior:

* :meth:`~.RelationshipProperty.Comparator.__eq__` (many-to-one "equals" comparison)::

    query.filter(Address.user == someuser)

* :meth:`~.RelationshipProperty.Comparator.__ne__` (many-to-one "not equals" comparison)::

    query.filter(Address.user != someuser)

* IS NULL (many-to-one comparison, also uses :meth:`~.RelationshipProperty.Comparator.__eq__`)::

    query.filter(Address.user == None)

* :meth:`~.RelationshipProperty.Comparator.contains` (used for one-to-many collections)::

    query.filter(User.addresses.contains(someaddress))

* :meth:`~.RelationshipProperty.Comparator.any` (used for collections)::

    query.filter(User.addresses.any(Address.email_address == 'bar'))

    # also takes keyword arguments:
    query.filter(User.addresses.any(email_address='bar'))

* :meth:`~.RelationshipProperty.Comparator.has` (used for scalar references)::

    query.filter(Address.user.has(name='ed'))

* :meth:`.Query.with_parent` (used for any relationship)::

    session.query(Address).with_parent(someuser, 'addresses')

Eager Loading
=============

Recall earlier that we illustrated a :term:`lazy loading` operation, when
we accessed the ``User.addresses`` collection of a ``User`` and SQL
was emitted.  If you want to reduce the number of queries (dramatically, in many cases),
we can apply an :term:`eager load` to the query operation.   SQLAlchemy
offers three types of eager loading, two of which are automatic, and a third
which involves custom criterion.   All three are usually invoked via functions known
as :term:`query options` which give additional instructions to the :class:`.Query` on how
we would like various attributes to be loaded, via the :meth:`.Query.options` method.

Subquery Load
-------------

In this case we'd like to indicate that ``User.addresses`` should load eagerly.
A good choice for loading a set of objects as well as their related collections
is the :func:`.orm.subqueryload` option, which emits a second SELECT statement
that fully loads the collections associated with the results just loaded.
The name "subquery" originates from the fact that the SELECT statement
constructed directly via the :class:`.Query` is re-used, embedded as a subquery
into a SELECT against the related table.   This is a little elaborate but
very easy to use:

.. sourcecode:: python+sql

    >>> from sqlalchemy.orm import subqueryload
    {sql}>>> jack = session.query(User).\
    ...                 options(subqueryload(User.addresses)).\
    ...                 filter_by(name='jack').one()
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users
    WHERE users.name = ?
    ('jack',)
    SELECT addresses.id AS addresses_id,
            addresses.email_address AS addresses_email_address,
            addresses.user_id AS addresses_user_id,
            anon_1.users_id AS anon_1_users_id
    FROM (SELECT users.id AS users_id
        FROM users
        WHERE users.name = ?) AS anon_1
    JOIN addresses ON anon_1.users_id = addresses.user_id
    ORDER BY anon_1.users_id, addresses.id
    ('jack',)
    {stop}>>> jack
    <User(name='jack', fullname='Jack Bean', password='gjffdd')>

    >>> jack.addresses
    [<Address(email_address='jack@google.com')>, <Address(email_address='j25@yahoo.com')>]

..
note:: :func:`.subqueryload` when used in conjunction with limiting such
   as :meth:`.Query.first`, :meth:`.Query.limit` or :meth:`.Query.offset`
   should also include :meth:`.Query.order_by` on a unique column in order to
   ensure correct results.  See :ref:`subqueryload_ordering`.

Joined Load
-------------

The other automatic eager loading function is more well known and is called
:func:`.orm.joinedload`.   This style of loading emits a JOIN, by default
a LEFT OUTER JOIN, so that the lead object as well as the related object
or collection is loaded in one step.   We illustrate loading the same
``addresses`` collection in this way - note that even though the ``User.addresses``
collection on ``jack`` is actually populated right now, the query
will emit the extra join regardless:

.. sourcecode:: python+sql

    >>> from sqlalchemy.orm import joinedload

    {sql}>>> jack = session.query(User).\
    ...                        options(joinedload(User.addresses)).\
    ...                        filter_by(name='jack').one()
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password,
            addresses_1.id AS addresses_1_id,
            addresses_1.email_address AS addresses_1_email_address,
            addresses_1.user_id AS addresses_1_user_id
    FROM users
        LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id
    WHERE users.name = ? ORDER BY addresses_1.id
    ('jack',)
    {stop}>>> jack
    <User(name='jack', fullname='Jack Bean', password='gjffdd')>

    >>> jack.addresses
    [<Address(email_address='jack@google.com')>, <Address(email_address='j25@yahoo.com')>]

Note that even though the OUTER JOIN resulted in two rows, we still only got
one instance of ``User`` back.  This is because :class:`.Query` applies a "uniquing"
strategy, based on object identity, to the returned entities.  This is
specifically so that joined eager loading can be applied without affecting
the query results.

While :func:`.joinedload` has been around for a long time, :func:`.subqueryload`
is a newer form of eager loading.   :func:`.subqueryload` tends to be more appropriate
for loading related collections while :func:`.joinedload` tends to be better suited
for many-to-one relationships, due to the fact that only one row is loaded
for both the lead and the related object.

.. topic:: ``joinedload()`` is not a replacement for ``join()``

   The join created by :func:`.joinedload` is anonymously aliased such that
   it **does not affect the query results**.   An :meth:`.Query.order_by`
   or :meth:`.Query.filter` call **cannot** reference these aliased
   tables - so-called "user space" joins are constructed using
   :meth:`.Query.join`.   The rationale for this is that :func:`.joinedload` is only
   applied in order to affect how related objects or collections are loaded
   as an optimizing detail - it can be added or removed with no impact
   on actual results.   See the section :ref:`zen_of_eager_loading` for
   a detailed description of how this is used.

Explicit Join + Eagerload
--------------------------

A third style of eager loading is when we are constructing a JOIN
explicitly in order to locate the primary rows, and would like to
additionally apply the extra table to a related object or collection on
the primary object.   This feature is supplied via the :func:`.orm.contains_eager`
function, and is most typically useful for pre-loading the many-to-one
object on a query that needs to filter on that same object.  Below we illustrate
loading an ``Address`` row as well as the related ``User`` object,
filtering on the ``User`` named "jack" and using :func:`.orm.contains_eager`
to apply the "user" columns to the ``Address.user`` attribute:

..
Explicit Join + Eagerload
--------------------------

A third style of eager loading is when we are constructing a JOIN explicitly
in order to locate the primary rows, and would like to additionally apply the
extra table to a related object or collection on the primary object.   This
feature is supplied via the :func:`.orm.contains_eager` function, and is most
typically useful for pre-loading the many-to-one object on a query that needs
to filter on that same object.  Below we illustrate loading an ``Address``
row as well as the related ``User`` object, filtering on the ``User`` named
"jack" and using :func:`.orm.contains_eager` to apply the "user" columns to
the ``Address.user`` attribute:

.. sourcecode:: python+sql

    >>> from sqlalchemy.orm import contains_eager
    {sql}>>> jacks_addresses = session.query(Address).\
    ...                             join(Address.user).\
    ...                             filter(User.name=='jack').\
    ...                             options(contains_eager(Address.user)).\
    ...                             all()
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password,
            addresses.id AS addresses_id,
            addresses.email_address AS addresses_email_address,
            addresses.user_id AS addresses_user_id
    FROM addresses JOIN users ON users.id = addresses.user_id
    WHERE users.name = ? ('jack',)

    {stop}>>> jacks_addresses
    [<Address(email_address='jack@google.com')>, <Address(email_address='j25@yahoo.com')>]

    >>> jacks_addresses[0].user
    <User(name='jack', fullname='Jack Bean', password='gjffdd')>

For more information on eager loading, including how to configure various forms
of loading by default, see the section :doc:`/orm/loading_relationships`.

Deleting
========

Let's try to delete ``jack`` and see how that goes.  We'll mark the object as
deleted in the session, then we'll issue a ``count`` query to see that no rows
remain:

.. sourcecode:: python+sql

    >>> session.delete(jack)
    {sql}>>> session.query(User).filter_by(name='jack').count()
    UPDATE addresses SET user_id=? WHERE addresses.id = ?
    ((None, 1), (None, 2))
    DELETE FROM users WHERE users.id = ?
    (5,)
    SELECT count(*) AS count_1
    FROM (SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users
    WHERE users.name = ?) AS anon_1
    ('jack',)
    {stop}0

So far, so good.  How about Jack's ``Address`` objects?

.. sourcecode:: python+sql

    {sql}>>> session.query(Address).filter(
    ...     Address.email_address.in_(['jack@google.com', 'j25@yahoo.com'])
    ...  ).count()
    SELECT count(*) AS count_1
    FROM (SELECT addresses.id AS addresses_id,
                    addresses.email_address AS addresses_email_address,
                    addresses.user_id AS addresses_user_id
    FROM addresses
    WHERE addresses.email_address IN (?, ?)) AS anon_1
    ('jack@google.com', 'j25@yahoo.com')
    {stop}2

Uh oh, they're still there!   Analyzing the flush SQL, we can see that the
``user_id`` column of each address was set to NULL, but the rows weren't
deleted.  SQLAlchemy doesn't assume that deletes cascade; you have to tell it
to do so.

.. _tutorial_delete_cascade:

Configuring delete/delete-orphan Cascade
----------------------------------------

We will configure **cascade** options on the ``User.addresses`` relationship
to change the behavior.  While SQLAlchemy allows you to add new attributes and
relationships to mappings at any point in time, in this case the existing
relationship needs to be removed, so we need to tear down the mappings
completely and start again - we'll close the :class:`.Session`::

    >>> session.close()
    ROLLBACK

and use a new :func:`.declarative_base`::

    >>> Base = declarative_base()

Next we'll declare the ``User`` class, adding in the ``addresses``
relationship including the cascade configuration (we'll leave the
constructor out too)::

    >>> class User(Base):
    ...     __tablename__ = 'users'
    ...
    ...     id = Column(Integer, primary_key=True)
    ...     name = Column(String)
    ...     fullname = Column(String)
    ...     password = Column(String)
    ...
    ...     addresses = relationship("Address", back_populates='user',
    ...                     cascade="all, delete, delete-orphan")
    ...
    ...     def __repr__(self):
    ...        return "<User(name='%s', fullname='%s', password='%s')>" % (
    ...                                self.name, self.fullname, self.password)
Then we recreate ``Address``, noting that in this case we've created the
``Address.user`` relationship via the ``User`` class already::

    >>> class Address(Base):
    ...     __tablename__ = 'addresses'
    ...     id = Column(Integer, primary_key=True)
    ...     email_address = Column(String, nullable=False)
    ...     user_id = Column(Integer, ForeignKey('users.id'))
    ...     user = relationship("User", back_populates="addresses")
    ...
    ...     def __repr__(self):
    ...         return "<Address(email_address='%s')>" % self.email_address

Now when we load the user ``jack`` (below using :meth:`~.Query.get`,
which loads by primary key), removing an address from the corresponding
``addresses`` collection will result in that ``Address`` being deleted:

.. sourcecode:: python+sql

    # load Jack by primary key
    {sql}>>> jack = session.query(User).get(5)
    BEGIN (implicit)
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users
    WHERE users.id = ?
    (5,)
    {stop}

    # remove one Address (lazy load fires off)
    {sql}>>> del jack.addresses[1]
    SELECT addresses.id AS addresses_id,
            addresses.email_address AS addresses_email_address,
            addresses.user_id AS addresses_user_id
    FROM addresses
    WHERE ? = addresses.user_id
    (5,)
    {stop}

    # only one address remains
    {sql}>>> session.query(Address).filter(
    ...     Address.email_address.in_(['jack@google.com', 'j25@yahoo.com'])
    ... ).count()
    DELETE FROM addresses WHERE addresses.id = ?
    (2,)
    SELECT count(*) AS count_1
    FROM (SELECT addresses.id AS addresses_id,
                    addresses.email_address AS addresses_email_address,
                    addresses.user_id AS addresses_user_id
    FROM addresses
    WHERE addresses.email_address IN (?, ?)) AS anon_1
    ('jack@google.com', 'j25@yahoo.com')
    {stop}1

Deleting Jack will delete both Jack and the remaining ``Address`` associated
with the user:

.. sourcecode:: python+sql

    >>> session.delete(jack)

    {sql}>>> session.query(User).filter_by(name='jack').count()
    DELETE FROM addresses WHERE addresses.id = ?
    (1,)
    DELETE FROM users WHERE users.id = ?
    (5,)
    SELECT count(*) AS count_1
    FROM (SELECT users.id AS users_id,
                    users.name AS users_name,
                    users.fullname AS users_fullname,
                    users.password AS users_password
    FROM users
    WHERE users.name = ?) AS anon_1
    ('jack',)
    {stop}0

    {sql}>>> session.query(Address).filter(
    ...    Address.email_address.in_(['jack@google.com', 'j25@yahoo.com'])
    ... ).count()
    SELECT count(*) AS count_1
    FROM (SELECT addresses.id AS addresses_id,
                    addresses.email_address AS addresses_email_address,
                    addresses.user_id AS addresses_user_id
    FROM addresses
    WHERE addresses.email_address IN (?, ?)) AS anon_1
    ('jack@google.com', 'j25@yahoo.com')
    {stop}0

.. topic:: More on Cascades

   Further detail on configuration of cascades is at :ref:`unitofwork_cascades`.

   The cascade functionality can also integrate smoothly with
   the ``ON DELETE CASCADE`` functionality of the relational database.
   See :ref:`passive_deletes` for details.

.. _orm_tutorial_many_to_many:

Building a Many To Many Relationship
====================================

We're moving into the bonus round here, but let's show off a many-to-many
relationship.  We'll sneak in some other features too, just to take a tour.
We'll make our application a blog application, where users can write
``BlogPost`` items, which have ``Keyword`` items associated with them.

For a plain many-to-many, we need to create an un-mapped :class:`.Table`
construct to serve as the association table.  This looks like the following::

    >>> from sqlalchemy import Table, Text
    >>> # association table
    >>> post_keywords = Table('post_keywords', Base.metadata,
    ...     Column('post_id', ForeignKey('posts.id'), primary_key=True),
    ...     Column('keyword_id', ForeignKey('keywords.id'), primary_key=True)
    ... )

Above, we can see declaring a :class:`.Table` directly is a little different
than declaring a mapped class.
:class:`.Table` is a constructor function, so each individual :class:`.Column` argument is separated by a comma. The :class:`.Column` object is also given its name explicitly, rather than it being taken from an assigned attribute name. Next we define ``BlogPost`` and ``Keyword``, using complementary :func:`.relationship` constructs, each referring to the ``post_keywords`` table as an association table:: >>> class BlogPost(Base): ... __tablename__ = 'posts' ... ... id = Column(Integer, primary_key=True) ... user_id = Column(Integer, ForeignKey('users.id')) ... headline = Column(String(255), nullable=False) ... body = Column(Text) ... ... # many to many BlogPost<->Keyword ... keywords = relationship('Keyword', ... secondary=post_keywords, ... back_populates='posts') ... ... def __init__(self, headline, body, author): ... self.author = author ... self.headline = headline ... self.body = body ... ... def __repr__(self): ... return "BlogPost(%r, %r, %r)" % (self.headline, self.body, self.author) >>> class Keyword(Base): ... __tablename__ = 'keywords' ... ... id = Column(Integer, primary_key=True) ... keyword = Column(String(50), nullable=False, unique=True) ... posts = relationship('BlogPost', ... secondary=post_keywords, ... back_populates='keywords') ... ... def __init__(self, keyword): ... self.keyword = keyword .. note:: The above class declarations illustrate explicit ``__init__()`` methods. Remember, when using Declarative, it's optional! Above, the many-to-many relationship is ``BlogPost.keywords``. The defining feature of a many-to-many relationship is the ``secondary`` keyword argument which references a :class:`~sqlalchemy.schema.Table` object representing the association table. This table only contains columns which reference the two sides of the relationship; if it has *any* other columns, such as its own primary key, or foreign keys to other tables, SQLAlchemy requires a different usage pattern called the "association object", described at :ref:`association_pattern`. We would also like our ``BlogPost`` class to have an ``author`` field. We will add this as another bidirectional relationship, except one issue we'll have is that a single user might have lots of blog posts. When we access ``User.posts``, we'd like to be able to filter results further so as not to load the entire collection. For this we use a setting accepted by :func:`~sqlalchemy.orm.relationship` called ``lazy='dynamic'``, which configures an alternate **loader strategy** on the attribute:: .. sourcecode:: python+sql >>> BlogPost.author = relationship(User, back_populates="posts") >>> User.posts = relationship(BlogPost, back_populates="author", lazy="dynamic") Create new tables: .. sourcecode:: python+sql {sql}>>> Base.metadata.create_all(engine) PRAGMA... CREATE TABLE keywords ( id INTEGER NOT NULL, keyword VARCHAR(50) NOT NULL, PRIMARY KEY (id), UNIQUE (keyword) ) () COMMIT CREATE TABLE posts ( id INTEGER NOT NULL, user_id INTEGER, headline VARCHAR(255) NOT NULL, body TEXT, PRIMARY KEY (id), FOREIGN KEY(user_id) REFERENCES users (id) ) () COMMIT CREATE TABLE post_keywords ( post_id INTEGER NOT NULL, keyword_id INTEGER NOT NULL, PRIMARY KEY (post_id, keyword_id), FOREIGN KEY(post_id) REFERENCES posts (id), FOREIGN KEY(keyword_id) REFERENCES keywords (id) ) () COMMIT Usage is not too different from what we've been doing. Let's give Wendy some blog posts: .. sourcecode:: python+sql {sql}>>> wendy = session.query(User).\ ... filter_by(name='wendy').\ ... 
one()
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users
    WHERE users.name = ?
    ('wendy',)
    {stop}

    >>> post = BlogPost("Wendy's Blog Post", "This is a test", wendy)
    >>> session.add(post)

We're storing keywords uniquely in the database, but we know that we don't
have any yet, so we can just create them:

.. sourcecode:: python+sql

    >>> post.keywords.append(Keyword('wendy'))
    >>> post.keywords.append(Keyword('firstpost'))

We can now look up all blog posts with the keyword 'firstpost'.   We'll use
the ``any`` operator to locate "blog posts where any of its keywords has the
keyword string 'firstpost'":

.. sourcecode:: python+sql

    {sql}>>> session.query(BlogPost).\
    ...             filter(BlogPost.keywords.any(keyword='firstpost')).\
    ...             all()
    INSERT INTO keywords (keyword) VALUES (?)
    ('wendy',)
    INSERT INTO keywords (keyword) VALUES (?)
    ('firstpost',)
    INSERT INTO posts (user_id, headline, body) VALUES (?, ?, ?)
    (2, "Wendy's Blog Post", 'This is a test')
    INSERT INTO post_keywords (post_id, keyword_id) VALUES (?, ?)
    (...)
    SELECT posts.id AS posts_id,
            posts.user_id AS posts_user_id,
            posts.headline AS posts_headline,
            posts.body AS posts_body
    FROM posts
    WHERE EXISTS (SELECT 1
        FROM post_keywords, keywords
        WHERE posts.id = post_keywords.post_id
            AND keywords.id = post_keywords.keyword_id
            AND keywords.keyword = ?)
    ('firstpost',)
    {stop}[BlogPost("Wendy's Blog Post", 'This is a test', <User(name='wendy', fullname='Wendy Williams', password='windy')>)]

If we want to look up posts owned by the user ``wendy``, we can tell the query
to narrow down to that ``User`` object as a parent:

.. sourcecode:: python+sql

    {sql}>>> session.query(BlogPost).\
    ...             filter(BlogPost.author==wendy).\
    ...             filter(BlogPost.keywords.any(keyword='firstpost')).\
    ...             all()
    SELECT posts.id AS posts_id,
            posts.user_id AS posts_user_id,
            posts.headline AS posts_headline,
            posts.body AS posts_body
    FROM posts
    WHERE ? = posts.user_id AND (EXISTS (SELECT 1
        FROM post_keywords, keywords
        WHERE posts.id = post_keywords.post_id
            AND keywords.id = post_keywords.keyword_id
            AND keywords.keyword = ?))
    (2, 'firstpost')
    {stop}[BlogPost("Wendy's Blog Post", 'This is a test', <User(name='wendy', fullname='Wendy Williams', password='windy')>)]

Or we can use Wendy's own ``posts`` relationship, which is a "dynamic"
relationship, to query straight from there:

.. sourcecode:: python+sql

    {sql}>>> wendy.posts.\
    ...         filter(BlogPost.keywords.any(keyword='firstpost')).\
    ...         all()
    SELECT posts.id AS posts_id,
            posts.user_id AS posts_user_id,
            posts.headline AS posts_headline,
            posts.body AS posts_body
    FROM posts
    WHERE ? = posts.user_id AND (EXISTS (SELECT 1
        FROM post_keywords, keywords
        WHERE posts.id = post_keywords.post_id
            AND keywords.id = post_keywords.keyword_id
            AND keywords.keyword = ?))
    (2, 'firstpost')
    {stop}[BlogPost("Wendy's Blog Post", 'This is a test', <User(name='wendy', fullname='Wendy Williams', password='windy')>)]

Further Reference
==================

Query Reference: :ref:`query_api_toplevel`

Mapper Reference: :ref:`mapper_config_toplevel`

Relationship Reference: :ref:`relationship_config_toplevel`

Session Reference: :doc:`/orm/session`

SQLAlchemy-1.0.11/doc/build/orm/cascades.rst0000664000175000017500000003762012636375552021611 0ustar  classicclassic00000000000000.. _unitofwork_cascades:

Cascades
========

Mappers support the concept of configurable :term:`cascade` behavior on
:func:`~sqlalchemy.orm.relationship` constructs.  This refers to how
operations performed on a "parent" object relative to a particular
:class:`.Session` should be propagated to items referred to by that
relationship (e.g. "child" objects), and is affected by the
:paramref:`.relationship.cascade` option.
The default behavior of cascade is limited to cascades of the so-called
:ref:`cascade_save_update` and :ref:`cascade_merge` settings.  The typical
"alternative" setting for cascade is to add the :ref:`cascade_delete` and
:ref:`cascade_delete_orphan` options; these settings are appropriate for
related objects which only exist as long as they are attached to their
parent, and are otherwise deleted.

Cascade behavior is configured by changing the
:paramref:`~.relationship.cascade` option on
:func:`~sqlalchemy.orm.relationship`::

    class Order(Base):
        __tablename__ = 'order'

        items = relationship("Item", cascade="all, delete-orphan")
        customer = relationship("User", cascade="save-update")

To set cascades on a backref, the same flag can be used with the
:func:`~.sqlalchemy.orm.backref` function, which ultimately feeds its
arguments back into :func:`~sqlalchemy.orm.relationship`::

    class Item(Base):
        __tablename__ = 'item'

        order = relationship("Order",
                        backref=backref("items", cascade="all, delete-orphan")
                    )

.. sidebar:: The Origins of Cascade

   SQLAlchemy's notion of cascading behavior on relationships,
   as well as the options to configure them, are primarily derived
   from the similar feature in the Hibernate ORM; Hibernate refers
   to "cascade" in a few places such as in
   `Example: Parent/Child `_.
   If cascades are confusing, we'll refer to their conclusion,
   stating "The sections we have just covered can be a bit confusing.
   However, in practice, it all works out nicely."

The default value of :paramref:`~.relationship.cascade` is ``save-update,
merge``.  The typical alternative setting for this parameter is either
``all`` or more commonly ``all, delete-orphan``.  The ``all`` symbol is a
synonym for ``save-update, merge, refresh-expire, expunge, delete``, and
using it in conjunction with ``delete-orphan`` indicates that the child
object should follow along with its parent in all cases, and be deleted once
it is no longer associated with that parent.

The list of available values which can be specified for the
:paramref:`~.relationship.cascade` parameter are described in the following
subsections.

.. _cascade_save_update:

save-update
-----------

``save-update`` cascade indicates that when an object is placed into a
:class:`.Session` via :meth:`.Session.add`, all the objects associated
with it via this :func:`.relationship` should also be added to that
same :class:`.Session`.  Suppose we have an object ``user1`` with two
related objects ``address1``, ``address2``::

    >>> user1 = User()
    >>> address1, address2 = Address(), Address()
    >>> user1.addresses = [address1, address2]

If we add ``user1`` to a :class:`.Session`, it will also add ``address1``,
``address2`` implicitly::

    >>> sess = Session()
    >>> sess.add(user1)
    >>> address1 in sess
    True

``save-update`` cascade also affects attribute operations for objects
that are already present in a :class:`.Session`.  If we add a third object,
``address3`` to the ``user1.addresses`` collection, it becomes part of the
state of that :class:`.Session`::

    >>> address3 = Address()
    >>> user1.addresses.append(address3)
    >>> address3 in sess
    True

``save-update`` has the possibly surprising behavior which is that
persistent objects which were *removed* from a collection or in some cases a
scalar attribute may also be pulled into the :class:`.Session` of a parent
object; this is so that the flush process may handle that related object
appropriately.
This case can usually only arise if an object is removed from one
:class:`.Session` and added to another::

    >>> user1 = sess1.query(User).filter_by(id=1).first()
    >>> address1 = user1.addresses[0]
    >>> sess1.close()   # user1, address1 no longer associated with sess1
    >>> user1.addresses.remove(address1)  # address1 no longer associated with user1
    >>> sess2 = Session()
    >>> sess2.add(user1)  # ... but it still gets added to the new session,
    >>> address1 in sess2 # because it's still "pending" for flush
    True

The ``save-update`` cascade is on by default, and is typically taken
for granted; it simplifies code by allowing a single call to
:meth:`.Session.add` to register an entire structure of objects within
that :class:`.Session` at once.   While it can be disabled, there
is usually not a need to do so.

One case where ``save-update`` cascade does sometimes get in the way is in
that it takes place in both directions for bi-directional relationships,
e.g. backrefs, meaning that the association of a child object with a
particular parent can have the effect of the parent object being implicitly
associated with that child object's :class:`.Session`; this pattern, as well
as how to modify its behavior using the
:paramref:`~.relationship.cascade_backrefs` flag, is discussed in the section
:ref:`backref_cascade`.

.. _cascade_delete:

delete
------

The ``delete`` cascade indicates that when a "parent" object is marked for
deletion, its related "child" objects should also be marked for deletion.
If for example we have a relationship ``User.addresses`` with ``delete``
cascade configured::

    class User(Base):
        # ...

        addresses = relationship("Address", cascade="save-update, merge, delete")

If using the above mapping, we have a ``User`` object and two related
``Address`` objects::

    >>> user1 = sess.query(User).filter_by(id=1).first()
    >>> address1, address2 = user1.addresses

If we mark ``user1`` for deletion, after the flush operation proceeds,
``address1`` and ``address2`` will also be deleted:

.. sourcecode:: python+sql

    >>> sess.delete(user1)
    >>> sess.commit()
    {opensql}DELETE FROM address WHERE address.id = ?
    ((1,), (2,))
    DELETE FROM user WHERE user.id = ?
    (1,)
    COMMIT

Alternatively, if our ``User.addresses`` relationship does *not* have
``delete`` cascade, SQLAlchemy's default behavior is to instead de-associate
``address1`` and ``address2`` from ``user1`` by setting their foreign key
reference to ``NULL``.  Using a mapping as follows::

    class User(Base):
        # ...

        addresses = relationship("Address")

Upon deletion of a parent ``User`` object, the rows in ``address`` are not
deleted, but are instead de-associated:

.. sourcecode:: python+sql

    >>> sess.delete(user1)
    >>> sess.commit()
    {opensql}UPDATE address SET user_id=? WHERE address.id = ?
    (None, 1)
    UPDATE address SET user_id=? WHERE address.id = ?
    (None, 2)
    DELETE FROM user WHERE user.id = ?
    (1,)
    COMMIT

``delete`` cascade is more often than not used in conjunction with
:ref:`cascade_delete_orphan` cascade, which will emit a DELETE for the
related row if the "child" object is deassociated from the parent.  The
combination of ``delete`` and ``delete-orphan`` cascade covers both
situations where SQLAlchemy has to decide between setting a foreign key
column to NULL versus deleting the row entirely.
.. topic:: ORM-level "delete" cascade vs. FOREIGN KEY level "ON DELETE" cascade

   The behavior of SQLAlchemy's "delete" cascade has a lot of overlap with the
   ``ON DELETE CASCADE`` feature of a database foreign key, as well as with
   that of the ``ON DELETE SET NULL`` foreign key setting when "delete" cascade
   is not specified.   Database level "ON DELETE" cascades are specific to the
   "FOREIGN KEY" construct of the relational database; SQLAlchemy allows
   configuration of these schema-level constructs at the :term:`DDL` level
   using options on :class:`.ForeignKeyConstraint` which are described
   at :ref:`on_update_on_delete`.

   It is important to note the differences between the ORM and the relational
   database's notion of "cascade" as well as how they integrate:

   * A database level ``ON DELETE`` cascade is configured effectively
     on the **many-to-one** side of the relationship; that is, we configure
     it relative to the ``FOREIGN KEY`` constraint that is the "many" side
     of a relationship.  At the ORM level, **this direction is reversed**.
     SQLAlchemy handles the deletion of "child" objects relative to a
     "parent" from the "parent" side, which means that ``delete`` and
     ``delete-orphan`` cascade are configured on the **one-to-many**
     side.

   * Database level foreign keys with no ``ON DELETE`` setting
     are often used to **prevent** a parent
     row from being removed, as it would necessarily leave an unhandled
     related row present.  If this behavior is desired in a one-to-many
     relationship, SQLAlchemy's default behavior of setting a foreign key
     to ``NULL`` can be caught in one of two ways:

       * The easiest and most common is just to set the
         foreign-key-holding column to ``NOT NULL`` at the database schema
         level.  An attempt by SQLAlchemy to set the column to NULL will
         fail with a simple NOT NULL constraint exception.

       * The other, more special case way is to set the
         :paramref:`~.relationship.passive_deletes` flag to the string
         ``"all"``.  This has the effect of entirely disabling
         SQLAlchemy's behavior of setting the foreign key column to NULL,
         and a DELETE will be emitted for the parent row without any
         effect on the child row, even if the child row is present in
         memory.  This may be desirable in the case when database-level
         foreign key triggers, either special ``ON DELETE`` settings or
         otherwise, need to be activated in all cases when a parent row
         is deleted.

   * Database level ``ON DELETE`` cascade is **vastly more efficient**
     than that of SQLAlchemy.  The database can chain a series of cascade
     operations across many relationships at once; e.g. if row A is deleted,
     all the related rows in table B can be deleted, and all the C rows
     related to each of those B rows, and on and on, all within the scope of
     a single DELETE statement.  SQLAlchemy on the other hand, in order to
     support the cascading delete operation fully, has to individually load
     each related collection in order to target all rows that then may have
     further related collections.  That is, SQLAlchemy isn't sophisticated
     enough to emit a DELETE for all those related rows at once within this
     context.

   * SQLAlchemy doesn't **need** to be this sophisticated, as we instead
     provide smooth integration with the database's own ``ON DELETE``
     functionality, by using the
     :paramref:`~.relationship.passive_deletes` option in conjunction with
     properly configured foreign key constraints.  Under this behavior,
     SQLAlchemy only emits DELETE for those rows that are already locally
     present in the :class:`.Session`; for any collections that are
     unloaded, it leaves them to the database to handle, rather than
     emitting a SELECT for them.  The section :ref:`passive_deletes`
     provides an example of this use, and a brief sketch follows this list
     as well.

   * While database-level ``ON DELETE`` functionality works only on the
     "many" side of a relationship, SQLAlchemy's "delete" cascade
     has **limited** ability to operate in the *reverse* direction as well,
     meaning it can be configured on the "many" side to delete an object
     on the "one" side when the reference on the "many" side is deleted.
     However this can easily result in constraint violations if there are
     other objects referring to this "one" side from the "many", so it
     typically is only useful when a relationship is in fact a "one to one".
     The :paramref:`~.relationship.single_parent` flag should be used to
     establish an in-Python assertion for this case.
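To make the integration described in the topic above concrete, here is a
rough sketch, assuming a one-to-many ``User.addresses`` mapping against a
database that enforces its own foreign key cascades (the mapping details are
illustrative only)::

    class User(Base):
        # ...

        # rows not loaded in the Session are left to the database's
        # own ON DELETE CASCADE to remove
        addresses = relationship(
            "Address",
            cascade="all, delete-orphan",
            passive_deletes=True)

    class Address(Base):
        # ...

        # the DDL-level cascade that passive_deletes relies upon
        user_id = Column(Integer,
                         ForeignKey('users.id', ondelete='CASCADE'))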
When using a :func:`.relationship` that also includes a many-to-many
table using the :paramref:`~.relationship.secondary` option, SQLAlchemy's
delete cascade handles the rows in this many-to-many table automatically.
Just like, as described in :ref:`relationships_many_to_many_deletion`,
the addition or removal of an object from a many-to-many collection
results in the INSERT or DELETE of a row in the many-to-many table,
the ``delete`` cascade, when activated as the result of a parent object
delete operation, will DELETE not just the row in the "child" table but also
in the many-to-many table.

.. _cascade_delete_orphan:

delete-orphan
-------------

``delete-orphan`` cascade adds behavior to the ``delete`` cascade,
such that a child object will be marked for deletion when it is
de-associated from the parent, not just when the parent is marked
for deletion.   This is a common feature when dealing with a related
object that is "owned" by its parent, with a NOT NULL foreign key,
so that removal of the item from the parent collection results
in its deletion.

``delete-orphan`` cascade implies that each child object can only
have one parent at a time, so is configured in the vast majority of cases
on a one-to-many relationship.   Setting it on a many-to-one or
many-to-many relationship is more awkward; for this use case,
SQLAlchemy requires that the :func:`~sqlalchemy.orm.relationship`
be configured with the :paramref:`~.relationship.single_parent` argument,
which establishes Python-side validation that ensures the object
is associated with only one parent at a time.
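A rough sketch of that configuration, using a hypothetical ``Preference``
object owned by a single ``User`` across a many-to-one (the class names are
assumptions for illustration)::

    class Preference(Base):
        # ...

        # single_parent installs a Python-side assertion that a given
        # User is never attached to two Preference objects at once
        user = relationship(
            "User",
            cascade="all, delete-orphan",
            single_parent=True)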
.. _cascade_merge:

merge
-----

``merge`` cascade indicates that the :meth:`.Session.merge`
operation should be propagated from a parent that's the subject
of the :meth:`.Session.merge` call down to referred objects.
This cascade is also on by default.

.. _cascade_refresh_expire:

refresh-expire
--------------

``refresh-expire`` is an uncommon option, indicating that the
:meth:`.Session.expire` operation should be propagated from a parent
down to referred objects.   When using :meth:`.Session.refresh`,
the referred objects are expired only, but not actually refreshed.

.. _cascade_expunge:

expunge
-------

``expunge`` cascade indicates that when the parent object is removed
from the :class:`.Session` using :meth:`.Session.expunge`, the
operation should be propagated down to referred objects.

.. _backref_cascade:

Controlling Cascade on Backrefs
-------------------------------

The :ref:`cascade_save_update` cascade by default takes place on attribute
change events emitted from backrefs.  This is probably a confusing statement
more easily described through demonstration; it means that, given a mapping
such as this::

    mapper(Order, order_table, properties={
        'items' : relationship(Item, backref='order')
    })

If an ``Order`` is already in the session, and is assigned to the ``order``
attribute of an ``Item``, the backref appends the ``Item`` to the ``items``
collection of that ``Order``, resulting in the ``save-update`` cascade taking
place::

    >>> o1 = Order()
    >>> session.add(o1)
    >>> o1 in session
    True

    >>> i1 = Item()
    >>> i1.order = o1
    >>> i1 in o1.items
    True
    >>> i1 in session
    True

This behavior can be disabled using the
:paramref:`~.relationship.cascade_backrefs` flag::

    mapper(Order, order_table, properties={
        'items' : relationship(Item, backref='order', cascade_backrefs=False)
    })

So above, the assignment of ``i1.order = o1`` will append ``i1`` to the
``items`` collection of ``o1``, but will not add ``i1`` to the session.  You
can, of course, :meth:`~.Session.add` ``i1`` to the session at a later point.
This option may be helpful for situations where an object needs to be kept
out of a session until its construction is completed, but still needs to be
given associations to objects which are already persistent in the target
session.

SQLAlchemy-1.0.11/doc/build/orm/session_api.rst0000664000175000017500000000326612636375552022356 0ustar  classicclassic00000000000000.. module:: sqlalchemy.orm.session

Session API
============

Session and sessionmaker()
---------------------------

.. autoclass:: sessionmaker
    :members:
    :inherited-members:

.. autoclass:: sqlalchemy.orm.session.Session
   :members:
   :inherited-members:

.. autoclass:: sqlalchemy.orm.session.SessionTransaction
   :members:

Session Utilities
-----------------

.. autofunction:: make_transient

.. autofunction:: make_transient_to_detached

.. autofunction:: object_session

.. autofunction:: sqlalchemy.orm.util.was_deleted

Attribute and State Management Utilities
-----------------------------------------

These functions are provided by the SQLAlchemy attribute
instrumentation API to provide a detailed interface for dealing
with instances, attribute values, and history.  Some of them are useful
when constructing event listener functions, such as those described in
:doc:`/orm/events`.

.. currentmodule:: sqlalchemy.orm.util

.. autofunction:: object_state

.. currentmodule:: sqlalchemy.orm.attributes

.. autofunction:: del_attribute

.. autofunction:: get_attribute

.. autofunction:: get_history

.. autofunction:: init_collection

.. autofunction:: flag_modified

.. function:: instance_state

    Return the :class:`.InstanceState` for a given
    mapped object.

    This function is the internal version
    of :func:`.object_state`.   The
    :func:`.object_state` and/or the
    :func:`.inspect` function is preferred here
    as they each emit an informative exception
    if the given object is not mapped.

.. autofunction:: sqlalchemy.orm.instrumentation.is_instrumented

.. autofunction:: set_attribute

.. autofunction:: set_committed_value

.. autoclass:: History
    :members:

SQLAlchemy-1.0.11/doc/build/orm/session_transaction.rst0000664000175000017500000005120712636375552024130 0ustar  classicclassic00000000000000=======================================
Transactions and Connection Management
=======================================

.. _unitofwork_transaction:

Managing Transactions
=====================

A newly constructed :class:`.Session` may be said to be in the "begin" state.
In this state, the :class:`.Session` has not established any connection or transactional state with any of the :class:`.Engine` objects that may be associated with it. The :class:`.Session` then receives requests to operate upon a database connection. Typically, this means it is called upon to execute SQL statements using a particular :class:`.Engine`, which may be via :meth:`.Session.query`, :meth:`.Session.execute`, or within a flush operation of pending data, which occurs when such state exists and :meth:`.Session.commit` or :meth:`.Session.flush` is called. As these requests are received, each new :class:`.Engine` encountered is associated with an ongoing transactional state maintained by the :class:`.Session`. When the first :class:`.Engine` is operated upon, the :class:`.Session` can be said to have left the "begin" state and entered "transactional" state. For each :class:`.Engine` encountered, a :class:`.Connection` is associated with it, which is acquired via the :meth:`.Engine.contextual_connect` method. If a :class:`.Connection` was directly associated with the :class:`.Session` (see :ref:`session_external_transaction` for an example of this), it is added to the transactional state directly. For each :class:`.Connection`, the :class:`.Session` also maintains a :class:`.Transaction` object, which is acquired by calling :meth:`.Connection.begin` on each :class:`.Connection`, or if the :class:`.Session` object has been established using the flag ``twophase=True``, a :class:`.TwoPhaseTransaction` object acquired via :meth:`.Connection.begin_twophase`. These transactions are all committed or rolled back corresponding to the invocation of the :meth:`.Session.commit` and :meth:`.Session.rollback` methods. A commit operation will also call the :meth:`.TwoPhaseTransaction.prepare` method on all transactions if applicable. When the transactional state is completed after a rollback or commit, the :class:`.Session` :term:`releases` all :class:`.Transaction` and :class:`.Connection` resources, and goes back to the "begin" state, which will again invoke new :class:`.Connection` and :class:`.Transaction` objects as new requests to emit SQL statements are received. The example below illustrates this lifecycle:: engine = create_engine("...") Session = sessionmaker(bind=engine) # new session. no connections are in use. session = Session() try: # first query. a Connection is acquired # from the Engine, and a Transaction # started. item1 = session.query(Item).get(1) # second query. the same Connection/Transaction # are used. item2 = session.query(Item).get(2) # pending changes are created. item1.foo = 'bar' item2.bar = 'foo' # commit. The pending changes above # are flushed via flush(), the Transaction # is committed, the Connection object closed # and discarded, the underlying DBAPI connection # returned to the connection pool. session.commit() except: # on rollback, the same closure of state # as that of commit proceeds. session.rollback() raise .. _session_begin_nested: Using SAVEPOINT --------------- SAVEPOINT transactions, if supported by the underlying engine, may be delineated using the :meth:`~.Session.begin_nested` method:: Session = sessionmaker() session = Session() session.add(u1) session.add(u2) session.begin_nested() # establish a savepoint session.add(u3) session.rollback() # rolls back u3, keeps u1 and u2 session.commit() # commits u1 and u2 :meth:`~.Session.begin_nested` may be called any number of times, which will issue a new SAVEPOINT with a unique identifier for each call. 
For each :meth:`~.Session.begin_nested` call, a corresponding :meth:`~.Session.rollback` or :meth:`~.Session.commit` must be issued. (But note that if the return value is used as a context manager, i.e. in a with-statement, then this rollback/commit is issued by the context manager upon exiting the context, and so should not be added explicitly.) When :meth:`~.Session.begin_nested` is called, a :meth:`~.Session.flush` is unconditionally issued (regardless of the ``autoflush`` setting). This is so that when a :meth:`~.Session.rollback` occurs, the full state of the session is expired, thus causing all subsequent attribute/instance access to reference the full state of the :class:`~sqlalchemy.orm.session.Session` right before :meth:`~.Session.begin_nested` was called. :meth:`~.Session.begin_nested`, in the same manner as the less often used :meth:`~.Session.begin` method, returns a transactional object which also works as a context manager. It can be succinctly used around individual record inserts in order to catch things like unique constraint exceptions:: for record in records: try: with session.begin_nested(): session.merge(record) except: print "Skipped record %s" % record session.commit() .. _session_autocommit: Autocommit Mode --------------- The example of :class:`.Session` transaction lifecycle illustrated at the start of :ref:`unitofwork_transaction` applies to a :class:`.Session` configured in the default mode of ``autocommit=False``. Constructing a :class:`.Session` with ``autocommit=True`` produces a :class:`.Session` placed into "autocommit" mode, where each SQL statement invoked by a :meth:`.Session.query` or :meth:`.Session.execute` occurs using a new connection from the connection pool, discarding it after results have been iterated. The :meth:`.Session.flush` operation still occurs within the scope of a single transaction, though this transaction is closed out after the :meth:`.Session.flush` operation completes. .. warning:: "autocommit" mode should **not be considered for general use**. If used, it should always be combined with the usage of :meth:`.Session.begin` and :meth:`.Session.commit`, to ensure a transaction demarcation. Executing queries outside of a demarcated transaction is a legacy mode of usage, and can in some cases lead to concurrent connection checkouts. In the absence of a demarcated transaction, the :class:`.Session` cannot make appropriate decisions as to when autoflush should occur nor when auto-expiration should occur, so these features should be disabled with ``autoflush=False, expire_on_commit=False``. Modern usage of "autocommit" is for framework integrations that need to control specifically when the "begin" state occurs. A session which is configured with ``autocommit=True`` may be placed into the "begin" state using the :meth:`.Session.begin` method. 
After the cycle completes upon :meth:`.Session.commit` or
:meth:`.Session.rollback`, connection and transaction resources are
:term:`released` and the :class:`.Session` goes back into "autocommit" mode,
until :meth:`.Session.begin` is called again::

    Session = sessionmaker(bind=engine, autocommit=True)
    session = Session()
    session.begin()
    try:
        item1 = session.query(Item).get(1)
        item2 = session.query(Item).get(2)
        item1.foo = 'bar'
        item2.bar = 'foo'
        session.commit()
    except:
        session.rollback()
        raise

The :meth:`.Session.begin` method also returns a transactional token which is
compatible with the Python 2.6 ``with`` statement::

    Session = sessionmaker(bind=engine, autocommit=True)
    session = Session()
    with session.begin():
        item1 = session.query(Item).get(1)
        item2 = session.query(Item).get(2)
        item1.foo = 'bar'
        item2.bar = 'foo'

.. _session_subtransactions:

Using Subtransactions with Autocommit
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

A subtransaction indicates usage of the :meth:`.Session.begin` method in
conjunction with the ``subtransactions=True`` flag.  This produces a
non-transactional, delimiting construct that allows nesting of calls to
:meth:`~.Session.begin` and :meth:`~.Session.commit`.  Its purpose is to
allow the construction of code that can function within a transaction both
independently of any external code that starts a transaction, as well as
within a block that has already demarcated a transaction.

``subtransactions=True`` is generally only useful in conjunction with
autocommit, and is equivalent to the pattern described at
:ref:`connections_nested_transactions`, where any number of functions can
call :meth:`.Connection.begin` and :meth:`.Transaction.commit` as though
they are the initiator of the transaction, but in fact may be participating
in an already ongoing transaction::

    # method_a starts a transaction and calls method_b
    def method_a(session):
        session.begin(subtransactions=True)
        try:
            method_b(session)
            session.commit()  # transaction is committed here
        except:
            session.rollback() # rolls back the transaction
            raise

    # method_b also starts a transaction, but when
    # called from method_a participates in the ongoing
    # transaction.
    def method_b(session):
        session.begin(subtransactions=True)
        try:
            session.add(SomeObject('bat', 'lala'))
            session.commit()  # transaction is not committed yet
        except:
            session.rollback() # rolls back the transaction, in this case
                               # the one that was initiated in method_a().
            raise

    # create a Session and call method_a
    session = Session(autocommit=True)
    method_a(session)
    session.close()

Subtransactions are used by the :meth:`.Session.flush` process to ensure
that the flush operation takes place within a transaction, regardless of
autocommit.   When autocommit is disabled, it is still useful in that it
forces the :class:`.Session` into a "pending rollback" state, as a failed
flush cannot be resumed in mid-operation, where the end user still maintains
the "scope" of the transaction overall.

.. _session_twophase:

Enabling Two-Phase Commit
-------------------------

For backends which support two-phase operation (currently MySQL and
PostgreSQL), the session can be instructed to use two-phase commit semantics.
This will coordinate the committing of transactions across databases so that
the transaction is either committed or rolled back in all databases.  You can
also :meth:`~.Session.prepare` the session for interacting with transactions
not managed by SQLAlchemy.
To use two phase transactions set the flag ``twophase=True`` on the session:: engine1 = create_engine('postgresql://db1') engine2 = create_engine('postgresql://db2') Session = sessionmaker(twophase=True) # bind User operations to engine 1, Account operations to engine 2 Session.configure(binds={User:engine1, Account:engine2}) session = Session() # .... work with accounts and users # commit. session will issue a flush to all DBs, and a prepare step to all DBs, # before committing both transactions session.commit() .. _session_transaction_isolation: Setting Transaction Isolation Levels ------------------------------------ :term:`Isolation` refers to the behavior of the transaction at the database level in relation to other transactions occurring concurrently. There are four well-known modes of isolation, and typically the Python DBAPI allows these to be set on a per-connection basis, either through explicit APIs or via database-specific calls. SQLAlchemy's dialects support settable isolation modes on a per-:class:`.Engine` or per-:class:`.Connection` basis, using flags at both the :func:`.create_engine` level as well as at the :meth:`.Connection.execution_options` level. When using the ORM :class:`.Session`, it acts as a *facade* for engines and connections, but does not expose transaction isolation directly. So in order to affect transaction isolation level, we need to act upon the :class:`.Engine` or :class:`.Connection` as appropriate. .. seealso:: :paramref:`.create_engine.isolation_level` :ref:`SQLite Transaction Isolation ` :ref:`Postgresql Isolation Level ` :ref:`MySQL Isolation Level ` Setting Isolation Engine-Wide ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To set up a :class:`.Session` or :class:`.sessionmaker` with a specific isolation level globally, use the :paramref:`.create_engine.isolation_level` parameter:: from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker eng = create_engine( "postgresql://scott:tiger@localhost/test", isolation_level='REPEATABLE_READ') maker = sessionmaker(bind=eng) session = maker() Setting Isolation for Individual Sessions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When we make a new :class:`.Session`, either using the constructor directly or when we call upon the callable produced by a :class:`.sessionmaker`, we can pass the ``bind`` argument directly, overriding the pre-existing bind. We can combine this with the :meth:`.Engine.execution_options` method in order to produce a copy of the original :class:`.Engine` that will add this option:: session = maker( bind=engine.execution_options(isolation_level='SERIALIZABLE')) For the case where the :class:`.Session` or :class:`.sessionmaker` is configured with multiple "binds", we can either re-specify the ``binds`` argument fully, or if we want to only replace specific binds, we can use the :meth:`.Session.bind_mapper` or :meth:`.Session.bind_table` methods:: session = maker() session.bind_mapper( User, user_engine.execution_options(isolation_level='SERIALIZABLE')) We can also use the individual transaction method that follows. Setting Isolation for Individual Transactions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A key caveat regarding isolation level is that the setting cannot be safely modified on a :class:`.Connection` where a transaction has already started. Databases cannot change the isolation level of a transaction in progress, and some DBAPIs and SQLAlchemy dialects have inconsistent behaviors in this area. 
Some may implicitly emit a ROLLBACK and some may implicitly emit a COMMIT,
others may ignore the setting until the next transaction.  Therefore
SQLAlchemy emits a warning if this option is set when a transaction is
already in play.  The :class:`.Session` object does not provide for us a
:class:`.Connection` for use in a transaction where the transaction is not
already begun.  So here, we need to pass execution options to the
:class:`.Session` at the start of a transaction by passing
:paramref:`.Session.connection.execution_options` provided by the
:meth:`.Session.connection` method::

    from sqlalchemy.orm import Session

    sess = Session(bind=engine)
    sess.connection(execution_options={'isolation_level': 'SERIALIZABLE'})

    # work with session

    # commit transaction.  the connection is released
    # and reverted to its previous isolation level.
    sess.commit()

Above, we first produce a :class:`.Session` using either the constructor or
a :class:`.sessionmaker`.  Then we explicitly set up the start of a
transaction by calling upon :meth:`.Session.connection`, which provides for
execution options that will be passed to the connection before the
transaction is begun.  If we are working with a :class:`.Session` that has
multiple binds or some other custom scheme for :meth:`.Session.get_bind`,
we can pass additional arguments to :meth:`.Session.connection` in order to
affect how the bind is procured::

    sess = my_sessionmaker()

    # set up a transaction for the bind associated with
    # the User mapper
    sess.connection(
        mapper=User,
        execution_options={'isolation_level': 'SERIALIZABLE'})

    # work with session

    # commit transaction.  the connection is released
    # and reverted to its previous isolation level.
    sess.commit()

The :paramref:`.Session.connection.execution_options` argument is only
accepted on the **first** call to :meth:`.Session.connection` for a
particular bind within a transaction.  If a transaction is already begun on
the target connection, a warning is emitted::

    >>> session = Session(eng)
    >>> session.execute("select 1")
    >>> session.connection(execution_options={'isolation_level': 'SERIALIZABLE'})
    sqlalchemy/orm/session.py:310: SAWarning: Connection is already established
    for the given bind; execution_options ignored

.. versionadded:: 0.9.9 Added the
   :paramref:`.Session.connection.execution_options`
   parameter to :meth:`.Session.connection`.

Tracking Transaction State with Events
--------------------------------------

See the section :ref:`session_transaction_events` for an overview of the
available event hooks for session transaction state changes.
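As a minimal sketch of such a hook (the listener body here is purely
illustrative)::

    from sqlalchemy import event

    @event.listens_for(Session, "after_transaction_create")
    def on_transaction_create(session, transaction):
        # invoked each time the Session begins a new transaction
        print("new transaction: %r" % transaction)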
.. _session_external_transaction:

Joining a Session into an External Transaction (such as for test suites)
=========================================================================

If a :class:`.Connection` is being used which is already in a transactional
state (i.e. has a :class:`.Transaction` established), a :class:`.Session`
can be made to participate within that transaction by just binding the
:class:`.Session` to that :class:`.Connection`.  The usual rationale for
this is a test suite that allows ORM code to work freely with a
:class:`.Session`, including the ability to call :meth:`.Session.commit`,
where afterwards the entire database interaction is rolled back::

    from sqlalchemy.orm import sessionmaker
    from sqlalchemy import create_engine
    from unittest import TestCase

    # global application scope.  create Session class, engine
    Session = sessionmaker()

    engine = create_engine('postgresql://...')

    class SomeTest(TestCase):
        def setUp(self):
            # connect to the database
            self.connection = engine.connect()

            # begin a non-ORM transaction
            self.trans = self.connection.begin()

            # bind an individual Session to the connection
            self.session = Session(bind=self.connection)

        def test_something(self):
            # use the session in tests.

            self.session.add(Foo())
            self.session.commit()

        def tearDown(self):
            self.session.close()

            # rollback - everything that happened with the
            # Session above (including calls to commit())
            # is rolled back.
            self.trans.rollback()

            # return connection to the Engine
            self.connection.close()

Above, we issue :meth:`.Session.commit` as well as
:meth:`.Transaction.rollback`.  This is an example of where we take advantage
of the :class:`.Connection` object's ability to maintain *subtransactions*,
or nested begin/commit-or-rollback pairs where only the outermost
begin/commit pair actually commits the transaction, or if the outermost block
rolls back, everything is rolled back.

.. topic:: Supporting Tests with Rollbacks

   The above recipe works well for any kind of database enabled test, except
   for a test that needs to actually invoke :meth:`.Session.rollback` within
   the scope of the test itself.  The above recipe can be expanded, such
   that the :class:`.Session` always runs all operations within the scope
   of a SAVEPOINT, which is established at the start of each transaction,
   so that tests can also rollback the "transaction" as well while still
   remaining in the scope of a larger "transaction" that's never committed,
   using two extra events::

      from sqlalchemy import event

      class SomeTest(TestCase):
          def setUp(self):
              # connect to the database
              self.connection = engine.connect()

              # begin a non-ORM transaction
              self.trans = self.connection.begin()

              # bind an individual Session to the connection
              self.session = Session(bind=self.connection)

              # start the session in a SAVEPOINT...
              self.session.begin_nested()

              # then each time that SAVEPOINT ends, reopen it
              @event.listens_for(self.session, "after_transaction_end")
              def restart_savepoint(session, transaction):
                  if transaction.nested and not transaction._parent.nested:

                      # ensure that state is expired the way
                      # session.commit() at the top level normally does
                      # (optional step)
                      session.expire_all()

                      session.begin_nested()

          # ... the tearDown() method stays the same

SQLAlchemy-1.0.11/doc/build/orm/loading_objects.rst0000664000175000017500000000053512636375552023164 0ustar  classicclassic00000000000000=======================
Loading Objects
=======================

Notes and features regarding the general loading of mapped objects.

For an in-depth introduction to querying with the SQLAlchemy ORM, please see the
:ref:`ormtutorial_toplevel`.

.. toctree::
    :maxdepth: 2

    loading_columns
    loading_relationships
    constructors
    query

SQLAlchemy-1.0.11/doc/build/orm/inheritance.rst0000664000175000017500000007546312636375552022337 0ustar  classicclassic00000000000000.. _inheritance_toplevel:

Mapping Class Inheritance Hierarchies
======================================

SQLAlchemy supports three forms of inheritance: **single table inheritance**,
where several types of classes are represented by a single table, **concrete
table inheritance**, where each type of class is represented by independent
tables, and **joined table inheritance**, where the class hierarchy is broken
up among dependent tables, each class represented by its own table that only
includes those attributes local to that class.
The most common forms of inheritance are single and joined table, while
concrete inheritance presents more configurational challenges.

When mappers are configured in an inheritance relationship, SQLAlchemy has
the ability to load elements :term:`polymorphically`, meaning that a single
query can return objects of multiple types.

Joined Table Inheritance
-------------------------

In joined table inheritance, each class along a particular class's list of
parents is represented by a unique table.  The total set of attributes for a
particular instance is represented as a join along all tables in its
inheritance path.  Here, we first define the ``Employee`` class.
This table will contain a primary key column (or columns), and a column
for each attribute that's represented by ``Employee``.  In this case it's
just ``name``::

    class Employee(Base):
        __tablename__ = 'employee'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))
        type = Column(String(50))

        __mapper_args__ = {
            'polymorphic_identity':'employee',
            'polymorphic_on':type
        }

The mapped table also has a column called ``type``.   The purpose of this
column is to act as the **discriminator**, and stores a value which indicates
the type of object represented within the row.  The column may be of any
datatype, though string and integer are the most common.

.. warning::

   Currently, **only one discriminator column may be set**, typically
   on the base-most class in the hierarchy. "Cascading" polymorphic columns
   are not yet supported.

The discriminator column is only needed if polymorphic loading is desired,
as is usually the case.   It is not strictly necessary that it be present
directly on the base mapped table, and can instead be defined on a derived
select statement that's used when the class is queried; however, this is a
much more sophisticated configuration scenario.

The mapping receives additional arguments via the ``__mapper_args__``
dictionary.   Here the ``type`` column is explicitly stated as the
discriminator column, and the **polymorphic identity** of ``employee``
is also given; this is the value that will be stored in the polymorphic
discriminator column for instances of this class.

We next define ``Engineer`` and ``Manager`` subclasses of ``Employee``.
Each contains columns that represent the attributes unique to the subclass
they represent.  Each table also must contain a primary key column (or
columns), and in most cases a foreign key reference to the parent table::

    class Engineer(Employee):
        __tablename__ = 'engineer'
        id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
        engineer_name = Column(String(30))

        __mapper_args__ = {
            'polymorphic_identity':'engineer',
        }

    class Manager(Employee):
        __tablename__ = 'manager'
        id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
        manager_name = Column(String(30))

        __mapper_args__ = {
            'polymorphic_identity':'manager',
        }

It is standard practice that the same column is used for both the role
of primary key as well as foreign key to the parent table,
and that the column is also named the same as that of the parent table.
However, both of these practices are optional.  Separate columns may be
used for primary key and parent-relationship, the column may be named
differently than that of the parent, and even a custom join condition can be
specified between parent and child tables instead of using a foreign key.
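As a quick usage sketch under this mapping (the attribute values here are
illustrative only), persisting subclass objects works the same as for any
mapped class::

    session.add_all([
        Engineer(name='e1', engineer_name='engineer one'),
        Manager(name='m1', manager_name='manager one'),
    ])
    # upon flush, each employee.type row receives 'engineer' or
    # 'manager', per the polymorphic_identity of its class
    session.commit()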
.. topic:: Joined inheritance primary keys

   One natural effect of the joined table inheritance configuration is that
   the identity of any mapped object can be determined entirely from the base
   table.  This has obvious advantages, so SQLAlchemy always considers the
   primary key columns of a joined inheritance class to be those of the base
   table only.  In other words, the ``id`` columns of both the ``engineer``
   and ``manager`` tables are not used to locate ``Engineer`` or ``Manager``
   objects - only the value in ``employee.id`` is considered.  ``engineer.id``
   and ``manager.id`` are still of course critical to the proper operation of
   the pattern overall as they are used to locate the joined row, once the
   parent row has been determined within a statement.

With the joined inheritance mapping complete, querying against ``Employee``
will return a combination of ``Employee``, ``Engineer`` and ``Manager``
objects.  Newly saved ``Engineer``, ``Manager``, and ``Employee`` objects
will automatically populate the ``employee.type`` column with ``engineer``,
``manager``, or ``employee``, as appropriate.

.. _with_polymorphic:

Basic Control of Which Tables are Queried
++++++++++++++++++++++++++++++++++++++++++

The :func:`.orm.with_polymorphic` function and the
:func:`~sqlalchemy.orm.query.Query.with_polymorphic` method of
:class:`~sqlalchemy.orm.query.Query` affect the specific tables
which the :class:`.Query` selects from.  Normally, a query such as this::

    session.query(Employee).all()

...selects only from the ``employee`` table.  When loading fresh from the
database, our joined-table setup will query from the parent table only,
using SQL such as this:

.. sourcecode:: python+sql

    {opensql}
    SELECT employee.id AS employee_id,
        employee.name AS employee_name, employee.type AS employee_type
    FROM employee
    []

As attributes are requested from those ``Employee`` objects which are
represented in either the ``engineer`` or ``manager`` child tables, a second
load is issued for the columns in that related row, if the data was not
already loaded.  So above, after accessing the objects you'd see further SQL
issued along the lines of:

.. sourcecode:: python+sql

    {opensql}
    SELECT manager.id AS manager_id,
        manager.manager_data AS manager_manager_data
    FROM manager
    WHERE ? = manager.id
    [5]
    SELECT engineer.id AS engineer_id,
        engineer.engineer_info AS engineer_engineer_info
    FROM engineer
    WHERE ? = engineer.id
    [2]

This behavior works well when issuing searches for small numbers of items,
such as when using :meth:`.Query.get`, since the full range of joined tables
are not pulled in to the SQL statement unnecessarily.  But when querying a
larger span of rows which are known to be of many types, you may want to
actively join to some or all of the joined tables.  The ``with_polymorphic``
feature provides this.

Telling our query to polymorphically load ``Engineer`` and ``Manager``
objects, we can use the :func:`.orm.with_polymorphic` function to create a
new aliased class which represents a select of the base table combined with
outer joins to each of the inheriting tables::

    from sqlalchemy.orm import with_polymorphic

    eng_plus_manager = with_polymorphic(Employee, [Engineer, Manager])

    query = session.query(eng_plus_manager)

The above produces a query which joins the ``employee`` table to both the
``engineer`` and ``manager`` tables like the following:

.. 
.. sourcecode:: python+sql

    query.all()
    {opensql}
    SELECT employee.id AS employee_id,
        engineer.id AS engineer_id,
        manager.id AS manager_id,
        employee.name AS employee_name,
        employee.type AS employee_type,
        engineer.engineer_info AS engineer_engineer_info,
        manager.manager_data AS manager_manager_data
    FROM employee
        LEFT OUTER JOIN engineer
        ON employee.id = engineer.id
        LEFT OUTER JOIN manager
        ON employee.id = manager.id
    []

The entity returned by :func:`.orm.with_polymorphic` is an
:class:`.AliasedClass` object, which can be used in a :class:`.Query` like
any other alias, including named attributes for those attributes on the
``Employee`` class.  In our example, ``eng_plus_manager`` becomes the entity
that we use to refer to the three-way outer join above.  It also includes
namespaces for each class named in the list of classes, so that attributes
specific to those subclasses can be called upon as well.   The following
example illustrates calling upon attributes specific to ``Engineer`` as well
as ``Manager`` in terms of ``eng_plus_manager``::

    eng_plus_manager = with_polymorphic(Employee, [Engineer, Manager])
    query = session.query(eng_plus_manager).filter(
                    or_(
                        eng_plus_manager.Engineer.engineer_info=='x',
                        eng_plus_manager.Manager.manager_data=='y'
                    )
                )

:func:`.orm.with_polymorphic` accepts a single class or
mapper, a list of classes/mappers, or the string ``'*'`` to indicate all
subclasses:

.. sourcecode:: python+sql

    # join to the engineer table
    entity = with_polymorphic(Employee, Engineer)

    # join to the engineer and manager tables
    entity = with_polymorphic(Employee, [Engineer, Manager])

    # join to all subclass tables
    entity = with_polymorphic(Employee, '*')

    # use the 'entity' with a Query object
    session.query(entity).all()

It also accepts a third argument ``selectable`` which replaces the automatic
join creation and instead selects directly from the selectable given.  This
feature is normally used with "concrete" inheritance, described later, but
can be used with any kind of inheritance setup in the case that specialized
SQL should be used to load polymorphically::

    # custom selectable
    employee = Employee.__table__
    manager = Manager.__table__
    engineer = Engineer.__table__
    entity = with_polymorphic(
                Employee,
                [Engineer, Manager],
                employee.outerjoin(manager).outerjoin(engineer)
            )

    # use the 'entity' with a Query object
    session.query(entity).all()

Note that if you only need to load a single subtype, such as just the
``Engineer`` objects, :func:`.orm.with_polymorphic` is not needed since you
would query against the ``Engineer`` class directly.

:meth:`.Query.with_polymorphic` has the same purpose as
:func:`.orm.with_polymorphic`, except that it is not as flexible in its
usage patterns in that it only applies to the first full mapping, which then
impacts all occurrences of that class or the target subclasses within the
:class:`.Query`.  For simple cases it might be considered to be more
succinct::

    session.query(Employee).with_polymorphic([Engineer, Manager]).\
        filter(or_(Engineer.engineer_info=='w', Manager.manager_data=='q'))

.. versionadded:: 0.8
    :func:`.orm.with_polymorphic`, an improved version of the
    :meth:`.Query.with_polymorphic` method.

The mapper also accepts ``with_polymorphic`` as a configurational argument
so that the joined-style load will be issued automatically.
This argument may be the string ``'*'``, a list of classes, or a tuple
consisting of either, followed by a selectable::

    class Employee(Base):
        __tablename__ = 'employee'
        id = Column(Integer, primary_key=True)
        type = Column(String(20))

        __mapper_args__ = {
            'polymorphic_on':type,
            'polymorphic_identity':'employee',
            'with_polymorphic':'*'
        }

    class Engineer(Employee):
        __tablename__ = 'engineer'
        id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
        __mapper_args__ = {'polymorphic_identity':'engineer'}

    class Manager(Employee):
        __tablename__ = 'manager'
        id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
        __mapper_args__ = {'polymorphic_identity':'manager'}

The above mapping will produce a query similar to that of
``with_polymorphic('*')`` for every query of ``Employee`` objects.

Using :func:`.orm.with_polymorphic` or :meth:`.Query.with_polymorphic`
will override the mapper-level ``with_polymorphic`` setting.

.. autofunction:: sqlalchemy.orm.with_polymorphic

Advanced Control of Which Tables are Queried
+++++++++++++++++++++++++++++++++++++++++++++

The ``with_polymorphic`` functions work fine for simplistic scenarios.
However, direct control of table rendering is sometimes called for, such as
the case when one wants to render only the subclass table and not the parent
table.

This use case can be achieved by using the mapped :class:`.Table` objects
directly.  For example, to query the name of employees with particular
criteria::

    engineer = Engineer.__table__
    manager = Manager.__table__

    session.query(Employee.name).\
        outerjoin((engineer, engineer.c.employee_id==Employee.employee_id)).\
        outerjoin((manager, manager.c.employee_id==Employee.employee_id)).\
        filter(or_(Engineer.engineer_info=='w', Manager.manager_data=='q'))

The base table, in this case the "employees" table, isn't always necessary.
A SQL query is always more efficient with fewer joins.  Here, if we wanted
to just load information specific to manager or engineer, we can instruct
:class:`.Query` to use only those tables.  The ``FROM`` clause is determined
by what's specified in the :meth:`.Session.query`, :meth:`.Query.filter`, or
:meth:`.Query.select_from` methods::

    session.query(Manager.manager_data).select_from(manager)

    session.query(engineer.c.id).\
        filter(engineer.c.engineer_info==manager.c.manager_data)

.. _of_type:

Creating Joins to Specific Subtypes
+++++++++++++++++++++++++++++++++++

The :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` method is a
helper which allows the construction of joins along
:func:`~sqlalchemy.orm.relationship` paths while narrowing the criterion to
specific subclasses.  Suppose the ``employees`` table represents a
collection of employees which are associated with a ``Company`` object.
We'll add a ``company_id`` column to the ``employees`` table and a new table
``companies``:
.. sourcecode:: python+sql

    class Company(Base):
        __tablename__ = 'company'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))
        employees = relationship("Employee",
                        backref='company',
                        cascade='all, delete-orphan')

    class Employee(Base):
        __tablename__ = 'employee'
        id = Column(Integer, primary_key=True)
        type = Column(String(20))
        company_id = Column(Integer, ForeignKey('company.id'))
        __mapper_args__ = {
            'polymorphic_on':type,
            'polymorphic_identity':'employee',
            'with_polymorphic':'*'
        }

    class Engineer(Employee):
        __tablename__ = 'engineer'
        id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
        engineer_info = Column(String(50))
        __mapper_args__ = {'polymorphic_identity':'engineer'}

    class Manager(Employee):
        __tablename__ = 'manager'
        id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
        manager_data = Column(String(50))
        __mapper_args__ = {'polymorphic_identity':'manager'}

When querying from ``Company`` onto the ``Employee`` relationship, the
``join()`` method as well as the ``any()`` and ``has()`` operators will
create a join from ``company`` to ``employee``, without including
``engineer`` or ``manager`` in the mix.  If we wish to have criterion which
is specifically against the ``Engineer`` class, we can tell those methods to
join or subquery against the joined table representing the subclass, using
the :meth:`~.orm.interfaces.PropComparator.of_type` operator::

    session.query(Company).\
        join(Company.employees.of_type(Engineer)).\
        filter(Engineer.engineer_info=='someinfo')

A longhand version of this would involve spelling out the full target
selectable within a 2-tuple::

    employee = Employee.__table__
    engineer = Engineer.__table__

    session.query(Company).\
        join((employee.join(engineer), Company.employees)).\
        filter(Engineer.engineer_info=='someinfo')

:func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` accepts a single
class argument.  More flexibility can be achieved either by joining to an
explicit join as above, or by using the :func:`.orm.with_polymorphic`
function to create a polymorphic selectable::

    manager_and_engineer = with_polymorphic(
                                Employee, [Manager, Engineer],
                                aliased=True)

    session.query(Company).\
        join(manager_and_engineer, Company.employees).\
        filter(
            or_(manager_and_engineer.Engineer.engineer_info=='someinfo',
                manager_and_engineer.Manager.manager_data=='somedata')
        )

Above, we use the ``aliased=True`` argument with
:func:`.orm.with_polymorphic` so that the right hand side of the join
between ``Company`` and ``manager_and_engineer`` is converted into an
aliased subquery.  Some backends, such as SQLite and older versions of
MySQL, can't handle a FROM clause of the following form::

    FROM x JOIN (y JOIN z ON <onclause>) ON <onclause>

Using ``aliased=True`` instead renders it more like::

    FROM x JOIN (SELECT * FROM y JOIN z ON <onclause>) AS anon_1 ON <onclause>

The above join can also be expressed more succinctly by combining
``of_type()`` with the polymorphic construct::

    manager_and_engineer = with_polymorphic(
                                Employee, [Manager, Engineer],
                                aliased=True)

    session.query(Company).\
        join(Company.employees.of_type(manager_and_engineer)).\
        filter(
            or_(manager_and_engineer.Engineer.engineer_info=='someinfo',
                manager_and_engineer.Manager.manager_data=='somedata')
        )

The ``any()`` and ``has()`` operators also can be used with
:func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` when the embedded
criterion is in terms of a subclass::

    session.query(Company).\
        filter(
            Company.employees.of_type(Engineer).
                any(Engineer.engineer_info=='someinfo')
            ).all()
Note that ``any()`` and ``has()`` are both shorthand for a correlated EXISTS
query.  Building one by hand looks like::

    session.query(Company).filter(
        exists([1],
            and_(Engineer.engineer_info=='someinfo',
                employees.c.company_id==companies.c.company_id),
            from_obj=employees.join(engineers)
        )
    ).all()

The EXISTS subquery above selects from the join of ``employees`` to
``engineers``, and also specifies criterion which correlates the EXISTS
subselect back to the parent ``companies`` table.

.. versionadded:: 0.8
    :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` accepts
    :func:`.orm.aliased` and :func:`.orm.with_polymorphic` constructs in
    conjunction with :meth:`.Query.join`, ``any()`` and ``has()``.

.. _eagerloading_polymorphic_subtypes:

Eager Loading of Specific or Polymorphic Subtypes
++++++++++++++++++++++++++++++++++++++++++++++++++

The :func:`.joinedload`, :func:`.subqueryload`, :func:`.contains_eager` and
other loading-related options also support paths which make use of
:func:`~sqlalchemy.orm.interfaces.PropComparator.of_type`.  Below we load
``Company`` rows while eagerly loading related ``Engineer`` objects,
querying the ``employee`` and ``engineer`` tables simultaneously::

    session.query(Company).\
        options(
            subqueryload(Company.employees.of_type(Engineer)).
            subqueryload("machines")
        )

As is the case with :meth:`.Query.join`, :meth:`~PropComparator.of_type`
also can be used with eager loading and :func:`.orm.with_polymorphic`
at the same time, so that all sub-attributes of all referenced subtypes
can be loaded::

    manager_and_engineer = with_polymorphic(
                                Employee, [Manager, Engineer],
                                aliased=True)

    session.query(Company).\
        options(
            joinedload(Company.employees.of_type(manager_and_engineer))
        )

.. versionadded:: 0.8
    :func:`.joinedload`, :func:`.subqueryload`, :func:`.contains_eager`
    and related loader options support paths that are qualified with
    :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type`, supporting
    single target types as well as :func:`.orm.with_polymorphic` targets.

Another option for the above query is to state the two subtypes separately;
the :func:`.joinedload` directive should detect this and create the above
``with_polymorphic`` construct automatically::

    session.query(Company).\
        options(
            joinedload(Company.employees.of_type(Manager)),
            joinedload(Company.employees.of_type(Engineer)),
        )

.. versionadded:: 1.0
    Eager loaders such as :func:`.joinedload` will create a polymorphic
    entity when multiple overlapping :meth:`~PropComparator.of_type`
    directives are encountered.

Single Table Inheritance
------------------------

Single table inheritance is where the attributes of the base class as well
as all subclasses are represented within a single table.  A column is
present in the table for every attribute mapped to the base class and all
subclasses; the columns which correspond to a single subclass are nullable.
This configuration looks much like joined-table inheritance except there's
only one table.  In this case, a ``type`` column is required, as there would
be no other way to discriminate between classes.  The table is specified in
the base mapper only; for the inheriting classes, leave their ``table``
parameter blank:
.. sourcecode:: python+sql

    class Employee(Base):
        __tablename__ = 'employee'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))
        manager_data = Column(String(50))
        engineer_info = Column(String(50))
        type = Column(String(20))

        __mapper_args__ = {
            'polymorphic_on':type,
            'polymorphic_identity':'employee'
        }

    class Manager(Employee):
        __mapper_args__ = {
            'polymorphic_identity':'manager'
        }

    class Engineer(Employee):
        __mapper_args__ = {
            'polymorphic_identity':'engineer'
        }

Note that the mappers for the derived classes ``Manager`` and ``Engineer``
omit the ``__tablename__``, indicating they do not have a mapped table of
their own.

.. _concrete_inheritance:

Concrete Table Inheritance
--------------------------

.. note::

    This section currently uses classical mappings.  The Declarative system
    fully supports concrete inheritance, however.  See the links below for
    more information on using declarative with concrete table inheritance.

This form of inheritance maps each class to a distinct table, as below:

.. sourcecode:: python+sql

    employees_table = Table('employees', metadata,
        Column('employee_id', Integer, primary_key=True),
        Column('name', String(50)),
    )

    managers_table = Table('managers', metadata,
        Column('employee_id', Integer, primary_key=True),
        Column('name', String(50)),
        Column('manager_data', String(50)),
    )

    engineers_table = Table('engineers', metadata,
        Column('employee_id', Integer, primary_key=True),
        Column('name', String(50)),
        Column('engineer_info', String(50)),
    )

Notice in this case there is no ``type`` column.  If polymorphic loading is
not required, there's no advantage to using ``inherits`` here; you just
define a separate mapper for each class.

.. sourcecode:: python+sql

    mapper(Employee, employees_table)
    mapper(Manager, managers_table)
    mapper(Engineer, engineers_table)

To load polymorphically, the ``with_polymorphic`` argument is required,
along with a selectable indicating how rows should be loaded.  In this case
we must construct a UNION of all three tables.  SQLAlchemy includes a helper
function to create these called
:func:`~sqlalchemy.orm.util.polymorphic_union`, which will map all the
different columns into a structure of selects with the same numbers and
names of columns, and also generate a virtual ``type`` column for each
subselect:

.. sourcecode:: python+sql

    pjoin = polymorphic_union({
        'employee': employees_table,
        'manager': managers_table,
        'engineer': engineers_table
    }, 'type', 'pjoin')

    employee_mapper = mapper(Employee, employees_table,
                                        with_polymorphic=('*', pjoin),
                                        polymorphic_on=pjoin.c.type,
                                        polymorphic_identity='employee')
    manager_mapper = mapper(Manager, managers_table,
                                        inherits=employee_mapper,
                                        concrete=True,
                                        polymorphic_identity='manager')
    engineer_mapper = mapper(Engineer, engineers_table,
                                        inherits=employee_mapper,
                                        concrete=True,
                                        polymorphic_identity='engineer')

Upon select, the polymorphic union produces a query like this:
.. sourcecode:: python+sql

    session.query(Employee).all()
    {opensql}
    SELECT pjoin.type AS pjoin_type,
            pjoin.manager_data AS pjoin_manager_data,
            pjoin.employee_id AS pjoin_employee_id,
            pjoin.name AS pjoin_name,
            pjoin.engineer_info AS pjoin_engineer_info
    FROM (
        SELECT employees.employee_id AS employee_id,
            CAST(NULL AS VARCHAR(50)) AS manager_data, employees.name AS name,
            CAST(NULL AS VARCHAR(50)) AS engineer_info, 'employee' AS type
        FROM employees
    UNION ALL
        SELECT managers.employee_id AS employee_id,
            managers.manager_data AS manager_data, managers.name AS name,
            CAST(NULL AS VARCHAR(50)) AS engineer_info, 'manager' AS type
        FROM managers
    UNION ALL
        SELECT engineers.employee_id AS employee_id,
            CAST(NULL AS VARCHAR(50)) AS manager_data, engineers.name AS name,
            engineers.engineer_info AS engineer_info, 'engineer' AS type
        FROM engineers
    ) AS pjoin
    []

Concrete Inheritance with Declarative
++++++++++++++++++++++++++++++++++++++

.. versionadded:: 0.7.3
    The :ref:`declarative_toplevel` module includes helpers for concrete
    inheritance.  See :ref:`declarative_concrete_helpers` for more
    information.

Using Relationships with Inheritance
------------------------------------

Both joined-table and single table inheritance scenarios produce mappings
which are usable in :func:`~sqlalchemy.orm.relationship` functions; that is,
it's possible to map a parent object to a child object which is polymorphic.
Similarly, inheriting mappers can have :func:`~sqlalchemy.orm.relationship`
objects of their own at any level, which are inherited by each child class.
The only requirement for relationships is that there is a table relationship
between parent and child.  An example is the following modification to the
joined table inheritance example, which sets a bi-directional relationship
between ``Employee`` and ``Company``:

.. sourcecode:: python+sql

    employees_table = Table('employees', metadata,
        Column('employee_id', Integer, primary_key=True),
        Column('name', String(50)),
        Column('company_id', Integer, ForeignKey('companies.company_id'))
    )

    companies = Table('companies', metadata,
        Column('company_id', Integer, primary_key=True),
        Column('name', String(50)))

    class Company(object):
        pass

    mapper(Company, companies, properties={
        'employees': relationship(Employee, backref='company')
    })

Relationships with Concrete Inheritance
+++++++++++++++++++++++++++++++++++++++

In a concrete inheritance scenario, mapping relationships is more
challenging since the distinct classes do not share a table.  In this case,
you *can* establish a relationship from parent to child if a join condition
can be constructed from parent to child, i.e. if each child table contains a
foreign key to the parent:
.. sourcecode:: python+sql

    companies = Table('companies', metadata,
        Column('id', Integer, primary_key=True),
        Column('name', String(50)))

    employees_table = Table('employees', metadata,
        Column('employee_id', Integer, primary_key=True),
        Column('name', String(50)),
        Column('company_id', Integer, ForeignKey('companies.id'))
    )

    managers_table = Table('managers', metadata,
        Column('employee_id', Integer, primary_key=True),
        Column('name', String(50)),
        Column('manager_data', String(50)),
        Column('company_id', Integer, ForeignKey('companies.id'))
    )

    engineers_table = Table('engineers', metadata,
        Column('employee_id', Integer, primary_key=True),
        Column('name', String(50)),
        Column('engineer_info', String(50)),
        Column('company_id', Integer, ForeignKey('companies.id'))
    )

    employee_mapper = mapper(Employee, employees_table,
                                    with_polymorphic=('*', pjoin),
                                    polymorphic_on=pjoin.c.type,
                                    polymorphic_identity='employee')
    manager_mapper = mapper(Manager, managers_table,
                                    inherits=employee_mapper,
                                    concrete=True,
                                    polymorphic_identity='manager')
    engineer_mapper = mapper(Engineer, engineers_table,
                                    inherits=employee_mapper,
                                    concrete=True,
                                    polymorphic_identity='engineer')

    mapper(Company, companies, properties={
        'employees': relationship(Employee)
    })

The big limitation with concrete table inheritance is that
:func:`~sqlalchemy.orm.relationship` objects placed on each concrete mapper
do **not** propagate to child mappers.  If you want to have the same
:func:`~sqlalchemy.orm.relationship` objects set up on all concrete mappers,
they must be configured manually on each.  To configure back references in
such a configuration the ``back_populates`` keyword may be used instead of
``backref``, such as below where both ``A(object)`` and ``B(A)``
bidirectionally reference ``C``::

    ajoin = polymorphic_union({
            'a':a_table,
            'b':b_table
        }, 'type', 'ajoin')

    mapper(A, a_table, with_polymorphic=('*', ajoin),
        polymorphic_on=ajoin.c.type, polymorphic_identity='a',
        properties={
            'some_c':relationship(C, back_populates='many_a')
    })
    mapper(B, b_table, inherits=A, concrete=True,
        polymorphic_identity='b',
        properties={
            'some_c':relationship(C, back_populates='many_a')
    })
    mapper(C, c_table, properties={
        'many_a':relationship(A, collection_class=set,
                                    back_populates='some_c'),
    })

Using Inheritance with Declarative
-----------------------------------

Declarative makes inheritance configuration more intuitive.  See the docs at
:ref:`declarative_inheritance`.

SQLAlchemy-1.0.11/doc/build/orm/composites.rst0000664000175000017500000001321512636375552022222 0ustar classicclassic00000000000000.. module:: sqlalchemy.orm

.. _mapper_composite:

Composite Column Types
=======================

Sets of columns can be associated with a single user-defined datatype.  The
ORM provides a single attribute which represents the group of columns using
the class you provide.

.. versionchanged:: 0.7
    Composites have been simplified such that they no longer "conceal" the
    underlying column based attributes.  Additionally, in-place mutation is
    no longer automatic; see the section below on enabling mutability to
    support tracking of in-place changes.

.. versionchanged:: 0.9
    Composites will return their object-form, rather than as individual
    columns, when used in a column-oriented :class:`.Query` construct.  See
    :ref:`migration_2824`.

A simple example represents pairs of columns as a ``Point`` object.
``Point`` represents such a pair as ``.x`` and ``.y``::

    class Point(object):
        def __init__(self, x, y):
            self.x = x
            self.y = y

        def __composite_values__(self):
            return self.x, self.y

        def __repr__(self):
            return "Point(x=%r, y=%r)" % (self.x, self.y)

        def __eq__(self, other):
            return isinstance(other, Point) and \
                other.x == self.x and \
                other.y == self.y

        def __ne__(self, other):
            return not self.__eq__(other)

The requirements for the custom datatype class are that it have a
constructor which accepts positional arguments corresponding to its column
format, and also provides a method ``__composite_values__()`` which returns
the state of the object as a list or tuple, in order of its column-based
attributes.  It also should supply adequate ``__eq__()`` and ``__ne__()``
methods which test the equality of two instances.

We will create a mapping to a table ``vertice``, which represents two
points as ``x1/y1`` and ``x2/y2``.  These are created normally as
:class:`.Column` objects.  Then, the :func:`.composite` function is used to
assign new attributes that will represent sets of columns via the ``Point``
class::

    from sqlalchemy import Column, Integer
    from sqlalchemy.orm import composite
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Vertex(Base):
        __tablename__ = 'vertice'

        id = Column(Integer, primary_key=True)
        x1 = Column(Integer)
        y1 = Column(Integer)
        x2 = Column(Integer)
        y2 = Column(Integer)

        start = composite(Point, x1, y1)
        end = composite(Point, x2, y2)

A classical mapping above would define each :func:`.composite`
against the existing table::

    mapper(Vertex, vertice_table, properties={
        'start':composite(Point, vertice_table.c.x1, vertice_table.c.y1),
        'end':composite(Point, vertice_table.c.x2, vertice_table.c.y2),
    })

We can now persist and use ``Vertex`` instances, as well as query for them,
using the ``.start`` and ``.end`` attributes against ad-hoc ``Point``
instances:

.. sourcecode:: python+sql

    >>> v = Vertex(start=Point(3, 4), end=Point(5, 6))
    >>> session.add(v)
    >>> q = session.query(Vertex).filter(Vertex.start == Point(3, 4))
    {sql}>>> print q.first().start
    BEGIN (implicit)
    INSERT INTO vertice (x1, y1, x2, y2) VALUES (?, ?, ?, ?)
    (3, 4, 5, 6)
    SELECT vertice.id AS vertice_id,
            vertice.x1 AS vertice_x1,
            vertice.y1 AS vertice_y1,
            vertice.x2 AS vertice_x2,
            vertice.y2 AS vertice_y2
    FROM vertice
    WHERE vertice.x1 = ? AND vertice.y1 = ?
    LIMIT ? OFFSET ?
    (3, 4, 1, 0)
    {stop}Point(x=3, y=4)

.. autofunction:: composite

Tracking In-Place Mutations on Composites
-----------------------------------------

In-place changes to an existing composite value are not tracked
automatically.  Instead, the composite class needs to provide events to its
parent object explicitly.  This task is largely automated via the usage of
the :class:`.MutableComposite` mixin, which uses events to associate each
user-defined composite object with all parent associations.  Please see the
example in :ref:`mutable_composites`.

.. versionchanged:: 0.7
    In-place changes to an existing composite value are no longer tracked
    automatically; the functionality is superseded by the
    :class:`.MutableComposite` class.

.. _composite_operations:

Redefining Comparison Operations for Composites
-----------------------------------------------

The "equals" comparison operation by default produces an AND of all
corresponding columns equated to one another.
This can be changed using the ``comparator_factory`` argument to
:func:`.composite`, where we specify a custom
:class:`.CompositeProperty.Comparator` class to define existing or new
operations.  Below we illustrate the "greater than" operator, implementing
the same expression that the base "greater than" does::

    from sqlalchemy.orm.properties import CompositeProperty
    from sqlalchemy import sql

    class PointComparator(CompositeProperty.Comparator):
        def __gt__(self, other):
            """redefine the 'greater than' operation"""

            return sql.and_(*[a>b for a, b in
                              zip(self.__clause_element__().clauses,
                                  other.__composite_values__())])

    class Vertex(Base):
        __tablename__ = 'vertice'

        id = Column(Integer, primary_key=True)
        x1 = Column(Integer)
        y1 = Column(Integer)
        x2 = Column(Integer)
        y2 = Column(Integer)

        start = composite(Point, x1, y1,
                            comparator_factory=PointComparator)
        end = composite(Point, x2, y2,
                            comparator_factory=PointComparator)

SQLAlchemy-1.0.11/doc/build/orm/loading_relationships.rst0000664000175000017500000007222012636375552024417 0ustar classicclassic00000000000000.. _loading_toplevel:

.. currentmodule:: sqlalchemy.orm

Relationship Loading Techniques
===============================

A big part of SQLAlchemy is providing a wide range of control over how
related objects get loaded when querying.   This behavior can be configured
at mapper construction time using the ``lazy`` parameter to the
:func:`.relationship` function, as well as by using options with the
:class:`.Query` object.

Using Loader Strategies: Lazy Loading, Eager Loading
----------------------------------------------------

By default, all inter-object relationships are **lazy loading**.  The scalar
or collection attribute associated with a
:func:`~sqlalchemy.orm.relationship` contains a trigger which fires the
first time the attribute is accessed.  This trigger, in all but one case,
issues a SQL call at the point of access in order to load the related object
or objects:

.. sourcecode:: python+sql

    {sql}>>> jack.addresses
    SELECT
        addresses.id AS addresses_id,
        addresses.email_address AS addresses_email_address,
        addresses.user_id AS addresses_user_id
    FROM addresses
    WHERE ? = addresses.user_id
    [5]
    {stop}[<Address(u'jack@google.com')>, <Address(u'j25@yahoo.com')>]

The one case where SQL is not emitted is for a simple many-to-one
relationship, when the related object can be identified by its primary key
alone and that object is already present in the current :class:`.Session`.

This default behavior of "load upon attribute access" is known as "lazy" or
"select" loading - the name "select" because a "SELECT" statement is
typically emitted when the attribute is first accessed.

In the :ref:`ormtutorial_toplevel`, we introduced the concept of **Eager
Loading**.  We used an ``option`` in conjunction with the
:class:`~sqlalchemy.orm.query.Query` object in order to indicate that a
relationship should be loaded at the same time as the parent, within a
single SQL query.  This option, known as :func:`.joinedload`, connects a
JOIN (by default a LEFT OUTER join) to the statement and populates the
scalar/collection from the same result set as that of the parent:
.. sourcecode:: python+sql

    {sql}>>> jack = session.query(User).\
    ... options(joinedload('addresses')).\
    ... filter_by(name='jack').all() #doctest: +NORMALIZE_WHITESPACE
    SELECT
        addresses_1.id AS addresses_1_id,
        addresses_1.email_address AS addresses_1_email_address,
        addresses_1.user_id AS addresses_1_user_id,
        users.id AS users_id, users.name AS users_name,
        users.fullname AS users_fullname,
        users.password AS users_password
    FROM users
    LEFT OUTER JOIN addresses AS addresses_1
        ON users.id = addresses_1.user_id
    WHERE users.name = ?
    ['jack']

In addition to "joined eager loading", a second option for eager loading
exists, called "subquery eager loading".  This kind of eager loading emits
an additional SQL statement for each collection requested, aggregated across
all parent objects:

.. sourcecode:: python+sql

    {sql}>>> jack = session.query(User).\
    ... options(subqueryload('addresses')).\
    ... filter_by(name='jack').all()
    SELECT
        users.id AS users_id,
        users.name AS users_name,
        users.fullname AS users_fullname,
        users.password AS users_password
    FROM users
    WHERE users.name = ?
    ('jack',)
    SELECT
        addresses.id AS addresses_id,
        addresses.email_address AS addresses_email_address,
        addresses.user_id AS addresses_user_id,
        anon_1.users_id AS anon_1_users_id
    FROM (
        SELECT users.id AS users_id
        FROM users
        WHERE users.name = ?) AS anon_1
    JOIN addresses ON anon_1.users_id = addresses.user_id
    ORDER BY anon_1.users_id, addresses.id
    ('jack',)

The default **loader strategy** for any :func:`~sqlalchemy.orm.relationship`
is configured by the ``lazy`` keyword argument, which defaults to ``select``
- this indicates a "select" statement.  Below we set it as ``joined`` so
that the ``children`` relationship is eager loaded using a JOIN::

    # load the 'children' collection using LEFT OUTER JOIN
    class Parent(Base):
        __tablename__ = 'parent'

        id = Column(Integer, primary_key=True)
        children = relationship("Child", lazy='joined')

We can also set it to eagerly load using a second query for all collections,
using ``subquery``::

    # load the 'children' collection using a second query which
    # JOINS to a subquery of the original
    class Parent(Base):
        __tablename__ = 'parent'

        id = Column(Integer, primary_key=True)
        children = relationship("Child", lazy='subquery')

When querying, all three choices of loader strategy are available on a
per-query basis, using the :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` and :func:`~sqlalchemy.orm.lazyload`
query options:

.. sourcecode:: python+sql

    # set children to load lazily
    session.query(Parent).options(lazyload('children')).all()

    # set children to load eagerly with a join
    session.query(Parent).options(joinedload('children')).all()

    # set children to load eagerly with a second statement
    session.query(Parent).options(subqueryload('children')).all()

.. _subqueryload_ordering:

The Importance of Ordering
--------------------------

A query which makes use of :func:`.subqueryload` in conjunction with a
limiting modifier such as :meth:`.Query.first`, :meth:`.Query.limit`, or
:meth:`.Query.offset` should **always** include :meth:`.Query.order_by`
against unique column(s) such as the primary key, so that the additional
queries emitted by :func:`.subqueryload` include the same ordering as used
by the parent query.
Without it, there is a chance that the inner query could return the wrong
rows::

    # incorrect, no ORDER BY
    session.query(User).options(subqueryload(User.addresses)).first()

    # incorrect if User.name is not unique
    session.query(User).options(subqueryload(User.addresses)).order_by(User.name).first()

    # correct
    session.query(User).options(subqueryload(User.addresses)).order_by(User.name, User.id).first()

.. seealso::

    :ref:`faq_subqueryload_limit_sort` - detailed example

Loading Along Paths
-------------------

To reference a relationship that is deeper than one level, method chaining
may be used.  The object returned by all loader options is an instance of
the :class:`.Load` class, which provides a so-called "generative"
interface::

    session.query(Parent).options(
        joinedload('foo').
        joinedload('bar').
        joinedload('bat')
    ).all()

Using method chaining, the loader style of each link in the path is
explicitly stated.  To navigate along a path without changing the existing
loader style of a particular attribute, the :func:`.defaultload`
method/function may be used::

    session.query(A).options(
        defaultload("atob").joinedload("btoc")
    ).all()

.. versionchanged:: 0.9.0
    The previous approach of specifying dot-separated paths within loader
    options has been superseded by the less ambiguous approach of the
    :class:`.Load` object and related methods.   With this system, the user
    specifies the style of loading for each link along the chain explicitly,
    rather than guessing between options like ``joinedload()`` vs.
    ``joinedload_all()``.   The :func:`.orm.defaultload` is provided to
    allow path navigation without modification of existing loader options.
    The dot-separated path system as well as the ``_all()`` functions will
    remain available for backwards-compatibility indefinitely.

Default Loading Strategies
--------------------------

.. versionadded:: 0.7.5
    Default loader strategies as a new feature.

Each of :func:`.joinedload`, :func:`.subqueryload`, :func:`.lazyload`, and
:func:`.noload` can be used to set the default style of
:func:`.relationship` loading for a particular query, affecting all
:func:`.relationship` -mapped attributes not otherwise specified in the
:class:`.Query`.   This feature is available by passing the string ``'*'``
as the argument to any of these options::

    session.query(MyClass).options(lazyload('*'))

Above, the ``lazyload('*')`` option will supersede the ``lazy`` setting of
all :func:`.relationship` constructs in use for that query, except for those
which use the ``'dynamic'`` style of loading.   If some relationships
specify ``lazy='joined'`` or ``lazy='subquery'``, for example, using
``lazyload('*')`` will unilaterally cause all those relationships to use
``'select'`` loading, e.g. emit a SELECT statement when each attribute is
accessed.

The option does not supersede loader options stated in the query, such as
:func:`.eagerload`, :func:`.subqueryload`, etc.  The query below will still
use joined loading for the ``widget`` relationship::

    session.query(MyClass).options(
        lazyload('*'),
        joinedload(MyClass.widget)
    )

If multiple ``'*'`` options are passed, the last one overrides those
previously passed.

Per-Entity Default Loading Strategies
-------------------------------------

.. versionadded:: 0.9.0
    Per-entity default loader strategies.

A variant of the default loader strategy is the ability to set the strategy
on a per-entity basis.
For example, if querying for ``User`` and ``Address``, we can instruct all
relationships on ``Address`` only to use lazy loading by first applying the
:class:`.Load` object, then specifying the ``*`` as a chained option::

    session.query(User, Address).options(Load(Address).lazyload('*'))

Above, all relationships on ``Address`` will be set to a lazy load.

.. _zen_of_eager_loading:

The Zen of Eager Loading
-------------------------

The philosophy behind loader strategies is that any set of loading schemes
can be applied to a particular query, and *the results don't change* - only
the number of SQL statements required to fully load related objects and
collections changes.  A particular query might start out using all lazy
loads.   After using it in context, it might be revealed that particular
attributes or collections are always accessed, and that it would be more
efficient to change the loader strategy for these.   The strategy can be
changed with no other modifications to the query, the results will remain
identical, but fewer SQL statements would be emitted.  In theory (and pretty
much in practice), nothing you can do to the :class:`.Query` would make it
load a different set of primary or related objects based on a change in
loader strategy.

How :func:`joinedload` in particular achieves this result of not impacting
entity rows returned in any way is that it creates an anonymous alias of the
joins it adds to your query, so that they can't be referenced by other parts
of the query.   For example, the query below uses :func:`.joinedload` to
create a LEFT OUTER JOIN from ``users`` to ``addresses``; however, the
``ORDER BY`` added against ``Address.email_address`` is not valid - the
``Address`` entity is not named in the query:

.. sourcecode:: python+sql

    >>> jack = session.query(User).\
    ... options(joinedload(User.addresses)).\
    ... filter(User.name=='jack').\
    ... order_by(Address.email_address).all()
    {opensql}SELECT
        addresses_1.id AS addresses_1_id,
        addresses_1.email_address AS addresses_1_email_address,
        addresses_1.user_id AS addresses_1_user_id,
        users.id AS users_id,
        users.name AS users_name,
        users.fullname AS users_fullname,
        users.password AS users_password
    FROM users
    LEFT OUTER JOIN addresses AS addresses_1
        ON users.id = addresses_1.user_id
    WHERE users.name = ?
    ORDER BY addresses.email_address   <-- this part is wrong !
    ['jack']

Above, ``ORDER BY addresses.email_address`` is not valid since ``addresses``
is not in the FROM list.   The correct way to load the ``User`` records and
order by email address is to use :meth:`.Query.join`:

.. sourcecode:: python+sql

    >>> jack = session.query(User).\
    ... join(User.addresses).\
    ... filter(User.name=='jack').\
    ... order_by(Address.email_address).all()
    {opensql}
    SELECT
        users.id AS users_id,
        users.name AS users_name,
        users.fullname AS users_fullname,
        users.password AS users_password
    FROM users
    JOIN addresses ON users.id = addresses.user_id
    WHERE users.name = ?
    ORDER BY addresses.email_address
    ['jack']

The statement above is of course not the same as the previous one, in that
the columns from ``addresses`` are not included in the result at all.   We
can add :func:`.joinedload` back in, so that there are two joins - one is
that which we are ordering on, the other is used anonymously to load the
contents of the ``User.addresses`` collection:
.. sourcecode:: python+sql

    >>> jack = session.query(User).\
    ... join(User.addresses).\
    ... options(joinedload(User.addresses)).\
    ... filter(User.name=='jack').\
    ... order_by(Address.email_address).all()
    {opensql}SELECT
        addresses_1.id AS addresses_1_id,
        addresses_1.email_address AS addresses_1_email_address,
        addresses_1.user_id AS addresses_1_user_id,
        users.id AS users_id,
        users.name AS users_name,
        users.fullname AS users_fullname,
        users.password AS users_password
    FROM users
    JOIN addresses ON users.id = addresses.user_id
    LEFT OUTER JOIN addresses AS addresses_1
        ON users.id = addresses_1.user_id
    WHERE users.name = ?
    ORDER BY addresses.email_address
    ['jack']

What we see above is that our usage of :meth:`.Query.join` is to supply JOIN
clauses we'd like to use in subsequent query criterion, whereas our usage of
:func:`.joinedload` only concerns itself with the loading of the
``User.addresses`` collection, for each ``User`` in the result.  In this
case, the two joins most probably appear redundant - which they are.  If we
wanted to use just one JOIN for collection loading as well as ordering, we
use the :func:`.contains_eager` option, described in :ref:`contains_eager`
below.   But to see why :func:`joinedload` does what it does, consider if we
were **filtering** on a particular ``Address``:

.. sourcecode:: python+sql

    >>> jack = session.query(User).\
    ... join(User.addresses).\
    ... options(joinedload(User.addresses)).\
    ... filter(User.name=='jack').\
    ... filter(Address.email_address=='someaddress@foo.com').\
    ... all()
    {opensql}SELECT
        addresses_1.id AS addresses_1_id,
        addresses_1.email_address AS addresses_1_email_address,
        addresses_1.user_id AS addresses_1_user_id,
        users.id AS users_id,
        users.name AS users_name,
        users.fullname AS users_fullname,
        users.password AS users_password
    FROM users
    JOIN addresses ON users.id = addresses.user_id
    LEFT OUTER JOIN addresses AS addresses_1
        ON users.id = addresses_1.user_id
    WHERE users.name = ? AND addresses.email_address = ?
    ['jack', 'someaddress@foo.com']

Above, we can see that the two JOINs have very different roles.  One will
match exactly one row, that of the join of ``User`` and ``Address`` where
``Address.email_address=='someaddress@foo.com'``. The other LEFT OUTER JOIN
will match *all* ``Address`` rows related to ``User``, and is only used to
populate the ``User.addresses`` collection, for those ``User`` objects that
are returned.

By changing the usage of :func:`.joinedload` to another style of loading, we
can change how the collection is loaded completely independently of the SQL
used to retrieve the actual ``User`` rows we want.  Below we change
:func:`.joinedload` into :func:`.subqueryload`:

.. sourcecode:: python+sql

    >>> jack = session.query(User).\
    ... join(User.addresses).\
    ... options(subqueryload(User.addresses)).\
    ... filter(User.name=='jack').\
    ... filter(Address.email_address=='someaddress@foo.com').\
    ... all()
    {opensql}SELECT
        users.id AS users_id,
        users.name AS users_name,
        users.fullname AS users_fullname,
        users.password AS users_password
    FROM users
    JOIN addresses ON users.id = addresses.user_id
    WHERE users.name = ?
        AND addresses.email_address = ?
    ['jack', 'someaddress@foo.com']

    # ... subqueryload() emits a SELECT in order
    # to load all address records ...

When using joined eager loading, if the query contains a modifier that
impacts the rows returned externally to the joins, such as when using
DISTINCT, LIMIT, OFFSET or equivalent, the completed statement is first
wrapped inside a subquery, and the joins used specifically for joined eager
loading are applied to the subquery.
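For example (a sketch only - the exact aliasing, column list and ordering
clauses will vary), a query using :func:`.joinedload` together with
:meth:`.Query.limit` renders SQL along these lines, where the LIMIT is
applied inside a subquery so that the eager JOIN cannot change which
``User`` rows come back:

.. sourcecode:: python+sql

    >>> session.query(User).options(joinedload(User.addresses)).limit(5).all()
    {opensql}SELECT
        anon_1.users_id AS anon_1_users_id,
        anon_1.users_name AS anon_1_users_name,
        addresses_1.id AS addresses_1_id,
        addresses_1.email_address AS addresses_1_email_address,
        addresses_1.user_id AS addresses_1_user_id
    FROM (SELECT users.id AS users_id, users.name AS users_name
        FROM users
        LIMIT ?) AS anon_1
    LEFT OUTER JOIN addresses AS addresses_1
        ON anon_1.users_id = addresses_1.user_id
    (5,)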
SQLAlchemy's joined eager loading goes the extra mile, and then ten miles
further, to absolutely ensure that it does not affect the end result of the
query, only the way collections and related objects are loaded, no matter
what the format of the query is.

.. _what_kind_of_loading:

What Kind of Loading to Use ?
-----------------------------

Which type of loading to use typically comes down to optimizing the tradeoff
between number of SQL executions, complexity of SQL emitted, and amount of
data fetched.  Let's take two examples, a
:func:`~sqlalchemy.orm.relationship` which references a collection, and a
:func:`~sqlalchemy.orm.relationship` that references a scalar many-to-one
reference.

* One to Many Collection

 * When using the default lazy loading, if you load 100 objects, and then
   access a collection on each of them, a total of 101 SQL statements will
   be emitted, although each statement will typically be a simple SELECT
   without any joins.

 * When using joined loading, the load of 100 objects and their collections
   will emit only one SQL statement.  However, the total number of rows
   fetched will be equal to the sum of the size of all the collections, plus
   one extra row for each parent object that has an empty collection.  Each
   row will also contain the full set of columns represented by the parents,
   repeated for each collection item - SQLAlchemy does not re-fetch these
   columns other than those of the primary key, however most DBAPIs (with
   some exceptions) will transmit the full data of each parent over the wire
   to the client connection in any case.  Therefore joined eager loading
   only makes sense when the size of the collections are relatively small.
   The LEFT OUTER JOIN can also be performance intensive compared to an
   INNER join.

 * When using subquery loading, the load of 100 objects will emit two SQL
   statements.  The second statement will fetch a total number of rows equal
   to the sum of the size of all collections.  An INNER JOIN is used, and a
   minimum of parent columns are requested, only the primary keys.  So a
   subquery load makes sense when the collections are larger.

 * When multiple levels of depth are used with joined or subquery loading,
   loading collections-within-collections will multiply the total number of
   rows fetched in a cartesian fashion.  Both forms of eager loading always
   join from the original parent class.

* Many to One Reference

 * When using the default lazy loading, a load of 100 objects will, like in
   the case of the collection, emit as many as 101 SQL statements.  However
   - there is a significant exception to this, in that if the many-to-one
   reference is a simple foreign key reference to the target's primary key,
   each reference will be checked first in the current identity map using
   :meth:`.Query.get`.  So here, if the collection of objects references a
   relatively small set of target objects, or the full set of possible
   target objects have already been loaded into the session and are strongly
   referenced, using the default of ``lazy='select'`` is by far the most
   efficient way to go.

 * When using joined loading, the load of 100 objects will emit only one SQL
   statement.   The join will be a LEFT OUTER JOIN, and the total number of
   rows will be equal to 100 in all cases.  If you know that each parent
   definitely has a child (i.e. the foreign key reference is NOT NULL), the
   joined load can be configured with
   :paramref:`~.relationship.innerjoin` set to ``True``, which is usually
   specified within the :func:`~sqlalchemy.orm.relationship`.
   For a load of objects where there are many possible target references
   which may have not been loaded already, joined loading with an INNER JOIN
   is extremely efficient.

 * Subquery loading will issue a second load for all the child objects, so
   for a load of 100 objects there would be two SQL statements emitted.
   There's probably not much advantage here over joined loading, however,
   except perhaps that subquery loading can use an INNER JOIN in all cases
   whereas joined loading requires that the foreign key is NOT NULL.

.. _joinedload_and_join:

.. _contains_eager:

Routing Explicit Joins/Statements into Eagerly Loaded Collections
------------------------------------------------------------------

The behavior of :func:`~sqlalchemy.orm.joinedload()` is such that joins are
created automatically, using anonymous aliases as targets, the results of
which are routed into collections and scalar references on loaded objects.
It is often the case that a query already includes the necessary joins which
represent a particular collection or scalar reference, and the joins added
by the joinedload feature are redundant - yet you'd still like the
collections/references to be populated.

For this SQLAlchemy supplies the :func:`~sqlalchemy.orm.contains_eager()`
option.  This option is used in the same manner as the
:func:`~sqlalchemy.orm.joinedload()` option except it is assumed that the
:class:`~sqlalchemy.orm.query.Query` will specify the appropriate joins
explicitly.  Below, we specify a join between ``User`` and ``Address`` and
additionally establish this as the basis for eager loading of
``User.addresses``::

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        addresses = relationship("Address")

    class Address(Base):
        __tablename__ = 'address'

        # ...

    q = session.query(User).join(User.addresses).\
                options(contains_eager(User.addresses))

If the "eager" portion of the statement is "aliased", the ``alias`` keyword
argument to :func:`~sqlalchemy.orm.contains_eager` may be used to indicate
it.  This is sent as a reference to an :func:`.aliased` or :class:`.Alias`
construct:

.. sourcecode:: python+sql

    # use an alias of the Address entity
    adalias = aliased(Address)

    # construct a Query object which expects the "addresses" results
    query = session.query(User).\
        outerjoin(adalias, User.addresses).\
        options(contains_eager(User.addresses, alias=adalias))

    # get results normally
    {sql}r = query.all()
    SELECT
        users.user_id AS users_user_id,
        users.user_name AS users_user_name,
        adalias.address_id AS adalias_address_id,
        adalias.user_id AS adalias_user_id,
        adalias.email_address AS adalias_email_address,
        (...other columns...)
    FROM users
    LEFT OUTER JOIN email_addresses AS email_addresses_1
        ON users.user_id = email_addresses_1.user_id

The path given as the argument to :func:`.contains_eager` needs to be a full
path from the starting entity.  For example if we were loading
``Users->orders->Order->items->Item``, the string version would look like::

    query(User).options(contains_eager('orders').contains_eager('items'))

Or using the class-bound descriptor::

    query(User).options(contains_eager(User.orders).contains_eager(Order.items))

Using contains_eager() to load a custom-filtered collection result
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

When we use :func:`.contains_eager`, *we* are constructing ourselves the SQL
that will be used to populate collections.
From this, it naturally follows that we can opt to **modify** what values
the collection is intended to store, by writing our SQL to load a subset of
elements for collections or scalar attributes.

As an example, we can load a ``User`` object and eagerly load only
particular addresses into its ``.addresses`` collection just by filtering::

    q = session.query(User).join(User.addresses).\
                filter(Address.email.like('%ed%')).\
                options(contains_eager(User.addresses))

The above query will load only ``User`` objects which contain at least one
``Address`` object that contains the substring ``'ed'`` in its ``email``
field; the ``User.addresses`` collection will contain **only** these
``Address`` entries, and *not* any other ``Address`` entries that are in
fact associated with the collection.

.. warning::

    Keep in mind that when we load only a subset of objects into a
    collection, that collection no longer represents what's actually in the
    database.  If we attempted to add entries to this collection, we might
    find ourselves conflicting with entries that are already in the database
    but not locally loaded.

    In addition, the **collection will fully reload normally** once the
    object or attribute is expired.  This expiration occurs whenever the
    :meth:`.Session.commit` or :meth:`.Session.rollback` methods are used
    assuming default session settings, or the :meth:`.Session.expire_all`
    or :meth:`.Session.expire` methods are used.

    For these reasons, prefer returning separate fields in a tuple rather
    than artificially altering a collection, when an object plus a custom
    set of related objects is desired::

        q = session.query(User, Address).join(User.addresses).\
                    filter(Address.email.like('%ed%'))

Advanced Usage with Arbitrary Statements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The ``alias`` argument can be more creatively used, in that it can be made
to represent any set of arbitrary names to match up into a statement.
Below it is linked to a :func:`.select` which links a set of column objects
to a string SQL statement::

    # label the columns of the addresses table
    eager_columns = select([
        addresses.c.address_id.label('a1'),
        addresses.c.email_address.label('a2'),
        addresses.c.user_id.label('a3')])

    # select from a raw SQL statement which uses those label names for the
    # addresses table.  contains_eager() matches them up.
    query = session.query(User).\
        from_statement("select users.*, addresses.address_id as a1, "
                "addresses.email_address as a2, "
                "addresses.user_id as a3 "
                "from users left outer join "
                "addresses on users.user_id=addresses.user_id").\
        options(contains_eager(User.addresses, alias=eager_columns))

Creating Custom Load Rules
---------------------------

.. warning::  This is an advanced technique!  Great care and testing should
   be applied.

The ORM has various edge cases where the value of an attribute is locally
available, however the ORM itself doesn't have awareness of this.   There
are also cases when a user-defined system of loading attributes is
desirable.  To support the use case of user-defined loading systems, a key
function :func:`.attributes.set_committed_value` is provided.  This function
is basically equivalent to Python's own ``setattr()`` function, except that
when applied to a target object, SQLAlchemy's "attribute history" system
which is used to determine flush-time changes is bypassed; the attribute is
assigned in the same way as if the ORM loaded it that way from the database.
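As a brief sketch (the ``user`` and ``profile`` names below are
hypothetical), if we had obtained a related object by some means of our own,
we could install it on a parent as though the ORM had loaded it::

    from sqlalchemy.orm import attributes

    # hypothetical: 'profile' is a relationship-mapped attribute on our
    # User class, and 'profile_obj' is a value we resolved ourselves.
    # Assigning it this way bypasses attribute history, so the unit of
    # work considers it "loaded" rather than a pending change.
    attributes.set_committed_value(user, 'profile', profile_obj)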
The use of :func:`.attributes.set_committed_value` can be combined with
another key event known as :meth:`.InstanceEvents.load` to produce
attribute-population behaviors when an object is loaded.

One such example is the bi-directional "one-to-one" case, where loading the
"many-to-one" side of a one-to-one should also imply the value of the
"one-to-many" side.  The SQLAlchemy ORM does not consider backrefs when
loading related objects, and it views a "one-to-one" as just another
"one-to-many", that just happens to be one row.  Given the following
mapping::

    from sqlalchemy import Integer, ForeignKey, Column
    from sqlalchemy.orm import relationship, backref
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()


    class A(Base):
        __tablename__ = 'a'
        id = Column(Integer, primary_key=True)
        b_id = Column(ForeignKey('b.id'))
        b = relationship("B", backref=backref("a", uselist=False), lazy='joined')


    class B(Base):
        __tablename__ = 'b'
        id = Column(Integer, primary_key=True)

If we query for an ``A`` row, and then ask it for ``a.b.a``, we will get an
extra SELECT::

    >>> a1.b.a
    SELECT a.id AS a_id, a.b_id AS a_b_id
    FROM a
    WHERE ? = a.b_id

This SELECT is redundant because ``b.a`` is the same value as ``a1``.  We
can create an on-load rule to populate this for us::

    from sqlalchemy import event
    from sqlalchemy.orm import attributes

    @event.listens_for(A, "load")
    def load_b(target, context):
        if 'b' in target.__dict__:
            attributes.set_committed_value(target.b, 'a', target)

Now when we query for ``A``, we will get ``A.b`` from the joined eager load,
and ``A.b.a`` from our event:

.. sourcecode:: pycon+sql

    {sql}a1 = s.query(A).first()
    SELECT
        a.id AS a_id,
        a.b_id AS a_b_id,
        b_1.id AS b_1_id
    FROM a
    LEFT OUTER JOIN b AS b_1 ON b_1.id = a.b_id
    LIMIT ? OFFSET ?
    (1, 0)
    {stop}assert a1.b.a is a1

Relationship Loader API
------------------------

.. autofunction:: contains_alias

.. autofunction:: contains_eager

.. autofunction:: defaultload

.. autofunction:: eagerload

.. autofunction:: eagerload_all

.. autofunction:: immediateload

.. autofunction:: joinedload

.. autofunction:: joinedload_all

.. autofunction:: lazyload

.. autofunction:: noload

.. autofunction:: subqueryload

.. autofunction:: subqueryload_all

SQLAlchemy-1.0.11/doc/build/orm/examples.rst0000664000175000017500000000520312636375552021651 0ustar classicclassic00000000000000.. _examples_toplevel:

============
ORM Examples
============

The SQLAlchemy distribution includes a variety of code examples illustrating
a select set of patterns, some typical and some not so typical.   All are
runnable and can be found in the ``/examples`` directory of the
distribution.   Descriptions and source code for all can be found here.

Additional SQLAlchemy examples, some user contributed, are available on the
wiki at `<http://www.sqlalchemy.org/trac/wiki/UsageRecipes>`_.

Mapping Recipes
===============

.. _examples_adjacencylist:

Adjacency List
--------------

.. automodule:: examples.adjacency_list

.. _examples_associations:

Associations
------------

.. automodule:: examples.association

Directed Graphs
---------------

.. automodule:: examples.graphs

Dynamic Relations as Dictionaries
------------------------------------

.. automodule:: examples.dynamic_dict

.. _examples_generic_associations:

Generic Associations
------------------------

.. automodule:: examples.generic_associations

Large Collections
------------------------

.. automodule:: examples.large_collection

Materialized Paths
------------------

.. automodule:: examples.materialized_paths

Nested Sets
------------

.. automodule:: examples.nested_sets
.. _examples_performance:

Performance
-----------

.. automodule:: examples.performance

.. _examples_relationships:

Relationship Join Conditions
----------------------------

.. automodule:: examples.join_conditions

.. _examples_xmlpersistence:

XML Persistence
------------------------

.. automodule:: examples.elementtree

Versioning Objects
------------------------

.. _examples_versioned_history:

Versioning with a History Table
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. automodule:: examples.versioned_history

.. _examples_versioned_rows:

Versioning using Temporal Rows
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. automodule:: examples.versioned_rows

Vertical Attribute Mapping
------------------------------------

.. automodule:: examples.vertical

Inheritance Mapping Recipes
============================

Basic Inheritance Mappings
----------------------------------

.. automodule:: examples.inheritance

Special APIs
============

.. _examples_instrumentation:

Attribute Instrumentation
------------------------------------

.. automodule:: examples.custom_attributes

.. _examples_sharding:

Horizontal Sharding
------------------------

.. automodule:: examples.sharding

Extending the ORM
=================

.. _examples_caching:

Dogpile Caching
------------------------

.. automodule:: examples.dogpile_caching

.. _examples_postgis:

PostGIS Integration
------------------------

.. automodule:: examples.postgis

SQLAlchemy-1.0.11/doc/build/orm/query.rst0000664000175000017500000000153612636375552021205 0ustar classicclassic00000000000000.. _query_api_toplevel:

.. module:: sqlalchemy.orm

Query API
=========

The Query Object
----------------

:class:`~.Query` is produced in terms of a given :class:`~.Session`, using
the :meth:`~.Session.query` method::

    q = session.query(SomeMappedClass)

Following is the full interface for the :class:`.Query` object.

.. autoclass:: sqlalchemy.orm.query.Query
   :members:

ORM-Specific Query Constructs
-----------------------------

.. autofunction:: sqlalchemy.orm.aliased

.. autoclass:: sqlalchemy.orm.util.AliasedClass

.. autoclass:: sqlalchemy.orm.util.AliasedInsp

.. autoclass:: sqlalchemy.orm.query.Bundle
   :members:

.. autoclass:: sqlalchemy.util.KeyedTuple
   :members: keys, _fields, _asdict

.. autoclass:: sqlalchemy.orm.strategy_options.Load
   :members:

.. autofunction:: join

.. autofunction:: outerjoin

.. autofunction:: with_parent

SQLAlchemy-1.0.11/doc/build/orm/internals.rst0000664000175000017500000000525212636375552022036 0ustar classicclassic00000000000000.. _orm_internal_toplevel:

ORM Internals
=============

Key ORM constructs, not otherwise covered in other
sections, are listed here.

.. currentmodule:: sqlalchemy.orm

.. autoclass:: sqlalchemy.orm.state.AttributeState
   :members:

.. autoclass:: sqlalchemy.orm.util.CascadeOptions
   :members:

.. autoclass:: sqlalchemy.orm.instrumentation.ClassManager
   :members:
   :inherited-members:

.. autoclass:: sqlalchemy.orm.properties.ColumnProperty
   :members:
   :inherited-members:

.. autoclass:: sqlalchemy.orm.properties.ComparableProperty
   :members:

.. autoclass:: sqlalchemy.orm.descriptor_props.CompositeProperty
   :members:

.. autoclass:: sqlalchemy.orm.attributes.Event
   :members:

.. autoclass:: sqlalchemy.orm.identity.IdentityMap
   :members:

.. autoclass:: sqlalchemy.orm.base.InspectionAttr
   :members:

.. autoclass:: sqlalchemy.orm.base.InspectionAttrInfo
   :members:

.. autoclass:: sqlalchemy.orm.state.InstanceState
   :members:

.. autoclass:: sqlalchemy.orm.attributes.InstrumentedAttribute
   :members: __get__, __set__, __delete__
   :undoc-members:
autodata:: sqlalchemy.orm.interfaces.MANYTOONE .. autodata:: sqlalchemy.orm.interfaces.MANYTOMANY .. autoclass:: sqlalchemy.orm.interfaces.MapperProperty :members: .. py:attribute:: info Info dictionary associated with the object, allowing user-defined data to be associated with this :class:`.InspectionAttr`. The dictionary is generated when first accessed. Alternatively, it can be specified as a constructor argument to the :func:`.column_property`, :func:`.relationship`, or :func:`.composite` functions. .. versionadded:: 0.8 Added support for .info to all :class:`.MapperProperty` subclasses. .. versionchanged:: 1.0.0 :attr:`.InspectionAttr.info` moved from :class:`.MapperProperty` so that it can apply to a wider variety of ORM and extension constructs. .. seealso:: :attr:`.QueryableAttribute.info` :attr:`.SchemaItem.info` .. autodata:: sqlalchemy.orm.interfaces.NOT_EXTENSION .. autodata:: sqlalchemy.orm.interfaces.ONETOMANY .. autoclass:: sqlalchemy.orm.interfaces.PropComparator :members: :inherited-members: .. autoclass:: sqlalchemy.orm.properties.RelationshipProperty :members: :inherited-members: .. autoclass:: sqlalchemy.orm.descriptor_props.SynonymProperty :members: :inherited-members: .. autoclass:: sqlalchemy.orm.query.QueryContext :members: .. autoclass:: sqlalchemy.orm.attributes.QueryableAttribute :members: :inherited-members: .. autoclass:: sqlalchemy.orm.session.UOWTransaction :members: SQLAlchemy-1.0.11/doc/build/orm/scalar_mapping.rst0000664000175000017500000000053512636375552023016 0ustar classicclassic00000000000000.. module:: sqlalchemy.orm =============================== Mapping Columns and Expressions =============================== The following sections discuss how table columns and SQL expressions are mapped to individual object attributes. .. toctree:: :maxdepth: 2 mapping_columns mapped_sql_expr mapped_attributes composites SQLAlchemy-1.0.11/doc/build/orm/loading.rst0000664000175000017500000000006412636375552021450 0ustar classicclassic00000000000000:orphan: Moved! :doc:`/orm/loading_relationships`SQLAlchemy-1.0.11/doc/build/orm/loading_columns.rst0000664000175000017500000001520412636375552023212 0ustar classicclassic00000000000000.. module:: sqlalchemy.orm =============== Loading Columns =============== This section presents additional options regarding the loading of columns. .. _deferred: Deferred Column Loading ======================== This feature allows particular columns of a table to be loaded only upon direct access, instead of when the entity is queried using :class:`.Query`. This feature is useful when one wants to avoid loading a large text or binary field into memory when it's not needed. Individual columns can be lazy loaded by themselves or placed into groups that lazy-load together, using the :func:`.orm.deferred` function to mark them as "deferred".
In the example below, we define a mapping that will load each of ``.excerpt`` and ``.photo`` in separate, individual-row SELECT statements when each attribute is first referenced on the individual object instance:: from sqlalchemy.orm import deferred from sqlalchemy import Integer, String, Text, Binary, Column class Book(Base): __tablename__ = 'book' book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) summary = Column(String(2000)) excerpt = deferred(Column(Text)) photo = deferred(Column(Binary)) Classical mappings as always place the usage of :func:`.orm.deferred` in the ``properties`` dictionary against the table-bound :class:`.Column`:: mapper(Book, book_table, properties={ 'photo':deferred(book_table.c.photo) }) Deferred columns can be associated with a "group" name, so that they load together when any of them are first accessed. The example below defines a mapping with a ``photos`` deferred group. When one ``.photo`` is accessed, all three photos will be loaded in one SELECT statement. The ``.excerpt`` will be loaded separately when it is accessed:: class Book(Base): __tablename__ = 'book' book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) summary = Column(String(2000)) excerpt = deferred(Column(Text)) photo1 = deferred(Column(Binary), group='photos') photo2 = deferred(Column(Binary), group='photos') photo3 = deferred(Column(Binary), group='photos') You can defer or undefer columns at the :class:`~sqlalchemy.orm.query.Query` level using options, including :func:`.orm.defer` and :func:`.orm.undefer`:: from sqlalchemy.orm import defer, undefer query = session.query(Book) query = query.options(defer('summary')) query = query.options(undefer('excerpt')) query.all() :func:`.orm.deferred` attributes which are marked with a "group" can be undeferred using :func:`.orm.undefer_group`, sending in the group name:: from sqlalchemy.orm import undefer_group query = session.query(Book) query.options(undefer_group('photos')).all() Load Only Cols --------------- An arbitrary set of columns can be selected as "load only" columns, which will be loaded while deferring all other columns on a given entity, using :func:`.orm.load_only`:: from sqlalchemy.orm import load_only session.query(Book).options(load_only("summary", "excerpt")) .. versionadded:: 0.9.0 Deferred Loading with Multiple Entities --------------------------------------- To specify column deferral options within a :class:`.Query` that loads multiple types of entity, the :class:`.Load` object can specify which parent entity to start with:: from sqlalchemy.orm import Load query = session.query(Book, Author).join(Book.author) query = query.options( Load(Book).load_only("summary", "excerpt"), Load(Author).defer("bio") ) To specify column deferral options along the path of various relationships, the options support chaining, where the loading style of each relationship is specified first, then is chained to the deferral options. Such as, to load ``Book`` instances, then joined-eager-load the ``Author``, then apply deferral options to the ``Author`` entity:: from sqlalchemy.orm import joinedload query = session.query(Book) query = query.options( joinedload(Book.author).load_only("summary", "excerpt"), ) In the case where the loading style of parent relationships should be left unchanged, use :func:`.orm.defaultload`:: from sqlalchemy.orm import defaultload query = session.query(Book) query = query.options( defaultload(Book.author).load_only("summary", "excerpt"), ) .. 
versionadded:: 0.9.0 support for :class:`.Load` and other options which allow for better targeting of deferral options. Column Deferral API ------------------- .. autofunction:: deferred .. autofunction:: defer .. autofunction:: load_only .. autofunction:: undefer .. autofunction:: undefer_group .. _bundles: Column Bundles =============== The :class:`.Bundle` may be used to query for groups of columns under one namespace. .. versionadded:: 0.9.0 The bundle allows columns to be grouped together:: from sqlalchemy.orm import Bundle bn = Bundle('mybundle', MyClass.data1, MyClass.data2) for row in session.query(bn).filter(bn.c.data1 == 'd1'): print row.mybundle.data1, row.mybundle.data2 The bundle can be subclassed to provide custom behaviors when results are fetched. The method :meth:`.Bundle.create_row_processor` is given the :class:`.Query` and a set of "row processor" functions at query execution time; these processor functions when given a result row will return the individual attribute value, which can then be adapted into any kind of return data structure. Below illustrates replacing the usual :class:`.KeyedTuple` return structure with a straight Python dictionary:: from sqlalchemy.orm import Bundle class DictBundle(Bundle): def create_row_processor(self, query, procs, labels): """Override create_row_processor to return values as dictionaries""" def proc(row): return dict( zip(labels, (proc(row) for proc in procs)) ) return proc .. versionchanged:: 1.0 The ``proc()`` callable passed to the ``create_row_processor()`` method of custom :class:`.Bundle` classes now accepts only a single "row" argument. A result from the above bundle will return dictionary values:: bn = DictBundle('mybundle', MyClass.data1, MyClass.data2) for row in session.query(bn).filter(bn.c.data1 == 'd1'): print row.mybundle['data1'], row.mybundle['data2'] The :class:`.Bundle` construct is also integrated into the behavior of :func:`.composite`, where it is used to return composite attributes as objects when queried as individual attributes. SQLAlchemy-1.0.11/doc/build/orm/relationships.rst0000664000175000017500000000101012636375552022707 0ustar classicclassic00000000000000.. module:: sqlalchemy.orm .. _relationship_config_toplevel: Relationship Configuration ========================== This section describes the :func:`relationship` function and in depth discussion of its usage. For an introduction to relationships, start with the :ref:`ormtutorial_toplevel` and head into :ref:`orm_tutorial_relationship`. .. toctree:: :maxdepth: 2 basic_relationships self_referential backref join_conditions collections relationship_persistence relationship_api SQLAlchemy-1.0.11/doc/build/orm/extensions/0000775000175000017500000000000012636376632021500 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/doc/build/orm/extensions/automap.rst0000664000175000017500000000054712636375552023706 0ustar classicclassic00000000000000.. _automap_toplevel: Automap ======= .. automodule:: sqlalchemy.ext.automap API Reference ------------- .. autofunction:: automap_base .. autoclass:: AutomapBase :members: .. autofunction:: classname_for_table .. autofunction:: name_for_scalar_relationship .. autofunction:: name_for_collection_relationship .. autofunction:: generate_relationship SQLAlchemy-1.0.11/doc/build/orm/extensions/associationproxy.rst0000664000175000017500000004775412636375552025671 0ustar classicclassic00000000000000.. _associationproxy_toplevel: Association Proxy ================= .. 
module:: sqlalchemy.ext.associationproxy ``associationproxy`` is used to create a read/write view of a target attribute across a relationship. It essentially conceals the usage of a "middle" attribute between two endpoints, and can be used to cherry-pick fields from a collection of related objects or to reduce the verbosity of using the association object pattern. Applied creatively, the association proxy allows the construction of sophisticated collections and dictionary views of virtually any geometry, persisted to the database using standard, transparently configured relational patterns. Simplifying Scalar Collections ------------------------------ Consider a many-to-many mapping between two classes, ``User`` and ``Keyword``. Each ``User`` can have any number of ``Keyword`` objects, and vice-versa (the many-to-many pattern is described at :ref:`relationships_many_to_many`):: from sqlalchemy import Column, Integer, String, ForeignKey, Table from sqlalchemy.orm import relationship from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) kw = relationship("Keyword", secondary=lambda: userkeywords_table) def __init__(self, name): self.name = name class Keyword(Base): __tablename__ = 'keyword' id = Column(Integer, primary_key=True) keyword = Column('keyword', String(64)) def __init__(self, keyword): self.keyword = keyword userkeywords_table = Table('userkeywords', Base.metadata, Column('user_id', Integer, ForeignKey("user.id"), primary_key=True), Column('keyword_id', Integer, ForeignKey("keyword.id"), primary_key=True) ) Reading and manipulating the collection of "keyword" strings associated with ``User`` requires traversal from each collection element to the ``.keyword`` attribute, which can be awkward:: >>> user = User('jek') >>> user.kw.append(Keyword('cheese inspector')) >>> print(user.kw) [<__main__.Keyword object at 0x12bf830>] >>> print(user.kw[0].keyword) cheese inspector >>> print([keyword.keyword for keyword in user.kw]) ['cheese inspector'] The ``association_proxy`` is applied to the ``User`` class to produce a "view" of the ``kw`` relationship, which only exposes the string value of ``.keyword`` associated with each ``Keyword`` object:: from sqlalchemy.ext.associationproxy import association_proxy class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) kw = relationship("Keyword", secondary=lambda: userkeywords_table) def __init__(self, name): self.name = name # proxy the 'keyword' attribute from the 'kw' relationship keywords = association_proxy('kw', 'keyword') We can now reference the ``.keywords`` collection as a listing of strings, which is both readable and writable. New ``Keyword`` objects are created for us transparently:: >>> user = User('jek') >>> user.keywords.append('cheese inspector') >>> user.keywords ['cheese inspector'] >>> user.keywords.append('snack ninja') >>> user.kw [<__main__.Keyword object at 0x12cdd30>, <__main__.Keyword object at 0x12cde30>] The :class:`.AssociationProxy` object produced by the :func:`.association_proxy` function is an instance of a `Python descriptor `_. It is always declared with the user-defined class being mapped, regardless of whether Declarative or classical mappings via the :func:`.mapper` function are used. 
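For instance, a brief sketch of attaching the same proxy in a classical (non-Declarative) mapping might look like the following; this is an illustration only, and assumes the ``user`` and ``userkeywords`` tables from the example above are available as hypothetical ``user_table`` and ``userkeywords_table`` :class:`.Table` objects::

    from sqlalchemy.orm import mapper, relationship
    from sqlalchemy.ext.associationproxy import association_proxy

    class User(object):
        # the proxy is assigned directly to the plain class; as a
        # descriptor, it resolves the 'kw' relationship at access time
        keywords = association_proxy('kw', 'keyword')

    mapper(User, user_table, properties={
        'kw': relationship(Keyword, secondary=userkeywords_table)
    })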
The proxy functions by operating upon the underlying mapped attribute or collection in response to operations, and changes made via the proxy are immediately apparent in the mapped attribute, as well as vice versa. The underlying attribute remains fully accessible. When first accessed, the association proxy performs introspection operations on the target collection so that its behavior corresponds correctly. Details such as whether the locally proxied attribute is a collection (as is typical) or a scalar reference, as well as whether the collection acts like a set, list, or dictionary, are taken into account, so that the proxy should act just like the underlying collection or attribute does. Creation of New Values ----------------------- When a list append() event (or set add(), dictionary __setitem__(), or scalar assignment event) is intercepted by the association proxy, it instantiates a new instance of the "intermediary" object using its constructor, passing as a single argument the given value. In our example above, an operation like:: user.keywords.append('cheese inspector') Is translated by the association proxy into the operation:: user.kw.append(Keyword('cheese inspector')) The example works here because we have designed the constructor for ``Keyword`` to accept a single positional argument, ``keyword``. For those cases where a single-argument constructor isn't feasible, the association proxy's creational behavior can be customized using the ``creator`` argument, which references a callable (i.e. Python function) that will produce a new object instance given the singular argument. Below we illustrate this using a lambda as is typical:: class User(Base): # ... # use Keyword(keyword=kw) on append() events keywords = association_proxy('kw', 'keyword', creator=lambda kw: Keyword(keyword=kw)) The ``creator`` function accepts a single argument in the case of a list- or set- based collection, or a scalar attribute. In the case of a dictionary-based collection, it accepts two arguments, "key" and "value". An example of this is below in :ref:`proxying_dictionaries`. Simplifying Association Objects ------------------------------- The "association object" pattern is an extended form of a many-to-many relationship, and is described at :ref:`association_pattern`. Association proxies are useful for keeping "association objects" out of the way during regular use. Suppose our ``userkeywords`` table above had additional columns which we'd like to map explicitly, but in most cases we don't require direct access to these attributes. Below, we illustrate a new mapping which introduces the ``UserKeyword`` class, which is mapped to the ``userkeywords`` table illustrated earlier. This class adds an additional column ``special_key``, a value which we occasionally want to access, but not in the usual case.
We create an association proxy on the ``User`` class called ``keywords``, which will bridge the gap from the ``user_keywords`` collection of ``User`` to the ``.keyword`` attribute present on each ``UserKeyword``:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) # association proxy of "user_keywords" collection # to "keyword" attribute keywords = association_proxy('user_keywords', 'keyword') def __init__(self, name): self.name = name class UserKeyword(Base): __tablename__ = 'user_keyword' user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) special_key = Column(String(50)) # bidirectional attribute/collection of "user"/"user_keywords" user = relationship(User, backref=backref("user_keywords", cascade="all, delete-orphan") ) # reference to the "Keyword" object keyword = relationship("Keyword") def __init__(self, keyword=None, user=None, special_key=None): self.user = user self.keyword = keyword self.special_key = special_key class Keyword(Base): __tablename__ = 'keyword' id = Column(Integer, primary_key=True) keyword = Column('keyword', String(64)) def __init__(self, keyword): self.keyword = keyword def __repr__(self): return 'Keyword(%s)' % repr(self.keyword) With the above configuration, we can operate upon the ``.keywords`` collection of each ``User`` object, and the usage of ``UserKeyword`` is concealed:: >>> user = User('log') >>> for kw in (Keyword('new_from_blammo'), Keyword('its_big')): ... user.keywords.append(kw) ... >>> print(user.keywords) [Keyword('new_from_blammo'), Keyword('its_big')] Where above, each ``.keywords.append()`` operation is equivalent to:: >>> user.user_keywords.append(UserKeyword(Keyword('its_heavy'))) The ``UserKeyword`` association object has two attributes here which are populated; the ``.keyword`` attribute is populated directly as a result of passing the ``Keyword`` object as the first argument. The ``.user`` argument is then assigned as the ``UserKeyword`` object is appended to the ``User.user_keywords`` collection, where the bidirectional relationship configured between ``User.user_keywords`` and ``UserKeyword.user`` results in a population of the ``UserKeyword.user`` attribute. The ``special_key`` argument above is left at its default value of ``None``. For those cases where we do want ``special_key`` to have a value, we create the ``UserKeyword`` object explicitly. Below we assign all three attributes, where the assignment of ``.user`` has the effect of the ``UserKeyword`` being appended to the ``User.user_keywords`` collection:: >>> UserKeyword(Keyword('its_wood'), user, special_key='my special key') The association proxy returns to us a collection of ``Keyword`` objects represented by all these operations:: >>> user.keywords [Keyword('new_from_blammo'), Keyword('its_big'), Keyword('its_heavy'), Keyword('its_wood')] .. _proxying_dictionaries: Proxying to Dictionary Based Collections ----------------------------------------- The association proxy can proxy to dictionary based collections as well. 
SQLAlchemy mappings usually use the :func:`.attribute_mapped_collection` collection type to create dictionary collections, as well as the extended techniques described in :ref:`dictionary_collections`. The association proxy adjusts its behavior when it detects the usage of a dictionary-based collection. When new values are added to the dictionary, the association proxy instantiates the intermediary object by passing two arguments to the creation function instead of one, the key and the value. As always, this creation function defaults to the constructor of the intermediary class, and can be customized using the ``creator`` argument. Below, we modify our ``UserKeyword`` example such that the ``User.user_keywords`` collection will now be mapped using a dictionary, where the ``UserKeyword.special_key`` argument will be used as the key for the dictionary. We then apply a ``creator`` argument to the ``User.keywords`` proxy so that these values are assigned appropriately when new elements are added to the dictionary:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.collections import attribute_mapped_collection Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) # proxy to 'user_keywords', instantiating UserKeyword # assigning the new key to 'special_key', values to # 'keyword'. keywords = association_proxy('user_keywords', 'keyword', creator=lambda k, v: UserKeyword(special_key=k, keyword=v) ) def __init__(self, name): self.name = name class UserKeyword(Base): __tablename__ = 'user_keyword' user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) special_key = Column(String) # bidirectional user/user_keywords relationships, mapping # user_keywords with a dictionary against "special_key" as key. user = relationship(User, backref=backref( "user_keywords", collection_class=attribute_mapped_collection("special_key"), cascade="all, delete-orphan" ) ) keyword = relationship("Keyword") class Keyword(Base): __tablename__ = 'keyword' id = Column(Integer, primary_key=True) keyword = Column('keyword', String(64)) def __init__(self, keyword): self.keyword = keyword def __repr__(self): return 'Keyword(%s)' % repr(self.keyword) We illustrate the ``.keywords`` collection as a dictionary, mapping the ``UserKeyword.special_key`` value to ``Keyword`` objects:: >>> user = User('log') >>> user.keywords['sk1'] = Keyword('kw1') >>> user.keywords['sk2'] = Keyword('kw2') >>> print(user.keywords) {'sk1': Keyword('kw1'), 'sk2': Keyword('kw2')} .. _composite_association_proxy: Composite Association Proxies ----------------------------- Given our previous examples of proxying from relationship to scalar attribute, proxying across an association object, and proxying dictionaries, we can combine all three techniques together to give ``User`` a ``keywords`` dictionary that deals strictly with the string value of ``special_key`` mapped to the string ``keyword``. Both the ``UserKeyword`` and ``Keyword`` classes are entirely concealed.
This is achieved by building an association proxy on ``User`` that refers to an association proxy present on ``UserKeyword``:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.collections import attribute_mapped_collection Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) # the same 'user_keywords'->'keyword' proxy as in # the basic dictionary example keywords = association_proxy( 'user_keywords', 'keyword', creator=lambda k, v: UserKeyword(special_key=k, keyword=v) ) def __init__(self, name): self.name = name class UserKeyword(Base): __tablename__ = 'user_keyword' user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) special_key = Column(String) user = relationship(User, backref=backref( "user_keywords", collection_class=attribute_mapped_collection("special_key"), cascade="all, delete-orphan" ) ) # the relationship to Keyword is now called # 'kw' kw = relationship("Keyword") # 'keyword' is changed to be a proxy to the # 'keyword' attribute of 'Keyword' keyword = association_proxy('kw', 'keyword') class Keyword(Base): __tablename__ = 'keyword' id = Column(Integer, primary_key=True) keyword = Column('keyword', String(64)) def __init__(self, keyword): self.keyword = keyword ``User.keywords`` is now a dictionary of string to string, where ``UserKeyword`` and ``Keyword`` objects are created and removed for us transparently using the association proxy. In the example below, we illustrate usage of the assignment operator, also appropriately handled by the association proxy, to apply a dictionary value to the collection at once:: >>> user = User('log') >>> user.keywords = { ... 'sk1':'kw1', ... 'sk2':'kw2' ... } >>> print(user.keywords) {'sk1': 'kw1', 'sk2': 'kw2'} >>> user.keywords['sk3'] = 'kw3' >>> del user.keywords['sk2'] >>> print(user.keywords) {'sk1': 'kw1', 'sk3': 'kw3'} >>> # illustrate un-proxied usage ... print(user.user_keywords['sk3'].kw) <__main__.Keyword object at 0x12ceb90> One caveat with our example above is that because ``Keyword`` objects are created for each dictionary set operation, the example fails to maintain uniqueness for the ``Keyword`` objects on their string name, which is a typical requirement for a tagging scenario such as this one. For this use case the recipe `UniqueObject `_, or a comparable creational strategy, is recommended, which will apply a "lookup first, then create" strategy to the constructor of the ``Keyword`` class, so that an already existing ``Keyword`` is returned if the given name is already present. Querying with Association Proxies --------------------------------- The :class:`.AssociationProxy` features simple SQL construction capabilities which relate down to the underlying :func:`.relationship` in use as well as the target attribute. 
For example, the :meth:`.RelationshipProperty.Comparator.any` and :meth:`.RelationshipProperty.Comparator.has` operations are available, and will produce a "nested" EXISTS clause, such as in our basic association object example:: >>> print(session.query(User).filter(User.keywords.any(keyword='jek'))) SELECT user.id AS user_id, user.name AS user_name FROM user WHERE EXISTS (SELECT 1 FROM user_keyword WHERE user.id = user_keyword.user_id AND (EXISTS (SELECT 1 FROM keyword WHERE keyword.id = user_keyword.keyword_id AND keyword.keyword = :keyword_1))) For a proxy to a scalar attribute, ``__eq__()`` is supported:: >>> print(session.query(UserKeyword).filter(UserKeyword.keyword == 'jek')) SELECT user_keyword.* FROM user_keyword WHERE EXISTS (SELECT 1 FROM keyword WHERE keyword.id = user_keyword.keyword_id AND keyword.keyword = :keyword_1) and ``.contains()`` is available for a proxy to a scalar collection:: >>> print(session.query(User).filter(User.keywords.contains('jek'))) SELECT user.* FROM user WHERE EXISTS (SELECT 1 FROM userkeywords, keyword WHERE user.id = userkeywords.user_id AND keyword.id = userkeywords.keyword_id AND keyword.keyword = :keyword_1) :class:`.AssociationProxy` can be used with :meth:`.Query.join` somewhat manually using the :attr:`~.AssociationProxy.attr` attribute in a star-args context:: q = session.query(User).join(*User.keywords.attr) .. versionadded:: 0.7.3 :attr:`~.AssociationProxy.attr` attribute in a star-args context. :attr:`~.AssociationProxy.attr` is composed of :attr:`.AssociationProxy.local_attr` and :attr:`.AssociationProxy.remote_attr`, which are just synonyms for the actual proxied attributes, and can also be used for querying:: uka = aliased(UserKeyword) ka = aliased(Keyword) q = session.query(User).\ join(uka, User.keywords.local_attr).\ join(ka, User.keywords.remote_attr) .. versionadded:: 0.7.3 :attr:`.AssociationProxy.local_attr` and :attr:`.AssociationProxy.remote_attr`, synonyms for the actual proxied attributes, and usable for querying. API Documentation ----------------- .. autofunction:: association_proxy .. autoclass:: AssociationProxy :members: :undoc-members: :inherited-members: .. autodata:: ASSOCIATION_PROXY SQLAlchemy-1.0.11/doc/build/orm/extensions/hybrid.rst0000664000175000017500000000046612636375552023521 0ustar classicclassic00000000000000.. _hybrids_toplevel: Hybrid Attributes ================= .. automodule:: sqlalchemy.ext.hybrid API Reference ------------- .. autoclass:: hybrid_method :members: .. autoclass:: hybrid_property :members: .. autoclass:: Comparator .. autodata:: HYBRID_METHOD .. autodata:: HYBRID_PROPERTY SQLAlchemy-1.0.11/doc/build/orm/extensions/horizontal_shard.rst0000664000175000017500000000032512636375552025604 0ustar classicclassic00000000000000Horizontal Sharding =================== .. automodule:: sqlalchemy.ext.horizontal_shard API Documentation ----------------- .. autoclass:: ShardedSession :members: .. autoclass:: ShardedQuery :members: SQLAlchemy-1.0.11/doc/build/orm/extensions/instrumentation.rst0000664000175000017500000000071312636375552025476 0ustar classicclassic00000000000000.. _instrumentation_toplevel: Alternate Class Instrumentation ================================ .. automodule:: sqlalchemy.ext.instrumentation API Reference ------------- .. autodata:: INSTRUMENTATION_MANAGER .. autoclass:: sqlalchemy.orm.instrumentation.InstrumentationFactory .. autoclass:: InstrumentationManager :members: :undoc-members: .. autodata:: instrumentation_finders .. 
autoclass:: ExtendedInstrumentationRegistry :members: SQLAlchemy-1.0.11/doc/build/orm/extensions/orderinglist.rst0000664000175000017500000000043212636375552024736 0ustar classicclassic00000000000000Ordering List ============= .. automodule:: sqlalchemy.ext.orderinglist API Reference ------------- .. autofunction:: ordering_list .. autofunction:: count_from_0 .. autofunction:: count_from_1 .. autofunction:: count_from_n_factory .. autoclass:: OrderingList :members: SQLAlchemy-1.0.11/doc/build/orm/extensions/index.rst0000664000175000017500000000120612636375552023340 0ustar classicclassic00000000000000.. _plugins: .. _sqlalchemy.ext: ORM Extensions ============== SQLAlchemy has a variety of ORM extensions available, which add additional functionality to the core behavior. The extensions build almost entirely on public core and ORM APIs, and users are encouraged to read the source code to further their understanding of each extension's behavior. In particular the "Horizontal Sharding", "Hybrid Attributes", and "Mutation Tracking" extensions are very succinct. .. toctree:: :maxdepth: 1 associationproxy automap baked declarative/index mutable orderinglist horizontal_shard hybrid instrumentation SQLAlchemy-1.0.11/doc/build/orm/extensions/mutable.rst0000664000175000017500000000057212636375552023667 0ustar classicclassic00000000000000.. _mutable_toplevel: Mutation Tracking ================== .. automodule:: sqlalchemy.ext.mutable API Reference ------------- .. autoclass:: MutableBase :members: _parents, coerce .. autoclass:: Mutable :members: :inherited-members: :private-members: .. autoclass:: MutableComposite :members: .. autoclass:: MutableDict :members: :undoc-members: SQLAlchemy-1.0.11/doc/build/orm/extensions/declarative/0000775000175000017500000000000012636376632023763 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/doc/build/orm/extensions/declarative/inheritance.rst0000664000175000017500000002763412636375552027012 0ustar classicclassic00000000000000.. _declarative_inheritance: Inheritance Configuration ========================= Declarative supports all three forms of inheritance as intuitively as possible. The ``inherits`` mapper keyword argument is not needed as declarative will determine this from the class itself. The various "polymorphic" keyword arguments are specified using ``__mapper_args__``. Joined Table Inheritance ~~~~~~~~~~~~~~~~~~~~~~~~ Joined table inheritance is defined as a subclass that defines its own table:: class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __tablename__ = 'engineers' __mapper_args__ = {'polymorphic_identity': 'engineer'} id = Column(Integer, ForeignKey('people.id'), primary_key=True) primary_language = Column(String(50)) Note that above, the ``Engineer.id`` attribute, since it shares the same attribute name as the ``Person.id`` attribute, will in fact represent the ``people.id`` and ``engineers.id`` columns together, with the "Engineer.id" column taking precedence if queried directly. To provide the ``Engineer`` class with an attribute that represents only the ``engineers.id`` column, give it a different attribute name:: class Engineer(Person): __tablename__ = 'engineers' __mapper_args__ = {'polymorphic_identity': 'engineer'} engineer_id = Column('id', Integer, ForeignKey('people.id'), primary_key=True) primary_language = Column(String(50)) ..
versionchanged:: 0.7 joined table inheritance favors the subclass column over that of the superclass, such as querying above for ``Engineer.id``. Prior to 0.7 this was the reverse. .. _declarative_single_table: Single Table Inheritance ~~~~~~~~~~~~~~~~~~~~~~~~ Single table inheritance is defined as a subclass that does not have its own table; you just leave out the ``__table__`` and ``__tablename__`` attributes:: class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language = Column(String(50)) When the above mappers are configured, the ``Person`` class is mapped to the ``people`` table *before* the ``primary_language`` column is defined, and this column will not be included in its own mapping. When ``Engineer`` then defines the ``primary_language`` column, the column is added to the ``people`` table so that it is included in the mapping for ``Engineer`` and is also part of the table's full set of columns. Columns which are not mapped to ``Person`` are also excluded from any other single or joined inheriting classes using the ``exclude_properties`` mapper argument. Below, ``Manager`` will have all the attributes of ``Person`` and ``Manager`` but *not* the ``primary_language`` attribute of ``Engineer``:: class Manager(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} golf_swing = Column(String(50)) The attribute exclusion logic is provided by the ``exclude_properties`` mapper argument, and declarative's default behavior can be disabled by passing an explicit ``exclude_properties`` collection (empty or otherwise) to the ``__mapper_args__``. Resolving Column Conflicts ^^^^^^^^^^^^^^^^^^^^^^^^^^ Note above that the ``primary_language`` and ``golf_swing`` columns are "moved up" to be applied to ``Person.__table__``, as a result of their declaration on a subclass that has no table of its own. A tricky case comes up when two subclasses want to specify *the same* column, as below:: class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} start_date = Column(DateTime) class Manager(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} start_date = Column(DateTime) Above, the ``start_date`` column declared on both ``Engineer`` and ``Manager`` will result in an error:: sqlalchemy.exc.ArgumentError: Column 'start_date' on class conflicts with existing column 'people.start_date' In a situation like this, Declarative can't be sure of the intent, especially if the ``start_date`` columns had, for example, different types. A situation like this can be resolved by using :class:`.declared_attr` to define the :class:`.Column` conditionally, taking care to return the **existing column** via the parent ``__table__`` if it already exists:: from sqlalchemy.ext.declarative import declared_attr class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} @declared_attr def start_date(cls): "Start date column, if not present already." 
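            # return the Column already collected on Person.__table__ when
            # present, rather than re-declaring it; otherwise create a new one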
return Person.__table__.c.get('start_date', Column(DateTime)) class Manager(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} @declared_attr def start_date(cls): "Start date column, if not present already." return Person.__table__.c.get('start_date', Column(DateTime)) Above, when ``Manager`` is mapped, the ``start_date`` column is already present on the ``Person`` class. Declarative lets us return that :class:`.Column` as a result in this case, where it knows to skip re-assigning the same column. If the mapping is mis-configured such that the ``start_date`` column is accidentally re-assigned to a different table (such as, if we changed ``Manager`` to be joined inheritance without fixing ``start_date``), an error is raised which indicates an existing :class:`.Column` is trying to be re-assigned to a different owning :class:`.Table`. .. versionadded:: 0.8 :class:`.declared_attr` can be used on a non-mixin class, and the returned :class:`.Column` or other mapped attribute will be applied to the mapping as any other attribute. Previously, the resulting attribute would be ignored, and also result in a warning being emitted when a subclass was created. .. versionadded:: 0.8 :class:`.declared_attr`, when used either with a mixin or non-mixin declarative class, can return an existing :class:`.Column` already assigned to the parent :class:`.Table`, to indicate that the re-assignment of the :class:`.Column` should be skipped, however should still be mapped on the target class, in order to resolve duplicate column conflicts. The same concept can be used with mixin classes (see :ref:`declarative_mixins`):: class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class HasStartDate(object): @declared_attr def start_date(cls): return cls.__table__.c.get('start_date', Column(DateTime)) class Engineer(HasStartDate, Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} class Manager(HasStartDate, Person): __mapper_args__ = {'polymorphic_identity': 'manager'} The above mixin checks the local ``__table__`` attribute for the column. Because we're using single table inheritance, we're sure that in this case, ``cls.__table__`` refers to ``People.__table__``. If we were mixing joined- and single-table inheritance, we might want our mixin to check more carefully if ``cls.__table__`` is really the :class:`.Table` we're looking for. 
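One way such a check might look is sketched below; the guard condition here is an illustration and not part of the original example, distinguishing single-table subclasses (which declare no table of their own) from joined-table subclasses before reusing the parent's column::

    from sqlalchemy import Column, DateTime
    from sqlalchemy.ext.declarative import declared_attr, has_inherited_table

    class HasStartDate(object):
        @declared_attr
        def start_date(cls):
            # a subclass that declares __table__ or a non-None
            # __tablename__ of its own is joined-table, not single-table
            declares_own_table = (
                '__table__' in cls.__dict__
                or cls.__dict__.get('__tablename__') is not None
            )
            if has_inherited_table(cls) and not declares_own_table:
                # single-table subclass: reuse the column already
                # present on the inherited table, if any
                return cls.__table__.c.get('start_date', Column(DateTime))
            return Column(DateTime)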
Concrete Table Inheritance ~~~~~~~~~~~~~~~~~~~~~~~~~~ Concrete table inheritance is defined as a subclass which has its own table and sets the ``concrete`` keyword argument to ``True``:: class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) name = Column(String(50)) class Engineer(Person): __tablename__ = 'engineers' __mapper_args__ = {'concrete':True} id = Column(Integer, primary_key=True) primary_language = Column(String(50)) name = Column(String(50)) Usage of an abstract base class is a little less straightforward as it requires the use of :func:`~sqlalchemy.orm.util.polymorphic_union`, which needs to be created with the :class:`.Table` objects before the class is built:: engineers = Table('engineers', Base.metadata, Column('id', Integer, primary_key=True), Column('name', String(50)), Column('primary_language', String(50)) ) managers = Table('managers', Base.metadata, Column('id', Integer, primary_key=True), Column('name', String(50)), Column('golf_swing', String(50)) ) punion = polymorphic_union({ 'engineer':engineers, 'manager':managers }, 'type', 'punion') class Person(Base): __table__ = punion __mapper_args__ = {'polymorphic_on':punion.c.type} class Engineer(Person): __table__ = engineers __mapper_args__ = {'polymorphic_identity':'engineer', 'concrete':True} class Manager(Person): __table__ = managers __mapper_args__ = {'polymorphic_identity':'manager', 'concrete':True} .. _declarative_concrete_helpers: Using the Concrete Helpers ^^^^^^^^^^^^^^^^^^^^^^^^^^^ Helper classes provide a simpler pattern for concrete inheritance. With these objects, the ``__declare_first__`` helper is used to configure the "polymorphic" loader for the mapper after all subclasses have been declared. .. versionadded:: 0.7.3 An abstract base can be declared using the :class:`.AbstractConcreteBase` class:: from sqlalchemy.ext.declarative import AbstractConcreteBase class Employee(AbstractConcreteBase, Base): pass To have a concrete ``employee`` table, use :class:`.ConcreteBase` instead:: from sqlalchemy.ext.declarative import ConcreteBase class Employee(ConcreteBase, Base): __tablename__ = 'employee' employee_id = Column(Integer, primary_key=True) name = Column(String(50)) __mapper_args__ = { 'polymorphic_identity':'employee', 'concrete':True} Either ``Employee`` base can be used in the normal fashion:: class Manager(Employee): __tablename__ = 'manager' employee_id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(40)) __mapper_args__ = { 'polymorphic_identity':'manager', 'concrete':True} class Engineer(Employee): __tablename__ = 'engineer' employee_id = Column(Integer, primary_key=True) name = Column(String(50)) engineer_info = Column(String(40)) __mapper_args__ = {'polymorphic_identity':'engineer', 'concrete':True} The :class:`.AbstractConcreteBase` class is itself mapped, and can be used as a target of relationships:: class Company(Base): __tablename__ = 'company' id = Column(Integer, primary_key=True) employees = relationship("Employee", primaryjoin="Company.id == Employee.company_id") .. versionchanged:: 0.9.3 Support for use of :class:`.AbstractConcreteBase` as the target of a :func:`.relationship` has been improved. It can also be queried directly:: for employee in session.query(Employee).filter(Employee.name == 'qbert'): print(employee) SQLAlchemy-1.0.11/doc/build/orm/extensions/declarative/relationships.rst0000664000175000017500000001241312636375552027402 0ustar classicclassic00000000000000..
_declarative_configuring_relationships: ========================= Configuring Relationships ========================= Relationships to other classes are done in the usual way, with the added feature that the class specified to :func:`~sqlalchemy.orm.relationship` may be a string name. The "class registry" associated with ``Base`` is used at mapper compilation time to resolve the name into the actual class object, which is expected to have been defined once the mapper configuration is used:: class User(Base): __tablename__ = 'users' id = Column(Integer, primary_key=True) name = Column(String(50)) addresses = relationship("Address", backref="user") class Address(Base): __tablename__ = 'addresses' id = Column(Integer, primary_key=True) email = Column(String(50)) user_id = Column(Integer, ForeignKey('users.id')) Column constructs, since they are just that, are immediately usable, as below where we define a primary join condition on the ``Address`` class using them:: class Address(Base): __tablename__ = 'addresses' id = Column(Integer, primary_key=True) email = Column(String(50)) user_id = Column(Integer, ForeignKey('users.id')) user = relationship(User, primaryjoin=user_id == User.id) In addition to the main argument for :func:`~sqlalchemy.orm.relationship`, other arguments which depend upon the columns present on an as-yet undefined class may also be specified as strings. These strings are evaluated as Python expressions. The full namespace available within this evaluation includes all classes mapped for this declarative base, as well as the contents of the ``sqlalchemy`` package, including expression functions like :func:`~sqlalchemy.sql.expression.desc` and :attr:`~sqlalchemy.sql.expression.func`:: class User(Base): # .... addresses = relationship("Address", order_by="desc(Address.email)", primaryjoin="Address.user_id==User.id") For the case where more than one module contains a class of the same name, string class names can also be specified as module-qualified paths within any of these string expressions:: class User(Base): # .... addresses = relationship("myapp.model.address.Address", order_by="desc(myapp.model.address.Address.email)", primaryjoin="myapp.model.address.Address.user_id==" "myapp.model.user.User.id") The qualified path can be any partial path that removes ambiguity between the names. For example, to disambiguate between ``myapp.model.address.Address`` and ``myapp.model.lookup.Address``, we can specify ``address.Address`` or ``lookup.Address``:: class User(Base): # .... addresses = relationship("address.Address", order_by="desc(address.Address.email)", primaryjoin="address.Address.user_id==" "User.id") .. versionadded:: 0.8 module-qualified paths can be used when specifying string arguments with Declarative, in order to specify specific modules. Two alternatives also exist to using string-based attributes. A lambda can also be used, which will be evaluated after all mappers have been configured:: class User(Base): # ... addresses = relationship(lambda: Address, order_by=lambda: desc(Address.email), primaryjoin=lambda: Address.user_id==User.id) Or, the relationship can be added to the class explicitly after the classes are available:: User.addresses = relationship(Address, primaryjoin=Address.user_id==User.id) .. _declarative_many_to_many: Configuring Many-to-Many Relationships ====================================== Many-to-many relationships are also declared in the same way with declarative as with traditional mappings. 
The ``secondary`` argument to :func:`.relationship` is as usual passed a :class:`.Table` object, which is typically declared in the traditional way. The :class:`.Table` usually shares the :class:`.MetaData` object used by the declarative base:: keywords = Table( 'keywords', Base.metadata, Column('author_id', Integer, ForeignKey('authors.id')), Column('keyword_id', Integer, ForeignKey('keywords.id')) ) class Author(Base): __tablename__ = 'authors' id = Column(Integer, primary_key=True) keywords = relationship("Keyword", secondary=keywords) Like other :func:`~sqlalchemy.orm.relationship` arguments, a string is accepted as well, passing the string name of the table as defined in the ``Base.metadata.tables`` collection:: class Author(Base): __tablename__ = 'authors' id = Column(Integer, primary_key=True) keywords = relationship("Keyword", secondary="keywords") As with traditional mapping, it's generally not a good idea to use a :class:`.Table` as the "secondary" argument which is also mapped to a class, unless the :func:`.relationship` is declared with ``viewonly=True``. Otherwise, the unit-of-work system may attempt duplicate INSERT and DELETE statements against the underlying table. SQLAlchemy-1.0.11/doc/build/orm/extensions/declarative/mixins.rst0000664000175000017500000004707612636375552026032 0ustar classicclassic00000000000000.. _declarative_mixins: Mixin and Custom Base Classes ============================== A common need when using :mod:`~sqlalchemy.ext.declarative` is to share some functionality, such as a set of common columns, some common table options, or other mapped properties, across many classes. The standard Python idiom for this is to have the classes inherit from a base which includes these common features. When using :mod:`~sqlalchemy.ext.declarative`, this idiom is allowed via the usage of a custom declarative base class, as well as a "mixin" class which is inherited from in addition to the primary base. Declarative includes several helper features to make this work in terms of how mappings are declared. An example of some commonly mixed-in idioms is below:: from sqlalchemy.ext.declarative import declared_attr class MyMixin(object): @declared_attr def __tablename__(cls): return cls.__name__.lower() __table_args__ = {'mysql_engine': 'InnoDB'} __mapper_args__= {'always_refresh': True} id = Column(Integer, primary_key=True) class MyModel(MyMixin, Base): name = Column(String(1000)) Where above, the class ``MyModel`` will contain an "id" column as the primary key, a ``__tablename__`` attribute that derives from the name of the class itself, as well as ``__table_args__`` and ``__mapper_args__`` defined by the ``MyMixin`` mixin class. There's no fixed convention over whether ``MyMixin`` precedes ``Base`` or not. Normal Python method resolution rules apply, and the above example would work just as well with:: class MyModel(Base, MyMixin): name = Column(String(1000)) This works because ``Base`` here doesn't define any of the variables that ``MyMixin`` defines, i.e. ``__tablename__``, ``__table_args__``, ``id``, etc. If the ``Base`` did define an attribute of the same name, the class placed first in the inherits list would determine which attribute is used on the newly defined class. Augmenting the Base ~~~~~~~~~~~~~~~~~~~ In addition to using a pure mixin, most of the techniques in this section can also be applied to the base class itself, for patterns that should apply to all classes derived from a particular base.
This is achieved using the ``cls`` argument of the :func:`.declarative_base` function:: from sqlalchemy.ext.declarative import declared_attr class Base(object): @declared_attr def __tablename__(cls): return cls.__name__.lower() __table_args__ = {'mysql_engine': 'InnoDB'} id = Column(Integer, primary_key=True) from sqlalchemy.ext.declarative import declarative_base Base = declarative_base(cls=Base) class MyModel(Base): name = Column(String(1000)) Where above, ``MyModel`` and all other classes that derive from ``Base`` will have a table name derived from the class name, an ``id`` primary key column, as well as the "InnoDB" engine for MySQL. Mixing in Columns ~~~~~~~~~~~~~~~~~ The most basic way to specify a column on a mixin is by simple declaration:: class TimestampMixin(object): created_at = Column(DateTime, default=func.now()) class MyModel(TimestampMixin, Base): __tablename__ = 'test' id = Column(Integer, primary_key=True) name = Column(String(1000)) Where above, all declarative classes that include ``TimestampMixin`` will also have a column ``created_at`` that applies a timestamp to all row insertions. Those familiar with the SQLAlchemy expression language know that the object identity of clause elements defines their role in a schema. Two ``Table`` objects ``a`` and ``b`` may both have a column called ``id``, but the way these are differentiated is that ``a.c.id`` and ``b.c.id`` are two distinct Python objects, referencing their parent tables ``a`` and ``b`` respectively. In the case of the mixin column, it seems that only one :class:`.Column` object is explicitly created, yet the ultimate ``created_at`` column above must exist as a distinct Python object for each separate destination class. To accomplish this, the declarative extension creates a **copy** of each :class:`.Column` object encountered on a class that is detected as a mixin. This copy mechanism is limited to simple columns that have no foreign keys, as a :class:`.ForeignKey` itself contains references to columns which can't be properly recreated at this level. For columns that have foreign keys, as well as for the variety of mapper-level constructs that require destination-explicit context, the :class:`~.declared_attr` decorator is provided so that patterns common to many classes can be defined as callables:: from sqlalchemy.ext.declarative import declared_attr class ReferenceAddressMixin(object): @declared_attr def address_id(cls): return Column(Integer, ForeignKey('address.id')) class User(ReferenceAddressMixin, Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) Where above, the ``address_id`` class-level callable is executed at the point at which the ``User`` class is constructed, and the declarative extension can use the resulting :class:`.Column` object as returned by the method without the need to copy it. .. versionchanged:: 0.6.5 Rename ``sqlalchemy.util.classproperty`` into :class:`~.declared_attr`. 
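As a quick illustration of this per-class behavior (the ``Invoice`` class here is a hypothetical second user of the mixin, not part of the original example), each class that includes ``ReferenceAddressMixin`` receives its own :class:`.Column` object, since the :class:`~.declared_attr` callable is invoked once per mapped class::

    class Invoice(ReferenceAddressMixin, Base):
        __tablename__ = 'invoice'
        id = Column(Integer, primary_key=True)

    # each mapped class received a distinct Column from the mixin;
    # the objects are not shared between tables
    assert User.__table__.c.address_id is not Invoice.__table__.c.address_id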
Columns generated by :class:`~.declared_attr` can also be referenced by ``__mapper_args__`` to a limited degree, currently by ``polymorphic_on`` and ``version_id_col``; the declarative extension will resolve them at class construction time:: class MyMixin: @declared_attr def type_(cls): return Column(String(50)) __mapper_args__= {'polymorphic_on':type_} class MyModel(MyMixin, Base): __tablename__='test' id = Column(Integer, primary_key=True) Mixing in Relationships ~~~~~~~~~~~~~~~~~~~~~~~ Relationships created by :func:`~sqlalchemy.orm.relationship` are provided with declarative mixin classes exclusively using the :class:`.declared_attr` approach, eliminating any ambiguity which could arise when copying a relationship and its possibly column-bound contents. Below is an example which combines a foreign key column and a relationship so that two classes ``Foo`` and ``Bar`` can both be configured to reference a common target class via many-to-one:: class RefTargetMixin(object): @declared_attr def target_id(cls): return Column('target_id', ForeignKey('target.id')) @declared_attr def target(cls): return relationship("Target") class Foo(RefTargetMixin, Base): __tablename__ = 'foo' id = Column(Integer, primary_key=True) class Bar(RefTargetMixin, Base): __tablename__ = 'bar' id = Column(Integer, primary_key=True) class Target(Base): __tablename__ = 'target' id = Column(Integer, primary_key=True) Using Advanced Relationship Arguments (e.g. ``primaryjoin``, etc.) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ :func:`~sqlalchemy.orm.relationship` definitions which require explicit primaryjoin, order_by etc. expressions should in all but the most simplistic cases use **late bound** forms for these arguments, meaning, using either the string form or a lambda. The reason for this is that the related :class:`.Column` objects which are to be configured using ``@declared_attr`` are not available to another ``@declared_attr`` attribute; while the methods will work and return new :class:`.Column` objects, those are not the :class:`.Column` objects that Declarative will be using as it calls the methods on its own, thus using *different* :class:`.Column` objects. The canonical example is the primaryjoin condition that depends upon another mixed-in column:: class RefTargetMixin(object): @declared_attr def target_id(cls): return Column('target_id', ForeignKey('target.id')) @declared_attr def target(cls): return relationship(Target, primaryjoin=Target.id==cls.target_id # this is *incorrect* ) Mapping a class using the above mixin, we will get an error like:: sqlalchemy.exc.InvalidRequestError: this ForeignKey's parent column is not yet associated with a Table. This is because the ``target_id`` :class:`.Column` we've called upon in our ``target()`` method is not the same :class:`.Column` that declarative is actually going to map to our table. 
The condition above is resolved using a lambda:: class RefTargetMixin(object): @declared_attr def target_id(cls): return Column('target_id', ForeignKey('target.id')) @declared_attr def target(cls): return relationship(Target, primaryjoin=lambda: Target.id==cls.target_id ) or alternatively, the string form (which ultimately generates a lambda):: class RefTargetMixin(object): @declared_attr def target_id(cls): return Column('target_id', ForeignKey('target.id')) @declared_attr def target(cls): return relationship("Target", primaryjoin="Target.id==%s.target_id" % cls.__name__ ) Mixing in deferred(), column_property(), and other MapperProperty classes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Like :func:`~sqlalchemy.orm.relationship`, all :class:`~sqlalchemy.orm.interfaces.MapperProperty` subclasses such as :func:`~sqlalchemy.orm.deferred`, :func:`~sqlalchemy.orm.column_property`, etc. ultimately involve references to columns, and therefore, when used with declarative mixins, have the :class:`.declared_attr` requirement so that no reliance on copying is needed:: class SomethingMixin(object): @declared_attr def dprop(cls): return deferred(Column(Integer)) class Something(SomethingMixin, Base): __tablename__ = "something" The :func:`.column_property` or other construct may refer to other columns from the mixin. These are copied ahead of time before the :class:`.declared_attr` is invoked:: class SomethingMixin(object): x = Column(Integer) y = Column(Integer) @declared_attr def x_plus_y(cls): return column_property(cls.x + cls.y) .. versionchanged:: 1.0.0 mixin columns are copied to the final mapped class so that :class:`.declared_attr` methods can access the actual column that will be mapped. Mixing in Association Proxy and Other Attributes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Mixins can specify user-defined attributes as well as other extension units such as :func:`.association_proxy`. The usage of :class:`.declared_attr` is required in those cases where the attribute must be tailored specifically to the target subclass. An example is when constructing multiple :func:`.association_proxy` attributes which each target a different type of child object. Below is an :func:`.association_proxy` / mixin example which provides a scalar list of string values to an implementing class:: from sqlalchemy import Column, Integer, ForeignKey, String from sqlalchemy.orm import relationship from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext.declarative import declarative_base, declared_attr Base = declarative_base() class HasStringCollection(object): @declared_attr def _strings(cls): class StringAttribute(Base): __tablename__ = cls.string_table_name id = Column(Integer, primary_key=True) value = Column(String(50), nullable=False) parent_id = Column(Integer, ForeignKey('%s.id' % cls.__tablename__), nullable=False) def __init__(self, value): self.value = value return relationship(StringAttribute) @declared_attr def strings(cls): return association_proxy('_strings', 'value') class TypeA(HasStringCollection, Base): __tablename__ = 'type_a' string_table_name = 'type_a_strings' id = Column(Integer(), primary_key=True) class TypeB(HasStringCollection, Base): __tablename__ = 'type_b' string_table_name = 'type_b_strings' id = Column(Integer(), primary_key=True) Above, the ``HasStringCollection`` mixin produces a :func:`.relationship` which refers to a newly generated class called ``StringAttribute``. 
The ``StringAttribute`` class is generated with its own :class:`.Table`
definition which is local to the parent class making usage of the
``HasStringCollection`` mixin. It also produces an
:func:`.association_proxy` object which proxies references to the
``strings`` attribute onto the ``value`` attribute of each
``StringAttribute`` instance.

``TypeA`` or ``TypeB`` can be instantiated given the constructor argument
``strings``, a list of strings::

    ta = TypeA(strings=['foo', 'bar'])
    tb = TypeB(strings=['bat', 'bar'])

This list will generate a collection of ``StringAttribute`` objects, which
are persisted into the ``type_a_strings`` or ``type_b_strings`` table,
local to the parent class::

    >>> print ta._strings
    [<__main__.StringAttribute object at 0x10151cd90>,
        <__main__.StringAttribute object at 0x10151ce10>]

When constructing the :func:`.association_proxy`, the
:class:`.declared_attr` decorator must be used so that a distinct
:func:`.association_proxy` object is created for each of the ``TypeA``
and ``TypeB`` classes.

.. versionadded:: 0.8
   :class:`.declared_attr` is usable with non-mapped attributes, including
   user-defined attributes as well as :func:`.association_proxy`.

Controlling table inheritance with mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The ``__tablename__`` attribute may be used to provide a function that will
determine the name of the table used for each class in an inheritance
hierarchy, as well as whether a class has its own distinct table.

This is achieved using the :class:`.declared_attr` indicator in conjunction
with a method named ``__tablename__()``. Declarative will always invoke
:class:`.declared_attr` for the special names ``__tablename__``,
``__mapper_args__`` and ``__table_args__`` **for each mapped class in the
hierarchy**. The function therefore needs to expect to receive each class
individually and to provide the correct answer for each.

For example, to create a mixin that gives every class a simple table name
based on class name::

    from sqlalchemy.ext.declarative import declared_attr

    class Tablename:
        @declared_attr
        def __tablename__(cls):
            return cls.__name__.lower()

    class Person(Tablename, Base):
        id = Column(Integer, primary_key=True)
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Engineer(Person):
        __tablename__ = None
        __mapper_args__ = {'polymorphic_identity': 'engineer'}
        primary_language = Column(String(50))

Alternatively, we can modify our ``__tablename__`` function to return
``None`` for subclasses, using :func:`.has_inherited_table`. This has the
effect of those subclasses being mapped with single table inheritance
against the parent::

    from sqlalchemy.ext.declarative import declared_attr
    from sqlalchemy.ext.declarative import has_inherited_table

    class Tablename(object):
        @declared_attr
        def __tablename__(cls):
            if has_inherited_table(cls):
                return None
            return cls.__name__.lower()

    class Person(Tablename, Base):
        id = Column(Integer, primary_key=True)
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Engineer(Person):
        primary_language = Column(String(50))
        __mapper_args__ = {'polymorphic_identity': 'engineer'}

.. _mixin_inheritance_columns:

Mixing in Columns in Inheritance Scenarios
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In contrast to how ``__tablename__`` and other special names are handled
when used with :class:`.declared_attr`, when we mix in columns and
properties (e.g.
relationships, column properties, etc.), the function is invoked for the
**base class only** in the hierarchy. Below, only the ``Person`` class will
receive a column called ``id``; the mapping will fail on ``Engineer``, which
is not given a primary key::

    class HasId(object):
        @declared_attr
        def id(cls):
            return Column('id', Integer, primary_key=True)

    class Person(HasId, Base):
        __tablename__ = 'person'
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Engineer(Person):
        __tablename__ = 'engineer'
        primary_language = Column(String(50))
        __mapper_args__ = {'polymorphic_identity': 'engineer'}

It is usually the case in joined-table inheritance that we want distinctly
named columns on each subclass. However in this case, we may want to have
an ``id`` column on every table, and have them refer to each other via
foreign key. We can achieve this as a mixin by using the
:attr:`.declared_attr.cascading` modifier, which indicates that the
function should be invoked **for each class in the hierarchy**, just like
it does for ``__tablename__``::

    class HasId(object):
        @declared_attr.cascading
        def id(cls):
            if has_inherited_table(cls):
                return Column('id', Integer,
                              ForeignKey('person.id'), primary_key=True)
            else:
                return Column('id', Integer, primary_key=True)

    class Person(HasId, Base):
        __tablename__ = 'person'
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Engineer(Person):
        __tablename__ = 'engineer'
        primary_language = Column(String(50))
        __mapper_args__ = {'polymorphic_identity': 'engineer'}

.. versionadded:: 1.0.0 added :attr:`.declared_attr.cascading`.

Combining Table/Mapper Arguments from Multiple Mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In the case of ``__table_args__`` or ``__mapper_args__`` specified with
declarative mixins, you may want to combine some parameters from several
mixins with those you wish to define on the class itself. The
:class:`.declared_attr` decorator can be used here to create user-defined
collation routines that pull from multiple collections::

    from sqlalchemy.ext.declarative import declared_attr

    class MySQLSettings(object):
        __table_args__ = {'mysql_engine': 'InnoDB'}

    class MyOtherMixin(object):
        __table_args__ = {'info': 'foo'}

    class MyModel(MySQLSettings, MyOtherMixin, Base):
        __tablename__ = 'my_model'

        @declared_attr
        def __table_args__(cls):
            args = dict()
            args.update(MySQLSettings.__table_args__)
            args.update(MyOtherMixin.__table_args__)
            return args

        id = Column(Integer, primary_key=True)

Creating Indexes with Mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To define a named, potentially multicolumn :class:`.Index` that applies
to all tables derived from a mixin, use the "inline" form of
:class:`.Index` and establish it as part of ``__table_args__``::

    class MyMixin(object):
        a = Column(Integer)
        b = Column(Integer)

        @declared_attr
        def __table_args__(cls):
            return (Index('test_idx_%s' % cls.__tablename__, 'a', 'b'),)

    class MyModel(MyMixin, Base):
        __tablename__ = 'atable'
        c = Column(Integer, primary_key=True)

SQLAlchemy-1.0.11/doc/build/orm/extensions/declarative/api.rst

.. automodule:: sqlalchemy.ext.declarative

===============
Declarative API
===============

API Reference
=============

.. autofunction:: declarative_base

.. autofunction:: as_declarative

.. autoclass:: declared_attr
   :members:

.. autofunction:: sqlalchemy.ext.declarative.api._declarative_constructor
.. autofunction:: has_inherited_table

.. autofunction:: synonym_for

.. autofunction:: comparable_using

.. autofunction:: instrument_declarative

.. autoclass:: AbstractConcreteBase

.. autoclass:: ConcreteBase

.. autoclass:: DeferredReflection
   :members:

Special Directives
------------------

``__declare_last__()``
~~~~~~~~~~~~~~~~~~~~~~

The ``__declare_last__()`` hook allows definition of a class level function
that is automatically called by the :meth:`.MapperEvents.after_configured`
event, which occurs after mappings are assumed to be completed and the
'configure' step has finished::

    class MyClass(Base):
        @classmethod
        def __declare_last__(cls):
            ""
            # do something with mappings

.. versionadded:: 0.7.3

``__declare_first__()``
~~~~~~~~~~~~~~~~~~~~~~~

Like ``__declare_last__()``, but is called at the beginning of mapper
configuration via the :meth:`.MapperEvents.before_configured` event::

    class MyClass(Base):
        @classmethod
        def __declare_first__(cls):
            ""
            # do something before mappings are configured

.. versionadded:: 0.9.3

.. _declarative_abstract:

``__abstract__``
~~~~~~~~~~~~~~~~

``__abstract__`` causes declarative to skip the production of a table or
mapper for the class entirely. A class can be added within a hierarchy in
the same way as a mixin (see :ref:`declarative_mixins`), allowing
subclasses to extend just from the special class::

    class SomeAbstractBase(Base):
        __abstract__ = True

        def some_helpful_method(self):
            ""

        @declared_attr
        def __mapper_args__(cls):
            return {"helpful mapper arguments": True}

    class MyMappedClass(SomeAbstractBase):
        ""

One possible use of ``__abstract__`` is to use a distinct
:class:`.MetaData` for different bases::

    Base = declarative_base()

    class DefaultBase(Base):
        __abstract__ = True
        metadata = MetaData()

    class OtherBase(Base):
        __abstract__ = True
        metadata = MetaData()

Above, classes which inherit from ``DefaultBase`` will use one
:class:`.MetaData` as the registry of tables, and those which inherit from
``OtherBase`` will use a different one. The tables themselves can then be
created perhaps within distinct databases::

    DefaultBase.metadata.create_all(some_engine)
    OtherBase.metadata.create_all(some_other_engine)

.. versionadded:: 0.7.3

SQLAlchemy-1.0.11/doc/build/orm/extensions/declarative/basic_use.rst

=========
Basic Use
=========

SQLAlchemy object-relational configuration involves the combination of
:class:`.Table`, :func:`.mapper`, and class objects to define a mapped
class. :mod:`~sqlalchemy.ext.declarative` allows all three to be expressed
at once within the class declaration. As much as possible, regular
SQLAlchemy schema and ORM constructs are used directly, so that
configuration between "classical" ORM usage and declarative remains highly
similar.

As a simple example::

    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class SomeClass(Base):
        __tablename__ = 'some_table'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

Above, the :func:`declarative_base` callable returns a new base class from
which all mapped classes should inherit. When the class definition is
completed, a new :class:`.Table` and :func:`.mapper` will have been
generated.
The resulting table and mapper are accessible via ``__table__`` and
``__mapper__`` attributes on the ``SomeClass`` class::

    # access the mapped Table
    SomeClass.__table__

    # access the Mapper
    SomeClass.__mapper__

Defining Attributes
===================

In the previous example, the :class:`.Column` objects are automatically
named with the name of the attribute to which they are assigned.

To name columns explicitly with a name distinct from their mapped attribute,
just give the column a name. Below, column "some_table_id" is mapped to the
"id" attribute of ``SomeClass``, but in SQL will be represented as
"some_table_id"::

    class SomeClass(Base):
        __tablename__ = 'some_table'
        id = Column("some_table_id", Integer, primary_key=True)

Attributes may be added to the class after its construction, and they will
be added to the underlying :class:`.Table` and :func:`.mapper` definitions
as appropriate::

    SomeClass.data = Column('data', Unicode)
    SomeClass.related = relationship(RelatedInfo)

Classes which are constructed using declarative can interact freely with
classes that are mapped explicitly with :func:`.mapper`.

It is recommended, though not required, that all tables share the same
underlying :class:`~sqlalchemy.schema.MetaData` object, so that
string-configured :class:`~sqlalchemy.schema.ForeignKey` references can be
resolved without issue.

Accessing the MetaData
=======================

The :func:`declarative_base` base class contains a :class:`.MetaData` object
where newly defined :class:`.Table` objects are collected. This object is
intended to be accessed directly for :class:`.MetaData`-specific operations.
For example, to issue CREATE statements for all tables::

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)

:func:`declarative_base` can also receive a pre-existing :class:`.MetaData`
object, which allows a declarative setup to be associated with an already
existing traditional collection of :class:`~sqlalchemy.schema.Table`
objects::

    mymetadata = MetaData()
    Base = declarative_base(metadata=mymetadata)

Class Constructor
=================

As a convenience feature, the :func:`declarative_base` sets a default
constructor on classes which takes keyword arguments, and assigns them to
the named attributes::

    e = Engineer(primary_language='python')

Mapper Configuration
====================

Declarative makes use of the :func:`~.orm.mapper` function internally when
it creates the mapping to the declared table. The options for
:func:`~.orm.mapper` are passed directly through via the
``__mapper_args__`` class attribute. As always, arguments which reference
locally mapped columns can reference them directly from within the class
declaration::

    from datetime import datetime

    class Widget(Base):
        __tablename__ = 'widgets'

        id = Column(Integer, primary_key=True)
        timestamp = Column(DateTime, nullable=False)

        __mapper_args__ = {
            'version_id_col': timestamp,
            'version_id_generator': lambda v: datetime.now()
        }

.. _declarative_sql_expressions:

Defining SQL Expressions
========================

See :ref:`mapper_sql_expressions` for examples on declaratively mapping
attributes to SQL expressions.

SQLAlchemy-1.0.11/doc/build/orm/extensions/declarative/index.rst

.. _declarative_toplevel:

===========
Declarative
===========

The Declarative system is the typically used system provided by the
SQLAlchemy ORM in order to define classes mapped to relational database
tables.
However, as noted in :ref:`classical_mapping`, Declarative is in fact a
series of extensions that ride on top of the SQLAlchemy :func:`.mapper`
construct.

While the documentation typically refers to Declarative for most examples,
the following sections will provide detailed information on how the
Declarative API interacts with the basic :func:`.mapper` and Core
:class:`.Table` systems, as well as how sophisticated patterns can be built
using systems such as mixins.

.. toctree::
   :maxdepth: 2

   basic_use
   relationships
   table_config
   inheritance
   mixins
   api

SQLAlchemy-1.0.11/doc/build/orm/extensions/declarative/table_config.rst

.. _declarative_table_args:

===================
Table Configuration
===================

Table arguments other than the name, metadata, and mapped Column arguments
are specified using the ``__table_args__`` class attribute. This attribute
accommodates both positional as well as keyword arguments that are normally
sent to the :class:`~sqlalchemy.schema.Table` constructor.

The attribute can be specified in one of two forms. One is as a
dictionary::

    class MyClass(Base):
        __tablename__ = 'sometable'
        __table_args__ = {'mysql_engine': 'InnoDB'}

The other, a tuple, where each argument is positional (usually
constraints)::

    class MyClass(Base):
        __tablename__ = 'sometable'
        __table_args__ = (
            ForeignKeyConstraint(['id'], ['remote_table.id']),
            UniqueConstraint('foo'),
        )

Keyword arguments can be specified with the above form by specifying the
last argument as a dictionary::

    class MyClass(Base):
        __tablename__ = 'sometable'
        __table_args__ = (
            ForeignKeyConstraint(['id'], ['remote_table.id']),
            UniqueConstraint('foo'),
            {'autoload': True}
        )

Using a Hybrid Approach with __table__
=======================================

As an alternative to ``__tablename__``, a direct
:class:`~sqlalchemy.schema.Table` construct may be used. The
:class:`~sqlalchemy.schema.Column` objects, which in this case require
their names, will be added to the mapping just like a regular mapping to a
table::

    class MyClass(Base):
        __table__ = Table('my_table', Base.metadata,
            Column('id', Integer, primary_key=True),
            Column('name', String(50))
        )

``__table__`` provides a more focused point of control for establishing
table metadata, while still getting most of the benefits of using
declarative. An application that uses reflection might want to load table
metadata elsewhere and pass it to declarative classes::

    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()
    Base.metadata.reflect(some_engine)

    class User(Base):
        __table__ = Base.metadata.tables['user']

    class Address(Base):
        __table__ = Base.metadata.tables['address']

Some configuration schemes may find it more appropriate to use
``__table__``, such as those which already take advantage of the
data-driven nature of :class:`.Table` to customize and/or automate schema
definition.

Note that when the ``__table__`` approach is used, the object is
immediately usable as a plain :class:`.Table` within the class declaration
body itself, as a Python class is only another syntactical block.
Below this is illustrated by using the ``id`` column in the
``primaryjoin`` condition of a :func:`.relationship`::

    class MyClass(Base):
        __table__ = Table('my_table', Base.metadata,
            Column('id', Integer, primary_key=True),
            Column('name', String(50))
        )

        widgets = relationship(Widget,
            primaryjoin=Widget.myclass_id == __table__.c.id)

Similarly, mapped attributes which refer to ``__table__`` can be placed
inline, as below where we assign the ``name`` column to the attribute
``_name``, generating a synonym for ``name``::

    from sqlalchemy.ext.declarative import synonym_for

    class MyClass(Base):
        __table__ = Table('my_table', Base.metadata,
            Column('id', Integer, primary_key=True),
            Column('name', String(50))
        )

        _name = __table__.c.name

        @synonym_for("_name")
        def name(self):
            return "Name: %s" % self._name

Using Reflection with Declarative
=================================

It's easy to set up a :class:`.Table` that uses ``autoload=True`` in
conjunction with a mapped class::

    class MyClass(Base):
        __table__ = Table('mytable', Base.metadata,
                          autoload=True, autoload_with=some_engine)

However, one improvement that can be made here is to not require the
:class:`.Engine` to be available when classes are being first declared. To
achieve this, use the :class:`.DeferredReflection` mixin, which sets up
mappings only after a special ``prepare(engine)`` step is called::

    from sqlalchemy.ext.declarative import declarative_base, DeferredReflection

    Base = declarative_base(cls=DeferredReflection)

    class Foo(Base):
        __tablename__ = 'foo'
        bars = relationship("Bar")

    class Bar(Base):
        __tablename__ = 'bar'

        # illustrate overriding of "bar.foo_id" to have
        # a foreign key constraint otherwise not
        # reflected, such as when using MySQL
        foo_id = Column(Integer, ForeignKey('foo.id'))

    Base.prepare(e)

.. versionadded:: 0.8 Added :class:`.DeferredReflection`.

SQLAlchemy-1.0.11/doc/build/orm/extensions/baked.rst

.. _baked_toplevel:

Baked Queries
=============

.. module:: sqlalchemy.ext.baked

``baked`` provides an alternative creational pattern for
:class:`~.query.Query` objects, which allows for caching of the object's
construction and string-compilation steps. This means that for a
particular :class:`~.query.Query` building scenario that is used more than
once, all of the Python function invocation involved in building the query
from its initial construction up through generating a SQL string will only
occur **once**, rather than for each time that query is built up and
executed.

The rationale for this system is to greatly reduce Python interpreter
overhead for everything that occurs **before the SQL is emitted**. The
caching of the "baked" system does **not** in any way reduce SQL calls or
cache the **return results** from the database. A technique that
demonstrates the caching of the SQL calls and result sets themselves is
available in :ref:`examples_caching`.

.. versionadded:: 1.0.0

.. note::

   The :mod:`sqlalchemy.ext.baked` extension should be considered
   **experimental** as of 1.0.0. It provides a dramatically different
   system of producing queries which has yet to be proven at scale.
Synopsis
--------

Usage of the baked system starts by producing a so-called "bakery", which
represents storage for a particular series of query objects::

    from sqlalchemy.ext import baked

    bakery = baked.bakery()

The above "bakery" will store cached data in an LRU cache that defaults to
200 elements, noting that an ORM query will typically contain one entry for
the ORM query as invoked, as well as one entry per database dialect for the
SQL string.

The bakery allows us to build up a :class:`~.query.Query` object by
specifying its construction as a series of Python callables, which are
typically lambdas. For succinct usage, it overrides the ``+=`` operator so
that a typical query build-up looks like the following::

    from sqlalchemy import bindparam

    def search_for_user(session, username, email=None):

        baked_query = bakery(lambda session: session.query(User))
        baked_query += lambda q: q.filter(User.name == bindparam('username'))

        baked_query += lambda q: q.order_by(User.id)

        if email:
            baked_query += lambda q: q.filter(User.email == bindparam('email'))

        result = baked_query(session).params(username=username, email=email).all()

        return result

Following are some observations about the above code:

1. The ``baked_query`` object is an instance of :class:`.BakedQuery`. This
   object is essentially the "builder" for a real ORM :class:`~.query.Query`
   object, but it is not itself the *actual* :class:`~.query.Query` object.

2. The actual :class:`~.query.Query` object is not built at all, until the
   very end of the function when :meth:`.Result.all` is called.

3. The steps that are added to the ``baked_query`` object are all expressed
   as Python functions, typically lambdas. The first lambda given to the
   :func:`.bakery` function receives a :class:`.Session` as its argument.
   The remaining lambdas each receive a :class:`~.query.Query` as their
   argument.

4. In the above code, even though our application may call upon
   ``search_for_user()`` many times, and even though within each invocation
   we build up an entirely new :class:`.BakedQuery` object, *all of the
   lambdas are only called once*. Each lambda is **never** called a second
   time for as long as this query is cached in the bakery.

5. The caching is achieved by storing references to the **lambda objects
   themselves** in order to formulate a cache key; that is, the fact that
   the Python interpreter assigns an in-Python identity to these functions
   is what determines how to identify the query on successive runs. For
   those invocations of ``search_for_user()`` where the ``email`` parameter
   is specified, the callable
   ``lambda q: q.filter(User.email == bindparam('email'))`` will be part of
   the cache key that's retrieved; when ``email`` is ``None``, this callable
   is not part of the cache key.

6. Because the lambdas are all called only once, it is essential that no
   variables which may change across calls are referenced **within** the
   lambdas; instead, assuming these are values to be bound into the SQL
   string, we use :func:`.bindparam` to construct named parameters, where we
   apply their actual values later using :meth:`.Result.params`.

Performance
-----------

The baked query probably looks a little odd, a little bit awkward and a
little bit verbose. However, the savings in Python performance for a query
which is invoked lots of times in an application are very dramatic.
The example suite ``short_selects`` demonstrated in
:ref:`examples_performance` illustrates a comparison of queries which each
return only one row, such as the following regular query::

    session = Session(bind=engine)
    for id_ in random.sample(ids, n):
        session.query(Customer).filter(Customer.id == id_).one()

compared to the equivalent "baked" query::

    bakery = baked.bakery()
    s = Session(bind=engine)
    for id_ in random.sample(ids, n):
        q = bakery(lambda s: s.query(Customer))
        q += lambda q: q.filter(Customer.id == bindparam('id'))
        q(s).params(id=id_).one()

The difference in Python function call count for an iteration of 10000
calls to each block is::

    test_baked_query : test a baked query of the full entity.
                       (10000 iterations); total fn calls 1951294

    test_orm_query :   test a straight ORM query of the full entity.
                       (10000 iterations); total fn calls 7900535

In terms of number of seconds on a powerful laptop, this comes out as::

    test_baked_query : test a baked query of the full entity.
                       (10000 iterations); total time 2.174126 sec

    test_orm_query :   test a straight ORM query of the full entity.
                       (10000 iterations); total time 7.958516 sec

Note that this test very intentionally features queries that only return
one row. For queries that return many rows, the performance advantage of
the baked query will have less and less of an impact, proportional to the
time spent fetching rows. It is critical to keep in mind that the **baked
query feature only applies to building the query itself, not the fetching
of results**. Using the baked feature is by no means a guarantee to a much
faster application; it is only a potentially useful feature for those
applications that have been measured as being impacted by this particular
form of overhead.

.. topic:: Measure twice, cut once

   For background on how to profile a SQLAlchemy application, please see
   the section :ref:`faq_performance`. It is essential that performance
   measurement techniques are used when attempting to improve the
   performance of an application.

Rationale
---------

The "lambda" approach above is a superset of what would be a more
traditional "parameterized" approach. Suppose we wished to build a simple
system where we build a :class:`~.query.Query` just once, then store it in
a dictionary for re-use. This is possible right now by just building up the
query, and removing its :class:`.Session` by calling
``my_cached_query = query.with_session(None)``::

    my_simple_cache = {}

    def lookup(session, id_argument):
        if "my_key" not in my_simple_cache:
            query = session.query(Model).filter(Model.id == bindparam('id'))
            my_simple_cache["my_key"] = query.with_session(None)
        else:
            query = my_simple_cache["my_key"].with_session(session)

        return query.params(id=id_argument).all()

The above approach gets us a very minimal performance benefit. By re-using
a :class:`~.query.Query`, we save on the Python work within the
``session.query(Model)`` constructor as well as calling upon
``filter(Model.id == bindparam('id'))``, which will skip for us the
building up of the Core expression as well as sending it to
:meth:`.Query.filter`. However, the approach still regenerates the full
:class:`.Select` object every time when :meth:`.Query.all` is called and
additionally this brand new :class:`.Select` is sent off to the string
compilation step every time, which for a simple case like the above is
probably about 70% of the overhead.

To reduce the additional overhead, we need some more specialized logic,
some way to memoize the construction of the select object and the
construction of the SQL.
There is an example of this on the wiki in the section `BakedQuery `_, a
precursor to this feature, however in that system, we aren't caching the
*construction* of the query. In order to remove all the overhead, we need
to cache both the construction of the query as well as the SQL compilation.

Let's assume we adapted the recipe in this way and made ourselves a method
``.bake()`` that pre-compiles the SQL for the query, producing a new object
that can be invoked with minimal overhead. Our example becomes::

    my_simple_cache = {}

    def lookup(session, id_argument):

        if "my_key" not in my_simple_cache:
            query = session.query(Model).filter(Model.id == bindparam('id'))
            my_simple_cache["my_key"] = query.with_session(None).bake()
        else:
            query = my_simple_cache["my_key"].with_session(session)

        return query.params(id=id_argument).all()

Above, we've fixed the performance situation, but we still have this string
cache key to deal with.

We can use the "bakery" approach to re-frame the above in a way that looks
less unusual than the "building up lambdas" approach, and more like a
simple improvement upon the simple "reuse a query" approach::

    bakery = baked.bakery()

    def lookup(session, id_argument):
        def create_model_query(session):
            return session.query(Model).filter(Model.id == bindparam('id'))

        parameterized_query = bakery.bake(create_model_query)

        return parameterized_query(session).params(id=id_argument).all()

Above, we use the "baked" system in a manner that is very similar to the
simplistic "cache a query" system. However, it uses two fewer lines of
code, does not need to manufacture a cache key of "my_key", and also
includes the same feature as our custom "bake" function that caches 100% of
the Python invocation work from the constructor of the query, to the filter
call, to the production of the :class:`.Select` object, to the string
compilation step.

From the above, if we ask ourselves, "what if lookup needs to make
conditional decisions as to the structure of the query?", this is where
hopefully it becomes apparent why "baked" is the way it is. Instead of a
parameterized query building off from exactly one function (which is how we
thought baked might work originally), we can build it from *any number* of
functions. Consider our naive example, if we needed to have an additional
clause in our query on a conditional basis::

    my_simple_cache = {}

    def lookup(session, id_argument, include_frobnizzle=False):
        if include_frobnizzle:
            cache_key = "my_key_with_frobnizzle"
        else:
            cache_key = "my_key_without_frobnizzle"

        if cache_key not in my_simple_cache:
            query = session.query(Model).filter(Model.id == bindparam('id'))
            if include_frobnizzle:
                query = query.filter(Model.frobnizzle == True)

            my_simple_cache[cache_key] = query.with_session(None).bake()
        else:
            query = my_simple_cache[cache_key].with_session(session)

        return query.params(id=id_argument).all()

Our "simple" parameterized system must now be tasked with generating cache
keys which take into account whether or not the "include_frobnizzle" flag
was passed, as the presence of this flag means that the generated SQL would
be entirely different. It should be apparent that as the complexity of
query building goes up, the task of caching these queries becomes
burdensome very quickly.
We can convert the above example into a direct use of "bakery" as follows::

    bakery = baked.bakery()

    def lookup(session, id_argument, include_frobnizzle=False):
        def create_model_query(session):
            return session.query(Model).filter(Model.id == bindparam('id'))

        parameterized_query = bakery.bake(create_model_query)

        if include_frobnizzle:
            def include_frobnizzle_in_query(query):
                return query.filter(Model.frobnizzle == True)

            parameterized_query = parameterized_query.with_criteria(
                include_frobnizzle_in_query)

        return parameterized_query(session).params(id=id_argument).all()

Above, we again cache not just the query object but all the work it needs
to do in order to generate SQL. We also no longer need to deal with making
sure we generate a cache key that accurately takes into account all of the
structural modifications we've made; this is now handled automatically and
without the chance of mistakes.

This code sample is a few lines shorter than the naive example, removes the
need to deal with cache keys, and has the vast performance benefits of the
full so-called "baked" feature. But still a little verbose! Hence we take
methods like :meth:`.BakedQuery.add_criteria` and
:meth:`.BakedQuery.with_criteria` and shorten them into operators, and
encourage (though certainly not require!) using simple lambdas, only as a
means to reduce verbosity::

    bakery = baked.bakery()

    def lookup(session, id_argument, include_frobnizzle=False):
        parameterized_query = bakery.bake(
            lambda s: s.query(Model).filter(Model.id == bindparam('id'))
        )

        if include_frobnizzle:
            parameterized_query += lambda q: q.filter(Model.frobnizzle == True)

        return parameterized_query(session).params(id=id_argument).all()

Where above, the approach is simpler to implement and much more similar in
code flow to what a non-cached querying function would look like, hence
making code easier to port.

The above description is essentially a summary of the design process used
to arrive at the current "baked" approach. Starting from the "normal"
approaches, the additional issues of cache key construction and management,
removal of all redundant Python execution, and queries built up with
conditionals needed to be addressed, leading to the final approach.

Lazy Loading Integration
------------------------

The baked query can be integrated with SQLAlchemy's lazy loader feature
transparently. A future release of SQLAlchemy may enable this by default,
as its use within lazy loading is completely transparent. For now, to
enable baked lazyloading for all lazyloaders systemwide, call upon the
:func:`.bake_lazy_loaders` function. This will impact all relationships
that use the ``lazy='select'`` strategy as well as all use of the
:func:`.lazyload` per-query strategy.

"Baked" lazy loading may be enabled on a per-:func:`.relationship` basis
using the ``baked_select`` loader strategy::

    class MyClass(Base):
        # ...

        widgets = relationship("Widget", lazy="baked_select")

The ``baked_select`` strategy is available once any part of the application
has imported the ``sqlalchemy.ext.baked`` module. The "bakery" used by
this feature is local to the mapper for ``MyClass``.

For per-query use, the :func:`.baked_lazyload` strategy may be used,
which works like any other loader option.
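For illustration only, a per-query application of the option might look
like the following sketch, reusing the ``MyClass.widgets`` relationship
from the example above (the ``session`` and query here are hypothetical)::

    from sqlalchemy.ext.baked import baked_lazyload

    # apply baked lazy loading to the "widgets" relationship
    # for this query only
    session.query(MyClass).options(
        baked_lazyload(MyClass.widgets)
    ).all()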
Opting out with the bake_queries flag
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The :func:`.relationship` construct includes a flag
:paramref:`.relationship.bake_queries` which, when set to ``False``, causes
that relationship to opt out of the baked query system when the
application-wide :func:`.bake_lazy_loaders` function has been called to
enable baked query loaders by default.
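As a brief sketch (the ``Widget`` target and the mapping itself are
hypothetical), opting a single relationship out of the baked system might
look like::

    class MyClass(Base):
        # ...

        # this relationship continues to use the non-baked lazy
        # loader even when bake_lazy_loaders() is in effect
        widgets = relationship("Widget", bake_queries=False)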
API Documentation
-----------------

.. autofunction:: bakery

.. autoclass:: BakedQuery
   :members:

.. autoclass:: Result
   :members:

.. autofunction:: bake_lazy_loaders

.. autofunction:: unbake_lazy_loaders

.. autofunction:: baked_lazyload

.. autofunction:: baked_lazyload_all

SQLAlchemy-1.0.11/doc/build/orm/deprecated.rst

:orphan:

.. _dep_interfaces_orm_toplevel:

Deprecated ORM Event Interfaces
================================

.. module:: sqlalchemy.orm.interfaces

This section describes the class-based ORM event interface which first
existed in SQLAlchemy 0.1, which progressed with more kinds of events up
until SQLAlchemy 0.5. The non-ORM analogue is described at
:ref:`dep_interfaces_core_toplevel`.

.. deprecated:: 0.7
   As of SQLAlchemy 0.7, the new event system described in
   :ref:`event_toplevel` replaces the extension/proxy/listener system,
   providing a consistent interface to all events without the need for
   subclassing.

Mapper Events
-----------------

.. autoclass:: MapperExtension
   :members:

Session Events
-----------------

.. autoclass:: SessionExtension
   :members:

Attribute Events
--------------------

.. autoclass:: AttributeExtension
   :members:

SQLAlchemy-1.0.11/doc/build/orm/mapping_columns.rst

.. module:: sqlalchemy.orm

Mapping Table Columns
=====================

The default behavior of :func:`~.orm.mapper` is to assemble all the columns
in the mapped :class:`.Table` into mapped object attributes, each of which
are named according to the name of the column itself (specifically, the
``key`` attribute of :class:`.Column`). This behavior can be modified in
several ways.

.. _mapper_column_distinct_names:

Naming Columns Distinctly from Attribute Names
----------------------------------------------

A mapping by default shares the same name for a :class:`.Column` as that of
the mapped attribute - specifically it matches the :attr:`.Column.key`
attribute on :class:`.Column`, which by default is the same as the
:attr:`.Column.name`.

The name assigned to the Python attribute which maps to :class:`.Column`
can be different from either :attr:`.Column.name` or :attr:`.Column.key`
just by assigning it that way, as we illustrate here in a Declarative
mapping::

    class User(Base):
        __tablename__ = 'user'
        id = Column('user_id', Integer, primary_key=True)
        name = Column('user_name', String(50))

Where above ``User.id`` resolves to a column named ``user_id`` and
``User.name`` resolves to a column named ``user_name``.

When mapping to an existing table, the :class:`.Column` object can be
referenced directly::

    class User(Base):
        __table__ = user_table
        id = user_table.c.user_id
        name = user_table.c.user_name

Or in a classical mapping, placed in the ``properties`` dictionary with the
desired key::

    mapper(User, user_table, properties={
        'id': user_table.c.user_id,
        'name': user_table.c.user_name,
    })

In the next section we'll examine the usage of ``.key`` more closely.

.. _mapper_automated_reflection_schemes:

Automating Column Naming Schemes from Reflected Tables
------------------------------------------------------

In the previous section :ref:`mapper_column_distinct_names`, we showed how
a :class:`.Column` explicitly mapped to a class can have a different
attribute name than the column. But what if we aren't listing out
:class:`.Column` objects explicitly, and instead are automating the
production of :class:`.Table` objects using reflection (e.g. as described
in :ref:`metadata_reflection_toplevel`)? In this case we can make use of
the :meth:`.DDLEvents.column_reflect` event to intercept the production of
:class:`.Column` objects and provide them with the :attr:`.Column.key` of
our choice::

    @event.listens_for(Table, "column_reflect")
    def column_reflect(inspector, table, column_info):
        # set column.key = "attr_<lower case name>"
        column_info['key'] = "attr_%s" % column_info['name'].lower()

With the above event, the reflection of :class:`.Column` objects will be
intercepted with our event that adds a new ".key" element, such as in a
mapping as below::

    class MyClass(Base):
        __table__ = Table("some_table", Base.metadata,
                    autoload=True, autoload_with=some_engine)

If we want to qualify our event to only react for the specific
:class:`.MetaData` object above, we can check for it in our event::

    @event.listens_for(Table, "column_reflect")
    def column_reflect(inspector, table, column_info):
        if table.metadata is Base.metadata:
            # set column.key = "attr_<lower case name>"
            column_info['key'] = "attr_%s" % column_info['name'].lower()

.. _column_prefix:

Naming All Columns with a Prefix
--------------------------------

A quick approach to prefix column names, typically when mapping
to an existing :class:`.Table` object, is to use ``column_prefix``::

    class User(Base):
        __table__ = user_table
        __mapper_args__ = {'column_prefix': '_'}

The above will place attribute names such as ``_user_id``, ``_user_name``,
``_password`` etc. on the mapped ``User`` class.

This approach is uncommon in modern usage. For dealing with reflected
tables, a more flexible approach is to use that described in
:ref:`mapper_automated_reflection_schemes`.

Using column_property for column level options
-----------------------------------------------

Options can be specified when mapping a :class:`.Column` using the
:func:`.column_property` function. This function explicitly creates the
:class:`.ColumnProperty` used by the :func:`.mapper` to keep track of the
:class:`.Column`; normally, the :func:`.mapper` creates this automatically.
Using :func:`.column_property`, we can pass additional arguments about how
we'd like the :class:`.Column` to be mapped. Below, we pass an option
``active_history``, which specifies that a change to this column's value
should result in the former value being loaded first::

    from sqlalchemy.orm import column_property

    class User(Base):
        __tablename__ = 'user'

        id = Column(Integer, primary_key=True)
        name = column_property(Column(String(50)), active_history=True)

:func:`.column_property` is also used to map a single attribute to multiple
columns. This use case arises when mapping to a :func:`~.expression.join`
which has attributes which are equated to each other::

    class User(Base):
        __table__ = user.join(address)

        # assign "user.id", "address.user_id" to the
        # "id" attribute
        id = column_property(user_table.c.id, address_table.c.user_id)

For more examples featuring this usage, see :ref:`maptojoin`.
Another place where :func:`.column_property` is needed is to specify SQL
expressions as mapped attributes, such as below where we create an
attribute ``fullname`` that is the string concatenation of the
``firstname`` and ``lastname`` columns::

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        firstname = Column(String(50))
        lastname = Column(String(50))
        fullname = column_property(firstname + " " + lastname)

See examples of this usage at :ref:`mapper_sql_expressions`.

.. autofunction:: column_property

.. _include_exclude_cols:

Mapping a Subset of Table Columns
---------------------------------

Sometimes, a :class:`.Table` object was made available using the reflection
process described at :ref:`metadata_reflection` to load the table's
structure from the database. For such a table that has lots of columns that
don't need to be referenced in the application, the ``include_properties``
or ``exclude_properties`` arguments can specify that only a subset of
columns should be mapped. For example::

    class User(Base):
        __table__ = user_table
        __mapper_args__ = {
            'include_properties': ['user_id', 'user_name']
        }

...will map the ``User`` class to the ``user_table`` table, only including
the ``user_id`` and ``user_name`` columns - the rest are not referenced.
Similarly::

    class Address(Base):
        __table__ = address_table
        __mapper_args__ = {
            'exclude_properties': ['street', 'city', 'state', 'zip']
        }

...will map the ``Address`` class to the ``address_table`` table, including
all columns present except ``street``, ``city``, ``state``, and ``zip``.

When this mapping is used, the columns that are not included will not be
referenced in any SELECT statements emitted by :class:`.Query`, nor will
there be any mapped attribute on the mapped class which represents the
column; assigning an attribute of that name will have no effect beyond that
of a normal Python attribute assignment.

In some cases, multiple columns may have the same name, such as when
mapping to a join of two or more tables that share some column name.
``include_properties`` and ``exclude_properties`` can also accommodate
:class:`.Column` objects to more accurately describe which columns should
be included or excluded::

    class UserAddress(Base):
        __table__ = user_table.join(addresses_table)
        __mapper_args__ = {
            'exclude_properties': [address_table.c.id],
            'primary_key': [user_table.c.id]
        }

.. note::

   insert and update defaults configured on individual :class:`.Column`
   objects, i.e. those described at :ref:`metadata_defaults` including
   those configured by the ``default``, ``update``, ``server_default`` and
   ``server_onupdate`` arguments, will continue to function normally even
   if those :class:`.Column` objects are not mapped. This is because in the
   case of ``default`` and ``update``, the :class:`.Column` object is still
   present on the underlying :class:`.Table`, thus allowing the default
   functions to take place when the ORM emits an INSERT or UPDATE, and in
   the case of ``server_default`` and ``server_onupdate``, the relational
   database itself maintains these functions.

SQLAlchemy-1.0.11/doc/build/orm/session_events.rst

.. _session_events_toplevel:

Tracking Object and Session Changes with Events
===============================================

SQLAlchemy features an extensive :ref:`Event Listening <event_toplevel>`
system used throughout the Core and ORM.
Within the ORM, there are a wide variety of event listener hooks, which are
documented at an API level at :ref:`orm_event_toplevel`. This collection of
events has grown over the years to include lots of very useful new events
as well as some older events that aren't as relevant as they once were.
This section will attempt to introduce the major event hooks and when they
might be used.

.. _session_persistence_events:

Persistence Events
------------------

Probably the most widely used series of events are the "persistence"
events, which correspond to the :ref:`flush process <session_flushing>`.
The flush is where all the decisions are made about pending changes to
objects and are then emitted out to the database in the form of INSERT,
UPDATE, and DELETE statements.

``before_flush()``
^^^^^^^^^^^^^^^^^^

The :meth:`.SessionEvents.before_flush` hook is by far the most generally
useful event to use when an application wants to ensure that additional
persistence changes to the database are made when a flush proceeds.

Use :meth:`.SessionEvents.before_flush` in order to operate upon objects to
validate their state as well as to compose additional objects and
references before they are persisted. Within this event, it is **safe to
manipulate the Session's state**, that is, new objects can be attached to
it, objects can be deleted, and individual attributes on objects can be
changed freely, and these changes will be pulled into the flush process
when the event hook completes.

The typical :meth:`.SessionEvents.before_flush` hook will be tasked with
scanning the collections :attr:`.Session.new`, :attr:`.Session.dirty` and
:attr:`.Session.deleted` in order to look for objects where something will
be happening.

For illustrations of :meth:`.SessionEvents.before_flush`, see examples such
as :ref:`examples_versioned_history` and :ref:`examples_versioned_rows`.
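As a minimal sketch of this pattern (the ``Order`` class and its
``validate()`` method are hypothetical, not part of SQLAlchemy), a hook
that scans :attr:`.Session.new` might look like::

    from sqlalchemy import event
    from sqlalchemy.orm import Session

    @event.listens_for(Session, "before_flush")
    def before_flush(session, flush_context, instances):
        # new objects may still be attached to the session here;
        # any changes made will be included in the ongoing flush
        for obj in session.new:
            if isinstance(obj, Order):
                obj.validate()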
``after_flush()``
^^^^^^^^^^^^^^^^^

The :meth:`.SessionEvents.after_flush` hook is called after the SQL has
been emitted for a flush process, but **before** the state of the objects
that were flushed has been altered. That is, you can still inspect the
:attr:`.Session.new`, :attr:`.Session.dirty` and :attr:`.Session.deleted`
collections to see what was just flushed, and you can also use history
tracking features like the ones provided by :class:`.AttributeState` to see
what changes were just persisted. In the :meth:`.SessionEvents.after_flush`
event, additional SQL can be emitted to the database based on what's
observed to have changed.

``after_flush_postexec()``
^^^^^^^^^^^^^^^^^^^^^^^^^^

:meth:`.SessionEvents.after_flush_postexec` is called soon after
:meth:`.SessionEvents.after_flush`, but is invoked **after** the state of
the objects has been modified to account for the flush that just took
place. The :attr:`.Session.new`, :attr:`.Session.dirty` and
:attr:`.Session.deleted` collections are normally completely empty here.
Use :meth:`.SessionEvents.after_flush_postexec` to inspect the identity map
for finalized objects and possibly emit additional SQL.

In this hook, there is the ability to make new changes on objects, which
means the :class:`.Session` will again go into a "dirty" state; the
mechanics of the :class:`.Session` here will cause it to flush **again** if
new changes are detected in this hook if the flush were invoked in the
context of :meth:`.Session.commit`; otherwise, the pending changes will be
bundled as part of the next normal flush. When the hook detects new changes
within a :meth:`.Session.commit`, a counter ensures that an endless loop in
this regard is stopped after 100 iterations, in the case that an
:meth:`.SessionEvents.after_flush_postexec` hook continually adds new state
to be flushed each time it is called.

.. _session_persistence_mapper:

Mapper-level Events
^^^^^^^^^^^^^^^^^^^

In addition to the flush-level hooks, there is also a suite of hooks that
are more fine-grained, in that they are called on a per-object basis and
are broken out based on INSERT, UPDATE or DELETE. These are the mapper
persistence hooks, and they too are very popular, however these events need
to be approached more cautiously, as they proceed within the context of the
flush process that is already ongoing; many operations are not safe to
proceed here.

The events are:

* :meth:`.MapperEvents.before_insert`

* :meth:`.MapperEvents.after_insert`

* :meth:`.MapperEvents.before_update`

* :meth:`.MapperEvents.after_update`

* :meth:`.MapperEvents.before_delete`

* :meth:`.MapperEvents.after_delete`

Each event is passed the :class:`.Mapper`, the mapped object itself, and
the :class:`.Connection` which is being used to emit an INSERT, UPDATE or
DELETE statement. The appeal of these events is clear, in that if an
application wants to tie some activity to when a specific type of object is
persisted with an INSERT, the hook is very specific; unlike the
:meth:`.SessionEvents.before_flush` event, there's no need to search
through collections like :attr:`.Session.new` in order to find targets.
However, the flush plan which represents the full list of every single
INSERT, UPDATE, DELETE statement to be emitted has *already been decided*
when these events are called, and no changes may be made at this stage.
Therefore the only changes that are even possible to the given objects are
upon attributes **local** to the object's row. Any other change to the
object or other objects will impact the state of the :class:`.Session`,
which will fail to function properly.

Operations that are not supported within these mapper-level persistence
events include:

* :meth:`.Session.add`

* :meth:`.Session.delete`

* Mapped collection append, add, remove, delete, discard, etc.

* Mapped relationship attribute set/del events,
  i.e. ``someobject.related = someotherobject``

The reason the :class:`.Connection` is passed is that it is encouraged that
**simple SQL operations take place here**, directly on the
:class:`.Connection`, such as incrementing counters or inserting extra rows
within log tables. When dealing with the :class:`.Connection`, it is
expected that Core-level SQL operations will be used; e.g. those described
in :ref:`sqlexpression_toplevel`.

There are also many per-object operations that don't need to be handled
within a flush event at all. The most common alternative is to simply
establish additional state along with an object inside its ``__init__()``
method, such as creating additional objects that are to be associated with
the new object. Using validators as described in :ref:`simple_validators`
is another approach; these functions can intercept changes to attributes
and establish additional state changes on the target object in response to
the attribute change. With both of these approaches, the object is in the
correct state before it ever gets to the flush step.
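As a hedged illustration of the "simple SQL operations" idea above (the
``User`` class and ``audit_table``, a plain Core :class:`.Table`, are
hypothetical names assumed to be defined elsewhere), a mapper-level hook
might emit a Core INSERT directly on the given :class:`.Connection`::

    from sqlalchemy import event

    @event.listens_for(User, "after_insert")
    def receive_after_insert(mapper, connection, target):
        # use the same connection the flush is using; no
        # Session-level operations are performed here
        connection.execute(
            audit_table.insert().values(
                user_name=target.name,
                action="created"
            )
        )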
.. _session_lifecycle_events:

Object Lifecycle Events
-----------------------

Another use case for events is to track the lifecycle of objects. This
refers to the states first introduced at :ref:`session_object_states`.

As of SQLAlchemy 1.0, there is no direct event interface for tracking of
these states. Events that can be used at the moment to track the state of
objects include:

* :meth:`.InstanceEvents.init`

* :meth:`.InstanceEvents.load`

* :meth:`.SessionEvents.before_attach`

* :meth:`.SessionEvents.after_attach`

* :meth:`.SessionEvents.before_flush` - by scanning the session's collections

* :meth:`.SessionEvents.after_flush` - by scanning the session's collections

SQLAlchemy 1.1 will introduce a comprehensive event system to track the
object persistence states fully and unambiguously.

.. _session_transaction_events:

Transaction Events
------------------

Transaction events allow an application to be notified when transaction
boundaries occur at the :class:`.Session` level as well as when the
:class:`.Session` changes the transactional state on :class:`.Connection`
objects.

* :meth:`.SessionEvents.after_transaction_create`,
  :meth:`.SessionEvents.after_transaction_end` - these events track the
  logical transaction scopes of the :class:`.Session` in a way that is not
  specific to individual database connections. These events are intended
  to help with integration of transaction-tracking systems such as
  ``zope.sqlalchemy``. Use these events when the application needs to align
  some external scope with the transactional scope of the
  :class:`.Session`. These hooks mirror the "nested" transactional behavior
  of the :class:`.Session`, in that they track logical "subtransactions" as
  well as "nested" (e.g. SAVEPOINT) transactions.

* :meth:`.SessionEvents.before_commit`, :meth:`.SessionEvents.after_commit`,
  :meth:`.SessionEvents.after_begin`, :meth:`.SessionEvents.after_rollback`,
  :meth:`.SessionEvents.after_soft_rollback` - These events allow tracking
  of transaction events from the perspective of database connections.
  :meth:`.SessionEvents.after_begin` in particular is a per-connection
  event; a :class:`.Session` that maintains more than one connection will
  emit this event for each connection individually as those connections
  become used within the current transaction. The rollback and commit
  events then refer to when the DBAPI connections themselves have received
  rollback or commit instructions directly.

Attribute Change Events
-----------------------

The attribute change events allow interception of when specific attributes
on an object are modified. These events include
:meth:`.AttributeEvents.set`, :meth:`.AttributeEvents.append`, and
:meth:`.AttributeEvents.remove`. These events are extremely useful,
particularly for per-object validation operations; however, it is often
much more convenient to use a "validator" hook, which uses these hooks
behind the scenes; see :ref:`simple_validators` for background on this.
The attribute events are also behind the mechanics of backreferences. An
example illustrating use of attribute events is in
:ref:`examples_instrumentation`.
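As a small sketch (the ``User.name`` attribute is hypothetical), an
:meth:`.AttributeEvents.set` listener registered with ``retval=True`` can
replace the incoming value before it is applied::

    from sqlalchemy import event

    @event.listens_for(User.name, "set", retval=True)
    def normalize_name(target, value, oldvalue, initiator):
        # with retval=True, the returned value is what will
        # actually be set on the attribute
        return value.strip() if value is not None else value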
SQLAlchemy-1.0.11/doc/build/orm/versioning.rst

.. _mapper_version_counter:

Configuring a Version Counter
=============================

The :class:`.Mapper` supports management of a :term:`version id column`,
which is a single table column that increments or otherwise updates its
value each time an ``UPDATE`` to the mapped table occurs. This value is
checked each time the ORM emits an ``UPDATE`` or ``DELETE`` against the row
to ensure that the value held in memory matches the database value.

.. warning::

   Because the versioning feature relies upon comparison of the **in
   memory** record of an object, the feature only applies to the
   :meth:`.Session.flush` process, where the ORM flushes individual
   in-memory rows to the database. It does **not** take effect when
   performing a multirow UPDATE or DELETE using :meth:`.Query.update` or
   :meth:`.Query.delete` methods, as these methods only emit an UPDATE or
   DELETE statement but otherwise do not have direct access to the contents
   of those rows being affected.

The purpose of this feature is to detect when two concurrent transactions
are modifying the same row at roughly the same time, or alternatively to
provide a guard against the usage of a "stale" row in a system that might
be re-using data from a previous transaction without refreshing (e.g. if
one sets ``expire_on_commit=False`` with a :class:`.Session`, it is
possible to re-use the data from a previous transaction).

.. topic:: Concurrent transaction updates

   When detecting concurrent updates within transactions, it is typically
   the case that the database's transaction isolation level is below the
   level of :term:`repeatable read`; otherwise, the transaction will not be
   exposed to a new row value created by a concurrent update which
   conflicts with the locally updated value. In this case, the SQLAlchemy
   versioning feature will typically not be useful for in-transaction
   conflict detection, though it still can be used for cross-transaction
   staleness detection.

   The database that enforces repeatable reads will typically either have
   locked the target row against a concurrent update, or is employing some
   form of multi version concurrency control such that it will emit an
   error when the transaction is committed. SQLAlchemy's version_id_col is
   an alternative which allows version tracking to occur for specific
   tables within a transaction that otherwise might not have this isolation
   level set.

   .. seealso::

      `Repeatable Read Isolation Level `_ - Postgresql's implementation
      of repeatable read, including a description of the error condition.

Simple Version Counting
-----------------------

The most straightforward way to track versions is to add an integer column
to the mapped table, then establish it as the ``version_id_col`` within the
mapper options::

    class User(Base):
        __tablename__ = 'user'

        id = Column(Integer, primary_key=True)
        version_id = Column(Integer, nullable=False)
        name = Column(String(50), nullable=False)

        __mapper_args__ = {
            "version_id_col": version_id
        }

Above, the ``User`` mapping tracks integer versions using the column
``version_id``. When an object of type ``User`` is first flushed, the
``version_id`` column will be given a value of "1". Then, an UPDATE of the
table later on will always be emitted in a manner similar to the
following::

    UPDATE user SET version_id=:version_id, name=:name
    WHERE user.id = :user_id AND user.version_id = :user_version_id
    {"name": "new name", "version_id": 2, "user_id": 1, "user_version_id": 1}

The above UPDATE statement is updating the row that not only matches
``user.id = 1``, it also is requiring that ``user.version_id = 1``, where
"1" is the last version identifier we've been known to use on this object.
If a transaction elsewhere has modified the row independently, this version
id will no longer match, and the UPDATE statement will report that no rows
matched; this is the condition that SQLAlchemy tests, that exactly one row
matched our UPDATE (or DELETE) statement. If zero rows match, that
indicates our version of the data is stale, and a :exc:`.StaleDataError` is
raised.
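As a sketch of how this condition surfaces to application code (the
``session`` and the stale object it contains are hypothetical), the error
can be trapped around the flush::

    from sqlalchemy.orm.exc import StaleDataError

    try:
        session.commit()
    except StaleDataError:
        # another transaction changed the row since it was loaded;
        # roll back, then retry or report the conflict to the user
        session.rollback()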
If zero rows match, that indicates our version of the data is stale, and a :exc:`.StaleDataError` is raised. .. _custom_version_counter: Custom Version Counters / Types ------------------------------- Other kinds of values or counters can be used for versioning. Common types include dates and GUIDs. When using an alternate type or counter scheme, SQLAlchemy provides a hook for this scheme using the ``version_id_generator`` argument, which accepts a version generation callable. This callable is passed the value of the current known version, and is expected to return the subsequent version. For example, if we wanted to track the versioning of our ``User`` class using a randomly generated GUID, we could do this (note that some backends support a native GUID type, but we illustrate here using a simple string):: import uuid class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) version_uuid = Column(String(32)) name = Column(String(50), nullable=False) __mapper_args__ = { 'version_id_col':version_uuid, 'version_id_generator':lambda version: uuid.uuid4().hex } The persistence engine will call upon ``uuid.uuid4()`` each time a ``User`` object is subject to an INSERT or an UPDATE. In this case, our version generation function can disregard the incoming value of ``version``, as the ``uuid4()`` function generates identifiers without any prerequisite value. If we were using a sequential versioning scheme such as numeric or a special character system, we could make use of the given ``version`` in order to help determine the subsequent value. .. seealso:: :ref:`custom_guid_type` .. _server_side_version_counter: Server Side Version Counters ---------------------------- The ``version_id_generator`` can also be configured to rely upon a value that is generated by the database. In this case, the database would need some means of generating new identifiers when a row is subject to an INSERT as well as with an UPDATE. For the UPDATE case, typically an update trigger is needed, unless the database in question supports some other native version identifier. The Postgresql database in particular supports a system column called `xmin `_ which provides UPDATE versioning. We can make use of the Postgresql ``xmin`` column to version our ``User`` class as follows:: class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(50), nullable=False) xmin = Column("xmin", Integer, system=True) __mapper_args__ = { 'version_id_col': xmin, 'version_id_generator': False } With the above mapping, the ORM will rely upon the ``xmin`` column for automatically providing the new value of the version id counter. .. topic:: creating tables that refer to system columns In the above scenario, as ``xmin`` is a system column provided by Postgresql, we use the ``system=True`` argument to mark it as a system-provided column, omitted from the ``CREATE TABLE`` statement. The ORM typically does not actively fetch the values of database-generated columns when it emits an INSERT or UPDATE, instead leaving these columns as "expired" and to be fetched when they are next accessed, unless the ``eager_defaults`` :func:`.mapper` flag is set. However, when a server side version column is used, the ORM needs to actively fetch the newly generated value. This is so that the version counter is set up *before* any concurrent transaction may update it again.
This fetching is also best done simultaneously within the INSERT or UPDATE statement using :term:`RETURNING`; otherwise, if a SELECT statement is emitted afterwards, there is still a potential race condition where the version counter may change before it can be fetched. When the target database supports RETURNING, an INSERT statement for our ``User`` class will look like this:: INSERT INTO "user" (name) VALUES (%(name)s) RETURNING "user".id, "user".xmin {'name': 'ed'} Where above, the ORM can acquire any newly generated primary key values along with server-generated version identifiers in one statement. When the backend does not support RETURNING, an additional SELECT must be emitted for **every** INSERT and UPDATE, which is much less efficient, and also introduces the possibility of missed version counters:: INSERT INTO "user" (name) VALUES (%(name)s) {'name': 'ed'} SELECT "user".version_id AS user_version_id FROM "user" WHERE "user".id = :param_1 {"param_1": 1} It is *strongly recommended* that server side version counters only be used when absolutely necessary and only on backends that support :term:`RETURNING`, e.g. Postgresql, Oracle, SQL Server (though SQL Server has `major caveats `_ when triggers are used), Firebird. .. versionadded:: 0.9.0 Support for server side version identifier tracking. Programmatic or Conditional Version Counters --------------------------------------------- When ``version_id_generator`` is set to False, we can also programmatically (and conditionally) set the version identifier on our object in the same way we assign any other mapped attribute. For example, if we use our UUID example but set ``version_id_generator`` to ``False``, we can set the version identifier as we choose:: import uuid class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) version_uuid = Column(String(32)) name = Column(String(50), nullable=False) __mapper_args__ = { 'version_id_col':version_uuid, 'version_id_generator': False } u1 = User(name='u1', version_uuid=uuid.uuid4()) session.add(u1) session.commit() u1.name = 'u2' u1.version_uuid = uuid.uuid4() session.commit() We can update our ``User`` object without incrementing the version counter as well; the value of the counter will remain unchanged, and the UPDATE statement will still check against the previous value. This may be useful for schemes where only certain classes of UPDATE are sensitive to concurrency issues:: # will leave version_uuid unchanged u1.name = 'u3' session.commit() .. versionadded:: 0.9.0 Support for programmatic and conditional version identifier tracking. SQLAlchemy-1.0.11/doc/build/orm/mapping_styles.rst0000664000175000017500000001445612636375552023073 0ustar classicclassic00000000000000================= Types of Mappings ================= Modern SQLAlchemy features two distinct styles of mapper configuration. The "Classical" style is SQLAlchemy's original mapping API, whereas "Declarative" is the richer and more succinct system that builds on top of "Classical". Both styles may be used interchangeably, as the end result of each is exactly the same - a user-defined class mapped by the :func:`.mapper` function onto a selectable unit, typically a :class:`.Table`. Declarative Mapping =================== The *Declarative Mapping* is the typical way that mappings are constructed in modern SQLAlchemy.
Making use of the :ref:`declarative_toplevel` system, the components of the user-defined class as well as the :class:`.Table` metadata to which the class is mapped are defined at once:: from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, ForeignKey Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) fullname = Column(String) password = Column(String) Above is a basic single-table mapping with four columns. Additional attributes, such as relationships to other mapped classes, are also declared inline within the class definition:: class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) fullname = Column(String) password = Column(String) addresses = relationship("Address", backref="user", order_by="Address.id") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) user_id = Column(ForeignKey('user.id')) email_address = Column(String) The declarative mapping system is introduced in the :ref:`ormtutorial_toplevel`. For additional details on how this system works, see :ref:`declarative_toplevel`. .. _classical_mapping: Classical Mappings ================== A *Classical Mapping* refers to the configuration of a mapped class using the :func:`.mapper` function, without using the Declarative system. This is SQLAlchemy's original class mapping API, and is still the base mapping system provided by the ORM. In "classical" form, the table metadata is created separately with the :class:`.Table` construct, then associated with the ``User`` class via the :func:`.mapper` function:: from sqlalchemy import Table, MetaData, Column, Integer, String, ForeignKey from sqlalchemy.orm import mapper metadata = MetaData() user = Table('user', metadata, Column('id', Integer, primary_key=True), Column('name', String(50)), Column('fullname', String(50)), Column('password', String(12)) ) class User(object): def __init__(self, name, fullname, password): self.name = name self.fullname = fullname self.password = password mapper(User, user) Information about mapped attributes, such as relationships to other classes, is provided via the ``properties`` dictionary. The example below illustrates a second :class:`.Table` object, mapped to a class called ``Address``, then linked to ``User`` via :func:`.relationship`:: address = Table('address', metadata, Column('id', Integer, primary_key=True), Column('user_id', Integer, ForeignKey('user.id')), Column('email_address', String(50)) ) mapper(User, user, properties={ 'addresses' : relationship(Address, backref='user', order_by=address.c.id) }) mapper(Address, address) When using classical mappings, classes must be provided directly without the benefit of the "string lookup" system provided by Declarative. SQL expressions are typically specified in terms of the :class:`.Table` objects, i.e. ``address.c.id`` above for the ``Address`` relationship, and not ``Address.id``, as ``Address`` may not yet be linked to table metadata, nor can we specify a string here. Some examples in the documentation still use the classical approach, but note that the classical as well as Declarative approaches are **fully interchangeable**. Both systems ultimately create the same configuration, consisting of a :class:`.Table` and a user-defined class, linked together with a :func:`.mapper`.
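One way to observe this equivalence at runtime is with the inspection system (covered more fully in the next section); a short sketch, assuming either of the ``User`` mappings shown above::

    from sqlalchemy import inspect

    # both mapping styles hand back the same kind of Mapper object
    user_mapper = inspect(User)
    print(type(user_mapper))             # <class 'sqlalchemy.orm.mapper.Mapper'>

    # and in both cases the Mapper is linked to the underlying 'user' Table
    print(user_mapper.local_table.name)  # user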
When we talk about "the behavior of :func:`.mapper`", this includes when using the Declarative system as well - it's still used, just behind the scenes. Runtime Introspection of Mappings, Objects ========================================== The :class:`.Mapper` object is available from any mapped class, regardless of the method used to map it, using the :ref:`core_inspection_toplevel` system. Using the :func:`.inspect` function, one can acquire the :class:`.Mapper` from a mapped class:: >>> from sqlalchemy import inspect >>> insp = inspect(User) Detailed information is available, including :attr:`.Mapper.columns`:: >>> insp.columns <sqlalchemy.util._collections.OrderedProperties object at 0x...> This is a namespace that can be viewed in a list format or via individual names:: >>> list(insp.columns) [Column('id', Integer(), table=<user>, primary_key=True, nullable=False), Column('name', String(length=50), table=<user>), Column('fullname', String(length=50), table=<user>), Column('password', String(length=12), table=<user>)] >>> insp.columns.name Column('name', String(length=50), table=<user>) Other namespaces include :attr:`.Mapper.all_orm_descriptors`, which includes all mapped attributes as well as hybrids and association proxies:: >>> insp.all_orm_descriptors <sqlalchemy.util._collections.ImmutableProperties object at 0x...> >>> insp.all_orm_descriptors.keys() ['fullname', 'password', 'name', 'id'] As well as :attr:`.Mapper.column_attrs`:: >>> list(insp.column_attrs) [<ColumnProperty at 0x...; id>, <ColumnProperty at 0x...; name>, <ColumnProperty at 0x...; fullname>, <ColumnProperty at 0x...; password>] >>> insp.column_attrs.name <ColumnProperty at 0x...; name> >>> insp.column_attrs.name.expression Column('name', String(length=50), table=<user>) .. seealso:: :ref:`core_inspection_toplevel` :class:`.Mapper` :class:`.InstanceState` SQLAlchemy-1.0.11/doc/build/orm/events.rst0000664000175000017500000000206512636375552021342 0ustar classicclassic00000000000000.. _orm_event_toplevel: ORM Events ========== The ORM includes a wide variety of hooks available for subscription. For an introduction to the most commonly used ORM events, see the section :ref:`session_events_toplevel`. The event system in general is discussed at :ref:`event_toplevel`. Non-ORM events such as those regarding connections and low-level statement execution are described in :ref:`core_event_toplevel`. Attribute Events ---------------- .. autoclass:: sqlalchemy.orm.events.AttributeEvents :members: Mapper Events --------------- .. autoclass:: sqlalchemy.orm.events.MapperEvents :members: Instance Events --------------- .. autoclass:: sqlalchemy.orm.events.InstanceEvents :members: Session Events -------------- .. autoclass:: sqlalchemy.orm.events.SessionEvents :members: Query Events ------------- .. autoclass:: sqlalchemy.orm.events.QueryEvents :members: Instrumentation Events ----------------------- .. automodule:: sqlalchemy.orm.instrumentation .. autoclass:: sqlalchemy.orm.events.InstrumentationEvents :members: SQLAlchemy-1.0.11/doc/build/orm/collections.rst0000664000175000017500000005341012636375552022354 0ustar classicclassic00000000000000.. _collections_toplevel: .. currentmodule:: sqlalchemy.orm ======================================= Collection Configuration and Techniques ======================================= The :func:`.relationship` function defines a linkage between two classes. When the linkage defines a one-to-many or many-to-many relationship, it's represented as a Python collection when objects are loaded and manipulated. This section presents additional information about collection configuration and techniques. .. _largecollections: ..
currentmodule:: sqlalchemy.orm Working with Large Collections =============================== The default behavior of :func:`.relationship` is to fully load the collection of items in, according to the loading strategy of the relationship. Additionally, the :class:`.Session` by default only knows how to delete objects which are actually present within the session. When a parent instance is marked for deletion and flushed, the :class:`.Session` loads its full list of child items in so that they may either be deleted as well, or have their foreign key value set to null; this is to avoid constraint violations. For large collections of child items, there are several strategies to bypass full loading of child items both at load time and at deletion time. .. _dynamic_relationship: Dynamic Relationship Loaders ----------------------------- A key feature to enable management of a large collection is the so-called "dynamic" relationship. This is an optional form of :func:`~sqlalchemy.orm.relationship` which returns a :class:`~sqlalchemy.orm.query.Query` object in place of a collection when accessed. :func:`~sqlalchemy.orm.query.Query.filter` criterion may be applied as well as limits and offsets, either explicitly or via array slices:: class User(Base): __tablename__ = 'user' posts = relationship(Post, lazy="dynamic") jack = session.query(User).get(id) # filter Jack's blog posts posts = jack.posts.filter(Post.headline=='this is a post') # apply array slices posts = jack.posts[5:20] The dynamic relationship supports limited write operations, via the ``append()`` and ``remove()`` methods:: oldpost = jack.posts.filter(Post.headline=='old post').one() jack.posts.remove(oldpost) jack.posts.append(Post('new post')) Since the read side of the dynamic relationship always queries the database, changes to the underlying collection will not be visible until the data has been flushed. However, as long as "autoflush" is enabled on the :class:`.Session` in use, this will occur automatically each time the collection is about to emit a query. To place a dynamic relationship on a backref, use the :func:`~.orm.backref` function in conjunction with ``lazy='dynamic'``:: class Post(Base): __table__ = posts_table user = relationship(User, backref=backref('posts', lazy='dynamic') ) Note that eager/lazy loading options cannot be used in conjunction with dynamic relationships at this time. .. note:: The :func:`~.orm.dynamic_loader` function is essentially the same as :func:`~.orm.relationship` with the ``lazy='dynamic'`` argument specified. .. warning:: The "dynamic" loader applies to **collections only**. It is not valid to use "dynamic" loaders with many-to-one, one-to-one, or uselist=False relationships. Newer versions of SQLAlchemy emit warnings or exceptions in these cases. Setting Noload --------------- A "noload" relationship never loads from the database, even when accessed. It is configured using ``lazy='noload'``:: class MyClass(Base): __tablename__ = 'some_table' children = relationship(MyOtherClass, lazy='noload') Above, the ``children`` collection is fully writeable, and changes to it will be persisted to the database as well as locally available for reading at the time they are added. However when instances of ``MyClass`` are freshly loaded from the database, the ``children`` collection stays empty.
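As a short sketch of this behavior, assuming the ``MyClass`` mapping above along with a suitable ``MyOtherClass`` mapping and a working ``session``, the collection is writable but comes back empty on a fresh load::

    mc = MyClass()
    mc.children.append(MyOtherClass())   # writable; pending changes flush normally
    session.add(mc)
    session.commit()

    session.expunge_all()                # discard all loaded state
    reloaded = session.query(MyClass).first()
    print(reloaded.children)             # [] - "noload" never emits a lazy load

..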
_passive_deletes: Using Passive Deletes ---------------------- Use :paramref:`~.relationship.passive_deletes` to disable child object loading on a DELETE operation, in conjunction with "ON DELETE (CASCADE|SET NULL)" on your database to automatically cascade deletes to child objects:: class MyClass(Base): __tablename__ = 'mytable' id = Column(Integer, primary_key=True) children = relationship("MyOtherClass", cascade="all, delete-orphan", passive_deletes=True) class MyOtherClass(Base): __tablename__ = 'myothertable' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('mytable.id', ondelete='CASCADE') ) .. note:: To use "ON DELETE CASCADE", the underlying database engine must support foreign keys. * When using MySQL, an appropriate storage engine must be selected. See :ref:`mysql_storage_engines` for details. * When using SQLite, foreign key support must be enabled explicitly. See :ref:`sqlite_foreign_keys` for details. When :paramref:`~.relationship.passive_deletes` is applied, the ``children`` relationship will not be loaded into memory when an instance of ``MyClass`` is marked for deletion. The ``cascade="all, delete-orphan"`` *will* take effect for instances of ``MyOtherClass`` which are currently present in the session; however for instances of ``MyOtherClass`` which are not loaded, SQLAlchemy assumes that "ON DELETE CASCADE" rules will ensure that those rows are deleted by the database. .. currentmodule:: sqlalchemy.orm.collections .. _custom_collections: Customizing Collection Access ============================= Mapping a one-to-many or many-to-many relationship results in a collection of values accessible through an attribute on the parent instance. By default, this collection is a ``list``:: class Parent(Base): __tablename__ = 'parent' parent_id = Column(Integer, primary_key=True) children = relationship(Child) parent = Parent() parent.children.append(Child()) print parent.children[0] Collections are not limited to lists. Sets, mutable sequences and almost any other Python object that can act as a container can be used in place of the default list, by specifying the :paramref:`~.relationship.collection_class` option on :func:`~sqlalchemy.orm.relationship`:: class Parent(Base): __tablename__ = 'parent' parent_id = Column(Integer, primary_key=True) # use a set children = relationship(Child, collection_class=set) parent = Parent() child = Child() parent.children.add(child) assert child in parent.children Dictionary Collections ----------------------- A little extra detail is needed when using a dictionary as a collection. This is because objects are always loaded from the database as lists, and a key-generation strategy must be available to populate the dictionary correctly. The :func:`.attribute_mapped_collection` function is by far the most common way to achieve a simple dictionary collection. It produces a dictionary class that will apply a particular attribute of the mapped class as a key.
Below we map an ``Item`` class containing a dictionary of ``Note`` items keyed to the ``Note.keyword`` attribute:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship from sqlalchemy.orm.collections import attribute_mapped_collection from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True) notes = relationship("Note", collection_class=attribute_mapped_collection('keyword'), cascade="all, delete-orphan") class Note(Base): __tablename__ = 'note' id = Column(Integer, primary_key=True) item_id = Column(Integer, ForeignKey('item.id'), nullable=False) keyword = Column(String) text = Column(String) def __init__(self, keyword, text): self.keyword = keyword self.text = text ``Item.notes`` is then a dictionary:: >>> item = Item() >>> item.notes['a'] = Note('a', 'atext') >>> item.notes {'a': <__main__.Note object at 0x2eaaf0>} :func:`.attribute_mapped_collection` will ensure that the ``.keyword`` attribute of each ``Note`` complies with the key in the dictionary. For example, when assigning to ``Item.notes``, the dictionary key we supply must match that of the actual ``Note`` object:: item = Item() item.notes = { 'a': Note('a', 'atext'), 'b': Note('b', 'btext') } The attribute which :func:`.attribute_mapped_collection` uses as a key does not need to be mapped at all! Using a regular Python ``@property`` allows virtually any detail or combination of details about the object to be used as the key, as below when we establish it as a tuple of ``Note.keyword`` and the first ten letters of the ``Note.text`` field:: class Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True) notes = relationship("Note", collection_class=attribute_mapped_collection('note_key'), backref="item", cascade="all, delete-orphan") class Note(Base): __tablename__ = 'note' id = Column(Integer, primary_key=True) item_id = Column(Integer, ForeignKey('item.id'), nullable=False) keyword = Column(String) text = Column(String) @property def note_key(self): return (self.keyword, self.text[0:10]) def __init__(self, keyword, text): self.keyword = keyword self.text = text Above we added a ``Note.item`` backref. When we assign to this reverse relationship, the ``Note`` is added to the ``Item.notes`` dictionary and the key is generated for us automatically:: >>> item = Item() >>> n1 = Note("a", "atext") >>> n1.item = item >>> item.notes {('a', 'atext'): <__main__.Note object at 0x2eaaf0>} Other built-in dictionary types include :func:`.column_mapped_collection`, which is almost like :func:`.attribute_mapped_collection` except it is given the :class:`.Column` object directly:: from sqlalchemy.orm.collections import column_mapped_collection class Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True) notes = relationship("Note", collection_class=column_mapped_collection(Note.__table__.c.keyword), cascade="all, delete-orphan") as well as :func:`.mapped_collection`, which is passed any callable function.
Note that it's usually easier to use :func:`.attribute_mapped_collection` along with a ``@property`` as mentioned earlier:: from sqlalchemy.orm.collections import mapped_collection class Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True) notes = relationship("Note", collection_class=mapped_collection(lambda note: note.text[0:10]), cascade="all, delete-orphan") Dictionary mappings are often combined with the "Association Proxy" extension to produce streamlined dictionary views. See :ref:`proxying_dictionaries` and :ref:`composite_association_proxy` for examples. .. autofunction:: attribute_mapped_collection .. autofunction:: column_mapped_collection .. autofunction:: mapped_collection Custom Collection Implementations ================================== You can use your own types for collections as well. In simple cases, inheriting from ``list`` or ``set`` and adding custom behavior is all that's needed. In other cases, special decorators are needed to tell SQLAlchemy more detail about how the collection operates. .. topic:: Do I need a custom collection implementation? In most cases not at all! The most common use case for a "custom" collection is one that validates or marshals incoming values into a new form, such as a string that becomes a class instance, or one which goes a step beyond and represents the data internally in some fashion, presenting a "view" of that data on the outside in a different form. For the first use case, the :func:`.orm.validates` decorator is by far the simplest way to intercept incoming values in all cases for the purposes of validation and simple marshaling. See :ref:`simple_validators` for an example of this. For the second use case, the :ref:`associationproxy_toplevel` extension is a well-tested, widely used system that provides a read/write "view" of a collection in terms of some attribute present on the target object. As the target attribute can be a ``@property`` that returns virtually anything, a wide array of "alternative" views of a collection can be constructed with just a few functions. This approach leaves the underlying mapped collection unaffected and avoids the need to carefully tailor collection behavior on a method-by-method basis. Customized collections are useful when the collection needs to have special behaviors upon access or mutation operations that can't otherwise be modeled externally to the collection. They can of course be combined with the above two approaches. Collections in SQLAlchemy are transparently *instrumented*. Instrumentation means that normal operations on the collection are tracked and result in changes being written to the database at flush time. Additionally, collection operations can fire *events* which indicate some secondary operation must take place. Examples of a secondary operation include saving the child item in the parent's :class:`~sqlalchemy.orm.session.Session` (i.e. the ``save-update`` cascade), as well as synchronizing the state of a bi-directional relationship (i.e. a :func:`.backref`). The collections package understands the basic interface of lists, sets and dicts and will automatically apply instrumentation to those built-in types and their subclasses. Object-derived types that implement a basic collection interface are detected and instrumented via duck-typing: ..
sourcecode:: python+sql class ListLike(object): def __init__(self): self.data = [] def append(self, item): self.data.append(item) def remove(self, item): self.data.remove(item) def extend(self, items): self.data.extend(items) def __iter__(self): return iter(self.data) def foo(self): return 'foo' ``append``, ``remove``, and ``extend`` are known list-like methods, and will be instrumented automatically. ``__iter__`` is not a mutator method and won't be instrumented, and ``foo`` won't be either. Duck-typing (i.e. guesswork) isn't rock-solid, of course, so you can be explicit about the interface you are implementing by providing an ``__emulates__`` class attribute:: class SetLike(object): __emulates__ = set def __init__(self): self.data = set() def append(self, item): self.data.add(item) def remove(self, item): self.data.remove(item) def __iter__(self): return iter(self.data) This class looks list-like because of ``append``, but ``__emulates__`` forces it to be set-like. ``remove`` is known to be part of the set interface and will be instrumented. But this class won't work quite yet: a little glue is needed to adapt it for use by SQLAlchemy. The ORM needs to know which methods to use to append, remove and iterate over members of the collection. When using a type like ``list`` or ``set``, the appropriate methods are well-known and used automatically when present. This set-like class does not provide the expected ``add`` method, so we must supply an explicit mapping for the ORM via a decorator. Annotating Custom Collections via Decorators -------------------------------------------- Decorators can be used to tag the individual methods the ORM needs to manage collections. Use them when your class doesn't quite meet the regular interface for its container type, or when you otherwise would like to use a different method to get the job done. .. sourcecode:: python+sql from sqlalchemy.orm.collections import collection class SetLike(object): __emulates__ = set def __init__(self): self.data = set() @collection.appender def append(self, item): self.data.add(item) def remove(self, item): self.data.remove(item) def __iter__(self): return iter(self.data) And that's all that's needed to complete the example. SQLAlchemy will add instances via the ``append`` method. ``remove`` and ``__iter__`` are the default methods for sets and will be used for removing and iteration. Default methods can be changed as well: .. sourcecode:: python+sql from sqlalchemy.orm.collections import collection class MyList(list): @collection.remover def zark(self, item): # do something special... @collection.iterator def hey_use_this_instead_for_iteration(self): # ... There is no requirement to be list- or set-like at all. Collection classes can be any shape, so long as they have the append, remove and iterate interface marked for SQLAlchemy's use. Append and remove methods will be called with a mapped entity as the single argument, and iterator methods are called with no arguments and must return an iterator. .. autoclass:: collection :members: .. _dictionary_collections: Custom Dictionary-Based Collections ----------------------------------- The :class:`.MappedCollection` class can be used as a base class for your custom types or as a mix-in to quickly add ``dict`` collection support to other classes. It uses a keying function to delegate to ``__setitem__`` and ``__delitem__``: ..
sourcecode:: python+sql from sqlalchemy.util import OrderedDict from sqlalchemy.orm.collections import MappedCollection class NodeMap(OrderedDict, MappedCollection): """Holds 'Node' objects, keyed by the 'name' attribute with insert order maintained.""" def __init__(self, *args, **kw): MappedCollection.__init__(self, keyfunc=lambda node: node.name) OrderedDict.__init__(self, *args, **kw) When subclassing :class:`.MappedCollection`, user-defined versions of ``__setitem__()`` or ``__delitem__()`` should be decorated with :meth:`.collection.internally_instrumented`, **if** they call down to those same methods on :class:`.MappedCollection`. This is because the methods on :class:`.MappedCollection` are already instrumented - calling them from within an already instrumented call can cause events to be fired off repeatedly, or inappropriately, leading to internal state corruption in rare cases:: from sqlalchemy.orm.collections import MappedCollection,\ collection class MyMappedCollection(MappedCollection): """Use @internally_instrumented when your methods call down to already-instrumented methods. """ @collection.internally_instrumented def __setitem__(self, key, value, _sa_initiator=None): # do something with key, value super(MyMappedCollection, self).__setitem__(key, value, _sa_initiator) @collection.internally_instrumented def __delitem__(self, key, _sa_initiator=None): # do something with key super(MyMappedCollection, self).__delitem__(key, _sa_initiator) The ORM understands the ``dict`` interface just like lists and sets, and will automatically instrument all dict-like methods if you choose to subclass ``dict`` or provide dict-like collection behavior in a duck-typed class. You must decorate appender and remover methods, however - there are no compatible methods in the basic dictionary interface for SQLAlchemy to use by default. Iteration will go through ``itervalues()`` unless otherwise decorated. .. note:: Due to a bug in MappedCollection prior to version 0.7.6, this workaround usually needs to be called before a custom subclass of :class:`.MappedCollection` which uses :meth:`.collection.internally_instrumented` can be used:: from sqlalchemy.orm.collections import _instrument_class, MappedCollection _instrument_class(MappedCollection) This will ensure that the :class:`.MappedCollection` has been properly initialized with custom ``__setitem__()`` and ``__delitem__()`` methods before being used in a custom subclass. .. autoclass:: sqlalchemy.orm.collections.MappedCollection :members: Instrumentation and Custom Types -------------------------------- Many custom types and existing library classes can be used as an entity collection type as-is without further ado. However, it is important to note that the instrumentation process will modify the type, adding decorators around methods automatically. The decorations are lightweight and no-op outside of relationships, but they do add unneeded overhead when triggered elsewhere. When using a library class as a collection, it can be good practice to use the "trivial subclass" trick to restrict the decorations to just your usage in relationships. For example: .. sourcecode:: python+sql class MyAwesomeList(some.great.library.AwesomeList): pass # ... relationship(..., collection_class=MyAwesomeList) The ORM uses this approach for built-ins, quietly substituting a trivial subclass when a ``list``, ``set`` or ``dict`` is used directly. Collection Internals ===================== Various internal methods. .. autofunction:: bulk_replace .. autoclass:: collection ..
autodata:: collection_adapter .. autoclass:: CollectionAdapter .. autoclass:: InstrumentedDict .. autoclass:: InstrumentedList .. autoclass:: InstrumentedSet .. autofunction:: prepare_instrumentation SQLAlchemy-1.0.11/doc/build/orm/backref.rst0000664000175000017500000002661212636375552021437 0ustar classicclassic00000000000000.. _relationships_backref: Linking Relationships with Backref ---------------------------------- The :paramref:`~.relationship.backref` keyword argument was first introduced in :ref:`ormtutorial_toplevel`, and has been mentioned throughout many of the examples here. What does it actually do ? Let's start with the canonical ``User`` and ``Address`` scenario:: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", backref="user") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email = Column(String) user_id = Column(Integer, ForeignKey('user.id')) The above configuration establishes a collection of ``Address`` objects on ``User`` called ``User.addresses``. It also establishes a ``.user`` attribute on ``Address`` which will refer to the parent ``User`` object. In fact, the :paramref:`~.relationship.backref` keyword is only a common shortcut for placing a second :func:`.relationship` onto the ``Address`` mapping, including the establishment of an event listener on both sides which will mirror attribute operations in both directions. The above configuration is equivalent to:: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", back_populates="user") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email = Column(String) user_id = Column(Integer, ForeignKey('user.id')) user = relationship("User", back_populates="addresses") Above, we add a ``.user`` relationship to ``Address`` explicitly. On both relationships, the :paramref:`~.relationship.back_populates` directive tells each relationship about the other one, indicating that they should establish "bidirectional" behavior between each other. The primary effect of this configuration is that the relationship adds event handlers to both attributes which have the behavior of "when an append or set event occurs here, set ourselves onto the incoming attribute using this particular attribute name". The behavior is illustrated as follows. Start with a ``User`` and an ``Address`` instance. The ``.addresses`` collection is empty, and the ``.user`` attribute is ``None``:: >>> u1 = User() >>> a1 = Address() >>> u1.addresses [] >>> print a1.user None However, once the ``Address`` is appended to the ``u1.addresses`` collection, both the collection and the scalar attribute have been populated:: >>> u1.addresses.append(a1) >>> u1.addresses [<__main__.Address object at 0x12a6ed0>] >>> a1.user <__main__.User object at 0x12a6590> This behavior of course works in reverse for removal operations, as well as for equivalent operations on both sides.
For example, when ``.user`` is set back to ``None``, the ``Address`` object is removed from the reverse collection:: >>> a1.user = None >>> u1.addresses [] The manipulation of the ``.addresses`` collection and the ``.user`` attribute occurs entirely in Python without any interaction with the SQL database. Without this behavior, the proper state would only become apparent on both sides once the data had been flushed to the database and later reloaded after a commit or expiration operation. The :paramref:`~.relationship.backref`/:paramref:`~.relationship.back_populates` behavior has the advantage that common bidirectional operations can reflect the correct state without requiring a database round trip. Remember, when the :paramref:`~.relationship.backref` keyword is used on a single relationship, it's exactly the same as if the above two relationships were created individually using :paramref:`~.relationship.back_populates` on each. Backref Arguments ~~~~~~~~~~~~~~~~~~ We've established that the :paramref:`~.relationship.backref` keyword is merely a shortcut for building two individual :func:`.relationship` constructs that refer to each other. Part of the behavior of this shortcut is that certain configurational arguments applied to the :func:`.relationship` will also be applied to the other direction - namely those arguments that describe the relationship at a schema level, and are unlikely to be different in the reverse direction. The usual case here is a many-to-many :func:`.relationship` that has a :paramref:`~.relationship.secondary` argument, or a one-to-many or many-to-one which has a :paramref:`~.relationship.primaryjoin` argument (the :paramref:`~.relationship.primaryjoin` argument is discussed in :ref:`relationship_primaryjoin`). For example, if we limited the list of ``Address`` objects to those which start with "tony":: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", primaryjoin="and_(User.id==Address.user_id, " "Address.email.startswith('tony'))", backref="user") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email = Column(String) user_id = Column(Integer, ForeignKey('user.id')) We can observe, by inspecting the resulting property, that both sides of the relationship have this join condition applied:: >>> print User.addresses.property.primaryjoin "user".id = address.user_id AND address.email LIKE :email_1 || '%%' >>> >>> print Address.user.property.primaryjoin "user".id = address.user_id AND address.email LIKE :email_1 || '%%' >>> This reuse of arguments should pretty much do the "right thing" - it uses only arguments that are applicable, and in the case of a many-to-many relationship, will reverse the usage of :paramref:`~.relationship.primaryjoin` and :paramref:`~.relationship.secondaryjoin` to correspond to the other direction (see the example in :ref:`self_referential_many_to_many` for this). It's very often the case however that we'd like to specify arguments that are specific to just the side where we happened to place the "backref". This includes :func:`.relationship` arguments like :paramref:`~.relationship.lazy`, :paramref:`~.relationship.remote_side`, :paramref:`~.relationship.cascade` and :paramref:`~.relationship.cascade_backrefs`.
For this case we use the :func:`.backref` function in place of a string:: # from sqlalchemy.orm import backref class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", backref=backref("user", lazy="joined")) Where above, we placed a ``lazy="joined"`` directive only on the ``Address.user`` side, indicating that when a query against ``Address`` is made, a join to the ``User`` entity should be made automatically which will populate the ``.user`` attribute of each returned ``Address``. The :func:`.backref` function formatted the arguments we gave it into a form that is interpreted by the receiving :func:`.relationship` as additional arguments to be applied to the new relationship it creates. One Way Backrefs ~~~~~~~~~~~~~~~~~ An unusual case is that of the "one way backref". This is where the "back-populating" behavior of the backref is only desirable in one direction. An example of this is a collection which contains a filtering :paramref:`~.relationship.primaryjoin` condition. We'd like to append items to this collection as needed, and have them populate the "parent" object on the incoming object. However, we'd also like to have items that are not part of the collection, but still have the same "parent" association - these items should never be in the collection. Taking our previous example, where we established a :paramref:`~.relationship.primaryjoin` that limited the collection only to ``Address`` objects whose email address started with the word ``tony``, the usual backref behavior is that all items populate in both directions. We wouldn't want this behavior for a case like the following:: >>> u1 = User() >>> a1 = Address(email='mary') >>> a1.user = u1 >>> u1.addresses [<__main__.Address object at 0x1411910>] Above, the ``Address`` object that doesn't match the criterion of "starts with 'tony'" is present in the ``addresses`` collection of ``u1``. After these objects are flushed, the transaction committed and their attributes expired for a re-load, the ``addresses`` collection will hit the database on next access and no longer have this ``Address`` object present, due to the filtering condition. 
But we can do away with this unwanted aspect of the "backref" behavior on the Python side by using two separate :func:`.relationship` constructs, placing :paramref:`~.relationship.back_populates` only on one side:: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", primaryjoin="and_(User.id==Address.user_id, " "Address.email.startswith('tony'))", back_populates="user") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email = Column(String) user_id = Column(Integer, ForeignKey('user.id')) user = relationship("User") With the above scenario, appending an ``Address`` object to the ``.addresses`` collection of a ``User`` will always establish the ``.user`` attribute on that ``Address``:: >>> u1 = User() >>> a1 = Address(email='tony') >>> u1.addresses.append(a1) >>> a1.user <__main__.User object at 0x1411850> However, applying a ``User`` to the ``.user`` attribute of an ``Address`` will not append the ``Address`` object to the collection:: >>> a2 = Address(email='mary') >>> a2.user = u1 >>> a2 in u1.addresses False Of course, we've disabled some of the usefulness of :paramref:`~.relationship.backref` here, in that when we do append an ``Address`` that corresponds to the criteria of ``email.startswith('tony')``, it won't show up in the ``User.addresses`` collection until the session is flushed, and the attributes reloaded after a commit or expire operation. While we could consider an attribute event that checks this criterion in Python, this starts to cross the line of duplicating too much SQL behavior in Python. The backref behavior itself is only a slight transgression of this philosophy - SQLAlchemy tries to keep these to a minimum overall. SQLAlchemy-1.0.11/doc/build/orm/exceptions.rst0000664000175000017500000000012012636375552022205 0ustar classicclassic00000000000000ORM Exceptions ============== .. automodule:: sqlalchemy.orm.exc :members: SQLAlchemy-1.0.11/doc/build/orm/contextual.rst0000664000175000017500000003215512636375552022227 0ustar classicclassic00000000000000.. _unitofwork_contextual: Contextual/Thread-local Sessions ================================= Recall from the section :ref:`session_faq_whentocreate` that the concept of "session scopes" was introduced, with an emphasis on web applications and the practice of linking the scope of a :class:`.Session` with that of a web request. Most modern web frameworks include integration tools so that the scope of the :class:`.Session` can be managed automatically, and these tools should be used where they are available. SQLAlchemy includes its own helper object, which helps with the establishment of user-defined :class:`.Session` scopes. It is also used by third-party integration systems to help construct their integration schemes. The object is the :class:`.scoped_session` object, and it represents a **registry** of :class:`.Session` objects. If you're not familiar with the registry pattern, a good introduction can be found in `Patterns of Enterprise Architecture `_. .. note:: The :class:`.scoped_session` object is a very popular and useful object used by many SQLAlchemy applications. However, it is important to note that it presents **only one approach** to the issue of :class:`.Session` management.
If you're new to SQLAlchemy, and especially if the term "thread-local variable" seems strange to you, we recommend that, if possible, you first familiarize yourself with an off-the-shelf integration system such as `Flask-SQLAlchemy `_ or `zope.sqlalchemy `_. A :class:`.scoped_session` is constructed by passing it a **factory** which can create new :class:`.Session` objects. A factory is just something that produces a new object when called, and in the case of :class:`.Session`, the most common factory is the :class:`.sessionmaker`, introduced earlier in this section. Below we illustrate this usage:: >>> from sqlalchemy.orm import scoped_session >>> from sqlalchemy.orm import sessionmaker >>> session_factory = sessionmaker(bind=some_engine) >>> Session = scoped_session(session_factory) The :class:`.scoped_session` object we've created will now call upon the :class:`.sessionmaker` when we "call" the registry:: >>> some_session = Session() Above, ``some_session`` is an instance of :class:`.Session`, which we can now use to talk to the database. This same :class:`.Session` is also present within the :class:`.scoped_session` registry we've created. If we call upon the registry a second time, we get back the **same** :class:`.Session`:: >>> some_other_session = Session() >>> some_session is some_other_session True This pattern allows disparate sections of the application to call upon a global :class:`.scoped_session`, so that all those areas may share the same session without the need to pass it explicitly. The :class:`.Session` we've established in our registry will remain until we explicitly tell our registry to dispose of it, by calling :meth:`.scoped_session.remove`:: >>> Session.remove() The :meth:`.scoped_session.remove` method first calls :meth:`.Session.close` on the current :class:`.Session`, which has the effect of releasing any connection/transactional resources owned by the :class:`.Session` first, then discarding the :class:`.Session` itself. "Releasing" here means that connections are returned to their connection pool and any transactional state is rolled back, ultimately using the ``rollback()`` method of the underlying DBAPI connection. At this point, the :class:`.scoped_session` object is "empty", and will create a **new** :class:`.Session` when called again. As illustrated below, this is not the same :class:`.Session` we had before:: >>> new_session = Session() >>> new_session is some_session False The above series of steps illustrates the idea of the "registry" pattern in a nutshell. With that basic idea in hand, we can discuss some of the details of how this pattern proceeds. Implicit Method Access ---------------------- The job of the :class:`.scoped_session` is simple: hold onto a :class:`.Session` for all who ask for it. As a means of producing more transparent access to this :class:`.Session`, the :class:`.scoped_session` also includes **proxy behavior**, meaning that the registry itself can be treated just like a :class:`.Session` directly; when methods are called on this object, they are **proxied** to the underlying :class:`.Session` being maintained by the registry:: Session = scoped_session(some_factory) # equivalent to: # # session = Session() # print session.query(MyClass).all() # print Session.query(MyClass).all() The above code accomplishes the same task as that of acquiring the current :class:`.Session` by calling upon the registry, then using that :class:`.Session`.
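A short sketch puts the two forms side by side; ``some_engine`` and the mapped class ``MyClass`` are assumed::

    from sqlalchemy.orm import scoped_session, sessionmaker

    Session = scoped_session(sessionmaker(bind=some_engine))

    # registry form: explicitly acquire the thread-local Session
    session = Session()
    session.add(MyClass())
    session.commit()

    # proxy form: method calls pass through to that same Session
    Session.add(MyClass())
    Session.commit()

    assert Session() is session    # both forms used one registry entry
    Session.remove()               # dispose of it when the scope ends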
Thread-Local Scope ------------------ Users who are familiar with multithreaded programming will note that representing anything as a global variable is usually a bad idea, as it implies that the global object will be accessed by many threads concurrently. The :class:`.Session` object is entirely designed to be used in a **non-concurrent** fashion, which in terms of multithreading means "only in one thread at a time". So our above example of :class:`.scoped_session` usage, where the same :class:`.Session` object is maintained across multiple calls, suggests that some process needs to be in place such that multiple calls across many threads don't actually get a handle to the same session. We call this notion **thread local storage**, which means that a special object is used that will maintain a distinct object for each application thread. Python provides this via the `threading.local() `_ construct. The :class:`.scoped_session` object by default uses this object as storage, so that a single :class:`.Session` is maintained for all who call upon the :class:`.scoped_session` registry, but only within the scope of a single thread. Callers who call upon the registry in a different thread get a :class:`.Session` instance that is local to that other thread. Using this technique, the :class:`.scoped_session` provides a quick and relatively simple (if one is familiar with thread-local storage) way of providing a single, global object in an application that is safe to be called upon from multiple threads. The :meth:`.scoped_session.remove` method, as always, removes the current :class:`.Session` associated with the thread, if any. However, one advantage of the ``threading.local()`` object is that if the application thread itself ends, the "storage" for that thread is also garbage collected. So it is in fact "safe" to use thread local scope with an application that spawns and tears down threads, without the need to call :meth:`.scoped_session.remove`. However, the scope of transactions themselves, i.e. ending them via :meth:`.Session.commit` or :meth:`.Session.rollback`, will usually still be something that must be explicitly arranged for at the appropriate time, unless the application actually ties the lifespan of a thread to the lifespan of a transaction. .. _session_lifespan: Using Thread-Local Scope with Web Applications ---------------------------------------------- As discussed in the section :ref:`session_faq_whentocreate`, a web application is architected around the concept of a **web request**, and integrating such an application with the :class:`.Session` usually implies that the :class:`.Session` will be associated with that request. As it turns out, most Python web frameworks, with notable exceptions such as the asynchronous frameworks Twisted and Tornado, use threads in a simple way, such that a particular web request is received, processed, and completed within the scope of a single *worker thread*. When the request ends, the worker thread is released to a pool of workers where it is available to handle another request. This simple correspondence of web request and thread means that to associate a :class:`.Session` with a thread implies it is also associated with the web request running within that thread, and vice versa, provided that the :class:`.Session` is created only after the web request begins and torn down just before the web request ends. So it is a common practice to use :class:`.scoped_session` as a quick way to integrate the :class:`.Session` with a web application.
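Because this practice leans on the thread-per-request model, it can help to see the per-thread isolation directly; in the sketch below, the engine binding is omitted and the names are illustrative::

    import threading

    from sqlalchemy.orm import scoped_session, sessionmaker

    Session = scoped_session(sessionmaker())

    main_session = Session()
    result = {}

    def worker():
        # a different thread receives a different Session from the registry
        result['worker_session'] = Session()
        Session.remove()

    t = threading.Thread(target=worker)
    t.start()
    t.join()

    assert result['worker_session'] is not main_session
    Session.remove()

Each worker thread therefore sees its own :class:`.Session`, which is exactly the per-request isolation a web application wants.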
The sequence diagram below illustrates this flow::

    Web Server          Web Framework        SQLAlchemy ORM Code
    --------------      --------------       ------------------------------
    startup        ->   Web framework        # Session registry is established
                        initializes          Session = scoped_session(sessionmaker())

    incoming
    web request    ->   web request   ->     # The registry is *optionally*
                        starts               # called upon explicitly to create
                                             # a Session local to the thread and/or request
                                             Session()

                                             # the Session registry can otherwise
                                             # be used at any time, creating the
                                             # request-local Session() if not present,
                                             # or returning the existing one
                                             Session.query(MyClass) # ...

                                             Session.add(some_object) # ...

                                             # if data was modified, commit the
                                             # transaction
                                             Session.commit()

                        web request ends ->  # the registry is instructed to
                                             # remove the Session
                                             Session.remove()

                        sends output      <-
    outgoing web    <-
    response

Using the above flow, the process of integrating the :class:`.Session` with the web application has exactly two requirements: 1. Create a single :class:`.scoped_session` registry when the web application first starts, ensuring that this object is accessible by the rest of the application. 2. Ensure that :meth:`.scoped_session.remove` is called when the web request ends, usually by integrating with the web framework's event system to establish an "on request end" event. As noted earlier, the above pattern is **just one potential way** to integrate a :class:`.Session` with a web framework, one which in particular makes the significant assumption that the **web framework associates web requests with application threads**. It is however **strongly recommended that the integration tools provided with the web framework itself be used, if available**, instead of :class:`.scoped_session`. In particular, while using a thread local can be convenient, it is preferable that the :class:`.Session` be associated **directly with the request**, rather than with the current thread. The next section on custom scopes details a more advanced configuration which can combine the usage of :class:`.scoped_session` with direct request based scope, or any kind of scope. Using Custom Created Scopes --------------------------- The :class:`.scoped_session` object's default behavior of "thread local" scope is only one of many options on how to "scope" a :class:`.Session`. A custom scope can be defined based on any existing system of getting at "the current thing we are working with". Suppose a web framework defines a library function ``get_current_request()``. An application built using this framework can call this function at any time, and the result will be some kind of ``Request`` object that represents the current request being processed. If the ``Request`` object is hashable, then this function can be easily integrated with :class:`.scoped_session` to associate the :class:`.Session` with the request. Below we illustrate this in conjunction with a hypothetical event marker provided by the web framework ``on_request_end``, which allows code to be invoked whenever a request ends:: from my_web_framework import get_current_request, on_request_end from sqlalchemy.orm import scoped_session, sessionmaker Session = scoped_session(sessionmaker(bind=some_engine), scopefunc=get_current_request) @on_request_end def remove_session(req): Session.remove() Above, we instantiate :class:`.scoped_session` in the usual way, except that we pass our request-returning function as the "scopefunc".
This instructs :class:`.scoped_session` to use this function to generate a dictionary key whenever the registry is called upon to return the current :class:`.Session`. In this case it is particularly important that we ensure a reliable "remove" system is implemented, as this dictionary is not otherwise self-managed. Contextual Session API ---------------------- .. autoclass:: sqlalchemy.orm.scoping.scoped_session :members: .. autoclass:: sqlalchemy.util.ScopedRegistry :members: .. autoclass:: sqlalchemy.util.ThreadLocalRegistry SQLAlchemy-1.0.11/doc/build/orm/session_basics.rst0000664000175000017500000010016412636375552023044 0ustar classicclassic00000000000000========================== Session Basics ========================== What does the Session do ? ========================== In the most general sense, the :class:`~.Session` establishes all conversations with the database and represents a "holding zone" for all the objects which you've loaded or associated with it during its lifespan. It provides the entrypoint to acquire a :class:`.Query` object, which sends queries to the database using the :class:`~.Session` object's current database connection, populating result rows into objects that are then stored in the :class:`.Session`, inside a structure called the `Identity Map `_ - a data structure that maintains unique copies of each object, where "unique" means "only one object with a particular primary key". The :class:`.Session` begins in an essentially stateless form. Once queries are issued or other objects are persisted with it, it requests a connection resource from an :class:`.Engine` that is associated either with the :class:`.Session` itself or with the mapped :class:`.Table` objects being operated upon. This connection represents an ongoing transaction, which remains in effect until the :class:`.Session` is instructed to commit or roll back its pending state. All changes to objects maintained by a :class:`.Session` are tracked - before the database is queried again or before the current transaction is committed, it **flushes** all pending changes to the database. This is known as the `Unit of Work `_ pattern. When using a :class:`.Session`, it's important to note that the objects which are associated with it are **proxy objects** to the transaction being held by the :class:`.Session` - there are a variety of events that will cause objects to re-access the database in order to keep synchronized. It is possible to "detach" objects from a :class:`.Session`, and to continue using them, though this practice has its caveats. It's intended that usually, you'd re-associate detached objects with another :class:`.Session` when you want to work with them again, so that they can resume their normal task of representing database state. .. _session_getting: Getting a Session ================= :class:`.Session` is a regular Python class which can be directly instantiated. However, to standardize how sessions are configured and acquired, the :class:`.sessionmaker` class is normally used to create a top level :class:`.Session` configuration which can then be used throughout an application without the need to repeat the configurational arguments. The usage of :class:`.sessionmaker` is illustrated below: .. 
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    # an Engine, which the Session will use for connection
    # resources
    some_engine = create_engine('postgresql://scott:tiger@localhost/')

    # create a configured "Session" class
    Session = sessionmaker(bind=some_engine)

    # create a Session
    session = Session()

    # work with the session
    myobject = MyObject('foo', 'bar')
    session.add(myobject)
    session.commit()

Above, the :class:`.sessionmaker` call creates a factory for us, which we assign to the name ``Session``. This factory, when called, will create a new :class:`.Session` object using the configurational arguments we've given the factory. In this case, as is typical, we've configured the factory to specify a particular :class:`.Engine` for connection resources.

A typical setup will associate the :class:`.sessionmaker` with an :class:`.Engine`, so that each :class:`.Session` generated will use this :class:`.Engine` to acquire connection resources. This association can be set up as in the example above, using the ``bind`` argument.

When you write your application, place the :class:`.sessionmaker` factory at the global level. This factory can then be used by the rest of the application as the source of new :class:`.Session` instances, keeping the configuration for how :class:`.Session` objects are constructed in one place.

The :class:`.sessionmaker` factory can also be used in conjunction with other helpers, which are passed a user-defined :class:`.sessionmaker` that is then maintained by the helper. Some of these helpers are discussed in the section :ref:`session_faq_whentocreate`.

Adding Additional Configuration to an Existing sessionmaker()
--------------------------------------------------------------

A common scenario is where the :class:`.sessionmaker` is invoked at module import time, however the generation of one or more :class:`.Engine` instances to be associated with the :class:`.sessionmaker` has not yet proceeded. For this use case, the :class:`.sessionmaker` construct offers the :meth:`.sessionmaker.configure` method, which will place additional configuration directives into an existing :class:`.sessionmaker` that will take place when the construct is invoked::

    from sqlalchemy.orm import sessionmaker
    from sqlalchemy import create_engine

    # configure Session class with desired options
    Session = sessionmaker()

    # later, we create the engine
    engine = create_engine('postgresql://...')

    # associate it with our custom Session class
    Session.configure(bind=engine)

    # work with the session
    session = Session()

Creating Ad-Hoc Session Objects with Alternate Arguments
---------------------------------------------------------

For the use case where an application needs to create a new :class:`.Session` with special arguments that deviate from what is normally used throughout the application, such as a :class:`.Session` that binds to an alternate source of connectivity, or a :class:`.Session` that should have other arguments such as ``expire_on_commit`` established differently from what most of the application wants, specific arguments can be passed to the :class:`.sessionmaker` factory's :meth:`.sessionmaker.__call__` method.
These arguments will override whatever configurations have already been placed, such as below, where a new :class:`.Session` is constructed against a specific :class:`.Connection`::

    # at the module level, the global sessionmaker,
    # bound to a specific Engine
    Session = sessionmaker(bind=engine)

    # later, some unit of code wants to create a
    # Session that is bound to a specific Connection
    conn = engine.connect()
    session = Session(bind=conn)

The typical rationale for the association of a :class:`.Session` with a specific :class:`.Connection` is that of a test fixture that maintains an external transaction - see :ref:`session_external_transaction` for an example of this.

.. _session_faq:

Session Frequently Asked Questions
===================================

By this point, many users already have questions about sessions. This section presents a mini-FAQ (note that we have also a :doc:`real FAQ `) of the most basic issues one is presented with when using a :class:`.Session`.

When do I make a :class:`.sessionmaker`?
------------------------------------------

Just one time, somewhere in your application's global scope. It should be looked upon as part of your application's configuration. If your application has three .py files in a package, you could, for example, place the :class:`.sessionmaker` line in your ``__init__.py`` file; from that point on your other modules say "from mypackage import Session". That way, everyone else just uses :class:`.Session()`, and the configuration of that session is controlled by that central point.

If your application starts up, does imports, but does not know what database it's going to be connecting to, you can bind the :class:`.Session` at the "class" level to the engine later on, using :meth:`.sessionmaker.configure`.

In the examples in this section, we will frequently show the :class:`.sessionmaker` being created right above the line where we actually invoke :class:`.Session`. But that's just for example's sake! In reality, the :class:`.sessionmaker` would be somewhere at the module level. The calls to instantiate :class:`.Session` would then be placed at the point in the application where database conversations begin.

.. _session_faq_whentocreate:

When do I construct a :class:`.Session`, when do I commit it, and when do I close it?
-------------------------------------------------------------------------------------

.. topic:: tl;dr;

    1. As a general rule, keep the lifecycle of the session **separate and
       external** from functions and objects that access and/or manipulate
       database data. This will greatly help with achieving a predictable
       and consistent transactional scope.

    2. Make sure you have a clear notion of where transactions begin and end,
       and keep transactions **short**, meaning, they end at the completion
       of a sequence of operations, instead of being held open indefinitely.

A :class:`.Session` is typically constructed at the beginning of a logical operation where database access is potentially anticipated. The :class:`.Session`, whenever it is used to talk to the database, begins a database transaction as soon as it starts communicating. Assuming the ``autocommit`` flag is left at its recommended default of ``False``, this transaction remains in progress until the :class:`.Session` is rolled back, committed, or closed.
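To make this lifecycle concrete, here is a minimal sketch; it assumes the module-level ``Session`` factory from the earlier examples and a mapped class ``MyClass``, neither of which is defined here::

    session = Session()           # session scope begins; no connection yet

    session.query(MyClass).all()  # first database use; a connection is
                                  # checked out and transaction #1 begins

    session.commit()              # transaction #1 ends

    session.add(MyClass())
    session.commit()              # transaction #2 begins upon the flush and
                                  # ends here - one Session, many transactions

    session.close()               # session scope ends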
The :class:`.Session` will begin a new transaction if it is used again, subsequent to the previous transaction ending; from this it follows that the :class:`.Session` is capable of having a lifespan across many transactions, though only one at a time. We refer to these two concepts as **transaction scope** and **session scope**.

The implication here is that the SQLAlchemy ORM is encouraging the developer to establish these two scopes in their application, including not only when the scopes begin and end, but also the expanse of those scopes, for example should a single :class:`.Session` instance be local to the execution flow within a function or method, should it be a global object used by the entire application, or somewhere in between these two.

The burden placed on the developer to determine this scope is one area where the SQLAlchemy ORM necessarily has a strong opinion about how the database should be used. The :term:`unit of work` pattern is specifically one of accumulating changes over time and flushing them periodically, keeping in-memory state in sync with what's known to be present in a local transaction. This pattern is only effective when meaningful transaction scopes are in place.

It's usually not very hard to determine the best points at which to begin and end the scope of a :class:`.Session`, though the wide variety of application architectures possible can introduce challenging situations.

A common choice is to tear down the :class:`.Session` at the same time the transaction ends, meaning the transaction and session scopes are the same. This is a great choice to start out with as it removes the need to consider session scope as separate from transaction scope.

While there's no one-size-fits-all recommendation for how transaction scope should be determined, there are common patterns. Especially if one is writing a web application, the choice is pretty much established.

A web application is the easiest case because such an application is already constructed around a single, consistent scope - this is the **request**, which represents an incoming request from a browser, the processing of that request to formulate a response, and finally the delivery of that response back to the client. Integrating web applications with the :class:`.Session` is then the straightforward task of linking the scope of the :class:`.Session` to that of the request. The :class:`.Session` can be established as the request begins, or using a :term:`lazy initialization` pattern which establishes one as soon as it is needed. The request then proceeds, with some system in place where application logic can access the current :class:`.Session` in a manner associated with how the actual request object is accessed. As the request ends, the :class:`.Session` is torn down as well, usually through the usage of event hooks provided by the web framework. The transaction used by the :class:`.Session` may also be committed at this point, or alternatively the application may opt for an explicit commit pattern, only committing for those requests where one is warranted, but still always tearing down the :class:`.Session` unconditionally at the end.

Some web frameworks include infrastructure to assist in the task of aligning the lifespan of a :class:`.Session` with that of a web request. This includes products such as `Flask-SQLAlchemy `_, for usage in conjunction with the Flask web framework, and `Zope-SQLAlchemy `_, typically used with the Pyramid framework.
SQLAlchemy recommends that these products be used as available.

In those situations where the integration libraries are not provided or are insufficient, SQLAlchemy includes its own "helper" class known as :class:`.scoped_session`. A tutorial on the usage of this object is at :ref:`unitofwork_contextual`. It provides both a quick way to associate a :class:`.Session` with the current thread, as well as patterns to associate :class:`.Session` objects with other kinds of scopes.

As mentioned before, for non-web applications there is no one clear pattern, as applications themselves don't have just one pattern of architecture.

The best strategy is to attempt to demarcate "operations", points at which a particular thread begins to perform a series of operations for some period of time, which can be committed at the end. Some examples:

* A background daemon which spawns off child forks would want to create a
  :class:`.Session` local to each child process, work with that
  :class:`.Session` through the life of the "job" that the fork is handling,
  then tear it down when the job is completed.

* For a command-line script, the application would create a single, global
  :class:`.Session` that is established when the program begins to do its
  work, and commits it right as the program is completing its task.

* For a GUI interface-driven application, the scope of the :class:`.Session`
  may best be within the scope of a user-generated event, such as a button
  push. Or, the scope may correspond to explicit user interaction, such as
  the user "opening" a series of records, then "saving" them.

As a general rule, the application should manage the lifecycle of the session *externally* to functions that deal with specific data. This is a fundamental separation of concerns which keeps data-specific operations agnostic of the context in which they access and manipulate that data.

E.g. **don't do this**::

    ### this is the **wrong way to do it** ###

    class ThingOne(object):
        def go(self):
            session = Session()
            try:
                session.query(FooBar).update({"x": 5})
                session.commit()
            except:
                session.rollback()
                raise

    class ThingTwo(object):
        def go(self):
            session = Session()
            try:
                session.query(Widget).update({"q": 18})
                session.commit()
            except:
                session.rollback()
                raise

    def run_my_program():
        ThingOne().go()
        ThingTwo().go()

Keep the lifecycle of the session (and usually the transaction) **separate and external**::

    ### this is a **better** (but not the only) way to do it ###

    class ThingOne(object):
        def go(self, session):
            session.query(FooBar).update({"x": 5})

    class ThingTwo(object):
        def go(self, session):
            session.query(Widget).update({"q": 18})

    def run_my_program():
        session = Session()
        try:
            ThingOne().go(session)
            ThingTwo().go(session)

            session.commit()
        except:
            session.rollback()
            raise
        finally:
            session.close()

The advanced developer will try to keep the details of session, transaction and exception management as far as possible from the details of the program doing its work. For example, we can further separate concerns using a `context manager `_::

    ### another way (but again *not the only way*) to do it ###

    from contextlib import contextmanager

    @contextmanager
    def session_scope():
        """Provide a transactional scope around a series of operations."""
        session = Session()
        try:
            yield session
            session.commit()
        except:
            session.rollback()
            raise
        finally:
            session.close()

    def run_my_program():
        with session_scope() as session:
            ThingOne().go(session)
            ThingTwo().go(session)

Is the Session a cache?
----------------------------------

Yeee...no.
It's somewhat used as a cache, in that it implements the :term:`identity map` pattern, and stores objects keyed to their primary key. However, it doesn't do any kind of query caching. This means, if you say ``session.query(Foo).filter_by(name='bar')``, even if ``Foo(name='bar')`` is right there, in the identity map, the session has no idea about that. It has to issue SQL to the database, get the rows back, and then when it sees the primary key in the row, *then* it can look in the local identity map and see that the object is already there. It's only when you say ``query.get({some primary key})`` that the :class:`~sqlalchemy.orm.session.Session` doesn't have to issue a query. Additionally, the Session stores object instances using a weak reference by default. This also defeats the purpose of using the Session as a cache. The :class:`.Session` is not designed to be a global object from which everyone consults as a "registry" of objects. That's more the job of a **second level cache**. SQLAlchemy provides a pattern for implementing second level caching using `dogpile.cache `_, via the :ref:`examples_caching` example. How can I get the :class:`~sqlalchemy.orm.session.Session` for a certain object? ------------------------------------------------------------------------------------ Use the :meth:`~.Session.object_session` classmethod available on :class:`~sqlalchemy.orm.session.Session`:: session = Session.object_session(someobject) The newer :ref:`core_inspection_toplevel` system can also be used:: from sqlalchemy import inspect session = inspect(someobject).session .. _session_faq_threadsafe: Is the session thread-safe? ------------------------------ The :class:`.Session` is very much intended to be used in a **non-concurrent** fashion, which usually means in only one thread at a time. The :class:`.Session` should be used in such a way that one instance exists for a single series of operations within a single transaction. One expedient way to get this effect is by associating a :class:`.Session` with the current thread (see :ref:`unitofwork_contextual` for background). Another is to use a pattern where the :class:`.Session` is passed between functions and is otherwise not shared with other threads. The bigger point is that you should not *want* to use the session with multiple concurrent threads. That would be like having everyone at a restaurant all eat from the same plate. The session is a local "workspace" that you use for a specific set of tasks; you don't want to, or need to, share that session with other threads who are doing some other task. Making sure the :class:`.Session` is only used in a single concurrent thread at a time is called a "share nothing" approach to concurrency. But actually, not sharing the :class:`.Session` implies a more significant pattern; it means not just the :class:`.Session` object itself, but also **all objects that are associated with that Session**, must be kept within the scope of a single concurrent thread. The set of mapped objects associated with a :class:`.Session` are essentially proxies for data within database rows accessed over a database connection, and so just like the :class:`.Session` itself, the whole set of objects is really just a large-scale proxy for a database connection (or connections). 
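To make the "share nothing" approach concrete, here is a brief sketch in which each thread creates, uses, and closes its own :class:`.Session`; it assumes the module-level ``Session`` factory and the ``User`` mapping from earlier examples::

    import threading

    def do_some_work():
        # each thread acquires its own Session and its own objects;
        # nothing here is shared across threads
        session = Session()
        try:
            session.add(User(name='thread-local user'))
            session.commit()
        finally:
            session.close()

    threads = [threading.Thread(target=do_some_work) for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

Note that the :class:`.Engine` and the :class:`.sessionmaker` factory themselves are safe to share across threads; it is the :class:`.Session` and its objects that stay thread-local.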
Ultimately, it's mostly the DBAPI connection itself that we're keeping away from concurrent access; but since the :class:`.Session` and all the objects associated with it are all proxies for that DBAPI connection, the entire graph is essentially not safe for concurrent access. If there are in fact multiple threads participating in the same task, then you may consider sharing the session and its objects between those threads; however, in this extremely unusual scenario the application would need to ensure that a proper locking scheme is implemented so that there isn't *concurrent* access to the :class:`.Session` or its state. A more common approach to this situation is to maintain a single :class:`.Session` per concurrent thread, but to instead *copy* objects from one :class:`.Session` to another, often using the :meth:`.Session.merge` method to copy the state of an object into a new object local to a different :class:`.Session`. Basics of Using a Session =========================== The most basic :class:`.Session` use patterns are presented here. Querying -------- The :meth:`~.Session.query` function takes one or more *entities* and returns a new :class:`~sqlalchemy.orm.query.Query` object which will issue mapper queries within the context of this Session. An entity is defined as a mapped class, a :class:`~sqlalchemy.orm.mapper.Mapper` object, an orm-enabled *descriptor*, or an ``AliasedClass`` object:: # query from a class session.query(User).filter_by(name='ed').all() # query with multiple classes, returns tuples session.query(User, Address).join('addresses').filter_by(name='ed').all() # query using orm-enabled descriptors session.query(User.name, User.fullname).all() # query from a mapper user_mapper = class_mapper(User) session.query(user_mapper) When :class:`~sqlalchemy.orm.query.Query` returns results, each object instantiated is stored within the identity map. When a row matches an object which is already present, the same object is returned. In the latter case, whether or not the row is populated onto an existing object depends upon whether the attributes of the instance have been *expired* or not. A default-configured :class:`~sqlalchemy.orm.session.Session` automatically expires all instances along transaction boundaries, so that with a normally isolated transaction, there shouldn't be any issue of instances representing data which is stale with regards to the current transaction. The :class:`.Query` object is introduced in great detail in :ref:`ormtutorial_toplevel`, and further documented in :ref:`query_api_toplevel`. Adding New or Existing Items ---------------------------- :meth:`~.Session.add` is used to place instances in the session. For *transient* (i.e. brand new) instances, this will have the effect of an INSERT taking place for those instances upon the next flush. For instances which are *persistent* (i.e. were loaded by this session), they are already present and do not need to be added. Instances which are *detached* (i.e. have been removed from a session) may be re-associated with a session using this method:: user1 = User(name='user1') user2 = User(name='user2') session.add(user1) session.add(user2) session.commit() # write changes to the database To add a list of items to the session at once, use :meth:`~.Session.add_all`:: session.add_all([item1, item2, item3]) The :meth:`~.Session.add` operation **cascades** along the ``save-update`` cascade. For more details see the section :ref:`unitofwork_cascades`. 
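The *transient*, *persistent*, and *detached* states mentioned above can be observed directly; below is a brief sketch, again assuming the ``User`` mapping and ``Session`` factory from earlier examples::

    user = User(name='ed')    # transient: not associated with any Session

    session = Session()
    session.add(user)         # pending: INSERT will occur on the next flush
    session.commit()          # persistent: present in the database

    session.expunge(user)     # detached: no longer associated with a Session

    other_session = Session()
    other_session.add(user)   # re-associate the detached instance
    other_session.commit()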
Deleting -------- The :meth:`~.Session.delete` method places an instance into the Session's list of objects to be marked as deleted:: # mark two objects to be deleted session.delete(obj1) session.delete(obj2) # commit (or flush) session.commit() .. _session_deleting_from_collections: Deleting from Collections ~~~~~~~~~~~~~~~~~~~~~~~~~~ A common confusion that arises regarding :meth:`~.Session.delete` is when objects which are members of a collection are being deleted. While the collection member is marked for deletion from the database, this does not impact the collection itself in memory until the collection is expired. Below, we illustrate that even after an ``Address`` object is marked for deletion, it's still present in the collection associated with the parent ``User``, even after a flush:: >>> address = user.addresses[1] >>> session.delete(address) >>> session.flush() >>> address in user.addresses True When the above session is committed, all attributes are expired. The next access of ``user.addresses`` will re-load the collection, revealing the desired state:: >>> session.commit() >>> address in user.addresses False The usual practice of deleting items within collections is to forego the usage of :meth:`~.Session.delete` directly, and instead use cascade behavior to automatically invoke the deletion as a result of removing the object from the parent collection. The ``delete-orphan`` cascade accomplishes this, as illustrated in the example below:: mapper(User, users_table, properties={ 'addresses':relationship(Address, cascade="all, delete, delete-orphan") }) del user.addresses[1] session.flush() Where above, upon removing the ``Address`` object from the ``User.addresses`` collection, the ``delete-orphan`` cascade has the effect of marking the ``Address`` object for deletion in the same way as passing it to :meth:`~.Session.delete`. See also :ref:`unitofwork_cascades` for detail on cascades. Deleting based on Filter Criterion ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The caveat with ``Session.delete()`` is that you need to have an object handy already in order to delete. The Query includes a :func:`~sqlalchemy.orm.query.Query.delete` method which deletes based on filtering criteria:: session.query(User).filter(User.id==7).delete() The ``Query.delete()`` method includes functionality to "expire" objects already in the session which match the criteria. However it does have some caveats, including that "delete" and "delete-orphan" cascades won't be fully expressed for collections which are already loaded. See the API docs for :meth:`~sqlalchemy.orm.query.Query.delete` for more details. .. _session_flushing: Flushing -------- When the :class:`~sqlalchemy.orm.session.Session` is used with its default configuration, the flush step is nearly always done transparently. Specifically, the flush occurs before any individual :class:`~sqlalchemy.orm.query.Query` is issued, as well as within the :meth:`~.Session.commit` call before the transaction is committed. It also occurs before a SAVEPOINT is issued when :meth:`~.Session.begin_nested` is used. 
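The "flush before a query" behavior can be observed directly; here is a short sketch, assuming the ``User`` mapping from earlier examples::

    session = Session()
    session.add(User(name='pending'))

    # autoflush emits the pending INSERT just before this SELECT,
    # so the new row is visible to the query within the transaction
    users = session.query(User).filter_by(name='pending').all()
    assert len(users) >= 1

    session.commit()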
Regardless of the autoflush setting, a flush can always be forced by issuing :meth:`~.Session.flush`:: session.flush() The "flush-on-Query" aspect of the behavior can be disabled by constructing :class:`.sessionmaker` with the flag ``autoflush=False``:: Session = sessionmaker(autoflush=False) Additionally, autoflush can be temporarily disabled by setting the ``autoflush`` flag at any time:: mysession = Session() mysession.autoflush = False Some autoflush-disable recipes are available at `DisableAutoFlush `_. The flush process *always* occurs within a transaction, even if the :class:`~sqlalchemy.orm.session.Session` has been configured with ``autocommit=True``, a setting that disables the session's persistent transactional state. If no transaction is present, :meth:`~.Session.flush` creates its own transaction and commits it. Any failures during flush will always result in a rollback of whatever transaction is present. If the Session is not in ``autocommit=True`` mode, an explicit call to :meth:`~.Session.rollback` is required after a flush fails, even though the underlying transaction will have been rolled back already - this is so that the overall nesting pattern of so-called "subtransactions" is consistently maintained. .. _session_committing: Committing ---------- :meth:`~.Session.commit` is used to commit the current transaction. It always issues :meth:`~.Session.flush` beforehand to flush any remaining state to the database; this is independent of the "autoflush" setting. If no transaction is present, it raises an error. Note that the default behavior of the :class:`~sqlalchemy.orm.session.Session` is that a "transaction" is always present; this behavior can be disabled by setting ``autocommit=True``. In autocommit mode, a transaction can be initiated by calling the :meth:`~.Session.begin` method. .. note:: The term "transaction" here refers to a transactional construct within the :class:`.Session` itself which may be maintaining zero or more actual database (DBAPI) transactions. An individual DBAPI connection begins participation in the "transaction" as it is first used to execute a SQL statement, then remains present until the session-level "transaction" is completed. See :ref:`unitofwork_transaction` for further detail. Another behavior of :meth:`~.Session.commit` is that by default it expires the state of all instances present after the commit is complete. This is so that when the instances are next accessed, either through attribute access or by them being present in a :class:`~sqlalchemy.orm.query.Query` result set, they receive the most recent state. To disable this behavior, configure :class:`.sessionmaker` with ``expire_on_commit=False``. Normally, instances loaded into the :class:`~sqlalchemy.orm.session.Session` are never changed by subsequent queries; the assumption is that the current transaction is isolated so the state most recently loaded is correct as long as the transaction continues. Setting ``autocommit=True`` works against this model to some degree since the :class:`~sqlalchemy.orm.session.Session` behaves in exactly the same way with regard to attribute state, except no transaction is present. .. _session_rollback: Rolling Back ------------ :meth:`~.Session.rollback` rolls back the current transaction. 
With a default configured session, the post-rollback state of the session is as follows:

* All transactions are rolled back and all connections returned to the
  connection pool, unless the Session was bound directly to a Connection, in
  which case the connection is still maintained (but still rolled back).

* Objects which were initially in the *pending* state when they were added
  to the :class:`~sqlalchemy.orm.session.Session` within the lifespan of the
  transaction are expunged, corresponding to their INSERT statement being
  rolled back. The state of their attributes remains unchanged.

* Objects which were marked as *deleted* within the lifespan of the
  transaction are promoted back to the *persistent* state, corresponding to
  their DELETE statement being rolled back. Note that if those objects were
  first *pending* within the transaction, that operation takes precedence
  instead.

* All objects not expunged are fully expired.

With that state understood, the :class:`~sqlalchemy.orm.session.Session` may safely continue usage after a rollback occurs.

When a :meth:`~.Session.flush` fails, typically for reasons like primary key, foreign key, or "not nullable" constraint violations, a :meth:`~.Session.rollback` is issued automatically (it's currently not possible for a flush to continue after a partial failure). However, the flush process always uses its own transactional demarcator called a *subtransaction*, which is described more fully in the docstrings for :class:`~sqlalchemy.orm.session.Session`. What it means here is that even though the database transaction has been rolled back, the end user must still issue :meth:`~.Session.rollback` to fully reset the state of the :class:`~sqlalchemy.orm.session.Session`.

Closing
-------

The :meth:`~.Session.close` method issues a :meth:`~.Session.expunge_all`, and :term:`releases` any transactional/connection resources. When connections are returned to the connection pool, transactional state is rolled back as well.

SQLAlchemy-1.0.11/doc/build/orm/mapper_config.rst

.. _mapper_config_toplevel:

====================
Mapper Configuration
====================

This section describes a variety of configurational patterns that are usable with mappers. It assumes you've worked through :ref:`ormtutorial_toplevel` and know how to construct and use rudimentary mappers and relationships.

.. toctree::
    :maxdepth: 2

    mapping_styles
    scalar_mapping
    inheritance
    nonstandard_mappings
    versioning
    mapping_api

SQLAlchemy-1.0.11/doc/build/orm/mapped_sql_expr.rst

.. module:: sqlalchemy.orm

.. _mapper_sql_expressions:

SQL Expressions as Mapped Attributes
=====================================

Attributes on a mapped class can be linked to SQL expressions, which can be used in queries.

Using a Hybrid
--------------

The easiest and most flexible way to link relatively simple SQL expressions to a class is to use a so-called "hybrid attribute", described in the section :ref:`hybrids_toplevel`. The hybrid provides for an expression that works at both the Python level as well as at the SQL expression level.
For example, below we map a class ``User``, containing attributes ``firstname`` and ``lastname``, and include a hybrid that will provide for us the ``fullname``, which is the string concatenation of the two:: from sqlalchemy.ext.hybrid import hybrid_property class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @hybrid_property def fullname(self): return self.firstname + " " + self.lastname Above, the ``fullname`` attribute is interpreted at both the instance and class level, so that it is available from an instance:: some_user = session.query(User).first() print some_user.fullname as well as usable within queries:: some_user = session.query(User).filter(User.fullname == "John Smith").first() The string concatenation example is a simple one, where the Python expression can be dual purposed at the instance and class level. Often, the SQL expression must be distinguished from the Python expression, which can be achieved using :meth:`.hybrid_property.expression`. Below we illustrate the case where a conditional needs to be present inside the hybrid, using the ``if`` statement in Python and the :func:`.sql.expression.case` construct for SQL expressions:: from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.sql import case class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @hybrid_property def fullname(self): if self.firstname is not None: return self.firstname + " " + self.lastname else: return self.lastname @fullname.expression def fullname(cls): return case([ (cls.firstname != None, cls.firstname + " " + cls.lastname), ], else_ = cls.lastname) .. _mapper_column_property_sql_expressions: Using column_property --------------------- The :func:`.orm.column_property` function can be used to map a SQL expression in a manner similar to a regularly mapped :class:`.Column`. With this technique, the attribute is loaded along with all other column-mapped attributes at load time. This is in some cases an advantage over the usage of hybrids, as the value can be loaded up front at the same time as the parent row of the object, particularly if the expression is one which links to other tables (typically as a correlated subquery) to access data that wouldn't normally be available on an already loaded object. Disadvantages to using :func:`.orm.column_property` for SQL expressions include that the expression must be compatible with the SELECT statement emitted for the class as a whole, and there are also some configurational quirks which can occur when using :func:`.orm.column_property` from declarative mixins. Our "fullname" example can be expressed using :func:`.orm.column_property` as follows:: from sqlalchemy.orm import column_property class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) fullname = column_property(firstname + " " + lastname) Correlated subqueries may be used as well. 
Below we use the :func:`.select` construct to create a SELECT that links together the count of ``Address`` objects available for a particular ``User``:: from sqlalchemy.orm import column_property from sqlalchemy import select, func from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) user_id = Column(Integer, ForeignKey('user.id')) class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) address_count = column_property( select([func.count(Address.id)]).\ where(Address.user_id==id).\ correlate_except(Address) ) In the above example, we define a :func:`.select` construct like the following:: select([func.count(Address.id)]).\ where(Address.user_id==id).\ correlate_except(Address) The meaning of the above statement is, select the count of ``Address.id`` rows where the ``Address.user_id`` column is equated to ``id``, which in the context of the ``User`` class is the :class:`.Column` named ``id`` (note that ``id`` is also the name of a Python built in function, which is not what we want to use here - if we were outside of the ``User`` class definition, we'd use ``User.id``). The :meth:`.select.correlate_except` directive indicates that each element in the FROM clause of this :func:`.select` may be omitted from the FROM list (that is, correlated to the enclosing SELECT statement against ``User``) except for the one corresponding to ``Address``. This isn't strictly necessary, but prevents ``Address`` from being inadvertently omitted from the FROM list in the case of a long string of joins between ``User`` and ``Address`` tables where SELECT statements against ``Address`` are nested. If import issues prevent the :func:`.column_property` from being defined inline with the class, it can be assigned to the class after both are configured. In Declarative this has the effect of calling :meth:`.Mapper.add_property` to add an additional property after the fact:: User.address_count = column_property( select([func.count(Address.id)]).\ where(Address.user_id==User.id) ) For many-to-many relationships, use :func:`.and_` to join the fields of the association table to both tables in a relation, illustrated here with a classical mapping:: from sqlalchemy import and_ mapper(Author, authors, properties={ 'book_count': column_property( select([func.count(books.c.id)], and_( book_authors.c.author_id==authors.c.id, book_authors.c.book_id==books.c.id ))) }) Using a plain descriptor ------------------------- In cases where a SQL query more elaborate than what :func:`.orm.column_property` or :class:`.hybrid_property` can provide must be emitted, a regular Python function accessed as an attribute can be used, assuming the expression only needs to be available on an already-loaded instance. The function is decorated with Python's own ``@property`` decorator to mark it as a read-only attribute. 
Within the function, :func:`.object_session` is used to locate the :class:`.Session` corresponding to the current object, which is then used to emit a query::

    from sqlalchemy.orm import object_session
    from sqlalchemy import select, func

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        firstname = Column(String(50))
        lastname = Column(String(50))

        @property
        def address_count(self):
            return object_session(self).\
                scalar(
                    select([func.count(Address.id)]).\
                        where(Address.user_id==self.id)
                )

The plain descriptor approach is useful as a last resort, but is less performant in the usual case than both the hybrid and column property approaches, in that it needs to emit a SQL query upon each access.

SQLAlchemy-1.0.11/doc/build/orm/constructors.rst

.. module:: sqlalchemy.orm

.. _mapping_constructors:

Constructors and Object Initialization
=======================================

Mapping imposes no restrictions or requirements on the constructor (``__init__``) method for the class. You are free to require any arguments for the function that you wish, assign attributes to the instance that are unknown to the ORM, and generally do anything else you would normally do when writing a constructor for a Python class.

The SQLAlchemy ORM does not call ``__init__`` when recreating objects from database rows. The ORM's process is somewhat akin to the Python standard library's ``pickle`` module, invoking the low level ``__new__`` method and then quietly restoring attributes directly on the instance rather than calling ``__init__``.

If you need to do some setup on database-loaded instances before they're ready to use, you can use the ``@reconstructor`` decorator to tag a method as the ORM counterpart to ``__init__``. SQLAlchemy will call this method with no arguments every time it loads or reconstructs one of your instances. This is useful for recreating transient properties that are normally assigned in your ``__init__``::

    from sqlalchemy import orm

    class MyMappedClass(object):
        def __init__(self, data):
            self.data = data
            # we need stuff on all instances, but not in the database.
            self.stuff = []

        @orm.reconstructor
        def init_on_load(self):
            self.stuff = []

When ``obj = MyMappedClass()`` is executed, Python calls the ``__init__`` method as normal and the ``data`` argument is required. When instances are loaded during a :class:`~sqlalchemy.orm.query.Query` operation as in ``query(MyMappedClass).one()``, ``init_on_load`` is called.

Any method may be tagged as the :func:`~sqlalchemy.orm.reconstructor`, even the ``__init__`` method. SQLAlchemy will call the reconstructor method with no arguments. Scalar (non-collection) database-mapped attributes of the instance will be available for use within the function. Eagerly-loaded collections are generally not yet available and will usually only contain the first element. ORM state changes made to objects at this stage will not be recorded for the next flush() operation, so the activity within a reconstructor should be conservative.

:func:`~sqlalchemy.orm.reconstructor` is a shortcut into a larger system of "instance level" events, which can be subscribed to using the event API - see :class:`.InstanceEvents` for the full API description of these events.

.. autofunction:: reconstructor

SQLAlchemy-1.0.11/doc/build/orm/join_conditions.rst
.. _relationship_configure_joins:

Configuring how Relationship Joins
------------------------------------

:func:`.relationship` will normally create a join between two tables by examining the foreign key relationship between the two tables to determine which columns should be compared. There are a variety of situations where this behavior needs to be customized.

.. _relationship_foreign_keys:

Handling Multiple Join Paths
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

One of the most common situations to deal with is when there is more than one foreign key path between two tables.

Consider a ``Customer`` class that contains two foreign keys to an ``Address`` class::

    from sqlalchemy import Integer, ForeignKey, String, Column
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship

    Base = declarative_base()

    class Customer(Base):
        __tablename__ = 'customer'
        id = Column(Integer, primary_key=True)
        name = Column(String)

        billing_address_id = Column(Integer, ForeignKey("address.id"))
        shipping_address_id = Column(Integer, ForeignKey("address.id"))

        billing_address = relationship("Address")
        shipping_address = relationship("Address")

    class Address(Base):
        __tablename__ = 'address'
        id = Column(Integer, primary_key=True)
        street = Column(String)
        city = Column(String)
        state = Column(String)
        zip = Column(String)

The above mapping, when we attempt to use it, will produce the error::

    sqlalchemy.exc.AmbiguousForeignKeysError: Could not determine join
    condition between parent/child tables on relationship
    Customer.billing_address - there are multiple foreign key
    paths linking the tables.  Specify the 'foreign_keys' argument,
    providing a list of those columns which should be counted as
    containing a foreign key reference to the parent table.

The above message is pretty long. There are many potential messages that :func:`.relationship` can return, which have been carefully tailored to detect a variety of common configurational issues; most will suggest the additional configuration that's needed to resolve the ambiguity or other missing information.

In this case, the message wants us to qualify each :func:`.relationship` by instructing for each one which foreign key column should be considered, and the appropriate form is as follows::

    class Customer(Base):
        __tablename__ = 'customer'
        id = Column(Integer, primary_key=True)
        name = Column(String)

        billing_address_id = Column(Integer, ForeignKey("address.id"))
        shipping_address_id = Column(Integer, ForeignKey("address.id"))

        billing_address = relationship("Address", foreign_keys=[billing_address_id])
        shipping_address = relationship("Address", foreign_keys=[shipping_address_id])

Above, we specify the ``foreign_keys`` argument, which is a :class:`.Column` or list of :class:`.Column` objects which indicate those columns to be considered "foreign", or in other words, the columns that contain a value referring to a parent table. Loading the ``Customer.billing_address`` relationship from a ``Customer`` object will use the value present in ``billing_address_id`` in order to identify the row in ``Address`` to be loaded; similarly, ``shipping_address_id`` is used for the ``shipping_address`` relationship. The linkage of the two columns also plays a role during persistence; the newly generated primary key of a just-inserted ``Address`` object will be copied into the appropriate foreign key column of an associated ``Customer`` object during a flush, as the usage sketch below illustrates.
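As a brief usage sketch of the mapping above (assuming a working ``session`` is already present), each relationship independently populates its own foreign key column at flush time::

    home = Address(street='123 Anywhere Street', city='Boston',
                   state='MA', zip='02101')
    office = Address(street='1 Corporate Way', city='Cambridge',
                     state='MA', zip='02139')

    customer = Customer(name='ACME', billing_address=home,
                        shipping_address=office)

    session.add(customer)
    session.commit()   # each Address receives its primary key, which is then
                       # copied into billing_address_id / shipping_address_id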
When specifying ``foreign_keys`` with Declarative, we can also use string names to specify, however it is important that if using a list, the **list is part of the string**:: billing_address = relationship("Address", foreign_keys="[Customer.billing_address_id]") In this specific example, the list is not necessary in any case as there's only one :class:`.Column` we need:: billing_address = relationship("Address", foreign_keys="Customer.billing_address_id") .. versionchanged:: 0.8 :func:`.relationship` can resolve ambiguity between foreign key targets on the basis of the ``foreign_keys`` argument alone; the :paramref:`~.relationship.primaryjoin` argument is no longer needed in this situation. .. _relationship_primaryjoin: Specifying Alternate Join Conditions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The default behavior of :func:`.relationship` when constructing a join is that it equates the value of primary key columns on one side to that of foreign-key-referring columns on the other. We can change this criterion to be anything we'd like using the :paramref:`~.relationship.primaryjoin` argument, as well as the :paramref:`~.relationship.secondaryjoin` argument in the case when a "secondary" table is used. In the example below, using the ``User`` class as well as an ``Address`` class which stores a street address, we create a relationship ``boston_addresses`` which will only load those ``Address`` objects which specify a city of "Boston":: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) boston_addresses = relationship("Address", primaryjoin="and_(User.id==Address.user_id, " "Address.city=='Boston')") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) user_id = Column(Integer, ForeignKey('user.id')) street = Column(String) city = Column(String) state = Column(String) zip = Column(String) Within this string SQL expression, we made use of the :func:`.and_` conjunction construct to establish two distinct predicates for the join condition - joining both the ``User.id`` and ``Address.user_id`` columns to each other, as well as limiting rows in ``Address`` to just ``city='Boston'``. When using Declarative, rudimentary SQL functions like :func:`.and_` are automatically available in the evaluated namespace of a string :func:`.relationship` argument. The custom criteria we use in a :paramref:`~.relationship.primaryjoin` is generally only significant when SQLAlchemy is rendering SQL in order to load or represent this relationship. That is, it's used in the SQL statement that's emitted in order to perform a per-attribute lazy load, or when a join is constructed at query time, such as via :meth:`.Query.join`, or via the eager "joined" or "subquery" styles of loading. When in-memory objects are being manipulated, we can place any ``Address`` object we'd like into the ``boston_addresses`` collection, regardless of what the value of the ``.city`` attribute is. The objects will remain present in the collection until the attribute is expired and re-loaded from the database where the criterion is applied. When a flush occurs, the objects inside of ``boston_addresses`` will be flushed unconditionally, assigning value of the primary key ``user.id`` column onto the foreign-key-holding ``address.user_id`` column for each row. 
The ``city`` criteria has no effect here, as the flush process only cares about synchronizing primary key values into referencing foreign key values. .. _relationship_custom_foreign: Creating Custom Foreign Conditions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Another element of the primary join condition is how those columns considered "foreign" are determined. Usually, some subset of :class:`.Column` objects will specify :class:`.ForeignKey`, or otherwise be part of a :class:`.ForeignKeyConstraint` that's relevant to the join condition. :func:`.relationship` looks to this foreign key status as it decides how it should load and persist data for this relationship. However, the :paramref:`~.relationship.primaryjoin` argument can be used to create a join condition that doesn't involve any "schema" level foreign keys. We can combine :paramref:`~.relationship.primaryjoin` along with :paramref:`~.relationship.foreign_keys` and :paramref:`~.relationship.remote_side` explicitly in order to establish such a join. Below, a class ``HostEntry`` joins to itself, equating the string ``content`` column to the ``ip_address`` column, which is a Postgresql type called ``INET``. We need to use :func:`.cast` in order to cast one side of the join to the type of the other:: from sqlalchemy import cast, String, Column, Integer from sqlalchemy.orm import relationship from sqlalchemy.dialects.postgresql import INET from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class HostEntry(Base): __tablename__ = 'host_entry' id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign_keys, remote_side parent_host = relationship("HostEntry", primaryjoin=ip_address == cast(content, INET), foreign_keys=content, remote_side=ip_address ) The above relationship will produce a join like:: SELECT host_entry.id, host_entry.ip_address, host_entry.content FROM host_entry JOIN host_entry AS host_entry_1 ON host_entry_1.ip_address = CAST(host_entry.content AS INET) An alternative syntax to the above is to use the :func:`.foreign` and :func:`.remote` :term:`annotations`, inline within the :paramref:`~.relationship.primaryjoin` expression. This syntax represents the annotations that :func:`.relationship` normally applies by itself to the join condition given the :paramref:`~.relationship.foreign_keys` and :paramref:`~.relationship.remote_side` arguments. These functions may be more succinct when an explicit join condition is present, and additionally serve to mark exactly the column that is "foreign" or "remote" independent of whether that column is stated multiple times or within complex SQL expressions:: from sqlalchemy.orm import foreign, remote class HostEntry(Base): __tablename__ = 'host_entry' id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign() and remote() annotations # in lieu of separate arguments parent_host = relationship("HostEntry", primaryjoin=remote(ip_address) == \ cast(foreign(content), INET), ) .. _relationship_custom_operator: Using custom operators in join conditions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Another use case for relationships is the use of custom operators, such as Postgresql's "is contained within" ``<<`` operator when joining with types such as :class:`.postgresql.INET` and :class:`.postgresql.CIDR`. 
For custom operators we use the :meth:`.Operators.op` function:: inet_column.op("<<")(cidr_column) However, if we construct a :paramref:`~.relationship.primaryjoin` using this operator, :func:`.relationship` will still need more information. This is because when it examines our primaryjoin condition, it specifically looks for operators used for **comparisons**, and this is typically a fixed list containing known comparison operators such as ``==``, ``<``, etc. So for our custom operator to participate in this system, we need it to register as a comparison operator using the :paramref:`~.Operators.op.is_comparison` parameter:: inet_column.op("<<", is_comparison=True)(cidr_column) A complete example:: class IPA(Base): __tablename__ = 'ip_address' id = Column(Integer, primary_key=True) v4address = Column(INET) network = relationship("Network", primaryjoin="IPA.v4address.op('<<', is_comparison=True)" "(foreign(Network.v4representation))", viewonly=True ) class Network(Base): __tablename__ = 'network' id = Column(Integer, primary_key=True) v4representation = Column(CIDR) Above, a query such as:: session.query(IPA).join(IPA.network) Will render as:: SELECT ip_address.id AS ip_address_id, ip_address.v4address AS ip_address_v4address FROM ip_address JOIN network ON ip_address.v4address << network.v4representation .. versionadded:: 0.9.2 - Added the :paramref:`.Operators.op.is_comparison` flag to assist in the creation of :func:`.relationship` constructs using custom operators. .. _relationship_overlapping_foreignkeys: Overlapping Foreign Keys ~~~~~~~~~~~~~~~~~~~~~~~~ A rare scenario can arise when composite foreign keys are used, such that a single column may be the subject of more than one column referred to via foreign key constraint. Consider an (admittedly complex) mapping such as the ``Magazine`` object, referred to both by the ``Writer`` object and the ``Article`` object using a composite primary key scheme that includes ``magazine_id`` for both; then to make ``Article`` refer to ``Writer`` as well, ``Article.magazine_id`` is involved in two separate relationships; ``Article.magazine`` and ``Article.writer``:: class Magazine(Base): __tablename__ = 'magazine' id = Column(Integer, primary_key=True) class Article(Base): __tablename__ = 'article' article_id = Column(Integer) magazine_id = Column(ForeignKey('magazine.id')) writer_id = Column() magazine = relationship("Magazine") writer = relationship("Writer") __table_args__ = ( PrimaryKeyConstraint('article_id', 'magazine_id'), ForeignKeyConstraint( ['writer_id', 'magazine_id'], ['writer.id', 'writer.magazine_id'] ), ) class Writer(Base): __tablename__ = 'writer' id = Column(Integer, primary_key=True) magazine_id = Column(ForeignKey('magazine.id'), primary_key=True) magazine = relationship("Magazine") When the above mapping is configured, we will see this warning emitted:: SAWarning: relationship 'Article.writer' will copy column writer.magazine_id to column article.magazine_id, which conflicts with relationship(s): 'Article.magazine' (copies magazine.id to article.magazine_id). Consider applying viewonly=True to read-only relationships, or provide a primaryjoin condition marking writable columns with the foreign() annotation. What this refers to originates from the fact that ``Article.magazine_id`` is the subject of two different foreign key constraints; it refers to ``Magazine.id`` directly as a source column, but also refers to ``Writer.magazine_id`` as a source column in the context of the composite key to ``Writer``. 
If we associate an ``Article`` with a particular ``Magazine``, but then associate the ``Article`` with a ``Writer`` that's associated with a *different* ``Magazine``, the ORM will overwrite ``Article.magazine_id`` non-deterministically, silently changing which magazine we refer towards; it may also attempt to place NULL into this column if we de-associate a ``Writer`` from an ``Article``. The warning lets us know this is the case.

To solve this, we need to break out the behavior of ``Article`` to include all three of the following features:

1. ``Article`` first and foremost writes to ``Article.magazine_id`` based on
   data persisted in the ``Article.magazine`` relationship only, that is a
   value copied from ``Magazine.id``.

2. ``Article`` can write to ``Article.writer_id`` on behalf of data persisted
   in the ``Article.writer`` relationship, but only the ``Writer.id`` column;
   the ``Writer.magazine_id`` column should not be written into
   ``Article.magazine_id`` as it ultimately is sourced from ``Magazine.id``.

3. ``Article`` takes ``Article.magazine_id`` into account when loading
   ``Article.writer``, even though it *doesn't* write to it on behalf of this
   relationship.

To get just #1 and #2, we could specify only ``Article.writer_id`` as the "foreign keys" for ``Article.writer``::

    class Article(Base):
        # ...

        writer = relationship("Writer", foreign_keys='Article.writer_id')

However, this has the effect of ``Article.writer`` not taking ``Article.magazine_id`` into account when querying against ``Writer``:

.. sourcecode:: sql

    SELECT article.article_id AS article_article_id,
        article.magazine_id AS article_magazine_id,
        article.writer_id AS article_writer_id
    FROM article
    JOIN writer ON writer.id = article.writer_id

Therefore, to get at all of #1, #2, and #3, we express the join condition as well as which columns are to be written by combining :paramref:`~.relationship.primaryjoin` fully, along with either the :paramref:`~.relationship.foreign_keys` argument, or more succinctly by annotating with :func:`~.orm.foreign`::

    class Article(Base):
        # ...

        writer = relationship(
            "Writer",
            primaryjoin="and_(Writer.id == foreign(Article.writer_id), "
                        "Writer.magazine_id == Article.magazine_id)")

.. versionchanged:: 1.0.0 the ORM will attempt to warn when a column is used as the synchronization target from more than one relationship simultaneously.

Non-relational Comparisons / Materialized Path
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. warning:: this section details an experimental feature.

Using custom expressions means we can produce unorthodox join conditions that don't obey the usual primary/foreign key model. One such example is the materialized path pattern, where we compare strings for overlapping path tokens in order to produce a tree structure.

Through careful use of :func:`.foreign` and :func:`.remote`, we can build a relationship that effectively produces a rudimentary materialized path system. Essentially, when :func:`.foreign` and :func:`.remote` are on the *same* side of the comparison expression, the relationship is considered to be "one to many"; when they are on *different* sides, the relationship is considered to be "many to one".
For the comparison we'll use here, we'll be dealing with collections so we keep things configured as "one to many":: class Element(Base): __tablename__ = 'element' path = Column(String, primary_key=True) descendants = relationship('Element', primaryjoin= remote(foreign(path)).like( path.concat('/%')), viewonly=True, order_by=path) Above, if given an ``Element`` object with a path attribute of ``"/foo/bar2"``, we seek for a load of ``Element.descendants`` to look like:: SELECT element.path AS element_path FROM element WHERE element.path LIKE ('/foo/bar2' || '/%') ORDER BY element.path .. versionadded:: 0.9.5 Support has been added to allow a single-column comparison to itself within a primaryjoin condition, as well as for primaryjoin conditions that use :meth:`.ColumnOperators.like` as the comparison operator. .. _self_referential_many_to_many: Self-Referential Many-to-Many Relationship ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Many to many relationships can be customized by one or both of :paramref:`~.relationship.primaryjoin` and :paramref:`~.relationship.secondaryjoin` - the latter is significant for a relationship that specifies a many-to-many reference using the :paramref:`~.relationship.secondary` argument. A common situation which involves the usage of :paramref:`~.relationship.primaryjoin` and :paramref:`~.relationship.secondaryjoin` is when establishing a many-to-many relationship from a class to itself, as shown below:: from sqlalchemy import Integer, ForeignKey, String, Column, Table from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() node_to_node = Table("node_to_node", Base.metadata, Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True), Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True) ) class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) label = Column(String) right_nodes = relationship("Node", secondary=node_to_node, primaryjoin=id==node_to_node.c.left_node_id, secondaryjoin=id==node_to_node.c.right_node_id, backref="left_nodes" ) Where above, SQLAlchemy can't know automatically which columns should connect to which for the ``right_nodes`` and ``left_nodes`` relationships. The :paramref:`~.relationship.primaryjoin` and :paramref:`~.relationship.secondaryjoin` arguments establish how we'd like to join to the association table. In the Declarative form above, as we are declaring these conditions within the Python block that corresponds to the ``Node`` class, the ``id`` variable is available directly as the :class:`.Column` object we wish to join with. Alternatively, we can define the :paramref:`~.relationship.primaryjoin` and :paramref:`~.relationship.secondaryjoin` arguments using strings, which is suitable in the case that our configuration does not have either the ``Node.id`` column object available yet or the ``node_to_node`` table perhaps isn't yet available. 
When referring to a plain :class:`.Table` object in a declarative string, we use the string name of the table as it is present in the :class:`.MetaData`:: class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) label = Column(String) right_nodes = relationship("Node", secondary="node_to_node", primaryjoin="Node.id==node_to_node.c.left_node_id", secondaryjoin="Node.id==node_to_node.c.right_node_id", backref="left_nodes" ) A classical mapping situation here is similar, where ``node_to_node`` can be joined to ``node.c.id``:: from sqlalchemy import Integer, ForeignKey, String, Column, Table, MetaData from sqlalchemy.orm import relationship, mapper metadata = MetaData() node_to_node = Table("node_to_node", metadata, Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True), Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True) ) node = Table("node", metadata, Column('id', Integer, primary_key=True), Column('label', String) ) class Node(object): pass mapper(Node, node, properties={ 'right_nodes':relationship(Node, secondary=node_to_node, primaryjoin=node.c.id==node_to_node.c.left_node_id, secondaryjoin=node.c.id==node_to_node.c.right_node_id, backref="left_nodes" )}) Note that in both examples, the :paramref:`~.relationship.backref` keyword specifies a ``left_nodes`` backref - when :func:`.relationship` creates the second relationship in the reverse direction, it's smart enough to reverse the :paramref:`~.relationship.primaryjoin` and :paramref:`~.relationship.secondaryjoin` arguments. .. _composite_secondary_join: Composite "Secondary" Joins ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: This section features some new and experimental features of SQLAlchemy. Sometimes, when one seeks to build a :func:`.relationship` between two tables there is a need for more than just two or three tables to be involved in order to join them. This is an area of :func:`.relationship` where one seeks to push the boundaries of what's possible, and often the ultimate solution to many of these exotic use cases needs to be hammered out on the SQLAlchemy mailing list. In more recent versions of SQLAlchemy, the :paramref:`~.relationship.secondary` parameter can be used in some of these cases in order to provide a composite target consisting of multiple tables. Below is an example of such a join condition (requires version 0.9.2 at least to function as is):: class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) b_id = Column(ForeignKey('b.id')) d = relationship("D", secondary="join(B, D, B.d_id == D.id)." "join(C, C.d_id == D.id)", primaryjoin="and_(A.b_id == B.id, A.id == C.a_id)", secondaryjoin="D.id == B.d_id", uselist=False ) class B(Base): __tablename__ = 'b' id = Column(Integer, primary_key=True) d_id = Column(ForeignKey('d.id')) class C(Base): __tablename__ = 'c' id = Column(Integer, primary_key=True) a_id = Column(ForeignKey('a.id')) d_id = Column(ForeignKey('d.id')) class D(Base): __tablename__ = 'd' id = Column(Integer, primary_key=True) In the above example, we provide all three of :paramref:`~.relationship.secondary`, :paramref:`~.relationship.primaryjoin`, and :paramref:`~.relationship.secondaryjoin`, in the declarative style referring to the named tables ``a``, ``b``, ``c``, ``d`` directly. A query from ``A`` to ``D`` looks like: .. 
sourcecode:: python+sql

    sess.query(A).join(A.d).all()

    {opensql}SELECT a.id AS a_id, a.b_id AS a_b_id
    FROM a JOIN (
        b AS b_1 JOIN d AS d_1 ON b_1.d_id = d_1.id
            JOIN c AS c_1 ON c_1.d_id = d_1.id)
        ON a.b_id = b_1.id AND a.id = c_1.a_id JOIN d ON d.id = b_1.d_id

In the above example, we take advantage of being able to stuff multiple
tables into a "secondary" container, so that we can join across many tables
while still keeping things "simple" for :func:`.relationship`, in that
there's just "one" table on both the "left" and the "right" side; the
complexity is kept within the middle.

.. versionadded:: 0.9.2 Support is improved for allowing a :func:`.join()`
   construct to be used directly as the target of the
   :paramref:`~.relationship.secondary` argument, including support for joins,
   eager joins and lazy loading, as well as support within declarative to
   specify complex conditions such as joins involving class names as targets.

.. _relationship_non_primary_mapper:

Relationship to Non Primary Mapper
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In the previous section, we illustrated a technique where we used
:paramref:`~.relationship.secondary` in order to place additional
tables within a join condition.   There is one complex join case where
even this technique is not sufficient; when we seek to join from ``A``
to ``B``, making use of any number of ``C``, ``D``, etc. in between,
but where there are also join conditions between ``A`` and ``B``
*directly*.  In this case, the join from ``A`` to ``B`` may be
difficult to express with just a complex
:paramref:`~.relationship.primaryjoin` condition, as the intermediary
tables may need special handling, and it is also not expressible with
a :paramref:`~.relationship.secondary` object, since the
``A->secondary->B`` pattern does not support any references between
``A`` and ``B`` directly.  When this **extremely advanced** case
arises, we can resort to creating a second mapping as a target for the
relationship.  This is where we use :func:`.mapper` in order to make a
mapping to a class that includes all the additional tables we need for
this join.   In order to produce this mapper as an "alternative" mapping
for our class, we use the :paramref:`~.mapper.non_primary` flag.

Below illustrates a :func:`.relationship` with a simple join from ``A`` to
``B``; however, the primaryjoin condition is augmented with two additional
entities ``C`` and ``D``, which also must have rows that line up with
the rows in both ``A`` and ``B`` simultaneously::

    class A(Base):
        __tablename__ = 'a'

        id = Column(Integer, primary_key=True)
        b_id = Column(ForeignKey('b.id'))

    class B(Base):
        __tablename__ = 'b'

        id = Column(Integer, primary_key=True)

    class C(Base):
        __tablename__ = 'c'

        id = Column(Integer, primary_key=True)
        a_id = Column(ForeignKey('a.id'))

    class D(Base):
        __tablename__ = 'd'

        id = Column(Integer, primary_key=True)
        c_id = Column(ForeignKey('c.id'))
        b_id = Column(ForeignKey('b.id'))

    # 1. set up the join() as a variable, so we can refer
    # to it in the mapping multiple times.
    j = join(B, D, D.b_id == B.id).join(C, C.id == D.c_id)

    # 2. Create a new mapper() to B, with non_primary=True.
    # Columns in the join with the same name must be
    # disambiguated within the mapping, using named properties.
    B_viacd = mapper(B, j, non_primary=True, properties={
        "b_id": [j.c.b_id, j.c.d_b_id],
        "d_id": j.c.d_id
    })

    A.b = relationship(B_viacd, primaryjoin=A.b_id == B_viacd.c.b_id)

In the above case, our non-primary mapper for ``B`` will select additional
columns when we query; these can be ignored:

..
sourcecode:: python+sql sess.query(A).join(A.b).all() {opensql}SELECT a.id AS a_id, a.b_id AS a_b_id FROM a JOIN (b JOIN d ON d.b_id = b.id JOIN c ON c.id = d.c_id) ON a.b_id = b.id Building Query-Enabled Properties ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Very ambitious custom join conditions may fail to be directly persistable, and in some cases may not even load correctly. To remove the persistence part of the equation, use the flag :paramref:`~.relationship.viewonly` on the :func:`~sqlalchemy.orm.relationship`, which establishes it as a read-only attribute (data written to the collection will be ignored on flush()). However, in extreme cases, consider using a regular Python property in conjunction with :class:`.Query` as follows: .. sourcecode:: python+sql class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) def _get_addresses(self): return object_session(self).query(Address).with_parent(self).filter(...).all() addresses = property(_get_addresses) SQLAlchemy-1.0.11/doc/build/orm/extending.rst0000664000175000017500000000021012636375552022011 0ustar classicclassic00000000000000==================== Events and Internals ==================== .. toctree:: :maxdepth: 2 events internals exceptions deprecated SQLAlchemy-1.0.11/doc/build/orm/index.rst0000664000175000017500000000070312636375552021142 0ustar classicclassic00000000000000.. _orm_toplevel: SQLAlchemy ORM =============== Here, the Object Relational Mapper is introduced and fully described. If you want to work with higher-level SQL which is constructed automatically for you, as well as automated persistence of Python objects, proceed first to the tutorial. .. toctree:: :maxdepth: 2 tutorial mapper_config relationships loading_objects session extending extensions/index examples SQLAlchemy-1.0.11/doc/build/orm/self_referential.rst0000664000175000017500000002344412636375552023353 0ustar classicclassic00000000000000.. _self_referential: Adjacency List Relationships ----------------------------- The **adjacency list** pattern is a common relational pattern whereby a table contains a foreign key reference to itself. This is the most common way to represent hierarchical data in flat tables. Other methods include **nested sets**, sometimes called "modified preorder", as well as **materialized path**. Despite the appeal that modified preorder has when evaluated for its fluency within SQL queries, the adjacency list model is probably the most appropriate pattern for the large majority of hierarchical storage needs, for reasons of concurrency, reduced complexity, and that modified preorder has little advantage over an application which can fully load subtrees into the application space. In this example, we'll work with a single mapped class called ``Node``, representing a tree structure:: class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('node.id')) data = Column(String(50)) children = relationship("Node") With this structure, a graph such as the following:: root --+---> child1 +---> child2 --+--> subchild1 | +--> subchild2 +---> child3 Would be represented with data such as:: id parent_id data --- ------- ---- 1 NULL root 2 1 child1 3 1 child2 4 3 subchild1 5 3 subchild2 6 1 child3 The :func:`.relationship` configuration here works in the same way as a "normal" one-to-many relationship, with the exception that the "direction", i.e. whether the relationship is one-to-many or many-to-one, is assumed by default to be one-to-many. 
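With the default one-to-many direction, working with the tree is ordinary
object manipulation.  As a brief sketch (assuming an already-configured
``session``; not part of the original example)::

    root = Node(data='root')
    root.children = [Node(data='child1'), Node(data='child2')]

    # adding the root cascades to the children via the default
    # save-update cascade
    session.add(root)

    # on flush, each child row receives parent_id from root.id
    session.commit()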
To establish the relationship as many-to-one, an extra directive is added known as :paramref:`~.relationship.remote_side`, which is a :class:`.Column` or collection of :class:`.Column` objects that indicate those which should be considered to be "remote":: class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('node.id')) data = Column(String(50)) parent = relationship("Node", remote_side=[id]) Where above, the ``id`` column is applied as the :paramref:`~.relationship.remote_side` of the ``parent`` :func:`.relationship`, thus establishing ``parent_id`` as the "local" side, and the relationship then behaves as a many-to-one. As always, both directions can be combined into a bidirectional relationship using the :func:`.backref` function:: class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('node.id')) data = Column(String(50)) children = relationship("Node", backref=backref('parent', remote_side=[id]) ) There are several examples included with SQLAlchemy illustrating self-referential strategies; these include :ref:`examples_adjacencylist` and :ref:`examples_xmlpersistence`. Composite Adjacency Lists ~~~~~~~~~~~~~~~~~~~~~~~~~ A sub-category of the adjacency list relationship is the rare case where a particular column is present on both the "local" and "remote" side of the join condition. An example is the ``Folder`` class below; using a composite primary key, the ``account_id`` column refers to itself, to indicate sub folders which are within the same account as that of the parent; while ``folder_id`` refers to a specific folder within that account:: class Folder(Base): __tablename__ = 'folder' __table_args__ = ( ForeignKeyConstraint( ['account_id', 'parent_id'], ['folder.account_id', 'folder.folder_id']), ) account_id = Column(Integer, primary_key=True) folder_id = Column(Integer, primary_key=True) parent_id = Column(Integer) name = Column(String) parent_folder = relationship("Folder", backref="child_folders", remote_side=[account_id, folder_id] ) Above, we pass ``account_id`` into the :paramref:`~.relationship.remote_side` list. :func:`.relationship` recognizes that the ``account_id`` column here is on both sides, and aligns the "remote" column along with the ``folder_id`` column, which it recognizes as uniquely present on the "remote" side. .. versionadded:: 0.8 Support for self-referential composite keys in :func:`.relationship` where a column points to itself. Self-Referential Query Strategies ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Querying of self-referential structures works like any other query:: # get all nodes named 'child2' session.query(Node).filter(Node.data=='child2') However extra care is needed when attempting to join along the foreign key from one level of the tree to the next. In SQL, a join from a table to itself requires that at least one side of the expression be "aliased" so that it can be unambiguously referred to. Recall from :ref:`ormtutorial_aliases` in the ORM tutorial that the :func:`.orm.aliased` construct is normally used to provide an "alias" of an ORM entity. Joining from ``Node`` to itself using this technique looks like: .. 
sourcecode:: python+sql

    from sqlalchemy.orm import aliased

    nodealias = aliased(Node)

    {sql}session.query(Node).filter(Node.data=='subchild1').\
            join(nodealias, Node.parent).\
            filter(nodealias.data=="child2").\
            all()
    SELECT node.id AS node_id,
            node.parent_id AS node_parent_id,
            node.data AS node_data
    FROM node JOIN node AS node_1
        ON node.parent_id = node_1.id
    WHERE node.data = ? AND node_1.data = ?
    ['subchild1', 'child2']

:meth:`.Query.join` also includes a feature known as
:paramref:`.Query.join.aliased` that can shorten the verbosity of
self-referential joins, at the expense of query flexibility.  This feature
performs a similar "aliasing" step to that above, without the need for an
explicit entity.  Calls to :meth:`.Query.filter` and similar subsequent to
the aliased join will **adapt** the ``Node`` entity to be that of the alias:

.. sourcecode:: python+sql

    {sql}session.query(Node).filter(Node.data=='subchild1').\
            join(Node.parent, aliased=True).\
            filter(Node.data=='child2').\
            all()
    SELECT node.id AS node_id,
            node.parent_id AS node_parent_id,
            node.data AS node_data
    FROM node
        JOIN node AS node_1 ON node_1.id = node.parent_id
    WHERE node.data = ? AND node_1.data = ?
    ['subchild1', 'child2']

To add criteria to multiple points along a longer join, add
:paramref:`.Query.join.from_joinpoint` to the additional
:meth:`~.Query.join` calls:

.. sourcecode:: python+sql

    # get all nodes named 'subchild1' with a
    # parent named 'child2' and a grandparent 'root'
    {sql}session.query(Node).\
            filter(Node.data=='subchild1').\
            join(Node.parent, aliased=True).\
            filter(Node.data=='child2').\
            join(Node.parent, aliased=True, from_joinpoint=True).\
            filter(Node.data=='root').\
            all()
    SELECT node.id AS node_id,
            node.parent_id AS node_parent_id,
            node.data AS node_data
    FROM node
        JOIN node AS node_1 ON node_1.id = node.parent_id
        JOIN node AS node_2 ON node_2.id = node_1.parent_id
    WHERE node.data = ?
        AND node_1.data = ?
        AND node_2.data = ?
    ['subchild1', 'child2', 'root']

:meth:`.Query.reset_joinpoint` will also remove the "aliasing" from filtering
calls::

    session.query(Node).\
        join(Node.children, aliased=True).\
        filter(Node.data == 'foo').\
        reset_joinpoint().\
        filter(Node.data == 'bar')

For an example of using :paramref:`.Query.join.aliased` to
arbitrarily join along a chain of self-referential nodes, see
:ref:`examples_xmlpersistence`.

.. _self_referential_eager_loading:

Configuring Self-Referential Eager Loading
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Eager loading of relationships occurs using joins or outerjoins from parent to
child table during a normal query operation, such that the parent and its
immediate child collection or reference can be populated from a single SQL
statement, or a second statement for all immediate child collections.
SQLAlchemy's joined and subquery eager loading use aliased tables in all cases
when joining to related items, so are compatible with self-referential
joining.  However, to use eager loading with a self-referential relationship,
SQLAlchemy needs to be told how many levels deep it should join and/or query;
otherwise the eager load will not take place at all.  This depth setting is
configured via :paramref:`~.relationships.join_depth`:

..
sourcecode:: python+sql class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('node.id')) data = Column(String(50)) children = relationship("Node", lazy="joined", join_depth=2) {sql}session.query(Node).all() SELECT node_1.id AS node_1_id, node_1.parent_id AS node_1_parent_id, node_1.data AS node_1_data, node_2.id AS node_2_id, node_2.parent_id AS node_2_parent_id, node_2.data AS node_2_data, node.id AS node_id, node.parent_id AS node_parent_id, node.data AS node_data FROM node LEFT OUTER JOIN node AS node_2 ON node.id = node_2.parent_id LEFT OUTER JOIN node AS node_1 ON node_2.id = node_1.parent_id [] SQLAlchemy-1.0.11/doc/build/orm/mapped_attributes.rst0000664000175000017500000002653012636375552023555 0ustar classicclassic00000000000000.. module:: sqlalchemy.orm Changing Attribute Behavior ============================ .. _simple_validators: Simple Validators ----------------- A quick way to add a "validation" routine to an attribute is to use the :func:`~sqlalchemy.orm.validates` decorator. An attribute validator can raise an exception, halting the process of mutating the attribute's value, or can change the given value into something different. Validators, like all attribute extensions, are only called by normal userland code; they are not issued when the ORM is populating the object:: from sqlalchemy.orm import validates class EmailAddress(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email = Column(String) @validates('email') def validate_email(self, key, address): assert '@' in address return address .. versionchanged:: 1.0.0 - validators are no longer triggered within the flush process when the newly fetched values for primary key columns as well as some python- or server-side defaults are fetched. Prior to 1.0, validators may be triggered in those cases as well. Validators also receive collection append events, when items are added to a collection:: from sqlalchemy.orm import validates class User(Base): # ... addresses = relationship("Address") @validates('addresses') def validate_address(self, key, address): assert '@' in address.email return address The validation function by default does not get emitted for collection remove events, as the typical expectation is that a value being discarded doesn't require validation. However, :func:`.validates` supports reception of these events by specifying ``include_removes=True`` to the decorator. When this flag is set, the validation function must receive an additional boolean argument which if ``True`` indicates that the operation is a removal:: from sqlalchemy.orm import validates class User(Base): # ... addresses = relationship("Address") @validates('addresses', include_removes=True) def validate_address(self, key, address, is_remove): if is_remove: raise ValueError( "not allowed to remove items from the collection") else: assert '@' in address.email return address The case where mutually dependent validators are linked via a backref can also be tailored, using the ``include_backrefs=False`` option; this option, when set to ``False``, prevents a validation function from emitting if the event occurs as a result of a backref:: from sqlalchemy.orm import validates class User(Base): # ... 
addresses = relationship("Address", backref='user') @validates('addresses', include_backrefs=False) def validate_address(self, key, address): assert '@' in address.email return address Above, if we were to assign to ``Address.user`` as in ``some_address.user = some_user``, the ``validate_address()`` function would *not* be emitted, even though an append occurs to ``some_user.addresses`` - the event is caused by a backref. Note that the :func:`~.validates` decorator is a convenience function built on top of attribute events. An application that requires more control over configuration of attribute change behavior can make use of this system, described at :class:`~.AttributeEvents`. .. autofunction:: validates .. _mapper_hybrids: Using Descriptors and Hybrids ----------------------------- A more comprehensive way to produce modified behavior for an attribute is to use :term:`descriptors`. These are commonly used in Python using the ``property()`` function. The standard SQLAlchemy technique for descriptors is to create a plain descriptor, and to have it read/write from a mapped attribute with a different name. Below we illustrate this using Python 2.6-style properties:: class EmailAddress(Base): __tablename__ = 'email_address' id = Column(Integer, primary_key=True) # name the attribute with an underscore, # different from the column name _email = Column("email", String) # then create an ".email" attribute # to get/set "._email" @property def email(self): return self._email @email.setter def email(self, email): self._email = email The approach above will work, but there's more we can add. While our ``EmailAddress`` object will shuttle the value through the ``email`` descriptor and into the ``_email`` mapped attribute, the class level ``EmailAddress.email`` attribute does not have the usual expression semantics usable with :class:`.Query`. To provide these, we instead use the :mod:`~sqlalchemy.ext.hybrid` extension as follows:: from sqlalchemy.ext.hybrid import hybrid_property class EmailAddress(Base): __tablename__ = 'email_address' id = Column(Integer, primary_key=True) _email = Column("email", String) @hybrid_property def email(self): return self._email @email.setter def email(self, email): self._email = email The ``.email`` attribute, in addition to providing getter/setter behavior when we have an instance of ``EmailAddress``, also provides a SQL expression when used at the class level, that is, from the ``EmailAddress`` class directly: .. sourcecode:: python+sql from sqlalchemy.orm import Session session = Session() {sql}address = session.query(EmailAddress).\ filter(EmailAddress.email == 'address@example.com').\ one() SELECT address.email AS address_email, address.id AS address_id FROM address WHERE address.email = ? ('address@example.com',) {stop} address.email = 'otheraddress@example.com' {sql}session.commit() UPDATE address SET email=? WHERE address.id = ? ('otheraddress@example.com', 1) COMMIT {stop} The :class:`~.hybrid_property` also allows us to change the behavior of the attribute, including defining separate behaviors when the attribute is accessed at the instance level versus at the class/expression level, using the :meth:`.hybrid_property.expression` modifier. 
Such as, if we wanted to add a host name automatically, we might define two sets of string manipulation logic:: class EmailAddress(Base): __tablename__ = 'email_address' id = Column(Integer, primary_key=True) _email = Column("email", String) @hybrid_property def email(self): """Return the value of _email up until the last twelve characters.""" return self._email[:-12] @email.setter def email(self, email): """Set the value of _email, tacking on the twelve character value @example.com.""" self._email = email + "@example.com" @email.expression def email(cls): """Produce a SQL expression that represents the value of the _email column, minus the last twelve characters.""" return func.substr(cls._email, 0, func.length(cls._email) - 12) Above, accessing the ``email`` property of an instance of ``EmailAddress`` will return the value of the ``_email`` attribute, removing or adding the hostname ``@example.com`` from the value. When we query against the ``email`` attribute, a SQL function is rendered which produces the same effect: .. sourcecode:: python+sql {sql}address = session.query(EmailAddress).filter(EmailAddress.email == 'address').one() SELECT address.email AS address_email, address.id AS address_id FROM address WHERE substr(address.email, ?, length(address.email) - ?) = ? (0, 12, 'address') {stop} Read more about Hybrids at :ref:`hybrids_toplevel`. .. _synonyms: Synonyms -------- Synonyms are a mapper-level construct that allow any attribute on a class to "mirror" another attribute that is mapped. In the most basic sense, the synonym is an easy way to make a certain attribute available by an additional name:: class MyClass(Base): __tablename__ = 'my_table' id = Column(Integer, primary_key=True) job_status = Column(String(50)) status = synonym("job_status") The above class ``MyClass`` has two attributes, ``.job_status`` and ``.status`` that will behave as one attribute, both at the expression level:: >>> print MyClass.job_status == 'some_status' my_table.job_status = :job_status_1 >>> print MyClass.status == 'some_status' my_table.job_status = :job_status_1 and at the instance level:: >>> m1 = MyClass(status='x') >>> m1.status, m1.job_status ('x', 'x') >>> m1.job_status = 'y' >>> m1.status, m1.job_status ('y', 'y') The :func:`.synonym` can be used for any kind of mapped attribute that subclasses :class:`.MapperProperty`, including mapped columns and relationships, as well as synonyms themselves. Beyond a simple mirror, :func:`.synonym` can also be made to reference a user-defined :term:`descriptor`. We can supply our ``status`` synonym with a ``@property``:: class MyClass(Base): __tablename__ = 'my_table' id = Column(Integer, primary_key=True) status = Column(String(50)) @property def job_status(self): return "Status: " + self.status job_status = synonym("status", descriptor=job_status) When using Declarative, the above pattern can be expressed more succinctly using the :func:`.synonym_for` decorator:: from sqlalchemy.ext.declarative import synonym_for class MyClass(Base): __tablename__ = 'my_table' id = Column(Integer, primary_key=True) status = Column(String(50)) @synonym_for("status") @property def job_status(self): return "Status: " + self.status While the :func:`.synonym` is useful for simple mirroring, the use case of augmenting attribute behavior with descriptors is better handled in modern usage using the :ref:`hybrid attribute ` feature, which is more oriented towards Python descriptors. 
Technically, a :func:`.synonym` can do everything that a :class:`.hybrid_property` can do, as it also supports injection of custom SQL capabilities, but the hybrid is more straightforward to use in more complex situations. .. autofunction:: synonym .. _custom_comparators: Operator Customization ---------------------- The "operators" used by the SQLAlchemy ORM and Core expression language are fully customizable. For example, the comparison expression ``User.name == 'ed'`` makes usage of an operator built into Python itself called ``operator.eq`` - the actual SQL construct which SQLAlchemy associates with such an operator can be modified. New operations can be associated with column expressions as well. The operators which take place for column expressions are most directly redefined at the type level - see the section :ref:`types_operators` for a description. ORM level functions like :func:`.column_property`, :func:`.relationship`, and :func:`.composite` also provide for operator redefinition at the ORM level, by passing a :class:`.PropComparator` subclass to the ``comparator_factory`` argument of each function. Customization of operators at this level is a rare use case. See the documentation at :class:`.PropComparator` for an overview. SQLAlchemy-1.0.11/doc/build/orm/persistence_techniques.rst0000664000175000017500000002557312636375552024623 0ustar classicclassic00000000000000================================= Additional Persistence Techniques ================================= .. _flush_embedded_sql_expressions: Embedding SQL Insert/Update Expressions into a Flush ===================================================== This feature allows the value of a database column to be set to a SQL expression instead of a literal value. It's especially useful for atomic updates, calling stored procedures, etc. All you do is assign an expression to an attribute:: class SomeClass(object): pass mapper(SomeClass, some_table) someobject = session.query(SomeClass).get(5) # set 'value' attribute to a SQL expression adding one someobject.value = some_table.c.value + 1 # issues "UPDATE some_table SET value=value+1" session.commit() This technique works both for INSERT and UPDATE statements. After the flush/commit operation, the ``value`` attribute on ``someobject`` above is expired, so that when next accessed the newly generated value will be loaded from the database. .. _session_sql_expressions: Using SQL Expressions with Sessions ==================================== SQL expressions and strings can be executed via the :class:`~sqlalchemy.orm.session.Session` within its transactional context. This is most easily accomplished using the :meth:`~.Session.execute` method, which returns a :class:`~sqlalchemy.engine.ResultProxy` in the same manner as an :class:`~sqlalchemy.engine.Engine` or :class:`~sqlalchemy.engine.Connection`:: Session = sessionmaker(bind=engine) session = Session() # execute a string statement result = session.execute("select * from table where id=:id", {'id':7}) # execute a SQL expression construct result = session.execute(select([mytable]).where(mytable.c.id==7)) The current :class:`~sqlalchemy.engine.Connection` held by the :class:`~sqlalchemy.orm.session.Session` is accessible using the :meth:`~.Session.connection` method:: connection = session.connection() The examples above deal with a :class:`~sqlalchemy.orm.session.Session` that's bound to a single :class:`~sqlalchemy.engine.Engine` or :class:`~sqlalchemy.engine.Connection`. 
To execute statements using a :class:`~sqlalchemy.orm.session.Session` which is bound either to multiple engines, or none at all (i.e. relies upon bound metadata), both :meth:`~.Session.execute` and :meth:`~.Session.connection` accept a ``mapper`` keyword argument, which is passed a mapped class or :class:`~sqlalchemy.orm.mapper.Mapper` instance, which is used to locate the proper context for the desired engine:: Session = sessionmaker() session = Session() # need to specify mapper or class when executing result = session.execute("select * from table where id=:id", {'id':7}, mapper=MyMappedClass) result = session.execute(select([mytable], mytable.c.id==7), mapper=MyMappedClass) connection = session.connection(MyMappedClass) .. _session_partitioning: Partitioning Strategies ======================= Simple Vertical Partitioning ---------------------------- Vertical partitioning places different kinds of objects, or different tables, across multiple databases:: engine1 = create_engine('postgresql://db1') engine2 = create_engine('postgresql://db2') Session = sessionmaker(twophase=True) # bind User operations to engine 1, Account operations to engine 2 Session.configure(binds={User:engine1, Account:engine2}) session = Session() Above, operations against either class will make usage of the :class:`.Engine` linked to that class. Upon a flush operation, similar rules take place to ensure each class is written to the right database. The transactions among the multiple databases can optionally be coordinated via two phase commit, if the underlying backend supports it. See :ref:`session_twophase` for an example. Custom Vertical Partitioning ---------------------------- More comprehensive rule-based class-level partitioning can be built by overriding the :meth:`.Session.get_bind` method. Below we illustrate a custom :class:`.Session` which delivers the following rules: 1. Flush operations are delivered to the engine named ``master``. 2. Operations on objects that subclass ``MyOtherClass`` all occur on the ``other`` engine. 3. Read operations for all other classes occur on a random choice of the ``slave1`` or ``slave2`` database. :: engines = { 'master':create_engine("sqlite:///master.db"), 'other':create_engine("sqlite:///other.db"), 'slave1':create_engine("sqlite:///slave1.db"), 'slave2':create_engine("sqlite:///slave2.db"), } from sqlalchemy.orm import Session, sessionmaker import random class RoutingSession(Session): def get_bind(self, mapper=None, clause=None): if mapper and issubclass(mapper.class_, MyOtherClass): return engines['other'] elif self._flushing: return engines['master'] else: return engines[ random.choice(['slave1','slave2']) ] The above :class:`.Session` class is plugged in using the ``class_`` argument to :class:`.sessionmaker`:: Session = sessionmaker(class_=RoutingSession) This approach can be combined with multiple :class:`.MetaData` objects, using an approach such as that of using the declarative ``__abstract__`` keyword, described at :ref:`declarative_abstract`. Horizontal Partitioning ----------------------- Horizontal partitioning partitions the rows of a single table (or a set of tables) across multiple databases. See the "sharding" example: :ref:`examples_sharding`. .. _bulk_operations: Bulk Operations =============== .. 
note:: Bulk Operations mode is a new series of operations made available
   on the :class:`.Session` object for the purpose of invoking INSERT and
   UPDATE statements with greatly reduced Python overhead, at the expense
   of much less functionality, automation, and error checking.
   As of SQLAlchemy 1.0, these features should be considered as "beta", and
   additionally are intended for advanced users.

.. versionadded:: 1.0.0

Bulk operations on the :class:`.Session` include
:meth:`.Session.bulk_save_objects`, :meth:`.Session.bulk_insert_mappings`,
and :meth:`.Session.bulk_update_mappings`.
The purpose of these methods is to directly expose internal elements of the
unit of work system, such that facilities for emitting INSERT and UPDATE
statements given dictionaries or object states can be utilized alone,
bypassing the normal unit of work mechanics of state, relationship and
attribute management.   The advantage to this approach is strictly one
of reduced Python overhead:

* The flush() process, including the survey of all objects, their state,
  their cascade status, the status of all objects associated with them
  via :func:`.relationship`, and the topological sort of all operations to
  be performed is completely bypassed.  This reduces a great amount of
  Python overhead.

* The objects as given have no defined relationship to the target
  :class:`.Session`, even when the operation is complete, meaning there's no
  overhead in attaching them or managing their state in terms of the identity
  map or session.

* The :meth:`.Session.bulk_insert_mappings` and
  :meth:`.Session.bulk_update_mappings` methods accept lists of plain Python
  dictionaries, not objects; this further reduces a large amount of overhead
  associated with instantiating mapped objects and assigning state to them,
  which normally is also subject to expensive tracking of history on a
  per-attribute basis.

* The process of fetching primary keys after an INSERT is also disabled by
  default.   When performed correctly, INSERT statements can now more readily
  be batched by the unit of work process into ``executemany()`` blocks, which
  perform vastly better than individual statement invocations.

* UPDATE statements can similarly be tailored such that all attributes
  are subject to the SET clause unconditionally, again making it much more
  likely that ``executemany()`` blocks can be used.

The performance behavior of the bulk routines should be studied using the
:ref:`examples_performance` example suite.  This is a series of example
scripts which illustrate Python call-counts across a variety of scenarios,
including bulk insert and update scenarios.

.. seealso::

    :ref:`examples_performance` - includes detailed examples of bulk operations
    contrasted against traditional Core and ORM methods, including performance
    metrics.

Usage
-----

The methods each work in the context of the :class:`.Session` object's
transaction, like any other::

    s = Session()
    objects = [
        User(name="u1"),
        User(name="u2"),
        User(name="u3")
    ]
    s.bulk_save_objects(objects)

For :meth:`.Session.bulk_insert_mappings`, and
:meth:`.Session.bulk_update_mappings`, dictionaries are passed::

    s.bulk_insert_mappings(User,
        [dict(name="u1"), dict(name="u2"), dict(name="u3")]
    )

..
seealso:: :meth:`.Session.bulk_save_objects` :meth:`.Session.bulk_insert_mappings` :meth:`.Session.bulk_update_mappings` Comparison to Core Insert / Update Constructs --------------------------------------------- The bulk methods offer performance that under particular circumstances can be close to that of using the core :class:`.Insert` and :class:`.Update` constructs in an "executemany" context (for a description of "executemany", see :ref:`execute_multiple` in the Core tutorial). In order to achieve this, the :paramref:`.Session.bulk_insert_mappings.return_defaults` flag should be disabled so that rows can be batched together. The example suite in :ref:`examples_performance` should be carefully studied in order to gain familiarity with how fast bulk performance can be achieved. ORM Compatibility ----------------- The bulk insert / update methods lose a significant amount of functionality versus traditional ORM use. The following is a listing of features that are **not available** when using these methods: * persistence along :func:`.relationship` linkages * sorting of rows within order of dependency; rows are inserted or updated directly in the order in which they are passed to the methods * Session-management on the given objects, including attachment to the session, identity map management. * Functionality related to primary key mutation, ON UPDATE cascade * SQL expression inserts / updates (e.g. :ref:`flush_embedded_sql_expressions`) * ORM events such as :meth:`.MapperEvents.before_insert`, etc. The bulk session methods have no event support. Features that **are available** include: * INSERTs and UPDATEs of mapped objects * Version identifier support * Multi-table mappings, such as joined-inheritance - however, an object to be inserted across multiple tables either needs to have primary key identifiers fully populated ahead of time, else the :paramref:`.Session.bulk_save_objects.return_defaults` flag must be used, which will greatly reduce the performance benefits SQLAlchemy-1.0.11/doc/build/orm/nonstandard_mappings.rst0000664000175000017500000001673212636375552024255 0ustar classicclassic00000000000000======================== Non-Traditional Mappings ======================== .. _maptojoin: Mapping a Class against Multiple Tables ======================================== Mappers can be constructed against arbitrary relational units (called *selectables*) in addition to plain tables. For example, the :func:`~.expression.join` function creates a selectable unit comprised of multiple tables, complete with its own composite primary key, which can be mapped in the same way as a :class:`.Table`:: from sqlalchemy import Table, Column, Integer, \ String, MetaData, join, ForeignKey from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import column_property metadata = MetaData() # define two Table objects user_table = Table('user', metadata, Column('id', Integer, primary_key=True), Column('name', String), ) address_table = Table('address', metadata, Column('id', Integer, primary_key=True), Column('user_id', Integer, ForeignKey('user.id')), Column('email_address', String) ) # define a join between them. This # takes place across the user.id and address.user_id # columns. 
user_address_join = join(user_table, address_table) Base = declarative_base() # map to it class AddressUser(Base): __table__ = user_address_join id = column_property(user_table.c.id, address_table.c.user_id) address_id = address_table.c.id In the example above, the join expresses columns for both the ``user`` and the ``address`` table. The ``user.id`` and ``address.user_id`` columns are equated by foreign key, so in the mapping they are defined as one attribute, ``AddressUser.id``, using :func:`.column_property` to indicate a specialized column mapping. Based on this part of the configuration, the mapping will copy new primary key values from ``user.id`` into the ``address.user_id`` column when a flush occurs. Additionally, the ``address.id`` column is mapped explicitly to an attribute named ``address_id``. This is to **disambiguate** the mapping of the ``address.id`` column from the same-named ``AddressUser.id`` attribute, which here has been assigned to refer to the ``user`` table combined with the ``address.user_id`` foreign key. The natural primary key of the above mapping is the composite of ``(user.id, address.id)``, as these are the primary key columns of the ``user`` and ``address`` table combined together. The identity of an ``AddressUser`` object will be in terms of these two values, and is represented from an ``AddressUser`` object as ``(AddressUser.id, AddressUser.address_id)``. Mapping a Class against Arbitrary Selects ========================================= Similar to mapping against a join, a plain :func:`~.expression.select` object can be used with a mapper as well. The example fragment below illustrates mapping a class called ``Customer`` to a :func:`~.expression.select` which includes a join to a subquery:: from sqlalchemy import select, func subq = select([ func.count(orders.c.id).label('order_count'), func.max(orders.c.price).label('highest_order'), orders.c.customer_id ]).group_by(orders.c.customer_id).alias() customer_select = select([customers, subq]).\ select_from( join(customers, subq, customers.c.id == subq.c.customer_id) ).alias() class Customer(Base): __table__ = customer_select Above, the full row represented by ``customer_select`` will be all the columns of the ``customers`` table, in addition to those columns exposed by the ``subq`` subquery, which are ``order_count``, ``highest_order``, and ``customer_id``. Mapping the ``Customer`` class to this selectable then creates a class which will contain those attributes. When the ORM persists new instances of ``Customer``, only the ``customers`` table will actually receive an INSERT. This is because the primary key of the ``orders`` table is not represented in the mapping; the ORM will only emit an INSERT into a table for which it has mapped the primary key. .. note:: The practice of mapping to arbitrary SELECT statements, especially complex ones as above, is almost never needed; it necessarily tends to produce complex queries which are often less efficient than that which would be produced by direct query construction. The practice is to some degree based on the very early history of SQLAlchemy where the :func:`.mapper` construct was meant to represent the primary querying interface; in modern usage, the :class:`.Query` object can be used to construct virtually any SELECT statement, including complex composites, and should be favored over the "map-to-selectable" approach. 
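As a rough illustration of the :class:`.Query`-oriented alternative the note
describes, the same aggregate data can typically be fetched without mapping a
class to the SELECT at all.  A hedged sketch, assuming ``Customer`` and
``Order`` classes mapped plainly to the ``customers`` and ``orders`` tables::

    from sqlalchemy import func

    # build the aggregate subquery with the ORM instead of core select()
    subq = session.query(
                Order.customer_id,
                func.count(Order.id).label('order_count'),
                func.max(Order.price).label('highest_order')
            ).group_by(Order.customer_id).subquery()

    # each result row pairs a Customer entity with its aggregate values
    result = session.query(
                Customer, subq.c.order_count, subq.c.highest_order).\
        outerjoin(subq, Customer.id == subq.c.customer_id).\
        all()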
Multiple Mappers for One Class
==============================

In modern SQLAlchemy, a particular class is mapped by only one so-called
**primary** mapper at a time.   This mapper is involved in three main
areas of functionality: querying, persistence, and instrumentation of the
mapped class.   The rationale of the primary mapper relates to the fact
that the :func:`.mapper` modifies the class itself, not only
persisting it towards a particular :class:`.Table`, but also
:term:`instrumenting` attributes upon the class which are structured
specifically according to the table metadata.   It's not possible for more
than one mapper to be associated with a class in equal measure, since only
one mapper can actually instrument the class.

However, there is a class of mapper known as the **non primary** mapper
which allows additional mappers to be associated with a class, but with
a limited scope of use.  This scope typically applies to being able to
load rows from an alternate table or selectable unit, but still producing
classes which are ultimately persisted using the primary mapping.   The
non-primary mapper is created using the classical style of mapping against
a class that is already mapped with a primary mapper, and involves the use
of the :paramref:`~sqlalchemy.orm.mapper.non_primary` flag.

The non primary mapper is of very limited use in modern SQLAlchemy, as the
task of being able to load classes from subqueries or other compound
statements can now be accomplished using the :class:`.Query` object
directly.

There is really only one use case for the non-primary mapper, which is that
we wish to build a :func:`.relationship` to such a mapper; this is useful
in the rare and advanced case that our relationship is attempting to join
two classes together using many tables and/or joins in between.  An example
of this pattern is at :ref:`relationship_non_primary_mapper`.

As far as the use case of a class that can actually be fully persisted to
different tables under different scenarios, very early versions of
SQLAlchemy offered a feature for this adapted from Hibernate, known as the
"entity name" feature.  However, this use case became infeasible within
SQLAlchemy once the mapped class itself became the source of SQL expression
construction; that is, the class' attributes themselves link directly to
mapped table columns.   The feature was removed and replaced with a simple
recipe-oriented approach to accomplishing this task without any ambiguity
of instrumentation - to create new subclasses, each mapped individually.
This pattern is now available as a recipe at `Entity Name `_.
SQLAlchemy-1.0.11/doc/build/orm/mapping_api.rst0000664000175000017500000000061112636375552022315 0ustar classicclassic00000000000000.. module:: sqlalchemy.orm

Class Mapping API
=================

.. autofunction:: mapper

.. autofunction:: object_mapper

.. autofunction:: class_mapper

.. autofunction:: configure_mappers

.. autofunction:: clear_mappers

.. autofunction:: sqlalchemy.orm.util.identity_key

.. autofunction:: sqlalchemy.orm.util.polymorphic_union

.. autoclass:: sqlalchemy.orm.mapper.Mapper
   :members:
SQLAlchemy-1.0.11/doc/build/orm/session.rst0000664000175000017500000000105712636375552021521 0ustar classicclassic00000000000000.. _session_toplevel:

=================
Using the Session
=================

.. module:: sqlalchemy.orm.session

The :func:`.orm.mapper` function and :mod:`~sqlalchemy.ext.declarative` extensions
are the primary configurational interface for the ORM.
Once mappings are configured, the primary usage interface for
persistence operations is the
:class:`.Session`.

.. toctree::
    :maxdepth: 2

    session_basics
    session_state_management
    cascades
    session_transaction
    persistence_techniques
    contextual
    session_events
    session_api
SQLAlchemy-1.0.11/doc/build/orm/classical.rst0000664000175000017500000000005612636375552021772 0ustar classicclassic00000000000000:orphan:

Moved! :ref:`classical_mapping`
SQLAlchemy-1.0.11/doc/build/orm/session_state_management.rst0000664000175000017500000006141112636375552025115 0ustar classicclassic00000000000000State Management
================

.. _session_object_states:

Quickie Intro to Object States
------------------------------

It's helpful to know the states which an instance can have within a session:

* **Transient** - an instance that's not in a session, and is not saved to the
  database; i.e. it has no database identity.  The only relationship such an
  object has to the ORM is that its class has a ``mapper()`` associated with
  it.

* **Pending** - when you :meth:`~.Session.add` a transient
  instance, it becomes pending.  It hasn't actually been flushed to the
  database yet, but it will be when the next flush occurs.

* **Persistent** - An instance which is present in the session and has a
  record in the database.  You get persistent instances by either flushing
  so that the pending instances become persistent, or by querying the
  database for existing instances (or moving persistent instances from other
  sessions into your local session).

  .. note:: An object that is marked as deleted, e.g. via the
     :meth:`.Session.delete` method, is still considered persistent.  The
     object remains in the identity map until the flush proceeds and a DELETE
     statement is emitted, at which point the object moves to the state that
     is for most practical purposes "detached" - after the session's
     transaction is committed, the object becomes fully detached.
     SQLAlchemy 1.1 will introduce a new object state called "deleted" which
     represents this "deleted but not quite detached" state explicitly.

* **Detached** - an instance which corresponds, or previously corresponded,
  to a record in the database, but is not currently in any session.
  The detached object will contain a database identity marker, however
  because it is not associated with a session, it is unknown whether or not
  this database identity actually exists in a target database.  Detached
  objects are safe to use normally, except that they have no ability to load
  unloaded attributes or attributes that were previously marked as "expired".

Knowing these states is important, since the
:class:`.Session` tries to be strict about ambiguous
operations (such as trying to save the same object to two different sessions
at the same time).

Getting the Current State of an Object
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The actual state of any mapped object can be viewed at any time using
the :func:`.inspect` system::

    >>> from sqlalchemy import inspect
    >>> insp = inspect(my_object)
    >>> insp.persistent
    True

.. seealso::

    :attr:`.InstanceState.transient`

    :attr:`.InstanceState.pending`

    :attr:`.InstanceState.persistent`

    :attr:`.InstanceState.detached`

.. _session_attributes:

Session Attributes
------------------

The :class:`~sqlalchemy.orm.session.Session` itself acts somewhat like a
set-like collection.
All items present may be accessed using the iterator interface:: for obj in session: print obj And presence may be tested for using regular "contains" semantics:: if obj in session: print "Object is present" The session is also keeping track of all newly created (i.e. pending) objects, all objects which have had changes since they were last loaded or saved (i.e. "dirty"), and everything that's been marked as deleted:: # pending objects recently added to the Session session.new # persistent objects which currently have changes detected # (this collection is now created on the fly each time the property is called) session.dirty # persistent objects that have been marked as deleted via session.delete(obj) session.deleted # dictionary of all persistent objects, keyed on their # identity key session.identity_map (Documentation: :attr:`.Session.new`, :attr:`.Session.dirty`, :attr:`.Session.deleted`, :attr:`.Session.identity_map`). Note that objects within the session are *weakly referenced*. This means that when they are dereferenced in the outside application, they fall out of scope from within the :class:`~sqlalchemy.orm.session.Session` as well and are subject to garbage collection by the Python interpreter. The exceptions to this include objects which are pending, objects which are marked as deleted, or persistent objects which have pending changes on them. After a full flush, these collections are all empty, and all objects are again weakly referenced. .. note:: To disable the weak referencing behavior and force all objects within the session to remain until explicitly expunged, configure :class:`.sessionmaker` with the ``weak_identity_map=False`` setting. However note that this option is **deprecated**; it is present only to allow compatibility with older applications, typically those that were made back before SQLAlchemy had the ability to effectively weak-reference all objects. It is recommended that strong references to objects be maintained by the calling application externally to the :class:`.Session` itself, to the extent that is required by the application. This eliminates the :class:`.Session` as a possible source of unbounded memory growth in the case where large numbers of objects are being loaded and/or persisted. Simple examples of externally managed strong-referencing behavior include loading objects into a local dictionary keyed to their primary key, or into lists or sets for the span of time that they need to remain referenced. These collections can be associated with a :class:`.Session`, if desired, by placing them into the :attr:`.Session.info` dictionary. Events such as the :meth:`.SessionEvents.after_attach` and :meth:`.MapperEvents.load` event may also be of use for intercepting objects as they are associated with a :class:`.Session`. .. _unitofwork_merging: Merging ------- :meth:`~.Session.merge` transfers state from an outside object into a new or already existing instance within a session. It also reconciles the incoming data against the state of the database, producing a history stream which will be applied towards the next flush, or alternatively can be made to produce a simple "transfer" of state without producing change history or accessing the database. Usage is as follows:: merged_object = session.merge(existing_object) When given an instance, it follows these steps: * It examines the primary key of the instance. If it's present, it attempts to locate that instance in the local identity map. 
If the ``load=True`` flag is left at its default, it also checks the database for this primary key if not located locally. * If the given instance has no primary key, or if no instance can be found with the primary key given, a new instance is created. * The state of the given instance is then copied onto the located/newly created instance. For attributes which are present on the source instance, the value is transferred to the target instance. For mapped attributes which aren't present on the source, the attribute is expired on the target instance, discarding its existing value. If the ``load=True`` flag is left at its default, this copy process emits events and will load the target object's unloaded collections for each attribute present on the source object, so that the incoming state can be reconciled against what's present in the database. If ``load`` is passed as ``False``, the incoming data is "stamped" directly without producing any history. * The operation is cascaded to related objects and collections, as indicated by the ``merge`` cascade (see :ref:`unitofwork_cascades`). * The new instance is returned. With :meth:`~.Session.merge`, the given "source" instance is not modified nor is it associated with the target :class:`.Session`, and remains available to be merged with any number of other :class:`.Session` objects. :meth:`~.Session.merge` is useful for taking the state of any kind of object structure without regard for its origins or current session associations and copying its state into a new session. Here's some examples: * An application which reads an object structure from a file and wishes to save it to the database might parse the file, build up the structure, and then use :meth:`~.Session.merge` to save it to the database, ensuring that the data within the file is used to formulate the primary key of each element of the structure. Later, when the file has changed, the same process can be re-run, producing a slightly different object structure, which can then be ``merged`` in again, and the :class:`~sqlalchemy.orm.session.Session` will automatically update the database to reflect those changes, loading each object from the database by primary key and then updating its state with the new state given. * An application is storing objects in an in-memory cache, shared by many :class:`.Session` objects simultaneously. :meth:`~.Session.merge` is used each time an object is retrieved from the cache to create a local copy of it in each :class:`.Session` which requests it. The cached object remains detached; only its state is moved into copies of itself that are local to individual :class:`~.Session` objects. In the caching use case, it's common to use the ``load=False`` flag to remove the overhead of reconciling the object's state with the database. There's also a "bulk" version of :meth:`~.Session.merge` called :meth:`~.Query.merge_result` that was designed to work with cache-extended :class:`.Query` objects - see the section :ref:`examples_caching`. * An application wants to transfer the state of a series of objects into a :class:`.Session` maintained by a worker thread or other concurrent system. :meth:`~.Session.merge` makes a copy of each object to be placed into this new :class:`.Session`. At the end of the operation, the parent thread/process maintains the objects it started with, and the thread/worker can proceed with local copies of those objects. 
In the "transfer between threads/processes" use case, the application may want to use the ``load=False`` flag as well to avoid overhead and redundant SQL queries as the data is transferred. Merge Tips ~~~~~~~~~~ :meth:`~.Session.merge` is an extremely useful method for many purposes. However, it deals with the intricate border between objects that are transient/detached and those that are persistent, as well as the automated transference of state. The wide variety of scenarios that can present themselves here often require a more careful approach to the state of objects. Common problems with merge usually involve some unexpected state regarding the object being passed to :meth:`~.Session.merge`. Lets use the canonical example of the User and Address objects:: class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(50), nullable=False) addresses = relationship("Address", backref="user") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email_address = Column(String(50), nullable=False) user_id = Column(Integer, ForeignKey('user.id'), nullable=False) Assume a ``User`` object with one ``Address``, already persistent:: >>> u1 = User(name='ed', addresses=[Address(email_address='ed@ed.com')]) >>> session.add(u1) >>> session.commit() We now create ``a1``, an object outside the session, which we'd like to merge on top of the existing ``Address``:: >>> existing_a1 = u1.addresses[0] >>> a1 = Address(id=existing_a1.id) A surprise would occur if we said this:: >>> a1.user = u1 >>> a1 = session.merge(a1) >>> session.commit() sqlalchemy.orm.exc.FlushError: New instance
    <Address at 0x...> with identity key
    (<class '__main__.Address'>, (1,)) conflicts with
    persistent instance <Address at 0x...>
Why is that?  We weren't careful with our cascades.  The assignment of
``a1.user`` to a persistent object cascaded to the backref of
``User.addresses`` and made our ``a1`` object pending, as though we had
added it.  Now we have *two* ``Address`` objects in the session::

    >>> a1 = Address()
    >>> a1.user = u1
    >>> a1 in session
    True
    >>> existing_a1 in session
    True
    >>> a1 is existing_a1
    False

Above, our ``a1`` is already pending in the session.  The subsequent
:meth:`~.Session.merge` operation essentially does nothing.  Cascade can be
configured via the :paramref:`~.relationship.cascade` option on
:func:`.relationship`, although in this case it would mean removing the
``save-update`` cascade from the ``User.addresses`` relationship - and
usually, that behavior is extremely convenient.  The solution here would
usually be to not assign ``a1.user`` to an object already persistent in the
target session.

The ``cascade_backrefs=False`` option of :func:`.relationship` will also
prevent the ``Address`` from being added to the session via the
``a1.user = u1`` assignment.

Further detail on cascade operation is at :ref:`unitofwork_cascades`.

Another example of unexpected state::

    >>> a1 = Address(id=existing_a1.id, user_id=u1.id)
    >>> a1.user is None
    True
    >>> a1 = session.merge(a1)
    >>> session.commit()
    sqlalchemy.exc.IntegrityError: (IntegrityError) address.user_id
    may not be NULL

Here, we accessed ``a1.user``, which returned its default value of ``None``,
which as a result of this access, has been placed in the ``__dict__`` of our
object ``a1``.  Normally, this operation creates no change event, so the
``user_id`` attribute takes precedence during a flush.  But when we merge the
``Address`` object into the session, the operation is equivalent to::

    >>> existing_a1.id = existing_a1.id
    >>> existing_a1.user_id = u1.id
    >>> existing_a1.user = None

Where above, both ``user_id`` and ``user`` are assigned to, and change events
are emitted for both.  The ``user`` association takes precedence, and None is
applied to ``user_id``, causing a failure.

Most :meth:`~.Session.merge` issues can be examined by first checking - is
the object prematurely in the session?

.. sourcecode:: python+sql

    >>> a1 = Address(id=existing_a1.id, user_id=u1.id)
    >>> assert a1 not in session
    >>> a1 = session.merge(a1)

Or is there state on the object that we don't want?  Examining ``__dict__``
is a quick way to check::

    >>> a1 = Address(id=existing_a1.id, user_id=u1.id)
    >>> a1.user
    >>> a1.__dict__
    {'_sa_instance_state': <sqlalchemy.orm.state.InstanceState object at 0x...>,
        'user_id': 1,
        'id': 1,
        'user': None}
    >>> # we don't want user=None merged, remove it
    >>> del a1.user
    >>> a1 = session.merge(a1)
    >>> # success
    >>> session.commit()

Expunging
---------

Expunge removes an object from the Session, sending persistent instances to
the detached state, and pending instances to the transient state:

.. sourcecode:: python+sql

    session.expunge(obj1)

To remove all items, call :meth:`~.Session.expunge_all` (this method was
formerly known as ``clear()``).

.. _session_expire:

Refreshing / Expiring
---------------------

:term:`Expiring` means that the database-persisted data held inside a series
of object attributes is erased, in such a way that when those attributes are
next accessed, a SQL query is emitted which will refresh that data from the
database.

When we talk about expiration of data we are usually talking about an object
that is in the :term:`persistent` state.
For example, if we load an object as follows::

    user = session.query(User).filter_by(name='user1').first()

The above ``User`` object is persistent, and has a series of attributes
present; if we were to look inside its ``__dict__``, we'd see that state
loaded::

    >>> user.__dict__
    {
      'id': 1, 'name': u'user1',
      '_sa_instance_state': <...>,
    }

where ``id`` and ``name`` refer to those columns in the database.
``_sa_instance_state`` is a non-database-persisted value used by SQLAlchemy
internally (it refers to the :class:`.InstanceState` for the instance.
While not directly relevant to this section, if we want to get at it,
we should use the :func:`.inspect` function to access it).

At this point, the state in our ``User`` object matches that of the loaded
database row.  But upon expiring the object using a method such as
:meth:`.Session.expire`, we see that the state is removed::

    >>> session.expire(user)
    >>> user.__dict__
    {'_sa_instance_state': <...>}

We see that while the internal "state" still hangs around, the values which
correspond to the ``id`` and ``name`` columns are gone.  If we were to access
one of these columns and are watching SQL, we'd see this:

.. sourcecode:: python+sql

    >>> print(user.name)
    {opensql}SELECT user.id AS user_id, user.name AS user_name
    FROM user
    WHERE user.id = ?
    (1,)
    {stop}user1

Above, upon accessing the expired attribute ``user.name``, the ORM initiated
a :term:`lazy load` to retrieve the most recent state from the database, by
emitting a SELECT for the user row to which this user refers.  Afterwards,
the ``__dict__`` is again populated::

    >>> user.__dict__
    {
      'id': 1, 'name': u'user1',
      '_sa_instance_state': <...>,
    }

.. note:: While we are peeking inside of ``__dict__`` in order to see a bit
   of what SQLAlchemy does with object attributes, we **should not modify**
   the contents of ``__dict__`` directly, at least as far as those attributes
   which the SQLAlchemy ORM is maintaining (other attributes outside of
   SQLA's realm are fine).  This is because SQLAlchemy uses :term:`descriptors`
   in order to track the changes we make to an object, and when we modify
   ``__dict__`` directly, the ORM won't be able to track that we changed
   something.

Another key behavior of both :meth:`~.Session.expire` and
:meth:`~.Session.refresh` is that all un-flushed changes on an object are
discarded.
That is, if we were to modify an attribute on our ``User``::

    >>> user.name = 'user2'

but then we call :meth:`~.Session.expire` without first calling
:meth:`~.Session.flush`, our pending value of ``'user2'`` is discarded::

    >>> session.expire(user)
    >>> user.name
    'user1'

The :meth:`~.Session.expire` method can be used to mark as "expired" all
ORM-mapped attributes for an instance::

    # expire all ORM-mapped attributes on obj1
    session.expire(obj1)

it can also be passed a list of string attribute names, referring to specific
attributes to be marked as expired::

    # expire only attributes obj1.attr1, obj1.attr2
    session.expire(obj1, ['attr1', 'attr2'])

The :meth:`~.Session.refresh` method has a similar interface, but instead of
expiring, it emits a SELECT for the object's row immediately::

    # reload all attributes on obj1
    session.refresh(obj1)

:meth:`~.Session.refresh` also accepts a list of string attribute names, but
unlike :meth:`~.Session.expire`, expects at least one name to be that of a
column-mapped attribute::

    # reload obj1.attr1, obj1.attr2
    session.refresh(obj1, ['attr1', 'attr2'])

The :meth:`.Session.expire_all` method allows us to essentially call
:meth:`.Session.expire` on all objects contained within the :class:`.Session`
at once::

    session.expire_all()

What Actually Loads
~~~~~~~~~~~~~~~~~~~

The SELECT statement that's emitted when an object marked with
:meth:`~.Session.expire` is next accessed, or when an object is loaded with
:meth:`~.Session.refresh`, varies based on several factors, including:

* The load of expired attributes is triggered from **column-mapped attributes
  only**.  While any kind of attribute can be marked as expired, including a
  :func:`.relationship` - mapped attribute, accessing an expired
  :func:`.relationship` attribute will emit a load only for that attribute,
  using standard relationship-oriented lazy loading.  Column-oriented
  attributes, even if expired, will not load as part of this operation, and
  instead will load when any column-oriented attribute is accessed.

* :func:`.relationship`- mapped attributes will not load in response to
  expired column-based attributes being accessed.

* Regarding relationships, :meth:`~.Session.refresh` is more restrictive than
  :meth:`~.Session.expire` with regards to attributes that aren't
  column-mapped.  Calling :meth:`.refresh` and passing a list of names that
  only includes relationship-mapped attributes will actually raise an error.
  In any case, non-eager-loading :func:`.relationship` attributes will not be
  included in any refresh operation.

* :func:`.relationship` attributes configured as "eager loading" via the
  :paramref:`~.relationship.lazy` parameter will load in the case of
  :meth:`~.Session.refresh`, if either no attribute names are specified, or
  if their names are included in the list of attributes to be refreshed.

* Attributes that are configured as :func:`.deferred` will not normally load,
  during either the expired-attribute load or during a refresh.  An unloaded
  attribute that's :func:`.deferred` instead loads on its own when directly
  accessed, or if part of a "group" of deferred attributes where an unloaded
  attribute in that group is accessed.

* For expired attributes that are loaded on access, a joined-inheritance
  table mapping will emit a SELECT that typically only includes those tables
  for which unloaded attributes are present.  The action here is
  sophisticated enough to load only the parent or child table, for example,
  if the subset of columns that were originally expired encompass only one
  or the other of those tables.
* When :meth:`~.Session.refresh` is used on a joined-inheritance table
  mapping, the SELECT emitted will resemble that of when
  :meth:`.Session.query` is used on the target object's class.  This is
  typically all those tables that are set up as part of the mapping.

When to Expire or Refresh
~~~~~~~~~~~~~~~~~~~~~~~~~~

The :class:`.Session` uses the expiration feature automatically whenever the
transaction referred to by the session ends.  Meaning, whenever
:meth:`.Session.commit` or :meth:`.Session.rollback` is called, all objects
within the :class:`.Session` are expired, using a feature equivalent to that
of the :meth:`.Session.expire_all` method.  The rationale is that the end of
a transaction is a demarcating point at which there is no more context
available in order to know what the current state of the database is, as any
number of other transactions may be affecting it.  Only when a new
transaction starts can we again have access to the current state of the
database, at which point any number of changes may have occurred.

.. sidebar:: Transaction Isolation

    Of course, most databases are capable of handling multiple transactions
    at once, even involving the same rows of data.  When a relational
    database handles multiple transactions involving the same tables or rows,
    this is when the :term:`isolation` aspect of the database comes into
    play.  The isolation behavior of different databases varies considerably
    and even on a single database can be configured to behave in different
    ways (via the so-called :term:`isolation level` setting).  In that sense,
    the :class:`.Session` can't fully predict when the same SELECT statement,
    emitted a second time, will definitely return the data we already have,
    or will return new data.  So as a best guess, it assumes that within the
    scope of a transaction, unless it is known that a SQL expression has been
    emitted to modify a particular row, there's no need to refresh a row
    unless explicitly told to do so.

The :meth:`.Session.expire` and :meth:`.Session.refresh` methods are used in
those cases when one wants to force an object to re-load its data from the
database, in those cases when it is known that the current state of data is
possibly stale.  Reasons for this might include:

* some SQL has been emitted within the transaction outside of the scope of
  the ORM's object handling, such as if a :meth:`.Table.update` construct
  were emitted using the :meth:`.Session.execute` method;

* if the application is attempting to acquire data that is known to have been
  modified in a concurrent transaction, and it is also known that the
  isolation rules in effect allow this data to be visible.

The second bullet has the important caveat that "it is also known that the
isolation rules in effect allow this data to be visible."  This means that it
cannot be assumed that an UPDATE that happened on another database connection
will yet be visible here locally; in many cases, it will not.  This is why if
one wishes to use :meth:`.expire` or :meth:`.refresh` in order to view data
between ongoing transactions, an understanding of the isolation behavior in
effect is essential.

.. seealso::

    :meth:`.Session.expire`

    :meth:`.Session.expire_all`

    :meth:`.Session.refresh`

    :term:`isolation` - glossary explanation of isolation which includes
    links to Wikipedia.

    The SQLAlchemy Session In-Depth - a video + slides with an in-depth
    discussion of the object lifecycle including the role of data expiration.
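As an illustration of the first bullet above, the following is a small,
runnable sketch; the ``User`` mapping, the in-memory SQLite engine, and all
object names here are illustrative assumptions rather than part of the
preceding examples.  A Core UPDATE is emitted outside of the ORM's object
handling, and :meth:`.Session.expire` is then used so that the stale object
re-loads within the same transaction::

    from sqlalchemy import create_engine, update, Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)

    session = Session(engine)
    session.add(User(name='old name'))
    session.commit()

    user = session.query(User).filter_by(name='old name').first()

    # emit an UPDATE outside of the ORM's object handling; the
    # in-memory object is now stale
    session.execute(
        update(User.__table__).values(name='new name')
    )
    assert user.name == 'old name'

    # expire the object; the next attribute access emits a SELECT
    # within the same transaction and sees the new value
    session.expire(user)
    assert user.name == 'new name'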
SQLAlchemy-1.0.11/doc/build/orm/basic_relationships.rst0000664000175000017500000003552312636375552024070 0ustar classicclassic00000000000000
.. _relationship_patterns:

Basic Relationship Patterns
----------------------------

A quick walkthrough of the basic relational patterns.

The imports used for each of the following sections are as follows::

    from sqlalchemy import Table, Column, Integer, ForeignKey
    from sqlalchemy.orm import relationship
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

One To Many
~~~~~~~~~~~~

A one to many relationship places a foreign key on the child table
referencing the parent.  :func:`.relationship` is then specified on the
parent, as referencing a collection of items represented by the child::

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        children = relationship("Child")

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))

To establish a bidirectional relationship in one-to-many, where the "reverse"
side is a many to one, specify an additional :func:`.relationship` and
connect the two using the :paramref:`.relationship.back_populates`
parameter::

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        children = relationship("Child", back_populates="parent")

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))
        parent = relationship("Parent", back_populates="children")

``Child`` will get a ``parent`` attribute with many-to-one semantics.

Alternatively, the :paramref:`~.relationship.backref` option may be used on a
single :func:`.relationship` instead of using
:paramref:`~.relationship.back_populates`::

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        children = relationship("Child", backref="parent")

Many To One
~~~~~~~~~~~~

Many to one places a foreign key in the parent table referencing the child.
:func:`.relationship` is declared on the parent, where a new scalar-holding
attribute will be created::

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        child_id = Column(Integer, ForeignKey('child.id'))
        child = relationship("Child")

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)

Bidirectional behavior is achieved by adding a second :func:`.relationship`
and applying the :paramref:`.relationship.back_populates` parameter in both
directions::

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        child_id = Column(Integer, ForeignKey('child.id'))
        child = relationship("Child", back_populates="parents")

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parents = relationship("Parent", back_populates="child")

Alternatively, the :paramref:`~.relationship.backref` parameter may be
applied to a single :func:`.relationship`, such as ``Parent.child``::

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        child_id = Column(Integer, ForeignKey('child.id'))
        child = relationship("Child", backref="parents")

.. _relationships_one_to_one:

One To One
~~~~~~~~~~~

One To One is essentially a bidirectional relationship with a scalar
attribute on both sides.  To achieve this, the
:paramref:`~.relationship.uselist` flag indicates the placement of a scalar
attribute instead of a collection on the "many" side of the relationship.
To convert one-to-many into one-to-one::

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        child = relationship("Child", uselist=False, back_populates="parent")

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))
        parent = relationship("Parent", back_populates="child")

Or for many-to-one::

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        child_id = Column(Integer, ForeignKey('child.id'))
        child = relationship("Child", back_populates="parent")

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent = relationship("Parent", back_populates="child", uselist=False)

As always, the :paramref:`.relationship.backref` and :func:`.backref`
functions may be used in lieu of the
:paramref:`.relationship.back_populates` approach; to specify ``uselist`` on
a backref, use the :func:`.backref` function::

    from sqlalchemy.orm import backref

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        child_id = Column(Integer, ForeignKey('child.id'))
        child = relationship("Child", backref=backref("parent", uselist=False))

.. _relationships_many_to_many:

Many To Many
~~~~~~~~~~~~~

Many to Many adds an association table between two classes.  The association
table is indicated by the :paramref:`~.relationship.secondary` argument to
:func:`.relationship`.  Usually, the :class:`.Table` uses the
:class:`.MetaData` object associated with the declarative base class, so that
the :class:`.ForeignKey` directives can locate the remote tables with which
to link::

    association_table = Table('association', Base.metadata,
        Column('left_id', Integer, ForeignKey('left.id')),
        Column('right_id', Integer, ForeignKey('right.id'))
    )

    class Parent(Base):
        __tablename__ = 'left'
        id = Column(Integer, primary_key=True)
        children = relationship("Child",
                        secondary=association_table)

    class Child(Base):
        __tablename__ = 'right'
        id = Column(Integer, primary_key=True)

For a bidirectional relationship, both sides of the relationship contain a
collection.
Specify using :paramref:`.relationship.back_populates`, and for each
:func:`.relationship` specify the common association table::

    association_table = Table('association', Base.metadata,
        Column('left_id', Integer, ForeignKey('left.id')),
        Column('right_id', Integer, ForeignKey('right.id'))
    )

    class Parent(Base):
        __tablename__ = 'left'
        id = Column(Integer, primary_key=True)
        children = relationship(
            "Child",
            secondary=association_table,
            back_populates="parents")

    class Child(Base):
        __tablename__ = 'right'
        id = Column(Integer, primary_key=True)
        parents = relationship(
            "Parent",
            secondary=association_table,
            back_populates="children")

When using the :paramref:`~.relationship.backref` parameter instead of
:paramref:`.relationship.back_populates`, the backref will automatically use
the same :paramref:`~.relationship.secondary` argument for the reverse
relationship::

    association_table = Table('association', Base.metadata,
        Column('left_id', Integer, ForeignKey('left.id')),
        Column('right_id', Integer, ForeignKey('right.id'))
    )

    class Parent(Base):
        __tablename__ = 'left'
        id = Column(Integer, primary_key=True)
        children = relationship("Child",
                        secondary=association_table,
                        backref="parents")

    class Child(Base):
        __tablename__ = 'right'
        id = Column(Integer, primary_key=True)

The :paramref:`~.relationship.secondary` argument of :func:`.relationship`
also accepts a callable that returns the ultimate argument, which is
evaluated only when mappers are first used.  Using this, we can define the
``association_table`` at a later point, as long as it's available to the
callable after all module initialization is complete::

    class Parent(Base):
        __tablename__ = 'left'
        id = Column(Integer, primary_key=True)
        children = relationship("Child",
                        secondary=lambda: association_table,
                        backref="parents")

With the declarative extension in use, the traditional "string name of the
table" is accepted as well, matching the name of the table as stored in
``Base.metadata.tables``::

    class Parent(Base):
        __tablename__ = 'left'
        id = Column(Integer, primary_key=True)
        children = relationship("Child",
                        secondary="association",
                        backref="parents")

.. _relationships_many_to_many_deletion:

Deleting Rows from the Many to Many Table
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

A behavior which is unique to the :paramref:`~.relationship.secondary`
argument to :func:`.relationship` is that the :class:`.Table` which is
specified here is automatically subject to INSERT and DELETE statements, as
objects are added or removed from the collection.  There is **no need to
delete from this table manually**.  The act of removing a record from the
collection will have the effect of the row being deleted on flush::

    # row will be deleted from the "secondary" table
    # automatically
    myparent.children.remove(somechild)

A question which often arises is how the row in the "secondary" table can be
deleted when the child object is handed directly to
:meth:`.Session.delete`::

    session.delete(somechild)

There are several possibilities here:

* If there is a :func:`.relationship` from ``Parent`` to ``Child``, but there
  is **not** a reverse-relationship that links a particular ``Child`` to each
  ``Parent``, SQLAlchemy will not have any awareness that when deleting this
  particular ``Child`` object, it needs to maintain the "secondary" table
  that links it to the ``Parent``.  No delete of the "secondary" table will
  occur.
* If there is a relationship that links a particular ``Child`` to each
  ``Parent`` - suppose it's called ``Child.parents`` - SQLAlchemy by default
  will load in the ``Child.parents`` collection to locate all ``Parent``
  objects, and remove each row from the "secondary" table which establishes
  this link.  Note that this relationship does not need to be bidirectional;
  SQLAlchemy is strictly looking at every :func:`.relationship` associated
  with the ``Child`` object being deleted.

* A higher performing option here is to use ON DELETE CASCADE directives with
  the foreign keys used by the database.  Assuming the database supports this
  feature, the database itself can be made to automatically delete rows in
  the "secondary" table as referencing rows in "child" are deleted.
  SQLAlchemy can be instructed to forego actively loading in the
  ``Child.parents`` collection in this case using the
  :paramref:`~.relationship.passive_deletes` directive on
  :func:`.relationship`; see :ref:`passive_deletes` for more details on this.

Note again, these behaviors are *only* relevant to the
:paramref:`~.relationship.secondary` option used with :func:`.relationship`.
If dealing with association tables that are mapped explicitly and are *not*
present in the :paramref:`~.relationship.secondary` option of a relevant
:func:`.relationship`, cascade rules can be used instead to automatically
delete entities in reaction to a related entity being deleted - see
:ref:`unitofwork_cascades` for information on this feature.

.. _association_pattern:

Association Object
~~~~~~~~~~~~~~~~~~

The association object pattern is a variant on many-to-many: it's used when
your association table contains additional columns beyond those which are
foreign keys to the left and right tables.  Instead of using the
:paramref:`~.relationship.secondary` argument, you map a new class directly
to the association table.  The left side of the relationship references the
association object via one-to-many, and the association class references the
right side via many-to-one.
Below we illustrate an association table mapped to the ``Association`` class
which includes a column called ``extra_data``, which is a string value that
is stored along with each association between ``Parent`` and ``Child``::

    class Association(Base):
        __tablename__ = 'association'
        left_id = Column(Integer, ForeignKey('left.id'), primary_key=True)
        right_id = Column(Integer, ForeignKey('right.id'), primary_key=True)
        extra_data = Column(String(50))
        child = relationship("Child")

    class Parent(Base):
        __tablename__ = 'left'
        id = Column(Integer, primary_key=True)
        children = relationship("Association")

    class Child(Base):
        __tablename__ = 'right'
        id = Column(Integer, primary_key=True)

As always, the bidirectional version makes use of
:paramref:`.relationship.back_populates` or
:paramref:`.relationship.backref`::

    class Association(Base):
        __tablename__ = 'association'
        left_id = Column(Integer, ForeignKey('left.id'), primary_key=True)
        right_id = Column(Integer, ForeignKey('right.id'), primary_key=True)
        extra_data = Column(String(50))
        child = relationship("Child", back_populates="parents")
        parent = relationship("Parent", back_populates="children")

    class Parent(Base):
        __tablename__ = 'left'
        id = Column(Integer, primary_key=True)
        children = relationship("Association", back_populates="parent")

    class Child(Base):
        __tablename__ = 'right'
        id = Column(Integer, primary_key=True)
        parents = relationship("Association", back_populates="child")

Working with the association pattern in its direct form requires that child
objects are associated with an association instance before being appended to
the parent; similarly, access from parent to child goes through the
association object::

    # create parent, append a child via association
    p = Parent()
    a = Association(extra_data="some data")
    a.child = Child()
    p.children.append(a)

    # iterate through child objects via association, including association
    # attributes
    for assoc in p.children:
        print(assoc.extra_data)
        print(assoc.child)

To enhance the association object pattern such that direct access to the
``Association`` object is optional, SQLAlchemy provides the
:ref:`associationproxy_toplevel` extension.  This extension allows the
configuration of attributes which will access two "hops" with a single
access, one "hop" to the associated object, and a second to a target
attribute.

.. note::

  When using the association object pattern, it is advisable that the
  association-mapped table not be used as the
  :paramref:`~.relationship.secondary` argument on a :func:`.relationship`
  elsewhere, unless that :func:`.relationship` contains the option
  :paramref:`~.relationship.viewonly` set to ``True``.  SQLAlchemy otherwise
  may attempt to emit redundant INSERT and DELETE statements on the same
  table, if similar state is detected on the related attribute as well as
  the associated object.

SQLAlchemy-1.0.11/doc/build/core/0000775000175000017500000000000012636376632017434 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/doc/build/core/inspection.rst0000664000175000017500000000315312636375552022343 0ustar classicclassic00000000000000
.. _core_inspection_toplevel:
.. _inspection_toplevel:

Runtime Inspection API
======================

.. automodule:: sqlalchemy.inspection
    :members:

Available Inspection Targets
----------------------------

Below is a listing of many of the most common inspection targets.

* :class:`.Connectable` (i.e. :class:`.Engine`,
  :class:`.Connection`) - returns an :class:`.Inspector` object.
* :class:`.ClauseElement` - all SQL expression components, including
  :class:`.Table`, :class:`.Column`, serve as their own inspection objects,
  meaning any of these objects passed to :func:`.inspect` return themselves.

* ``object`` - an object given will be checked by the ORM for a mapping -
  if so, an :class:`.InstanceState` is returned representing the mapped
  state of the object.  The :class:`.InstanceState` also provides access
  to per attribute state via the :class:`.AttributeState` interface as well
  as the per-flush "history" of any attribute via the :class:`.History`
  object.

* ``type`` (i.e. a class) - a class given will be checked by the ORM for a
  mapping - if so, a :class:`.Mapper` for that class is returned.

* mapped attribute - passing a mapped attribute to :func:`.inspect`, such as
  ``inspect(MyClass.some_attribute)``, returns a :class:`.QueryableAttribute`
  object, which is the :term:`descriptor` associated with a mapped class.
  This descriptor refers to a :class:`.MapperProperty`, which is usually an
  instance of :class:`.ColumnProperty` or :class:`.RelationshipProperty`,
  via its :attr:`.QueryableAttribute.property` attribute.

* :class:`.AliasedClass` - returns an :class:`.AliasedInsp` object.

SQLAlchemy-1.0.11/doc/build/core/engines_connections.rst0000664000175000017500000000022212636375552024214 0ustar classicclassic00000000000000
=========================
Engine and Connection Use
=========================

.. toctree::
    :maxdepth: 2

    engines
    connections
    pooling
    events

SQLAlchemy-1.0.11/doc/build/core/constraints.rst0000664000175000017500000007603012636375552022543 0ustar classicclassic00000000000000
.. _metadata_constraints_toplevel:
.. _metadata_constraints:

.. module:: sqlalchemy.schema

=================================
Defining Constraints and Indexes
=================================

This section will discuss SQL :term:`constraints` and indexes.  In SQLAlchemy
the key classes include :class:`.ForeignKeyConstraint` and :class:`.Index`.

.. _metadata_foreignkeys:

Defining Foreign Keys
---------------------

A *foreign key* in SQL is a table-level construct that constrains one or more
columns in that table to only allow values that are present in a different
set of columns, typically but not always located on a different table.  We
call the columns which are constrained the *foreign key* columns and the
columns which they are constrained towards the *referenced* columns.  The
referenced columns almost always define the primary key for their owning
table, though there are exceptions to this.  The foreign key is the "joint"
that connects together pairs of rows which have a relationship with each
other, and SQLAlchemy assigns very deep importance to this concept in
virtually every area of its operation.

In SQLAlchemy as well as in DDL, foreign key constraints can be defined as
additional attributes within the table clause, or for single-column foreign
keys they may optionally be specified within the definition of a single
column.
The single column foreign key is more common, and at the column level is
specified by constructing a :class:`~sqlalchemy.schema.ForeignKey` object
as an argument to a :class:`~sqlalchemy.schema.Column` object::

    user_preference = Table('user_preference', metadata,
        Column('pref_id', Integer, primary_key=True),
        Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False),
        Column('pref_name', String(40), nullable=False),
        Column('pref_value', String(100))
    )

Above, we define a new table ``user_preference`` for which each row must
contain a value in the ``user_id`` column that also exists in the ``user``
table's ``user_id`` column.

The argument to :class:`~sqlalchemy.schema.ForeignKey` is most commonly a
string of the form *tablename.columnname*, or for a table in a remote schema
or "owner" of the form *schemaname.tablename.columnname*.  It may also be an
actual :class:`~sqlalchemy.schema.Column` object, which as we'll see later
is accessed from an existing :class:`~sqlalchemy.schema.Table` object via
its ``c`` collection::

    ForeignKey(user.c.user_id)

The advantage to using a string is that the in-python linkage between
``user`` and ``user_preference`` is resolved only when first needed, so that
table objects can be easily spread across multiple modules and defined in
any order.

Foreign keys may also be defined at the table level, using the
:class:`~sqlalchemy.schema.ForeignKeyConstraint` object.  This object can
describe a single- or multi-column foreign key.  A multi-column foreign key
is known as a *composite* foreign key, and almost always references a table
that has a composite primary key.  Below we define a table ``invoice`` which
has a composite primary key::

    invoice = Table('invoice', metadata,
        Column('invoice_id', Integer, primary_key=True),
        Column('ref_num', Integer, primary_key=True),
        Column('description', String(60), nullable=False)
    )

And then a table ``invoice_item`` with a composite foreign key referencing
``invoice``::

    invoice_item = Table('invoice_item', metadata,
        Column('item_id', Integer, primary_key=True),
        Column('item_name', String(60), nullable=False),
        Column('invoice_id', Integer, nullable=False),
        Column('ref_num', Integer, nullable=False),
        ForeignKeyConstraint(['invoice_id', 'ref_num'],
                ['invoice.invoice_id', 'invoice.ref_num'])
    )

It's important to note that the
:class:`~sqlalchemy.schema.ForeignKeyConstraint` is the only way to define a
composite foreign key.  While we could also have placed individual
:class:`~sqlalchemy.schema.ForeignKey` objects on both the
``invoice_item.invoice_id`` and ``invoice_item.ref_num`` columns, SQLAlchemy
would not be aware that these two values should be paired together - it
would be two individual foreign key constraints instead of a single
composite foreign key referencing two columns.

.. _use_alter:

Creating/Dropping Foreign Key Constraints via ALTER
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The behavior we've seen in tutorials and elsewhere involving foreign keys
with DDL illustrates that the constraints are typically rendered "inline"
within the CREATE TABLE statement, such as:

.. sourcecode:: sql

    CREATE TABLE addresses (
        id INTEGER NOT NULL,
        user_id INTEGER,
        email_address VARCHAR NOT NULL,
        PRIMARY KEY (id),
        CONSTRAINT user_id_fk FOREIGN KEY(user_id) REFERENCES users (id)
    )

The ``CONSTRAINT .. FOREIGN KEY`` directive is used to create the constraint
in an "inline" fashion within the CREATE TABLE definition.
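To see this inline rendering without connecting to a database, one option is
to stringify the :class:`~sqlalchemy.schema.CreateTable` construct directly;
the small sketch below re-uses the ``user_preference`` table from earlier,
and the use of :class:`.CreateTable` with ``print`` here is an illustration,
not part of the original example::

    from sqlalchemy import MetaData, Table, Column, Integer, String, ForeignKey
    from sqlalchemy.schema import CreateTable

    metadata = MetaData()

    user = Table('user', metadata,
        Column('user_id', Integer, primary_key=True))

    user_preference = Table('user_preference', metadata,
        Column('pref_id', Integer, primary_key=True),
        Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False),
        Column('pref_name', String(40), nullable=False))

    # stringifying CreateTable renders the DDL using a default dialect;
    # the FOREIGN KEY directive appears inline within the table definition
    print(CreateTable(user_preference))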
The :meth:`.MetaData.create_all` and :meth:`.MetaData.drop_all` methods do
this by default, using a topological sort of all the :class:`.Table` objects
involved such that tables are created and dropped in order of their foreign
key dependency (this sort is also available via the
:attr:`.MetaData.sorted_tables` accessor).

This approach can't work when two or more foreign key constraints are
involved in a "dependency cycle", where a set of tables are mutually
dependent on each other, assuming the backend enforces foreign keys (always
the case except on SQLite, MySQL/MyISAM).  The methods will therefore break
out constraints in such a cycle into separate ALTER statements, on all
backends other than SQLite which does not support most forms of ALTER.
Given a schema like::

    node = Table(
        'node', metadata,
        Column('node_id', Integer, primary_key=True),
        Column(
            'primary_element', Integer,
            ForeignKey('element.element_id')
        )
    )

    element = Table(
        'element', metadata,
        Column('element_id', Integer, primary_key=True),
        Column('parent_node_id', Integer),
        ForeignKeyConstraint(
            ['parent_node_id'], ['node.node_id'],
            name='fk_element_parent_node_id'
        )
    )

When we call upon :meth:`.MetaData.create_all` on a backend such as the
Postgresql backend, the cycle between these two tables is resolved and the
constraints are created separately:

.. sourcecode:: pycon+sql

    >>> with engine.connect() as conn:
    ...    metadata.create_all(conn, checkfirst=False)
    {opensql}CREATE TABLE element (
        element_id SERIAL NOT NULL,
        parent_node_id INTEGER,
        PRIMARY KEY (element_id)
    )

    CREATE TABLE node (
        node_id SERIAL NOT NULL,
        primary_element INTEGER,
        PRIMARY KEY (node_id)
    )

    ALTER TABLE element ADD CONSTRAINT fk_element_parent_node_id
        FOREIGN KEY(parent_node_id) REFERENCES node (node_id)
    ALTER TABLE node ADD FOREIGN KEY(primary_element)
        REFERENCES element (element_id)
    {stop}

In order to emit DROP for these tables, the same logic applies, however note
here that in SQL, to emit DROP CONSTRAINT requires that the constraint has a
name.  In the case of the ``'node'`` table above, we haven't named this
constraint; the system will therefore attempt to emit DROP for only those
constraints that are named:

.. sourcecode:: pycon+sql

    >>> with engine.connect() as conn:
    ...    metadata.drop_all(conn, checkfirst=False)
    {opensql}ALTER TABLE element DROP CONSTRAINT fk_element_parent_node_id
    DROP TABLE node
    DROP TABLE element
    {stop}

In the case where the cycle cannot be resolved, such as if we hadn't applied
a name to either constraint here, we will receive the following error::

    sqlalchemy.exc.CircularDependencyError: Can't sort tables for DROP;
    an unresolvable foreign key dependency exists between tables:
    element, node.  Please ensure that the ForeignKey and
    ForeignKeyConstraint objects involved in the cycle have names so
    that they can be dropped using DROP CONSTRAINT.

This error only applies to the DROP case as we can emit "ADD CONSTRAINT" in
the CREATE case without a name; the database typically assigns one
automatically.

The :paramref:`.ForeignKeyConstraint.use_alter` and
:paramref:`.ForeignKey.use_alter` keyword arguments can be used to manually
resolve dependency cycles.
We can add this flag only to the ``'element'`` table as follows::

    element = Table(
        'element', metadata,
        Column('element_id', Integer, primary_key=True),
        Column('parent_node_id', Integer),
        ForeignKeyConstraint(
            ['parent_node_id'], ['node.node_id'],
            use_alter=True,
            name='fk_element_parent_node_id'
        )
    )

in our CREATE DDL we will see the ALTER statement only for this constraint,
and not the other one:

.. sourcecode:: pycon+sql

    >>> with engine.connect() as conn:
    ...    metadata.create_all(conn, checkfirst=False)
    {opensql}CREATE TABLE element (
        element_id SERIAL NOT NULL,
        parent_node_id INTEGER,
        PRIMARY KEY (element_id)
    )

    CREATE TABLE node (
        node_id SERIAL NOT NULL,
        primary_element INTEGER,
        PRIMARY KEY (node_id),
        FOREIGN KEY(primary_element) REFERENCES element (element_id)
    )

    ALTER TABLE element ADD CONSTRAINT fk_element_parent_node_id
    FOREIGN KEY(parent_node_id) REFERENCES node (node_id)
    {stop}

:paramref:`.ForeignKeyConstraint.use_alter` and
:paramref:`.ForeignKey.use_alter`, when used in conjunction with a drop
operation, will require that the constraint is named, else an error like the
following is generated::

    sqlalchemy.exc.CompileError: Can't emit DROP CONSTRAINT for constraint
    ForeignKeyConstraint(...); it has no name

.. versionchanged:: 1.0.0 - The DDL system invoked by
   :meth:`.MetaData.create_all` and :meth:`.MetaData.drop_all` will now
   automatically resolve mutually dependent foreign keys between tables
   declared by :class:`.ForeignKeyConstraint` and :class:`.ForeignKey`
   objects, without the need to explicitly set the
   :paramref:`.ForeignKeyConstraint.use_alter` flag.

.. versionchanged:: 1.0.0 - The :paramref:`.ForeignKeyConstraint.use_alter`
   flag can be used with an un-named constraint; only the DROP operation
   will emit a specific error when actually called upon.

.. seealso::

    :ref:`constraint_naming_conventions`

    :func:`.sort_tables_and_constraints`

.. _on_update_on_delete:

ON UPDATE and ON DELETE
~~~~~~~~~~~~~~~~~~~~~~~

Most databases support *cascading* of foreign key values, that is, when a
parent row is updated the new value is placed in child rows, or when the
parent row is deleted all corresponding child rows are set to null or
deleted.  In data definition language these are specified using phrases like
"ON UPDATE CASCADE", "ON DELETE CASCADE", and "ON DELETE SET NULL",
corresponding to foreign key constraints.  The phrase after "ON UPDATE" or
"ON DELETE" may also allow other phrases that are specific to the database
in use.  The :class:`~sqlalchemy.schema.ForeignKey` and
:class:`~sqlalchemy.schema.ForeignKeyConstraint` objects support the
generation of this clause via the ``onupdate`` and ``ondelete`` keyword
arguments.  The value is any string which will be output after the
appropriate "ON UPDATE" or "ON DELETE" phrase::

    child = Table('child', meta,
        Column('id', Integer,
                ForeignKey('parent.id', onupdate="CASCADE", ondelete="CASCADE"),
                primary_key=True
        )
    )

    composite = Table('composite', meta,
        Column('id', Integer, primary_key=True),
        Column('rev_id', Integer),
        Column('note_id', Integer),
        ForeignKeyConstraint(
                    ['rev_id', 'note_id'],
                    ['revisions.id', 'revisions.note_id'],
                    onupdate="CASCADE", ondelete="SET NULL"
        )
    )

Note that these clauses are not supported on SQLite, and require ``InnoDB``
tables when used with MySQL.  They may also not be supported on other
databases.

UNIQUE Constraint
-----------------

Unique constraints can be created anonymously on a single column using the
``unique`` keyword on :class:`~sqlalchemy.schema.Column`.
Explicitly named unique constraints and/or those with multiple columns are
created via the :class:`~sqlalchemy.schema.UniqueConstraint` table-level
construct.

.. sourcecode:: python+sql

    from sqlalchemy import UniqueConstraint

    meta = MetaData()
    mytable = Table('mytable', meta,

        # per-column anonymous unique constraint
        Column('col1', Integer, unique=True),

        Column('col2', Integer),
        Column('col3', Integer),

        # explicit/composite unique constraint.  'name' is optional.
        UniqueConstraint('col2', 'col3', name='uix_1')
        )

CHECK Constraint
----------------

Check constraints can be named or unnamed and can be created at the Column
or Table level, using the :class:`~sqlalchemy.schema.CheckConstraint`
construct.  The text of the check constraint is passed directly through to
the database, so there is limited "database independent" behavior.  Column
level check constraints generally should only refer to the column to which
they are placed, while table level constraints can refer to any columns in
the table.

Note that some databases, such as MySQL, do not actively support check
constraints.

.. sourcecode:: python+sql

    from sqlalchemy import CheckConstraint

    meta = MetaData()
    mytable = Table('mytable', meta,

        # per-column CHECK constraint
        Column('col1', Integer, CheckConstraint('col1>5')),

        Column('col2', Integer),
        Column('col3', Integer),

        # table level CHECK constraint.  'name' is optional.
        CheckConstraint('col2 > col3 + 5', name='check1')
        )

    {sql}mytable.create(engine)
    CREATE TABLE mytable (
        col1 INTEGER CHECK (col1>5),
        col2 INTEGER,
        col3 INTEGER,
        CONSTRAINT check1 CHECK (col2 > col3 + 5)
    ){stop}

PRIMARY KEY Constraint
----------------------

The primary key constraint of any :class:`.Table` object is implicitly
present, based on the :class:`.Column` objects that are marked with the
:paramref:`.Column.primary_key` flag.  The :class:`.PrimaryKeyConstraint`
object provides explicit access to this constraint, which includes the
option of being configured directly::

    from sqlalchemy import PrimaryKeyConstraint

    my_table = Table('mytable', metadata,
                Column('id', Integer),
                Column('version_id', Integer),
                Column('data', String(50)),
                PrimaryKeyConstraint('id', 'version_id', name='mytable_pk')
            )

.. seealso::

    :class:`.PrimaryKeyConstraint` - detailed API documentation.

Setting up Constraints when using the Declarative ORM Extension
----------------------------------------------------------------

The :class:`.Table` is the SQLAlchemy Core construct that allows one to
define table metadata, which among other things can be used by the
SQLAlchemy ORM as a target to map a class.  The
:ref:`Declarative <declarative_toplevel>` extension allows the
:class:`.Table` object to be created automatically, given the contents of
the table primarily as a mapping of :class:`.Column` objects.

To apply table-level constraint objects such as
:class:`.ForeignKeyConstraint` to a table defined using Declarative, use the
``__table_args__`` attribute, described at :ref:`declarative_table_args`.

.. _constraint_naming_conventions:

Configuring Constraint Naming Conventions
------------------------------------------

Relational databases typically assign explicit names to all constraints and
indexes.  In the common case that a table is created using ``CREATE TABLE``
where constraints such as CHECK, UNIQUE, and PRIMARY KEY constraints are
produced inline with the table definition, the database usually has a system
in place in which names are automatically assigned to these constraints, if
a name is not otherwise specified.
When an existing database table is altered in a database using a command
such as ``ALTER TABLE``, this command typically needs to specify explicit
names for new constraints as well as be able to specify the name of an
existing constraint that is to be dropped or modified.

Constraints can be named explicitly using the :paramref:`.Constraint.name`
parameter, and for indexes the :paramref:`.Index.name` parameter.  However,
in the case of constraints this parameter is optional.  There are also the
use cases of using the :paramref:`.Column.unique` and
:paramref:`.Column.index` parameters which create :class:`.UniqueConstraint`
and :class:`.Index` objects without an explicit name being specified.

The use case of alteration of existing tables and constraints can be handled
by schema migration tools such as Alembic.  However, neither Alembic nor
SQLAlchemy currently create names for constraint objects where the name is
otherwise unspecified, leading to the case where being able to alter
existing constraints means that one must reverse-engineer the naming system
used by the relational database to auto-assign names, or that care must be
taken to ensure that all constraints are named.

In contrast to having to assign explicit names to all :class:`.Constraint`
and :class:`.Index` objects, automated naming schemes can be constructed
using events.  This approach has the advantage that constraints will get a
consistent naming scheme without the need for explicit name parameters
throughout the code, and also that the convention takes place just as well
for those constraints and indexes produced by the :paramref:`.Column.unique`
and :paramref:`.Column.index` parameters.  As of SQLAlchemy 0.9.2 this
event-based approach is included, and can be configured using the argument
:paramref:`.MetaData.naming_convention`.

:paramref:`.MetaData.naming_convention` refers to a dictionary which accepts
the :class:`.Index` class or individual :class:`.Constraint` classes as
keys, and Python string templates as values.  It also accepts a series of
string-codes as alternative keys, ``"fk"``, ``"pk"``, ``"ix"``, ``"ck"``,
``"uq"`` for foreign key, primary key, index, check, and unique constraint,
respectively.  The string templates in this dictionary are used whenever a
constraint or index is associated with this :class:`.MetaData` object that
does not have an existing name given (including one exception case where an
existing name can be further embellished).

An example naming convention that suits basic cases is as follows::

    convention = {
      "ix": 'ix_%(column_0_label)s',
      "uq": "uq_%(table_name)s_%(column_0_name)s",
      "ck": "ck_%(table_name)s_%(constraint_name)s",
      "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
      "pk": "pk_%(table_name)s"
    }

    metadata = MetaData(naming_convention=convention)

The above convention will establish names for all constraints within the
target :class:`.MetaData` collection.  For example, we can observe the name
produced when we create an unnamed :class:`.UniqueConstraint`::

    >>> user_table = Table('user', metadata,
    ...                 Column('id', Integer, primary_key=True),
    ...                 Column('name', String(30), nullable=False),
    ...                 UniqueConstraint('name')
    ... )
    >>> list(user_table.constraints)[1].name
    'uq_user_name'

This same feature takes effect even if we just use the
:paramref:`.Column.unique` flag::

    >>> user_table = Table('user', metadata,
    ...                  Column('id', Integer, primary_key=True),
    ...                  Column('name', String(30), nullable=False, unique=True)
    ...                  )
    >>> list(user_table.constraints)[1].name
    'uq_user_name'

A key advantage to the naming convention approach is that the names are
established at Python construction time, rather than at DDL emit time.  The
effect this has when using Alembic's ``--autogenerate`` feature is that the
naming convention will be explicit when a new migration script is
generated::

    def upgrade():
        op.create_unique_constraint("uq_user_name", "user", ["name"])

The above ``"uq_user_name"`` string was copied from the
:class:`.UniqueConstraint` object that ``--autogenerate`` located in our
metadata.

The default value for :paramref:`.MetaData.naming_convention` handles the
long-standing SQLAlchemy behavior of assigning a name to a :class:`.Index`
object that is created using the :paramref:`.Column.index` parameter::

    >>> from sqlalchemy.sql.schema import DEFAULT_NAMING_CONVENTION
    >>> DEFAULT_NAMING_CONVENTION
    immutabledict({'ix': 'ix_%(column_0_label)s'})

The tokens available include ``%(table_name)s``,
``%(referred_table_name)s``, ``%(column_0_name)s``, ``%(column_0_label)s``,
``%(column_0_key)s``, ``%(referred_column_0_name)s``, and
``%(constraint_name)s``; the documentation for
:paramref:`.MetaData.naming_convention` describes each individually.

New tokens can also be added, by specifying an additional token and a
callable within the naming_convention dictionary.  For example, if we wanted
to name our foreign key constraints using a GUID scheme, we could do that as
follows::

    import uuid

    def fk_guid(constraint, table):
        str_tokens = [
            table.name,
        ] + [
            element.parent.name for element in constraint.elements
        ] + [
            element.target_fullname for element in constraint.elements
        ]
        guid = uuid.uuid5(uuid.NAMESPACE_OID, "_".join(str_tokens).encode('ascii'))
        return str(guid)

    convention = {
        "fk_guid": fk_guid,
        "ix": 'ix_%(column_0_label)s',
        "fk": "fk_%(fk_guid)s",
    }

Above, when we create a new :class:`.ForeignKeyConstraint`, we will get a
name as follows::

    >>> metadata = MetaData(naming_convention=convention)

    >>> user_table = Table('user', metadata,
    ...         Column('id', Integer, primary_key=True),
    ...         Column('version', Integer, primary_key=True),
    ...         Column('data', String(30))
    ...     )
    >>> address_table = Table('address', metadata,
    ...        Column('id', Integer, primary_key=True),
    ...        Column('user_id', Integer),
    ...        Column('user_version_id', Integer)
    ...    )
    >>> fk = ForeignKeyConstraint(['user_id', 'user_version_id'],
    ...                ['user.id', 'user.version'])
    >>> address_table.append_constraint(fk)
    >>> fk.name
    fk_0cd51ab5-8d70-56e8-a83c-86661737766d

.. seealso::

    :paramref:`.MetaData.naming_convention` - for additional usage details
    as well as a listing of all available naming components.

    :ref:`alembic:tutorial_constraint_names` - in the Alembic documentation.

.. versionadded:: 0.9.2 Added the
   :paramref:`.MetaData.naming_convention` argument.

.. _naming_check_constraints:

Naming CHECK Constraints
~~~~~~~~~~~~~~~~~~~~~~~~~~~

The :class:`.CheckConstraint` object is configured against an arbitrary SQL
expression, which can have any number of columns present, and additionally
is often configured using a raw SQL string.  Therefore a common convention
to use with :class:`.CheckConstraint` is one where we expect the object to
have a name already, and we then enhance it with other convention elements.
A typical convention is ``"ck_%(table_name)s_%(constraint_name)s"``::

    metadata = MetaData(
        naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
    )

    Table('foo', metadata,
        Column('value', Integer),
        CheckConstraint('value > 5', name='value_gt_5')
    )

The above table will produce the name ``ck_foo_value_gt_5``::

    CREATE TABLE foo (
        value INTEGER,
        CONSTRAINT ck_foo_value_gt_5 CHECK (value > 5)
    )

:class:`.CheckConstraint` also supports the ``%(column_0_name)s`` token; we
can make use of this by ensuring we use a :class:`.Column` or
:func:`.sql.expression.column` element within the constraint's expression,
either by declaring the constraint separate from the table::

    metadata = MetaData(
        naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}
    )

    foo = Table('foo', metadata,
        Column('value', Integer)
    )

    CheckConstraint(foo.c.value > 5)

or by using a :func:`.sql.expression.column` inline::

    from sqlalchemy import column

    metadata = MetaData(
        naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}
    )

    foo = Table('foo', metadata,
        Column('value', Integer),
        CheckConstraint(column('value') > 5)
    )

Both will produce the name ``ck_foo_value``::

    CREATE TABLE foo (
        value INTEGER,
        CONSTRAINT ck_foo_value CHECK (value > 5)
    )

The determination of the name of "column zero" is performed by scanning the
given expression for column objects.  If the expression has more than one
column present, the scan does use a deterministic search, however the
structure of the expression will determine which column is noted as "column
zero".

.. versionadded:: 1.0.0 The :class:`.CheckConstraint` object now supports
   the ``column_0_name`` naming convention token.

.. _naming_schematypes:

Configuring Naming for Boolean, Enum, and other schema types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The :class:`.SchemaType` class refers to type objects such as
:class:`.Boolean` and :class:`.Enum` which generate a CHECK constraint
accompanying the type.  The name for the constraint here is most directly
set up by sending the "name" parameter, e.g. :paramref:`.Boolean.name`::

    Table('foo', metadata,
        Column('flag', Boolean(name='ck_foo_flag'))
    )

The naming convention feature may be combined with these types as well,
normally by using a convention which includes ``%(constraint_name)s`` and
then applying a name to the type::

    metadata = MetaData(
        naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
    )

    Table('foo', metadata,
        Column('flag', Boolean(name='flag_bool'))
    )

The above table will produce the constraint name ``ck_foo_flag_bool``::

    CREATE TABLE foo (
        flag BOOL,
        CONSTRAINT ck_foo_flag_bool CHECK (flag IN (0, 1))
    )

The :class:`.SchemaType` classes use special internal symbols so that the
naming convention is only determined at DDL compile time.  On Postgresql,
there's a native BOOLEAN type, so the CHECK constraint of :class:`.Boolean`
is not needed; we are safe to set up a :class:`.Boolean` type without a
name, even though a naming convention is in place for check constraints.
This convention will only be consulted for the CHECK constraint if we run
against a database without a native BOOLEAN type like SQLite or MySQL.
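To observe this compile-time behavior directly, the same table can be
compiled against two different dialects and the DDL compared; the sketch
below, using :class:`.CreateTable` and the dialect modules, is an
illustration under those assumptions rather than part of the original
example::

    from sqlalchemy import MetaData, Table, Column, Boolean
    from sqlalchemy.schema import CreateTable
    from sqlalchemy.dialects import postgresql, sqlite

    metadata = MetaData(
        naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
    )

    foo = Table('foo', metadata,
        Column('flag', Boolean(name='flag_bool'))
    )

    # Postgresql has a native BOOLEAN type; no CHECK constraint is
    # rendered, and the naming convention is never consulted
    print(CreateTable(foo).compile(dialect=postgresql.dialect()))

    # SQLite has no native BOOLEAN; the CHECK constraint is rendered
    # and receives its name from the convention at this point
    print(CreateTable(foo).compile(dialect=sqlite.dialect()))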
The CHECK constraint may also make use of the ``column_0_name`` token, which
works nicely with :class:`.SchemaType` since these constraints have only one
column::

    metadata = MetaData(
        naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}
    )

    Table('foo', metadata,
        Column('flag', Boolean())
    )

The above schema will produce::

    CREATE TABLE foo (
        flag BOOL,
        CONSTRAINT ck_foo_flag CHECK (flag IN (0, 1))
    )

.. versionchanged:: 1.0 Constraint naming conventions that don't include
   ``%(constraint_name)s`` again work with :class:`.SchemaType` constraints.

Constraints API
---------------

.. autoclass:: Constraint
    :members:

.. autoclass:: ColumnCollectionMixin
    :members:

.. autoclass:: ColumnCollectionConstraint
    :members:
    :inherited-members:

.. autoclass:: CheckConstraint
    :members:
    :inherited-members:

.. autoclass:: ForeignKey
    :members:
    :inherited-members:

.. autoclass:: ForeignKeyConstraint
    :members:
    :inherited-members:

.. autoclass:: PrimaryKeyConstraint
    :members:
    :inherited-members:

.. autoclass:: UniqueConstraint
    :members:
    :inherited-members:

.. autofunction:: sqlalchemy.schema.conv

.. _schema_indexes:

Indexes
-------

Indexes can be created anonymously (using an auto-generated name
``ix_<column label>``) for a single column using the inline ``index``
keyword on :class:`~sqlalchemy.schema.Column`, which also modifies the usage
of ``unique`` to apply the uniqueness to the index itself, instead of adding
a separate UNIQUE constraint.  For indexes with specific names or which
encompass more than one column, use the :class:`~sqlalchemy.schema.Index`
construct, which requires a name.

Below we illustrate a :class:`~sqlalchemy.schema.Table` with several
:class:`~sqlalchemy.schema.Index` objects associated.  The DDL for "CREATE
INDEX" is issued right after the create statements for the table:

.. sourcecode:: python+sql

    meta = MetaData()
    mytable = Table('mytable', meta,
        # an indexed column, with index "ix_mytable_col1"
        Column('col1', Integer, index=True),

        # a uniquely indexed column with index "ix_mytable_col2"
        Column('col2', Integer, index=True, unique=True),

        Column('col3', Integer),
        Column('col4', Integer),

        Column('col5', Integer),
        Column('col6', Integer),
        )

    # place an index on col3, col4
    Index('idx_col34', mytable.c.col3, mytable.c.col4)

    # place a unique index on col5, col6
    Index('myindex', mytable.c.col5, mytable.c.col6, unique=True)

    {sql}mytable.create(engine)
    CREATE TABLE mytable (
        col1 INTEGER,
        col2 INTEGER,
        col3 INTEGER,
        col4 INTEGER,
        col5 INTEGER,
        col6 INTEGER
    )
    CREATE INDEX ix_mytable_col1 ON mytable (col1)
    CREATE UNIQUE INDEX ix_mytable_col2 ON mytable (col2)
    CREATE UNIQUE INDEX myindex ON mytable (col5, col6)
    CREATE INDEX idx_col34 ON mytable (col3, col4){stop}

Note in the example above, the :class:`.Index` construct is created
externally to the table to which it corresponds, using :class:`.Column`
objects directly.  :class:`.Index` also supports "inline" definition inside
the :class:`.Table`, using string names to identify columns::

    meta = MetaData()
    mytable = Table('mytable', meta,
        Column('col1', Integer),

        Column('col2', Integer),

        Column('col3', Integer),
        Column('col4', Integer),

        # place an index on col1, col2
        Index('idx_col12', 'col1', 'col2'),

        # place a unique index on col3, col4
        Index('idx_col34', 'col3', 'col4', unique=True)
    )

.. versionadded:: 0.7
    Support of "inline" definition inside the :class:`.Table`
    for :class:`.Index`\ .

The :class:`~sqlalchemy.schema.Index` object also supports its own
``create()`` method:

.. sourcecode:: python+sql

    i = Index('someindex', mytable.c.col5)
    {sql}i.create(engine)
    CREATE INDEX someindex ON mytable (col5){stop}
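An :class:`.Index` can be dropped individually as well; as a small sketch
mirroring the ``create()`` example above (the ``engine`` here is assumed to
be an existing :class:`.Engine`), the corresponding ``drop()`` method may be
used::

    # emits "DROP INDEX someindex", assuming the index exists
    i.drop(engine)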
sourcecode:: python+sql i = Index('someindex', mytable.c.col5) {sql}i.create(engine) CREATE INDEX someindex ON mytable (col5){stop} .. _schema_indexes_functional: Functional Indexes ~~~~~~~~~~~~~~~~~~~ :class:`.Index` supports SQL and function expressions, as supported by the target backend. To create an index against a column using a descending value, the :meth:`.ColumnElement.desc` modifier may be used:: from sqlalchemy import Index Index('someindex', mytable.c.somecol.desc()) Or with a backend that supports functional indexes such as Postgresql, a "case insensitive" index can be created using the ``lower()`` function:: from sqlalchemy import func, Index Index('someindex', func.lower(mytable.c.somecol)) .. versionadded:: 0.8 :class:`.Index` supports SQL expressions and functions as well as plain columns. Index API --------- .. autoclass:: Index :members: :inherited-members: .. _sqlexpression_toplevel: ================================ SQL Expression Language Tutorial ================================ The SQLAlchemy Expression Language presents a system of representing relational database structures and expressions using Python constructs. These constructs are modeled to resemble those of the underlying database as closely as possible, while providing a modicum of abstraction of the various implementation differences between database backends. While the constructs attempt to represent equivalent concepts between backends with consistent structures, they do not conceal useful concepts that are unique to particular subsets of backends. The Expression Language therefore presents a method of writing backend-neutral SQL expressions, but does not attempt to enforce that expressions are backend-neutral. The Expression Language is in contrast to the Object Relational Mapper, which is a distinct API that builds on top of the Expression Language. Whereas the ORM, introduced in :ref:`ormtutorial_toplevel`, presents a high-level and abstracted pattern of usage, which itself is an example of applied usage of the Expression Language, the Expression Language presents a system of representing the primitive constructs of the relational database directly without opinion. While there is overlap among the usage patterns of the ORM and the Expression Language, the similarities are more superficial than they may at first appear. One approaches the structure and content of data from the perspective of a user-defined `domain model <http://en.wikipedia.org/wiki/Domain_model>`_ which is transparently persisted and refreshed from its underlying storage model. The other approaches it from the perspective of literal schema and SQL expression representations which are explicitly composed into messages consumed individually by the database. A successful application may be constructed using the Expression Language exclusively, though the application will need to define its own system of translating application concepts into individual database messages and from individual database result sets. Alternatively, an application constructed with the ORM may, in advanced scenarios, make occasional usage of the Expression Language directly in certain areas where specific database interactions are required. The following tutorial is in doctest format, meaning each ``>>>`` line represents something you can type at a Python command prompt, and the following text represents the expected return value. The tutorial has no prerequisites.
Version Check ============= A quick check to verify that we are on at least **version 1.0** of SQLAlchemy: .. sourcecode:: pycon+sql >>> import sqlalchemy >>> sqlalchemy.__version__ # doctest:+SKIP 1.0.0 Connecting ========== For this tutorial we will use an in-memory-only SQLite database. This is an easy way to test things without needing to have an actual database defined anywhere. To connect we use :func:`~sqlalchemy.create_engine`: .. sourcecode:: pycon+sql >>> from sqlalchemy import create_engine >>> engine = create_engine('sqlite:///:memory:', echo=True) The ``echo`` flag is a shortcut to setting up SQLAlchemy logging, which is accomplished via Python's standard ``logging`` module. With it enabled, we'll see all the generated SQL produced. If you are working through this tutorial and want less output generated, set it to ``False``. This tutorial will format the SQL behind a popup window so it doesn't get in our way; just click the "SQL" links to see what's being generated. The return value of :func:`.create_engine` is an instance of :class:`.Engine`, and it represents the core interface to the database, adapted through a :term:`dialect` that handles the details of the database and :term:`DBAPI` in use. In this case the SQLite dialect will interpret instructions to the Python built-in ``sqlite3`` module. .. sidebar:: Lazy Connecting The :class:`.Engine`, when first returned by :func:`.create_engine`, has not actually tried to connect to the database yet; that happens only the first time it is asked to perform a task against the database. The first time a method like :meth:`.Engine.execute` or :meth:`.Engine.connect` is called, the :class:`.Engine` establishes a real :term:`DBAPI` connection to the database, which is then used to emit the SQL. .. seealso:: :ref:`database_urls` - includes examples of :func:`.create_engine` connecting to several kinds of databases with links to more information. Define and Create Tables ========================= The SQL Expression Language constructs its expressions in most cases against table columns. In SQLAlchemy, a column is most often represented by an object called :class:`~sqlalchemy.schema.Column`, and in all cases a :class:`~sqlalchemy.schema.Column` is associated with a :class:`~sqlalchemy.schema.Table`. A collection of :class:`~sqlalchemy.schema.Table` objects and their associated child objects is referred to as **database metadata**. In this tutorial we will explicitly lay out several :class:`~sqlalchemy.schema.Table` objects, but note that SA can also "import" whole sets of :class:`~sqlalchemy.schema.Table` objects automatically from an existing database (this process is called **table reflection**). We define our tables all within a catalog called :class:`~sqlalchemy.schema.MetaData`, using the :class:`~sqlalchemy.schema.Table` construct, which resembles regular SQL CREATE TABLE statements. We'll make two tables, one of which represents "users" in an application, and another which represents zero or more "email addresses" for each row in the "users" table: .. sourcecode:: pycon+sql >>> from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey >>> metadata = MetaData() >>> users = Table('users', metadata, ... Column('id', Integer, primary_key=True), ... Column('name', String), ... Column('fullname', String), ... ) >>> addresses = Table('addresses', metadata, ... Column('id', Integer, primary_key=True), ... Column('user_id', None, ForeignKey('users.id')), ... Column('email_address', String, nullable=False) ... 
) All about how to define :class:`~sqlalchemy.schema.Table` objects, as well as how to create them from an existing database automatically, is described in :ref:`metadata_toplevel`. Next, to tell the :class:`~sqlalchemy.schema.MetaData` we'd actually like to create our selection of tables for real inside the SQLite database, we use :func:`~sqlalchemy.schema.MetaData.create_all`, passing it the ``engine`` instance which points to our database. This will check for the presence of each table first before creating, so it's safe to call multiple times: .. sourcecode:: pycon+sql {sql}>>> metadata.create_all(engine) SE... CREATE TABLE users ( id INTEGER NOT NULL, name VARCHAR, fullname VARCHAR, PRIMARY KEY (id) ) () COMMIT CREATE TABLE addresses ( id INTEGER NOT NULL, user_id INTEGER, email_address VARCHAR NOT NULL, PRIMARY KEY (id), FOREIGN KEY(user_id) REFERENCES users (id) ) () COMMIT .. note:: Users familiar with the syntax of CREATE TABLE may notice that the VARCHAR columns were generated without a length; on SQLite and Postgresql, this is a valid datatype, but on others, it's not allowed. So if running this tutorial on one of those databases, and you wish to use SQLAlchemy to issue CREATE TABLE, a "length" may be provided to the :class:`~sqlalchemy.types.String` type as below:: Column('name', String(50)) The length field on :class:`~sqlalchemy.types.String`, as well as similar precision/scale fields available on :class:`~sqlalchemy.types.Integer`, :class:`~sqlalchemy.types.Numeric`, etc. are not referenced by SQLAlchemy other than when creating tables. Additionally, Firebird and Oracle require sequences to generate new primary key identifiers, and SQLAlchemy doesn't generate or assume these without being instructed. For that, you use the :class:`~sqlalchemy.schema.Sequence` construct:: from sqlalchemy import Sequence Column('id', Integer, Sequence('user_id_seq'), primary_key=True) A full, foolproof :class:`~sqlalchemy.schema.Table` is therefore:: users = Table('users', metadata, Column('id', Integer, Sequence('user_id_seq'), primary_key=True), Column('name', String(50)), Column('fullname', String(50)), Column('password', String(12)) ) We include this more verbose :class:`~.schema.Table` construct separately to highlight the difference between a minimal construct geared primarily towards in-Python usage only, versus one that will be used to emit CREATE TABLE statements on a particular set of backends with more stringent requirements. .. _coretutorial_insert_expressions: Insert Expressions ================== The first SQL expression we'll create is the :class:`~sqlalchemy.sql.expression.Insert` construct, which represents an INSERT statement. This is typically created relative to its target table:: >>> ins = users.insert() To see a sample of the SQL this construct produces, use the ``str()`` function:: >>> str(ins) 'INSERT INTO users (id, name, fullname) VALUES (:id, :name, :fullname)' Notice above that the INSERT statement names every column in the ``users`` table. This can be limited by using the ``values()`` method, which establishes the VALUES clause of the INSERT explicitly:: >>> ins = users.insert().values(name='jack', fullname='Jack Jones') >>> str(ins) 'INSERT INTO users (name, fullname) VALUES (:name, :fullname)' Above, while the ``values`` method limited the VALUES clause to just two columns, the actual data we placed in ``values`` didn't get rendered into the string; instead we got named bind parameters. 
As it turns out, our data *is* stored within our :class:`~sqlalchemy.sql.expression.Insert` construct, but it typically only comes out when the statement is actually executed; since the data consists of literal values, SQLAlchemy automatically generates bind parameters for them. We can peek at this data for now by looking at the compiled form of the statement:: >>> ins.compile().params # doctest: +SKIP {'fullname': 'Jack Jones', 'name': 'jack'} Executing ========== The interesting part of an :class:`~sqlalchemy.sql.expression.Insert` is executing it. In this tutorial, we will generally focus on the most explicit method of executing a SQL construct, and later touch upon some "shortcut" ways to do it. The ``engine`` object we created is a repository for database connections capable of issuing SQL to the database. To acquire a connection, we use the ``connect()`` method:: >>> conn = engine.connect() >>> conn The :class:`~sqlalchemy.engine.Connection` object represents an actively checked out DBAPI connection resource. Let's feed it our :class:`~sqlalchemy.sql.expression.Insert` object and see what happens: .. sourcecode:: pycon+sql >>> result = conn.execute(ins) {opensql}INSERT INTO users (name, fullname) VALUES (?, ?) ('jack', 'Jack Jones') COMMIT So the INSERT statement was now issued to the database, although we got positional "qmark" bind parameters instead of "named" bind parameters in the output. How come ? Because when executed, the :class:`~sqlalchemy.engine.Connection` used the SQLite **dialect** to help generate the statement; when we use the ``str()`` function, the statement isn't aware of this dialect, and falls back onto a default which uses named parameters. We can view this manually as follows: .. sourcecode:: pycon+sql >>> ins.bind = engine >>> str(ins) 'INSERT INTO users (name, fullname) VALUES (?, ?)' What about the ``result`` variable we got when we called ``execute()`` ? As the SQLAlchemy :class:`~sqlalchemy.engine.Connection` object references a DBAPI connection, the result, known as a :class:`~sqlalchemy.engine.ResultProxy` object, is analogous to the DBAPI cursor object. In the case of an INSERT, we can get important information from it, such as the primary key values which were generated from our statement using :attr:`.ResultProxy.inserted_primary_key`: .. sourcecode:: pycon+sql >>> result.inserted_primary_key [1] The value of ``1`` was automatically generated by SQLite, but only because we did not specify the ``id`` column in our :class:`~sqlalchemy.sql.expression.Insert` statement; otherwise, our explicit value would have been used. In either case, SQLAlchemy always knows how to get at a newly generated primary key value, even though the method of generating them is different across different databases; each database's :class:`~sqlalchemy.engine.interfaces.Dialect` knows the specific steps needed to determine the correct value (or values; note that :attr:`.ResultProxy.inserted_primary_key` returns a list so that it supports composite primary keys). Methods here range from using ``cursor.lastrowid``, to selecting from a database-specific function, to using ``INSERT..RETURNING`` syntax; this all occurs transparently. .. _execute_multiple: Executing Multiple Statements ============================== Our insert example above was intentionally a little drawn out to show various behaviors of expression language constructs.
In the usual case, an :class:`~sqlalchemy.sql.expression.Insert` statement is compiled against the parameters sent to the ``execute()`` method on :class:`~sqlalchemy.engine.Connection`, so that there's no need to use the ``values`` keyword with :class:`~sqlalchemy.sql.expression.Insert`. Let's create a generic :class:`~sqlalchemy.sql.expression.Insert` statement again and use it in the "normal" way: .. sourcecode:: pycon+sql >>> ins = users.insert() >>> conn.execute(ins, id=2, name='wendy', fullname='Wendy Williams') {opensql}INSERT INTO users (id, name, fullname) VALUES (?, ?, ?) (2, 'wendy', 'Wendy Williams') COMMIT {stop} Above, because we specified all three columns in the ``execute()`` method, the compiled :class:`~.expression.Insert` included all three columns. The :class:`~.expression.Insert` statement is compiled at execution time based on the parameters we specified; if we specified fewer parameters, the :class:`~.expression.Insert` would have fewer entries in its VALUES clause. To issue many inserts using DBAPI's ``executemany()`` method, we can send in a list of dictionaries each containing a distinct set of parameters to be inserted, as we do here to add some email addresses: .. sourcecode:: pycon+sql >>> conn.execute(addresses.insert(), [ ... {'user_id': 1, 'email_address' : 'jack@yahoo.com'}, ... {'user_id': 1, 'email_address' : 'jack@msn.com'}, ... {'user_id': 2, 'email_address' : 'www@www.org'}, ... {'user_id': 2, 'email_address' : 'wendy@aol.com'}, ... ]) {opensql}INSERT INTO addresses (user_id, email_address) VALUES (?, ?) ((1, 'jack@yahoo.com'), (1, 'jack@msn.com'), (2, 'www@www.org'), (2, 'wendy@aol.com')) COMMIT {stop} Above, we again relied upon SQLite's automatic generation of primary key identifiers for each ``addresses`` row. When executing multiple sets of parameters, each dictionary must have the **same** set of keys; i.e. you can't have fewer keys in some dictionaries than others. This is because the :class:`~sqlalchemy.sql.expression.Insert` statement is compiled against the **first** dictionary in the list, and it's assumed that all subsequent argument dictionaries are compatible with that statement. The "executemany" style of invocation is available for each of the :func:`.insert`, :func:`.update` and :func:`.delete` constructs. .. _coretutorial_selecting: Selecting ========== We began with inserts just so that our test database had some data in it. The more interesting part of the data is selecting it! We'll cover UPDATE and DELETE statements later. The primary construct used to generate SELECT statements is the :func:`.select` function: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import select >>> s = select([users]) >>> result = conn.execute(s) {opensql}SELECT users.id, users.name, users.fullname FROM users () Above, we issued a basic :func:`.select` call, placing the ``users`` table within the COLUMNS clause of the select, and then executing. SQLAlchemy expanded the ``users`` table into the set of each of its columns, and also generated a FROM clause for us. The result returned is again a :class:`~sqlalchemy.engine.ResultProxy` object, which acts much like a DBAPI cursor, including methods such as :func:`~sqlalchemy.engine.ResultProxy.fetchone` and :func:`~sqlalchemy.engine.ResultProxy.fetchall`. The easiest way to get rows from it is to just iterate: .. sourcecode:: pycon+sql >>> for row in result: ...
print(row) (1, u'jack', u'Jack Jones') (2, u'wendy', u'Wendy Williams') Above, we see that printing each row produces a simple tuple-like result. We have more options for accessing the data in each row. One very common way is through dictionary access, using the string names of columns: .. sourcecode:: pycon+sql {sql}>>> result = conn.execute(s) SELECT users.id, users.name, users.fullname FROM users () {stop}>>> row = result.fetchone() >>> print("name:", row['name'], "; fullname:", row['fullname']) name: jack ; fullname: Jack Jones Integer indexes work as well: .. sourcecode:: pycon+sql >>> row = result.fetchone() >>> print("name:", row[1], "; fullname:", row[2]) name: wendy ; fullname: Wendy Williams But another way, whose usefulness will become apparent later on, is to use the :class:`~sqlalchemy.schema.Column` objects directly as keys: .. sourcecode:: pycon+sql {sql}>>> for row in conn.execute(s): ... print("name:", row[users.c.name], "; fullname:", row[users.c.fullname]) SELECT users.id, users.name, users.fullname FROM users () {stop}name: jack ; fullname: Jack Jones name: wendy ; fullname: Wendy Williams Result sets which have pending rows remaining should be explicitly closed before discarding. While the cursor and connection resources referenced by the :class:`~sqlalchemy.engine.ResultProxy` will be respectively closed and returned to the connection pool when the object is garbage collected, it's better to make it explicit as some database APIs are very picky about such things: .. sourcecode:: pycon+sql >>> result.close() If we'd like to more carefully control the columns which are placed in the COLUMNS clause of the select, we reference individual :class:`~sqlalchemy.schema.Column` objects from our :class:`~sqlalchemy.schema.Table`. These are available as named attributes off the ``c`` attribute of the :class:`~sqlalchemy.schema.Table` object: .. sourcecode:: pycon+sql >>> s = select([users.c.name, users.c.fullname]) {sql}>>> result = conn.execute(s) SELECT users.name, users.fullname FROM users () {stop}>>> for row in result: ... print(row) (u'jack', u'Jack Jones') (u'wendy', u'Wendy Williams') Let's observe something interesting about the FROM clause. Whereas the generated statement contains two distinct sections, a "SELECT columns" part and a "FROM table" part, our :func:`.select` construct only has a list containing columns. How does this work ? Let's try putting *two* tables into our :func:`.select` statement: .. sourcecode:: pycon+sql {sql}>>> for row in conn.execute(select([users, addresses])): ... print(row) SELECT users.id, users.name, users.fullname, addresses.id, addresses.user_id, addresses.email_address FROM users, addresses () {stop}(1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com') (1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com') (1, u'jack', u'Jack Jones', 3, 2, u'www@www.org') (1, u'jack', u'Jack Jones', 4, 2, u'wendy@aol.com') (2, u'wendy', u'Wendy Williams', 1, 1, u'jack@yahoo.com') (2, u'wendy', u'Wendy Williams', 2, 1, u'jack@msn.com') (2, u'wendy', u'Wendy Williams', 3, 2, u'www@www.org') (2, u'wendy', u'Wendy Williams', 4, 2, u'wendy@aol.com') It placed **both** tables into the FROM clause. But also, it made a real mess. Those who are familiar with SQL joins know that this is a **Cartesian product**; each row from the ``users`` table is produced against each row from the ``addresses`` table. So to put some sanity into this statement, we need a WHERE clause. We do that using :meth:`.Select.where`: ..
sourcecode:: pycon+sql >>> s = select([users, addresses]).where(users.c.id == addresses.c.user_id) {sql}>>> for row in conn.execute(s): ... print(row) SELECT users.id, users.name, users.fullname, addresses.id, addresses.user_id, addresses.email_address FROM users, addresses WHERE users.id = addresses.user_id () {stop}(1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com') (1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com') (2, u'wendy', u'Wendy Williams', 3, 2, u'www@www.org') (2, u'wendy', u'Wendy Williams', 4, 2, u'wendy@aol.com') So that looks a lot better: we added an expression to our :func:`.select` which had the effect of adding ``WHERE users.id = addresses.user_id`` to our statement, and our results were managed down so that the join of ``users`` and ``addresses`` rows made sense. But let's look at that expression. It's using just a Python equality operator between two different :class:`~sqlalchemy.schema.Column` objects. It should be clear that something is up. Saying ``1 == 1`` produces ``True``, and ``1 == 2`` produces ``False``, not a WHERE clause. So let's see exactly what that expression is doing: .. sourcecode:: pycon+sql >>> users.c.id == addresses.c.user_id Wow, surprise ! This is neither a ``True`` nor a ``False``. Well what is it ? .. sourcecode:: pycon+sql >>> str(users.c.id == addresses.c.user_id) 'users.id = addresses.user_id' As you can see, the ``==`` operator is producing an object that is very much like the :class:`~.expression.Insert` and :func:`.select` objects we've made so far, thanks to Python's ``__eq__()`` builtin; you call ``str()`` on it and it produces SQL. By now, one can see that everything we are working with is ultimately the same type of object. SQLAlchemy terms the base class of all of these expressions as :class:`~.expression.ColumnElement`. Operators ========== Since we've stumbled upon SQLAlchemy's operator paradigm, let's go through some of its capabilities. We've seen how to equate two columns to each other: .. sourcecode:: pycon+sql >>> print(users.c.id == addresses.c.user_id) users.id = addresses.user_id If we use a literal value (a literal meaning, not a SQLAlchemy clause object), we get a bind parameter: .. sourcecode:: pycon+sql >>> print(users.c.id == 7) users.id = :id_1 The ``7`` literal is embedded in the resulting :class:`~.expression.ColumnElement`; we can use the same trick we did with the :class:`~sqlalchemy.sql.expression.Insert` object to see it: .. sourcecode:: pycon+sql >>> (users.c.id == 7).compile().params {u'id_1': 7} Most Python operators, as it turns out, produce a SQL expression here, like equals, not equals, etc.: .. sourcecode:: pycon+sql >>> print(users.c.id != 7) users.id != :id_1 >>> # None converts to IS NULL >>> print(users.c.name == None) users.name IS NULL >>> # reverse works too >>> print('fred' > users.c.name) users.name < :name_1 If we add two integer columns together, we get an addition expression: .. sourcecode:: pycon+sql >>> print(users.c.id + addresses.c.id) users.id + addresses.id Interestingly, the type of the :class:`~sqlalchemy.schema.Column` is important! If we use ``+`` with two string-based columns (recall we put types like :class:`~sqlalchemy.types.Integer` and :class:`~sqlalchemy.types.String` on our :class:`~sqlalchemy.schema.Column` objects at the beginning), we get something different: .. sourcecode:: pycon+sql >>> print(users.c.name + users.c.fullname) users.name || users.fullname Where ``||`` is the string concatenation operator used on most databases. But not all of them. MySQL users, fear not: ..
sourcecode:: pycon+sql >>> print((users.c.name + users.c.fullname). ... compile(bind=create_engine('mysql://'))) # doctest: +SKIP concat(users.name, users.fullname) The above illustrates the SQL that's generated for an :class:`~sqlalchemy.engine.Engine` that's connected to a MySQL database; the ``||`` operator now compiles as MySQL's ``concat()`` function. If you have come across an operator which really isn't available, you can always use the :meth:`.ColumnOperators.op` method; this generates whatever operator you need: .. sourcecode:: pycon+sql >>> print(users.c.name.op('tiddlywinks')('foo')) users.name tiddlywinks :name_1 This function can also be used to make bitwise operators explicit. For example:: somecolumn.op('&')(0xff) is a bitwise AND of the value in ``somecolumn``. Operator Customization ----------------------- While :meth:`.ColumnOperators.op` is handy to get at a custom operator in a hurry, the Core supports fundamental customization and extension of the operator system at the type level. The behavior of existing operators can be modified on a per-type basis, and new operations can be defined which become available for all column expressions that are part of that particular type. See the section :ref:`types_operators` for a description. Conjunctions ============= We'd like to show off some of our operators inside of :func:`.select` constructs. But we need to lump them together a little more, so let's first introduce some conjunctions. Conjunctions are those little words like AND and OR that put things together. We'll also hit upon NOT. These are available via the corresponding :func:`.and_`, :func:`.or_`, and :func:`.not_` functions SQLAlchemy provides (notice we also throw in a :meth:`~.ColumnOperators.like`): .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import and_, or_, not_ >>> print(and_( ... users.c.name.like('j%'), ... users.c.id == addresses.c.user_id, ... or_( ... addresses.c.email_address == 'wendy@aol.com', ... addresses.c.email_address == 'jack@yahoo.com' ... ), ... not_(users.c.id > 5) ... ) ... ) users.name LIKE :name_1 AND users.id = addresses.user_id AND (addresses.email_address = :email_address_1 OR addresses.email_address = :email_address_2) AND users.id <= :id_1 And you can also use the re-jiggered bitwise AND, OR and NOT operators, although because of Python operator precedence you have to watch your parentheses: .. sourcecode:: pycon+sql >>> print(users.c.name.like('j%') & (users.c.id == addresses.c.user_id) & ... ( ... (addresses.c.email_address == 'wendy@aol.com') | \ ... (addresses.c.email_address == 'jack@yahoo.com') ... ) \ ... & ~(users.c.id>5) ... ) users.name LIKE :name_1 AND users.id = addresses.user_id AND (addresses.email_address = :email_address_1 OR addresses.email_address = :email_address_2) AND users.id <= :id_1 So with all of this vocabulary, let's select all users who have an email address at AOL or MSN, whose name starts with a letter between "m" and "z", and we'll also generate a column containing their full name combined with their email address. We will add two new constructs to this statement, :meth:`~.ColumnOperators.between` and :meth:`~.ColumnElement.label`. :meth:`~.ColumnOperators.between` produces a BETWEEN clause, and :meth:`~.ColumnElement.label` is used in a column expression to produce labels using the ``AS`` keyword; it's recommended when selecting from expressions that otherwise would not have a name: .. sourcecode:: pycon+sql >>> s = select([(users.c.fullname + ... ", " + addresses.c.email_address). ... label('title')]).\ ...
where( ... and_( ... users.c.id == addresses.c.user_id, ... users.c.name.between('m', 'z'), ... or_( ... addresses.c.email_address.like('%@aol.com'), ... addresses.c.email_address.like('%@msn.com') ... ) ... ) ... ) >>> conn.execute(s).fetchall() SELECT users.fullname || ? || addresses.email_address AS title FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?) (', ', 'm', 'z', '%@aol.com', '%@msn.com') [(u'Wendy Williams, wendy@aol.com',)] Once again, SQLAlchemy figured out the FROM clause for our statement. In fact it will determine the FROM clause based on all of its other bits; the columns clause, the where clause, and also some other elements which we haven't covered yet, which include ORDER BY, GROUP BY, and HAVING. A shortcut to using :func:`.and_` is to chain together multiple :meth:`~.Select.where` clauses. The above can also be written as: .. sourcecode:: pycon+sql >>> s = select([(users.c.fullname + ... ", " + addresses.c.email_address). ... label('title')]).\ ... where(users.c.id == addresses.c.user_id).\ ... where(users.c.name.between('m', 'z')).\ ... where( ... or_( ... addresses.c.email_address.like('%@aol.com'), ... addresses.c.email_address.like('%@msn.com') ... ) ... ) >>> conn.execute(s).fetchall() SELECT users.fullname || ? || addresses.email_address AS title FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?) (', ', 'm', 'z', '%@aol.com', '%@msn.com') [(u'Wendy Williams, wendy@aol.com',)] The way that we can build up a :func:`.select` construct through successive method calls is called :term:`method chaining`. .. _sqlexpression_text: Using Textual SQL ================= Our last example really became a handful to type. Going from what one understands to be a textual SQL expression into a Python construct which groups components together in a programmatic style can be hard. That's why SQLAlchemy lets you just use strings, for those cases when the SQL is already known and there isn't a strong need for the statement to support dynamic features. The :func:`~.expression.text` construct is used to compose a textual statement that is passed to the database mostly unchanged. Below, we create a :func:`~.expression.text` object and execute it: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import text >>> s = text( ... "SELECT users.fullname || ', ' || addresses.email_address AS title " ... "FROM users, addresses " ... "WHERE users.id = addresses.user_id " ... "AND users.name BETWEEN :x AND :y " ... "AND (addresses.email_address LIKE :e1 " ... "OR addresses.email_address LIKE :e2)") {sql}>>> conn.execute(s, x='m', y='z', e1='%@aol.com', e2='%@msn.com').fetchall() SELECT users.fullname || ', ' || addresses.email_address AS title FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?) ('m', 'z', '%@aol.com', '%@msn.com') {stop}[(u'Wendy Williams, wendy@aol.com',)] Above, we can see that bound parameters are specified in :func:`~.expression.text` using the named colon format; this format is consistent regardless of database backend. To send values in for the parameters, we passed them into the :meth:`~.Connection.execute` method as additional arguments. 
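Note that nothing about the :func:`~.expression.text` construct itself is bound to those values at this point; the same construct can be executed again with different criteria, and the parameters may equivalently be passed as a single dictionary. A quick sketch (the criteria chosen here are arbitrary):

.. sourcecode:: pycon+sql

    >>> conn.execute(
    ...     s, {'x': 'a', 'y': 'm', 'e1': '%@aol.com', 'e2': '%@msn.com'}
    ... ).fetchall()  # doctest: +SKIP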
Depending on how we are working, we can also send values to be associated directly with the :func:`~.expression.text` construct using the :meth:`~.TextClause.bindparams` method; if we are using datatypes that need special handling as they are received in Python, or we'd like to compose our :func:`~.expression.text` object into a larger expression, we may also wish to use the :meth:`~.TextClause.columns` method in order to specify column return types and names: .. sourcecode:: pycon+sql >>> s = text( ... "SELECT users.fullname || ', ' || addresses.email_address AS title " ... "FROM users, addresses " ... "WHERE users.id = addresses.user_id " ... "AND users.name BETWEEN :x AND :y " ... "AND (addresses.email_address LIKE :e1 " ... "OR addresses.email_address LIKE :e2)") >>> s = s.columns(title=String) >>> s = s.bindparams(x='m', y='z', e1='%@aol.com', e2='%@msn.com') >>> conn.execute(s).fetchall() SELECT users.fullname || ', ' || addresses.email_address AS title FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?) ('m', 'z', '%@aol.com', '%@msn.com') {stop}[(u'Wendy Williams, wendy@aol.com',)] :func:`~.expression.text` can also be used freely within a :func:`~.expression.select` object, which accepts :func:`~.expression.text` objects as an argument for most of its builder functions. Below, we combine the usage of :func:`~.expression.text` within a :func:`.select` object. The :func:`~.expression.select` construct provides the "geometry" of the statement, and the :func:`~.expression.text` construct provides the textual content within this form. We can build a statement without the need to refer to any pre-established :class:`.Table` metadata: .. sourcecode:: pycon+sql >>> s = select([ ... text("users.fullname || ', ' || addresses.email_address AS title") ... ]).\ ... where( ... and_( ... text("users.id = addresses.user_id"), ... text("users.name BETWEEN 'm' AND 'z'"), ... text( ... "(addresses.email_address LIKE :x " ... "OR addresses.email_address LIKE :y)") ... ) ... ).select_from(text('users, addresses')) {sql}>>> conn.execute(s, x='%@aol.com', y='%@msn.com').fetchall() SELECT users.fullname || ', ' || addresses.email_address AS title FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN 'm' AND 'z' AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?) ('%@aol.com', '%@msn.com') {stop}[(u'Wendy Williams, wendy@aol.com',)] .. topic:: Why not use strings everywhere? When we use literal strings, the Core can't adapt our SQL to work on different database backends. Above, our expression won't work with MySQL since MySQL doesn't have the ``||`` construct. If we only use :func:`.text` to specify columns, our :func:`.select` construct will have an empty ``.c`` collection that we'd normally use to create subqueries. We also lose typing information about result columns and bound parameters, which is often needed to correctly translate data values between Python and the database. Overall, the more :func:`.text` we use, the less flexibility and ability for manipulation/transformation the statement will have. .. versionchanged:: 1.0.0 The :func:`.select` construct emits warnings when string SQL fragments are coerced to :func:`.text`, and :func:`.text` should be used explicitly. See :ref:`migration_2992` for background. ..
_sqlexpression_literal_column: Using More Specific Text with :func:`.table`, :func:`.literal_column`, and :func:`.column` ------------------------------------------------------------------------------------------- We can move our level of structure back in the other direction too, by using :func:`~.expression.column`, :func:`~.expression.literal_column`, and :func:`~.expression.table` for some of the key elements of our statement. Using these constructs, we can get some more expression capabilities than if we used :func:`~.expression.text` directly, as they provide to the Core more information about how the strings they store are to be used, but still without the need to get into full :class:`.Table` based metadata. Below, we also specify the :class:`.String` datatype for two of the key :func:`~.expression.literal_column` objects, so that the string-specific concatenation operator becomes available. We also use :func:`~.expression.literal_column` in order to use table-qualified expressions, e.g. ``users.fullname``, that will be rendered as is; using :func:`~.expression.column` implies an individual column name that may be quoted: .. sourcecode:: pycon+sql >>> from sqlalchemy import select, and_, text, String >>> from sqlalchemy.sql import table, literal_column >>> s = select([ ... literal_column("users.fullname", String) + ... ', ' + ... literal_column("addresses.email_address").label("title") ... ]).\ ... where( ... and_( ... literal_column("users.id") == literal_column("addresses.user_id"), ... text("users.name BETWEEN 'm' AND 'z'"), ... text( ... "(addresses.email_address LIKE :x OR " ... "addresses.email_address LIKE :y)") ... ) ... ).select_from(table('users')).select_from(table('addresses')) {sql}>>> conn.execute(s, x='%@aol.com', y='%@msn.com').fetchall() SELECT users.fullname || ? || addresses.email_address AS anon_1 FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN 'm' AND 'z' AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?) (', ', '%@aol.com', '%@msn.com') {stop}[(u'Wendy Williams, wendy@aol.com',)] Ordering or Grouping by a Label ------------------------------- One place where we sometimes want to use a string as a shortcut is when our statement has some labeled column element that we want to refer to in a place such as the "ORDER BY" or "GROUP BY" clause; other candidates include fields within an "OVER" or "DISTINCT" clause. If we have such a label in our :func:`.select` construct, we can refer to it directly by passing the string straight into :meth:`.select.order_by` or :meth:`.select.group_by`, among others. This will refer to the named label and also prevent the expression from being rendered twice: .. sourcecode:: pycon+sql >>> from sqlalchemy import func >>> stmt = select([ ... addresses.c.user_id, ... func.count(addresses.c.id).label('num_addresses')]).\ ... order_by("num_addresses") {sql}>>> conn.execute(stmt).fetchall() SELECT addresses.user_id, count(addresses.id) AS num_addresses FROM addresses ORDER BY num_addresses () {stop}[(2, 4)] We can use modifiers like :func:`.asc` or :func:`.desc` by passing the string name: .. sourcecode:: pycon+sql >>> from sqlalchemy import func, desc >>> stmt = select([ ... addresses.c.user_id, ... func.count(addresses.c.id).label('num_addresses')]).\ ... 
order_by(desc("num_addresses")) {sql}>>> conn.execute(stmt).fetchall() SELECT addresses.user_id, count(addresses.id) AS num_addresses FROM addresses ORDER BY num_addresses DESC () {stop}[(2, 4)] Note that the string feature here is very much tailored to when we have already used the :meth:`~.ColumnElement.label` method to create a specifically-named label. In other cases, we always want to refer to the :class:`.ColumnElement` object directly so that the expression system can make the most effective choices for rendering. Below, we illustrate how using the :class:`.ColumnElement` eliminates ambiguity when we want to order by a column name that appears more than once: .. sourcecode:: pycon+sql >>> u1a, u1b = users.alias(), users.alias() >>> stmt = select([u1a, u1b]).\ ... where(u1a.c.name > u1b.c.name).\ ... order_by(u1a.c.name) # using "name" here would be ambiguous {sql}>>> conn.execute(stmt).fetchall() SELECT users_1.id, users_1.name, users_1.fullname, users_2.id, users_2.name, users_2.fullname FROM users AS users_1, users AS users_2 WHERE users_1.name > users_2.name ORDER BY users_1.name () {stop}[(2, u'wendy', u'Wendy Williams', 1, u'jack', u'Jack Jones')] Using Aliases ============== The alias in SQL corresponds to a "renamed" version of a table or SELECT statement, which occurs anytime you say "SELECT .. FROM sometable AS someothername". The ``AS`` creates a new name for the table. Aliases are a key construct as they allow any table or subquery to be referenced by a unique name. In the case of a table, this allows the same table to be named in the FROM clause multiple times. In the case of a SELECT statement, it provides a parent name for the columns represented by the statement, allowing them to be referenced relative to this name. In SQLAlchemy, any :class:`.Table`, :func:`.select` construct, or other selectable can be turned into an alias using the :meth:`.FromClause.alias` method, which produces a :class:`.Alias` construct. As an example, suppose we know that our user ``jack`` has two particular email addresses. How can we locate jack based on the combination of those two addresses? To accomplish this, we'd use a join to the ``addresses`` table, once for each address. We create two :class:`.Alias` constructs against ``addresses``, and then use them both within a :func:`.select` construct: .. sourcecode:: pycon+sql >>> a1 = addresses.alias() >>> a2 = addresses.alias() >>> s = select([users]).\ ... where(and_( ... users.c.id == a1.c.user_id, ... users.c.id == a2.c.user_id, ... a1.c.email_address == 'jack@msn.com', ... a2.c.email_address == 'jack@yahoo.com' ... )) {sql}>>> conn.execute(s).fetchall() SELECT users.id, users.name, users.fullname FROM users, addresses AS addresses_1, addresses AS addresses_2 WHERE users.id = addresses_1.user_id AND users.id = addresses_2.user_id AND addresses_1.email_address = ? AND addresses_2.email_address = ? ('jack@msn.com', 'jack@yahoo.com') {stop}[(1, u'jack', u'Jack Jones')] Note that the :class:`.Alias` construct generated the names ``addresses_1`` and ``addresses_2`` in the final SQL result. The generation of these names is determined by the position of the construct within the statement. If we created a query using only the second ``a2`` alias, the name would come out as ``addresses_1``. The generation of the names is also *deterministic*, meaning the same SQLAlchemy statement construct will produce the identical SQL string each time it is rendered for a particular dialect. 
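We can see this positional, deterministic naming directly; here is a quick sketch which renders a statement using only the second ``a2`` alias, where the anonymized name comes out as ``addresses_1``:

.. sourcecode:: pycon+sql

    >>> print(select([users]).where(users.c.id == a2.c.user_id))
    SELECT users.id, users.name, users.fullname
    FROM users, addresses AS addresses_1
    WHERE users.id = addresses_1.user_id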
Since on the outside, we refer to the alias using the :class:`.Alias` construct itself, we don't need to be concerned about the generated name. However, for the purposes of debugging, it can be specified by passing a string name to the :meth:`.FromClause.alias` method:: >>> a1 = addresses.alias('a1') Aliases can of course be used for anything which you can SELECT from, including SELECT statements themselves. We can self-join the ``users`` table back to the :func:`.select` we've created by making an alias of the entire statement. The ``correlate(None)`` directive is to avoid SQLAlchemy's attempt to "correlate" the inner ``users`` table with the outer one: .. sourcecode:: pycon+sql >>> a1 = s.correlate(None).alias() >>> s = select([users.c.name]).where(users.c.id == a1.c.id) {sql}>>> conn.execute(s).fetchall() SELECT users.name FROM users, (SELECT users.id AS id, users.name AS name, users.fullname AS fullname FROM users, addresses AS addresses_1, addresses AS addresses_2 WHERE users.id = addresses_1.user_id AND users.id = addresses_2.user_id AND addresses_1.email_address = ? AND addresses_2.email_address = ?) AS anon_1 WHERE users.id = anon_1.id ('jack@msn.com', 'jack@yahoo.com') {stop}[(u'jack',)] Using Joins ============ We're halfway along to being able to construct any SELECT expression. The next cornerstone of the SELECT is the JOIN expression. We've already been doing joins in our examples, by just placing two tables in either the columns clause or the where clause of the :func:`.select` construct. But if we want to make a real "JOIN" or "OUTERJOIN" construct, we use the :meth:`~.FromClause.join` and :meth:`~.FromClause.outerjoin` methods, most commonly accessed from the left table in the join: .. sourcecode:: pycon+sql >>> print(users.join(addresses)) users JOIN addresses ON users.id = addresses.user_id The alert reader will see more surprises; SQLAlchemy figured out how to JOIN the two tables ! The ON condition of the join, as it's called, was automatically generated based on the :class:`~sqlalchemy.schema.ForeignKey` object which we placed on the ``addresses`` table way at the beginning of this tutorial. Already the ``join()`` construct is looking like a much better way to join tables. Of course you can join on whatever expression you want, such as if we want to join on all users who use the same name in their email address as their username: .. sourcecode:: pycon+sql >>> print(users.join(addresses, ... addresses.c.email_address.like(users.c.name + '%') ... ) ... ) users JOIN addresses ON addresses.email_address LIKE (users.name || :name_1) When we create a :func:`.select` construct, SQLAlchemy looks around at the tables we've mentioned and then places them in the FROM clause of the statement. When we use JOINs however, we know what FROM clause we want, so here we make use of the :meth:`~.Select.select_from` method: .. sourcecode:: pycon+sql >>> s = select([users.c.fullname]).select_from( ... users.join(addresses, ... addresses.c.email_address.like(users.c.name + '%')) ... ) {sql}>>> conn.execute(s).fetchall() SELECT users.fullname FROM users JOIN addresses ON addresses.email_address LIKE (users.name || ?) ('%',) {stop}[(u'Jack Jones',), (u'Jack Jones',), (u'Wendy Williams',)] The :meth:`~.FromClause.outerjoin` method creates ``LEFT OUTER JOIN`` constructs, and is used in the same way as :meth:`~.FromClause.join`: .. 
sourcecode:: pycon+sql >>> s = select([users.c.fullname]).select_from(users.outerjoin(addresses)) >>> print(s) SELECT users.fullname FROM users LEFT OUTER JOIN addresses ON users.id = addresses.user_id That's the output ``outerjoin()`` produces, unless, of course, you're stuck in a gig using Oracle prior to version 9, and you've set up your engine (which would be using ``OracleDialect``) to use Oracle-specific SQL: .. sourcecode:: pycon+sql >>> from sqlalchemy.dialects.oracle import dialect as OracleDialect >>> print(s.compile(dialect=OracleDialect(use_ansi=False))) SELECT users.fullname FROM users, addresses WHERE users.id = addresses.user_id(+) If you don't know what that SQL means, don't worry ! The secret tribe of Oracle DBAs don't want their black magic being found out ;). .. seealso:: :func:`.expression.join` :func:`.expression.outerjoin` :class:`.Join` Everything Else ================ The concepts of creating SQL expressions have been introduced. What's left are more variants of the same themes. So now we'll catalog the rest of the important things we'll need to know. .. _coretutorial_bind_param: Bind Parameter Objects ---------------------- Throughout all these examples, SQLAlchemy is busy creating bind parameters wherever literal expressions occur. You can also specify your own bind parameters with your own names, and use the same statement repeatedly. The :func:`.bindparam` construct is used to produce a bound parameter with a given name. While SQLAlchemy always refers to bound parameters by name on the API side, the database dialect converts to the appropriate named or positional style at execution time, as here where it converts to positional for SQLite: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import bindparam >>> s = users.select(users.c.name == bindparam('username')) {sql}>>> conn.execute(s, username='wendy').fetchall() SELECT users.id, users.name, users.fullname FROM users WHERE users.name = ? ('wendy',) {stop}[(2, u'wendy', u'Wendy Williams')] Another important aspect of :func:`.bindparam` is that it may be assigned a type. The type of the bind parameter will determine its behavior within expressions and also how the data bound to it is processed before being sent off to the database: .. sourcecode:: pycon+sql >>> s = users.select(users.c.name.like(bindparam('username', type_=String) + text("'%'"))) {sql}>>> conn.execute(s, username='wendy').fetchall() SELECT users.id, users.name, users.fullname FROM users WHERE users.name LIKE (? || '%') ('wendy',) {stop}[(2, u'wendy', u'Wendy Williams')] :func:`.bindparam` constructs of the same name can also be used multiple times, where only a single named value is needed in the execute parameters: .. sourcecode:: pycon+sql >>> s = select([users, addresses]).\ ... where( ... or_( ... users.c.name.like( ... bindparam('name', type_=String) + text("'%'")), ... addresses.c.email_address.like( ... bindparam('name', type_=String) + text("'@%'")) ... ) ... ).\ ... select_from(users.outerjoin(addresses)).\ ... order_by(addresses.c.id) {sql}>>> conn.execute(s, name='jack').fetchall() SELECT users.id, users.name, users.fullname, addresses.id, addresses.user_id, addresses.email_address FROM users LEFT OUTER JOIN addresses ON users.id = addresses.user_id WHERE users.name LIKE (? || '%') OR addresses.email_address LIKE (? || '@%') ORDER BY addresses.id ('jack', 'jack') {stop}[(1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com'), (1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com')] .. 
seealso:: :func:`.bindparam` Functions --------- SQL functions are created using the :data:`~.expression.func` keyword, which generates functions using attribute access: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import func >>> print(func.now()) now() >>> print(func.concat('x', 'y')) concat(:param_1, :param_2) By "generates", we mean that **any** SQL function is created based on the word you choose:: >>> print(func.xyz_my_goofy_function()) xyz_my_goofy_function() Certain function names are known by SQLAlchemy, allowing special behavioral rules to be applied. Some for example are "ANSI" functions, which means they don't get parentheses added after them, such as CURRENT_TIMESTAMP: .. sourcecode:: pycon+sql >>> print(func.current_timestamp()) CURRENT_TIMESTAMP Functions are most typically used in the columns clause of a select statement, and can also be labeled as well as given a type. Labeling a function is recommended so that the result can be targeted in a result row based on a string name, and assigning it a type is required when you need result-set processing to occur, such as for Unicode conversion and date conversions. Below, we use the result function ``scalar()`` to just read the first column of the first row and then close the result; the label, even though present, is not important in this case: .. sourcecode:: pycon+sql >>> conn.execute( ... select([ ... func.max(addresses.c.email_address, type_=String). ... label('maxemail') ... ]) ... ).scalar() {opensql}SELECT max(addresses.email_address) AS maxemail FROM addresses () {stop}u'www@www.org' Databases such as PostgreSQL and Oracle support functions that return whole result sets; these can be assembled into selectable units, which can be used in statements. For example, given a database function ``calculate()`` which takes the parameters ``x`` and ``y`` and returns three columns, which we'd like to name ``q``, ``z`` and ``r``, we can construct the selectable using "lexical" column objects as well as bind parameters: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import column >>> calculate = select([column('q'), column('z'), column('r')]).\ ... select_from( ... func.calculate( ... bindparam('x'), ... bindparam('y') ... ) ... ) >>> calc = calculate.alias() >>> print(select([users]).where(users.c.id > calc.c.z)) SELECT users.id, users.name, users.fullname FROM users, (SELECT q, z, r FROM calculate(:x, :y)) AS anon_1 WHERE users.id > anon_1.z If we wanted to use our ``calculate`` statement twice with different bind parameters, the :func:`~sqlalchemy.sql.expression.ClauseElement.unique_params` function will create copies for us, and mark the bind parameters as "unique" so that conflicting names are isolated. Note we also make two separate aliases of our selectable: .. sourcecode:: pycon+sql >>> calc1 = calculate.alias('c1').unique_params(x=17, y=45) >>> calc2 = calculate.alias('c2').unique_params(x=5, y=12) >>> s = select([users]).\ ... where(users.c.id.between(calc1.c.z, calc2.c.z)) >>> print(s) SELECT users.id, users.name, users.fullname FROM users, (SELECT q, z, r FROM calculate(:x_1, :y_1)) AS c1, (SELECT q, z, r FROM calculate(:x_2, :y_2)) AS c2 WHERE users.id BETWEEN c1.z AND c2.z >>> s.compile().params # doctest: +SKIP {u'x_2': 5, u'y_2': 12, u'y_1': 45, u'x_1': 17} .. seealso:: :data:`.func` Window Functions ----------------- Any :class:`.FunctionElement`, including functions generated by :data:`~.expression.func`, can be turned into a "window function", that is an OVER clause, using the :meth:`.FunctionElement.over` method: ..
sourcecode:: pycon+sql >>> s = select([ ... users.c.id, ... func.row_number().over(order_by=users.c.name) ... ]) >>> print(s) SELECT users.id, row_number() OVER (ORDER BY users.name) AS anon_1 FROM users .. seealso:: :func:`.over` :meth:`.FunctionElement.over` Unions and Other Set Operations ------------------------------- Unions come in two flavors, UNION and UNION ALL, which are available via module level functions :func:`~.expression.union` and :func:`~.expression.union_all`: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import union >>> u = union( ... addresses.select(). ... where(addresses.c.email_address == 'foo@bar.com'), ... addresses.select(). ... where(addresses.c.email_address.like('%@yahoo.com')), ... ).order_by(addresses.c.email_address) {sql}>>> conn.execute(u).fetchall() SELECT addresses.id, addresses.user_id, addresses.email_address FROM addresses WHERE addresses.email_address = ? UNION SELECT addresses.id, addresses.user_id, addresses.email_address FROM addresses WHERE addresses.email_address LIKE ? ORDER BY addresses.email_address ('foo@bar.com', '%@yahoo.com') {stop}[(1, 1, u'jack@yahoo.com')] Also available, though not supported on all databases, are :func:`~.expression.intersect`, :func:`~.expression.intersect_all`, :func:`~.expression.except_`, and :func:`~.expression.except_all`: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import except_ >>> u = except_( ... addresses.select(). ... where(addresses.c.email_address.like('%@%.com')), ... addresses.select(). ... where(addresses.c.email_address.like('%@msn.com')) ... ) {sql}>>> conn.execute(u).fetchall() SELECT addresses.id, addresses.user_id, addresses.email_address FROM addresses WHERE addresses.email_address LIKE ? EXCEPT SELECT addresses.id, addresses.user_id, addresses.email_address FROM addresses WHERE addresses.email_address LIKE ? ('%@%.com', '%@msn.com') {stop}[(1, 1, u'jack@yahoo.com'), (4, 2, u'wendy@aol.com')] A common issue with so-called "compound" selectables arises due to the fact that they nest with parentheses. SQLite in particular doesn't like a statement that starts with a parenthesis. So when nesting a "compound" inside a "compound", it's often necessary to apply ``.alias().select()`` to the first element of the outermost compound, if that element is also a compound. For example, to nest a "union" and a "select" inside of "except\_", SQLite will want the "union" to be stated as a subquery: .. sourcecode:: pycon+sql >>> u = except_( ... union( ... addresses.select(). ... where(addresses.c.email_address.like('%@yahoo.com')), ... addresses.select(). ... where(addresses.c.email_address.like('%@msn.com')) ... ).alias().select(), # apply subquery here ... addresses.select(addresses.c.email_address.like('%@msn.com')) ... ) {sql}>>> conn.execute(u).fetchall() SELECT anon_1.id, anon_1.user_id, anon_1.email_address FROM (SELECT addresses.id AS id, addresses.user_id AS user_id, addresses.email_address AS email_address FROM addresses WHERE addresses.email_address LIKE ? UNION SELECT addresses.id AS id, addresses.user_id AS user_id, addresses.email_address AS email_address FROM addresses WHERE addresses.email_address LIKE ?) AS anon_1 EXCEPT SELECT addresses.id, addresses.user_id, addresses.email_address FROM addresses WHERE addresses.email_address LIKE ? ('%@yahoo.com', '%@msn.com', '%@msn.com') {stop}[(1, 1, u'jack@yahoo.com')] .. seealso:: :func:`.union` :func:`.union_all` :func:`.intersect` :func:`.intersect_all` :func:`.except_` :func:`.except_all` ..
_scalar_selects: Scalar Selects -------------- A scalar select is a SELECT that returns exactly one row and one column. It can then be used as a column expression. A scalar select is often a :term:`correlated subquery`, which relies upon the enclosing SELECT statement in order to acquire at least one of its FROM clauses. The :func:`.select` construct can be modified to act as a column expression by calling either the :meth:`~.SelectBase.as_scalar` or :meth:`~.SelectBase.label` method: .. sourcecode:: pycon+sql >>> stmt = select([func.count(addresses.c.id)]).\ ... where(users.c.id == addresses.c.user_id).\ ... as_scalar() The above construct is now a :class:`~.expression.ScalarSelect` object, and is no longer part of the :class:`~.expression.FromClause` hierarchy; it instead is within the :class:`~.expression.ColumnElement` family of expression constructs. We can place this construct the same as any other column within another :func:`.select`: .. sourcecode:: pycon+sql >>> conn.execute(select([users.c.name, stmt])).fetchall() {opensql}SELECT users.name, (SELECT count(addresses.id) AS count_1 FROM addresses WHERE users.id = addresses.user_id) AS anon_1 FROM users () {stop}[(u'jack', 2), (u'wendy', 2)] To apply a non-anonymous column name to our scalar select, we create it using :meth:`.SelectBase.label` instead: .. sourcecode:: pycon+sql >>> stmt = select([func.count(addresses.c.id)]).\ ... where(users.c.id == addresses.c.user_id).\ ... label("address_count") >>> conn.execute(select([users.c.name, stmt])).fetchall() {opensql}SELECT users.name, (SELECT count(addresses.id) AS count_1 FROM addresses WHERE users.id = addresses.user_id) AS address_count FROM users () {stop}[(u'jack', 2), (u'wendy', 2)] .. seealso:: :meth:`.Select.as_scalar` :meth:`.Select.label` .. _correlated_subqueries: Correlated Subqueries --------------------- Notice in the examples on :ref:`scalar_selects`, the FROM clause of each embedded select did not contain the ``users`` table in its FROM clause. This is because SQLAlchemy automatically :term:`correlates` embedded FROM objects to that of an enclosing query, if present, and if the inner SELECT statement would still have at least one FROM clause of its own. For example: .. sourcecode:: pycon+sql >>> stmt = select([addresses.c.user_id]).\ ... where(addresses.c.user_id == users.c.id).\ ... where(addresses.c.email_address == 'jack@yahoo.com') >>> enclosing_stmt = select([users.c.name]).where(users.c.id == stmt) >>> conn.execute(enclosing_stmt).fetchall() {opensql}SELECT users.name FROM users WHERE users.id = (SELECT addresses.user_id FROM addresses WHERE addresses.user_id = users.id AND addresses.email_address = ?) ('jack@yahoo.com',) {stop}[(u'jack',)] Auto-correlation will usually do what's expected, however it can also be controlled. For example, if we wanted a statement to correlate only to the ``addresses`` table but not the ``users`` table, even if both were present in the enclosing SELECT, we use the :meth:`~.Select.correlate` method to specify those FROM clauses that may be correlated: .. sourcecode:: pycon+sql >>> stmt = select([users.c.id]).\ ... where(users.c.id == addresses.c.user_id).\ ... where(users.c.name == 'jack').\ ... correlate(addresses) >>> enclosing_stmt = select( ... [users.c.name, addresses.c.email_address]).\ ... select_from(users.join(addresses)).\ ... 
where(users.c.id == stmt) >>> conn.execute(enclosing_stmt).fetchall() {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id WHERE users.id = (SELECT users.id FROM users WHERE users.id = addresses.user_id AND users.name = ?) ('jack',) {stop}[(u'jack', u'jack@yahoo.com'), (u'jack', u'jack@msn.com')] To entirely disable a statement from correlating, we can pass ``None`` as the argument: .. sourcecode:: pycon+sql >>> stmt = select([users.c.id]).\ ... where(users.c.name == 'wendy').\ ... correlate(None) >>> enclosing_stmt = select([users.c.name]).\ ... where(users.c.id == stmt) >>> conn.execute(enclosing_stmt).fetchall() {opensql}SELECT users.name FROM users WHERE users.id = (SELECT users.id FROM users WHERE users.name = ?) ('wendy',) {stop}[(u'wendy',)] We can also control correlation via exclusion, using the :meth:`.Select.correlate_except` method. Such as, we can write our SELECT for the ``users`` table by telling it to correlate all FROM clauses except for ``users``: .. sourcecode:: pycon+sql >>> stmt = select([users.c.id]).\ ... where(users.c.id == addresses.c.user_id).\ ... where(users.c.name == 'jack').\ ... correlate_except(users) >>> enclosing_stmt = select( ... [users.c.name, addresses.c.email_address]).\ ... select_from(users.join(addresses)).\ ... where(users.c.id == stmt) >>> conn.execute(enclosing_stmt).fetchall() {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id WHERE users.id = (SELECT users.id FROM users WHERE users.id = addresses.user_id AND users.name = ?) ('jack',) {stop}[(u'jack', u'jack@yahoo.com'), (u'jack', u'jack@msn.com')] Ordering, Grouping, Limiting, Offset...ing... --------------------------------------------- Ordering is done by passing column expressions to the :meth:`~.SelectBase.order_by` method: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name]).order_by(users.c.name) >>> conn.execute(stmt).fetchall() {opensql}SELECT users.name FROM users ORDER BY users.name () {stop}[(u'jack',), (u'wendy',)] Ascending or descending can be controlled using the :meth:`~.ColumnElement.asc` and :meth:`~.ColumnElement.desc` modifiers: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name]).order_by(users.c.name.desc()) >>> conn.execute(stmt).fetchall() {opensql}SELECT users.name FROM users ORDER BY users.name DESC () {stop}[(u'wendy',), (u'jack',)] Grouping refers to the GROUP BY clause, and is usually used in conjunction with aggregate functions to establish groups of rows to be aggregated. This is provided via the :meth:`~.SelectBase.group_by` method: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name, func.count(addresses.c.id)]).\ ... select_from(users.join(addresses)).\ ... group_by(users.c.name) >>> conn.execute(stmt).fetchall() {opensql}SELECT users.name, count(addresses.id) AS count_1 FROM users JOIN addresses ON users.id = addresses.user_id GROUP BY users.name () {stop}[(u'jack', 2), (u'wendy', 2)] HAVING can be used to filter results on an aggregate value, after GROUP BY has been applied. It's available here via the :meth:`~.Select.having` method: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name, func.count(addresses.c.id)]).\ ... select_from(users.join(addresses)).\ ... group_by(users.c.name).\ ... 
having(func.length(users.c.name) > 4) >>> conn.execute(stmt).fetchall() {opensql}SELECT users.name, count(addresses.id) AS count_1 FROM users JOIN addresses ON users.id = addresses.user_id GROUP BY users.name HAVING length(users.name) > ? (4,) {stop}[(u'wendy', 2)] A common system of dealing with duplicates in composed SELECT statements is the DISTINCT modifier. A simple DISTINCT clause can be added using the :meth:`.Select.distinct` method: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name]).\ ... where(addresses.c.email_address. ... contains(users.c.name)).\ ... distinct() >>> conn.execute(stmt).fetchall() {opensql}SELECT DISTINCT users.name FROM users, addresses WHERE (addresses.email_address LIKE '%%' || users.name || '%%') () {stop}[(u'jack',), (u'wendy',)] Most database backends support a system of limiting how many rows are returned, and the majority also feature a means of starting to return rows after a given "offset". While common backends like Postgresql, MySQL and SQLite support LIMIT and OFFSET keywords, other backends need to refer to more esoteric features such as "window functions" and row ids to achieve the same effect. The :meth:`~.Select.limit` and :meth:`~.Select.offset` methods provide an easy abstraction into the current backend's methodology: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name, addresses.c.email_address]).\ ... select_from(users.join(addresses)).\ ... limit(1).offset(1) >>> conn.execute(stmt).fetchall() {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id LIMIT ? OFFSET ? (1, 1) {stop}[(u'jack', u'jack@msn.com')] .. _inserts_and_updates: Inserts, Updates and Deletes ============================ We've seen :meth:`~.TableClause.insert` demonstrated earlier in this tutorial. Where :meth:`~.TableClause.insert` produces INSERT, the :meth:`~.TableClause.update` method produces UPDATE. Both of these constructs feature a method called :meth:`~.ValuesBase.values` which specifies the VALUES or SET clause of the statement. The :meth:`~.ValuesBase.values` method accommodates any column expression as a value: .. sourcecode:: pycon+sql >>> stmt = users.update().\ ... values(fullname="Fullname: " + users.c.name) >>> conn.execute(stmt) {opensql}UPDATE users SET fullname=(? || users.name) ('Fullname: ',) COMMIT {stop} When using :meth:`~.TableClause.insert` or :meth:`~.TableClause.update` in an "execute many" context, we may also want to specify named bound parameters which we can refer to in the argument list. The two constructs will automatically generate bound placeholders for any column names passed in the dictionaries sent to :meth:`~.Connection.execute` at execution time. However, if we wish to use explicitly targeted named parameters with composed expressions, we need to use the :func:`~.expression.bindparam` construct. When using :func:`~.expression.bindparam` with :meth:`~.TableClause.insert` or :meth:`~.TableClause.update`, the names of the table's columns themselves are reserved for the "automatic" generation of bind names. We can combine the usage of implicitly available bind names and explicitly named parameters as in the example below: .. sourcecode:: pycon+sql >>> stmt = users.insert().\ ... values(name=bindparam('_name') + " .. name") >>> conn.execute(stmt, [ ... {'id':4, '_name':'name1'}, ... {'id':5, '_name':'name2'}, ... {'id':6, '_name':'name3'}, ... ]) {opensql}INSERT INTO users (id, name) VALUES (?, (? || ?)) ((4, 'name1', ' .. name'), (5, 'name2', ' .. 
name'), (6, 'name3', ' .. name')) COMMIT An UPDATE statement is emitted using the :meth:`~.TableClause.update` construct. This works much like an INSERT, except there is an additional WHERE clause that can be specified: .. sourcecode:: pycon+sql >>> stmt = users.update().\ ... where(users.c.name == 'jack').\ ... values(name='ed') >>> conn.execute(stmt) {opensql}UPDATE users SET name=? WHERE users.name = ? ('ed', 'jack') COMMIT {stop} When using :meth:`~.TableClause.update` in an "executemany" context, we may wish to also use explicitly named bound parameters in the WHERE clause. Again, :func:`~.expression.bindparam` is the construct used to achieve this: .. sourcecode:: pycon+sql >>> stmt = users.update().\ ... where(users.c.name == bindparam('oldname')).\ ... values(name=bindparam('newname')) >>> conn.execute(stmt, [ ... {'oldname':'jack', 'newname':'ed'}, ... {'oldname':'wendy', 'newname':'mary'}, ... {'oldname':'jim', 'newname':'jake'}, ... ]) {opensql}UPDATE users SET name=? WHERE users.name = ? (('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim')) COMMIT {stop} Correlated Updates ------------------ A correlated update lets you update a table using a selection from another table, or the same table: .. sourcecode:: pycon+sql >>> stmt = select([addresses.c.email_address]).\ ... where(addresses.c.user_id == users.c.id).\ ... limit(1) >>> conn.execute(users.update().values(fullname=stmt)) {opensql}UPDATE users SET fullname=(SELECT addresses.email_address FROM addresses WHERE addresses.user_id = users.id LIMIT ? OFFSET ?) (1, 0) COMMIT {stop} .. _multi_table_updates: Multiple Table Updates ---------------------- .. versionadded:: 0.7.4 The Postgresql, Microsoft SQL Server, and MySQL backends all support UPDATE statements that refer to multiple tables. For PG and MSSQL, this is the "UPDATE FROM" syntax, which updates one table at a time, but can reference additional tables in an additional "FROM" clause that can then be referenced in the WHERE clause directly. On MySQL, multiple tables can be embedded into a single UPDATE statement separated by a comma. The SQLAlchemy :func:`.update` construct supports both of these modes implicitly, by specifying multiple tables in the WHERE clause:: stmt = users.update().\ values(name='ed wood').\ where(users.c.id == addresses.c.id).\ where(addresses.c.email_address.startswith('ed%')) conn.execute(stmt) The resulting SQL from the above statement would render as:: UPDATE users SET name=:name FROM addresses WHERE users.id = addresses.id AND addresses.email_address LIKE :email_address_1 || '%%' When using MySQL, columns from each table can be assigned to in the SET clause directly, using the dictionary form passed to :meth:`.Update.values`:: stmt = users.update().\ values({ users.c.name:'ed wood', addresses.c.email_address:'ed.wood@foo.com' }).\ where(users.c.id == addresses.c.id).\ where(addresses.c.email_address.startswith('ed%')) The tables are referenced explicitly in the SET clause:: UPDATE users, addresses SET addresses.email_address=%s, users.name=%s WHERE users.id = addresses.id AND addresses.email_address LIKE concat(%s, '%%') SQLAlchemy doesn't do anything special when these constructs are used on a non-supporting database. The ``UPDATE FROM`` syntax is generated by default when multiple tables are present, and the statement will be rejected by the database if this syntax is not supported.
.. _updates_order_parameters: Parameter-Ordered Updates -------------------------- The default behavior of the :func:`.update` construct when rendering the SET clauses is to render them using the column ordering given in the originating :class:`.Table` object. This is an important behavior, since it means that a particular UPDATE statement with particular columns will be rendered the same way each time, which has an impact on query caching systems that rely on the form of the statement, either client side or server side. Since the parameters themselves are passed to the :meth:`.Update.values` method as Python dictionary keys, there is no other fixed ordering available. However in some cases, the order of parameters rendered in the SET clause of an UPDATE statement can be significant. The main example of this is when using MySQL and providing updates to column values based on that of other column values. The end result of the following statement:: UPDATE some_table SET x = y + 10, y = 20 Will have a different result than:: UPDATE some_table SET y = 20, x = y + 10 This is because on MySQL, the individual SET clauses are fully evaluated on a per-value basis, as opposed to on a per-row basis, and as each SET clause is evaluated, the values embedded in the row are changing. To suit this specific use case, the :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` flag may be used. When using this flag, we supply a **Python list of 2-tuples** as the argument to the :meth:`.Update.values` method:: stmt = some_table.update(preserve_parameter_order=True).\ values([(some_table.c.y, 20), (some_table.c.x, some_table.c.y + 10)]) The list of 2-tuples is essentially the same structure as a Python dictionary except it is ordered. Using the above form, we are assured that the "y" column's SET clause will render first, then the "x" column's SET clause. .. versionadded:: 1.0.10 Added support for explicit ordering of UPDATE parameters using the :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` flag. .. _deletes: Deletes ------- Finally, a delete. This is accomplished easily enough using the :meth:`~.TableClause.delete` construct: .. sourcecode:: pycon+sql >>> conn.execute(addresses.delete()) {opensql}DELETE FROM addresses () COMMIT {stop} >>> conn.execute(users.delete().where(users.c.name > 'm')) {opensql}DELETE FROM users WHERE users.name > ? ('m',) COMMIT {stop} Matched Row Counts ------------------ Both :meth:`~.TableClause.update` and :meth:`~.TableClause.delete` are associated with *matched row counts*. This is a number indicating the number of rows that were matched by the WHERE clause. Note that by "matched", this includes rows where no UPDATE actually took place. The value is available as :attr:`~.ResultProxy.rowcount`: .. sourcecode:: pycon+sql >>> result = conn.execute(users.delete()) {opensql}DELETE FROM users () COMMIT {stop}>>> result.rowcount 1 Further Reference ================== Expression Language Reference: :ref:`expression_api_toplevel` Database Metadata Reference: :ref:`metadata_toplevel` Engine Reference: :doc:`/core/engines` Connection Reference: :ref:`connections_toplevel` Types Reference: :ref:`types_toplevel` SQLAlchemy-1.0.11/doc/build/core/event.rst0000664000175000017500000001371612636375552021317 0ustar classicclassic00000000000000.. _event_toplevel: Events ====== SQLAlchemy includes an event API which publishes a wide variety of hooks into the internals of both SQLAlchemy Core and ORM.
.. versionadded:: 0.7 The system supersedes the previous system of "extension", "proxy", and "listener" classes. Event Registration ------------------ Subscribing to an event occurs through a single API point, the :func:`.listen` function, or alternatively the :func:`.listens_for` decorator. These functions accept a target, a string identifier which identifies the event to be intercepted, and a user-defined listening function. Additional positional and keyword arguments to these two functions may be supported by specific types of events, which may specify alternate interfaces for the given event function, or provide instructions regarding secondary event targets based on the given target. The name of an event and the argument signature of a corresponding listener function are derived from a class-bound specification method, which exists bound to a marker class that's described in the documentation. For example, the documentation for :meth:`.PoolEvents.connect` indicates that the event name is ``"connect"`` and that a user-defined listener function should receive two positional arguments:: from sqlalchemy.event import listen from sqlalchemy.pool import Pool def my_on_connect(dbapi_con, connection_record): print("New DBAPI connection:", dbapi_con) listen(Pool, 'connect', my_on_connect) Listening with the :func:`.listens_for` decorator looks like:: from sqlalchemy.event import listens_for from sqlalchemy.pool import Pool @listens_for(Pool, "connect") def my_on_connect(dbapi_con, connection_record): print("New DBAPI connection:", dbapi_con) Named Argument Styles --------------------- There are some varieties of argument styles which can be accepted by listener functions. Taking the example of :meth:`.PoolEvents.connect`, this function is documented as receiving ``dbapi_connection`` and ``connection_record`` arguments. We can opt to receive these arguments by name, by establishing a listener function that accepts ``**keyword`` arguments and passing ``named=True`` to either :func:`.listen` or :func:`.listens_for`:: from sqlalchemy.event import listens_for from sqlalchemy.pool import Pool @listens_for(Pool, "connect", named=True) def my_on_connect(**kw): print("New DBAPI connection:", kw['dbapi_connection']) When using named argument passing, the names listed in the function argument specification will be used as keys in the dictionary. Named style passes all arguments by name regardless of the function signature, so specific arguments may be listed as well, in any order, as long as the names match up:: from sqlalchemy.event import listens_for from sqlalchemy.pool import Pool @listens_for(Pool, "connect", named=True) def my_on_connect(dbapi_connection, **kw): print("New DBAPI connection:", dbapi_connection) print("Connection record:", kw['connection_record']) Above, the presence of ``**kw`` tells :func:`.listens_for` that arguments should be passed to the function by name, rather than positionally. .. versionadded:: 0.9.0 Added optional ``named`` argument dispatch to event calling. Targets ------- The :func:`.listen` function is very flexible regarding targets. It generally accepts classes, instances of those classes, and related classes or objects from which the appropriate target can be derived.
For example, the above-mentioned ``"connect"`` event accepts :class:`.Engine` classes and objects as well as :class:`.Pool` classes and objects:: from sqlalchemy.event import listen from sqlalchemy.pool import Pool, QueuePool from sqlalchemy import create_engine from sqlalchemy.engine import Engine import psycopg2 def connect(): return psycopg2.connect(user='ed', host='127.0.0.1', dbname='test') my_pool = QueuePool(connect) my_engine = create_engine('postgresql://ed@localhost/test') # associate listener with all instances of Pool listen(Pool, 'connect', my_on_connect) # associate listener with all instances of Pool # via the Engine class listen(Engine, 'connect', my_on_connect) # associate listener with my_pool listen(my_pool, 'connect', my_on_connect) # associate listener with my_engine.pool listen(my_engine, 'connect', my_on_connect) Modifiers ---------- Some listeners allow modifiers to be passed to :func:`.listen`. These modifiers sometimes provide alternate calling signatures for listeners. For example, with ORM events, some event listeners can have a return value which modifies the subsequent handling. By default, no listener ever requires a return value, but by passing ``retval=True`` this value can be supported:: import re def validate_phone(target, value, oldvalue, initiator): """Strip non-numeric characters from a phone number""" return re.sub(r'[^0-9]', '', value) # setup listener on UserContact.phone attribute, instructing # it to use the return value listen(UserContact.phone, 'set', validate_phone, retval=True) Event Reference ---------------- Both SQLAlchemy Core and SQLAlchemy ORM feature a wide variety of event hooks: * **Core Events** - these are described in :ref:`core_event_toplevel` and include event hooks specific to connection pool lifecycle, SQL statement execution, transaction lifecycle, and schema creation and teardown. * **ORM Events** - these are described in :ref:`orm_event_toplevel`, and include event hooks specific to class and attribute instrumentation, object initialization hooks, attribute on-change hooks, session state, flush, and commit hooks, mapper initialization, object/result population, and per-instance persistence hooks. API Reference ------------- .. autofunction:: sqlalchemy.event.listen .. autofunction:: sqlalchemy.event.listens_for .. autofunction:: sqlalchemy.event.remove .. autofunction:: sqlalchemy.event.contains SQLAlchemy-1.0.11/doc/build/core/dml.rst0000664000175000017500000000127112636375552020743 0ustar classicclassic00000000000000Insert, Updates, Deletes ======================== INSERT, UPDATE and DELETE statements build on a hierarchy starting with :class:`.UpdateBase`. The :class:`.Insert` and :class:`.Update` constructs build on the intermediary :class:`.ValuesBase`. .. module:: sqlalchemy.sql.expression .. autofunction:: delete .. autofunction:: insert .. autofunction:: update .. autoclass:: Delete :members: :inherited-members: .. autoclass:: Insert :members: :inherited-members: .. autoclass:: Update :members: :inherited-members: .. autoclass:: sqlalchemy.sql.expression.UpdateBase :members: :inherited-members: .. autoclass:: sqlalchemy.sql.expression.ValuesBase :members: SQLAlchemy-1.0.11/doc/build/core/internals.rst0000664000175000017500000000145512636375552022172 0ustar classicclassic00000000000000.. _core_internal_toplevel: Core Internals ============== Some key internal constructs are listed here. .. currentmodule:: sqlalchemy .. autoclass:: sqlalchemy.engine.interfaces.Compiled :members: ..
autoclass:: sqlalchemy.sql.compiler.DDLCompiler :members: :inherited-members: .. autoclass:: sqlalchemy.engine.default.DefaultDialect :members: :inherited-members: .. autoclass:: sqlalchemy.engine.interfaces.Dialect :members: .. autoclass:: sqlalchemy.engine.default.DefaultExecutionContext :members: .. autoclass:: sqlalchemy.engine.interfaces.ExecutionContext :members: .. autoclass:: sqlalchemy.log.Identified :members: .. autoclass:: sqlalchemy.sql.compiler.IdentifierPreparer :members: .. autoclass:: sqlalchemy.sql.compiler.SQLCompiler :members: SQLAlchemy-1.0.11/doc/build/core/api_basics.rst0000664000175000017500000000023112636375552022257 0ustar classicclassic00000000000000================= Core API Basics ================= .. toctree:: :maxdepth: 2 event inspection interfaces exceptions internals SQLAlchemy-1.0.11/doc/build/core/pooling.rst0000664000175000017500000004733312636375552021647 0ustar classicclassic00000000000000.. _pooling_toplevel: Connection Pooling ================== .. module:: sqlalchemy.pool A connection pool is a standard technique used to maintain long running connections in memory for efficient re-use, as well as to provide management for the total number of connections an application might use simultaneously. Particularly for server-side web applications, a connection pool is the standard way to maintain a "pool" of active database connections in memory which are reused across requests. SQLAlchemy includes several connection pool implementations which integrate with the :class:`.Engine`. They can also be used directly for applications that want to add pooling to an otherwise plain DBAPI approach. Connection Pool Configuration ----------------------------- The :class:`~.engine.Engine` returned by the :func:`~sqlalchemy.create_engine` function in most cases has a :class:`.QueuePool` integrated, pre-configured with reasonable pooling defaults. If you're reading this section only to learn how to enable pooling - congratulations! You're already done. The most common :class:`.QueuePool` tuning parameters can be passed directly to :func:`~sqlalchemy.create_engine` as keyword arguments: ``pool_size``, ``max_overflow``, ``pool_recycle`` and ``pool_timeout``. For example:: engine = create_engine('postgresql://me@localhost/mydb', pool_size=20, max_overflow=0) In the case of SQLite, the :class:`.SingletonThreadPool` or :class:`.NullPool` are selected by the dialect to provide greater compatibility with SQLite's threading and locking model, as well as to provide a reasonable default behavior to SQLite "memory" databases, which maintain their entire dataset within the scope of a single connection. All SQLAlchemy pool implementations have in common that none of them "pre create" connections - all implementations wait until first use before creating a connection. At that point, if no additional concurrent checkout requests for more connections are made, no additional connections are created. This is why it's perfectly fine for :func:`.create_engine` to default to using a :class:`.QueuePool` of size five without regard to whether or not the application really needs five connections queued up - the pool would only grow to that size if the application actually used five connections concurrently, in which case the usage of a small pool is an entirely appropriate default behavior. .. _pool_switching: Switching Pool Implementations ------------------------------ The usual way to use a different kind of pool with :func:`.create_engine` is to use the ``poolclass`` argument. 
This argument accepts a class imported from the ``sqlalchemy.pool`` module, and handles the details of building the pool for you. Common options include specifying :class:`.QueuePool` with SQLite:: from sqlalchemy.pool import QueuePool engine = create_engine('sqlite:///file.db', poolclass=QueuePool) Disabling pooling using :class:`.NullPool`:: from sqlalchemy.pool import NullPool engine = create_engine( 'postgresql+psycopg2://scott:tiger@localhost/test', poolclass=NullPool) Using a Custom Connection Function ---------------------------------- All :class:`.Pool` classes accept an argument ``creator`` which is a callable that creates a new connection. :func:`.create_engine` accepts this function to pass onto the pool via an argument of the same name:: import sqlalchemy.pool as pool import psycopg2 def getconn(): c = psycopg2.connect(user='ed', host='127.0.0.1', dbname='test') # do things with 'c' to set up return c engine = create_engine('postgresql+psycopg2://', creator=getconn) For most "initialize on connection" routines, it's more convenient to use the :class:`.PoolEvents` event hooks, so that the usual URL argument to :func:`.create_engine` is still usable. ``creator`` is there as a last resort for when a DBAPI has some form of ``connect`` that is not at all supported by SQLAlchemy. Constructing a Pool ------------------------ To use a :class:`.Pool` by itself, the ``creator`` function is the only argument that's required and is passed first, followed by any additional options:: import sqlalchemy.pool as pool import psycopg2 def getconn(): c = psycopg2.connect(user='ed', host='127.0.0.1', dbname='test') return c mypool = pool.QueuePool(getconn, max_overflow=10, pool_size=5) DBAPI connections can then be procured from the pool using the :meth:`.Pool.connect` function. The return value of this method is a DBAPI connection that's contained within a transparent proxy:: # get a connection conn = mypool.connect() # use it cursor = conn.cursor() cursor.execute("select foo") The purpose of the transparent proxy is to intercept the ``close()`` call, such that instead of the DBAPI connection being closed, it is returned to the pool:: # "close" the connection. Returns # it to the pool. conn.close() The proxy also returns its contained DBAPI connection to the pool when it is garbage collected, though it's not deterministic in Python that this occurs immediately (though it is typical with CPython). The ``close()`` step also performs the important task of calling the ``rollback()`` method of the DBAPI connection. This is so that any existing transaction on the connection is removed, not only ensuring that no existing state remains on next usage, but also so that table and row locks are released as well as that any isolated data snapshots are removed. This behavior can be disabled using the ``reset_on_return`` option of :class:`.Pool`. A particular pre-created :class:`.Pool` can be shared with one or more engines by passing it to the ``pool`` argument of :func:`.create_engine`:: e = create_engine('postgresql://', pool=mypool) Pool Events ----------- Connection pools support an event interface that allows hooks to execute upon first connect, upon each new connection, and upon checkout and checkin of connections. See :class:`.PoolEvents` for details.
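As a brief illustration of this interface, the sketch below attaches a listener to the pool's ``"connect"`` event, which fires once for each newly created DBAPI connection. It is a minimal sketch, not a recipe from this documentation; it assumes a SQLite DBAPI connection, and the pragma shown is purely an illustrative setup step::

    from sqlalchemy import event
    from sqlalchemy.pool import Pool

    @event.listens_for(Pool, "connect")
    def set_sqlite_pragma(dbapi_connection, connection_record):
        # runs once for each new DBAPI connection; the pragma here
        # is an assumed, illustrative per-connection setup step
        cursor = dbapi_connection.cursor()
        cursor.execute("PRAGMA foreign_keys=ON")
        cursor.close()

Checkout and checkin hooks are registered in the same way, using the ``"checkout"`` and ``"checkin"`` event names.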
Dealing with Disconnects ------------------------ The connection pool has the ability to refresh individual connections as well as its entire set of connections, setting the previously pooled connections as "invalid". A common use case is to allow the connection pool to gracefully recover when the database server has been restarted, and all previously established connections are no longer functional. There are two approaches to this. Disconnect Handling - Optimistic ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The most common approach is to let SQLAlchemy handle disconnects as they occur, at which point the pool is refreshed. This assumes the :class:`.Pool` is used in conjunction with a :class:`.Engine`. The :class:`.Engine` has logic which can detect disconnection events and refresh the pool automatically. When the :class:`.Connection` attempts to use a DBAPI connection, and an exception is raised that corresponds to a "disconnect" event, the connection is invalidated. The :class:`.Connection` then calls the :meth:`.Pool.recreate` method, effectively invalidating all connections not currently checked out so that they are replaced with new ones upon next checkout:: from sqlalchemy import create_engine, exc e = create_engine(...) c = e.connect() try: # suppose the database has been restarted. c.execute("SELECT * FROM table") c.close() except exc.DBAPIError as err: # an exception is raised, Connection is invalidated. if err.connection_invalidated: print("Connection was invalidated!") # after the invalidate event, a new connection # starts with a new Pool c = e.connect() c.execute("SELECT * FROM table") The above example illustrates that no special intervention is needed; the pool continues normally after a disconnection event is detected. However, an exception is raised. In a typical web application using an ORM Session, the above condition would correspond to a single request failing with a 500 error, then the web application continuing normally beyond that. Hence the approach is "optimistic" in that frequent database restarts are not anticipated. .. _pool_setting_recycle: Setting Pool Recycle ~~~~~~~~~~~~~~~~~~~~~~~ An additional setting that can augment the "optimistic" approach is to set the pool recycle parameter. This parameter prevents the pool from using a particular connection that has passed a certain age, and is appropriate for database backends such as MySQL that automatically close connections that have been idle for a particular period of time:: from sqlalchemy import create_engine e = create_engine("mysql://scott:tiger@localhost/test", pool_recycle=3600) Above, any DBAPI connection that has been open for more than one hour will be invalidated and replaced, upon next checkout. Note that the invalidation **only** occurs during checkout - not on any connections that are held in a checked out state. ``pool_recycle`` is a function of the :class:`.Pool` itself, independent of whether or not an :class:`.Engine` is in use. .. _pool_disconnects_pessimistic: Disconnect Handling - Pessimistic ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ At the expense of some extra SQL emitted for each connection checked out from the pool, a "ping" operation established by a checkout event handler can detect an invalid connection before it is used. In modern SQLAlchemy, the best way to do this is to make use of the :meth:`.ConnectionEvents.engine_connect` event, assuming the use of a :class:`.Engine` and not just a raw :class:`.Pool` object:: from sqlalchemy import exc from sqlalchemy import event from sqlalchemy import select some_engine = create_engine(...)
@event.listens_for(some_engine, "engine_connect") def ping_connection(connection, branch): if branch: # "branch" refers to a sub-connection of a connection, # we don't want to bother pinging on these. return try: # run a SELECT 1. use a core select() so that # the SELECT of a scalar value without a table is # appropriately formatted for the backend connection.scalar(select([1])) except exc.DBAPIError as err: # catch SQLAlchemy's DBAPIError, which is a wrapper # for the DBAPI's exception. It includes a .connection_invalidated # attribute which specifies if this connection is a "disconnect" # condition, which is based on inspection of the original exception # by the dialect in use. if err.connection_invalidated: # run the same SELECT again - the connection will re-validate # itself and establish a new connection. The disconnect detection # here also causes the whole connection pool to be invalidated # so that all stale connections are discarded. connection.scalar(select([1])) else: raise The above recipe has the advantage that we are making use of SQLAlchemy's facilities for detecting those DBAPI exceptions that are known to indicate a "disconnect" situation, as well as the :class:`.Engine` object's ability to correctly invalidate the current connection pool when this condition occurs and allowing the current :class:`.Connection` to re-validate onto a new DBAPI connection. For the much less common case of where a :class:`.Pool` is being used without an :class:`.Engine`, an older approach may be used as below:: from sqlalchemy import exc from sqlalchemy import event from sqlalchemy.pool import Pool @event.listens_for(Pool, "checkout") def ping_connection(dbapi_connection, connection_record, connection_proxy): cursor = dbapi_connection.cursor() try: cursor.execute("SELECT 1") except: # raise DisconnectionError - pool will try # connecting again up to three times before raising. raise exc.DisconnectionError() cursor.close() Above, the :class:`.Pool` object specifically catches :class:`~sqlalchemy.exc.DisconnectionError` and attempts to create a new DBAPI connection, up to three times, before giving up and then raising :class:`~sqlalchemy.exc.InvalidRequestError`, failing the connection. The disadvantage of the above approach is that we don't have any easy way of determining if the exception raised is in fact a "disconnect" situation, since there is no :class:`.Engine` or :class:`.Dialect` in play, and also the above error would occur individually for all stale connections still in the pool. .. _pool_connection_invalidation: More on Invalidation ^^^^^^^^^^^^^^^^^^^^ The :class:`.Pool` provides "connection invalidation" services which allow both explicit invalidation of a connection as well as automatic invalidation in response to conditions that are determined to render a connection unusable. "Invalidation" means that a particular DBAPI connection is removed from the pool and discarded. The ``.close()`` method is called on this connection if it is not clear that the connection itself might not be closed, however if this method fails, the exception is logged but the operation still proceeds. When using a :class:`.Engine`, the :meth:`.Connection.invalidate` method is the usual entrypoint to explicit invalidation. Other conditions by which a DBAPI connection might be invalidated include: * a DBAPI exception such as :class:`.OperationalError`, raised when a method like ``connection.execute()`` is called, is detected as indicating a so-called "disconnect" condition. 
As the Python DBAPI provides no standard system for determining the nature of an exception, all SQLAlchemy dialects include a system called ``is_disconnect()`` which will examine the contents of an exception object, including the string message and any potential error codes included with it, in order to determine if this exception indicates that the connection is no longer usable. If this is the case, the :meth:`._ConnectionFairy.invalidate` method is called and the DBAPI connection is then discarded. * When the connection is returned to the pool, and a call to the ``connection.rollback()`` or ``connection.commit()`` method, as dictated by the pool's "reset on return" behavior, throws an exception. A final attempt at calling ``.close()`` on the connection will be made, and it is then discarded. * When a listener implementing :meth:`.PoolEvents.checkout` raises the :class:`~sqlalchemy.exc.DisconnectionError` exception, indicating that the connection won't be usable and a new connection attempt needs to be made. All invalidations which occur will invoke the :meth:`.PoolEvents.invalidate` event.
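As a minimal sketch of the explicit form mentioned earlier, the :meth:`.Connection.invalidate` method may be called directly when application logic determines that a connection should not be reused; the engine URL below is an assumed placeholder::

    from sqlalchemy import create_engine

    engine = create_engine("postgresql://scott:tiger@localhost/test")

    connection = engine.connect()

    # mark the underlying DBAPI connection as unusable; it will be
    # discarded rather than returned to the pool when the
    # Connection is closed
    connection.invalidate()
    connection.close()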
Using Connection Pools with Multiprocessing ------------------------------------------- It's critical that when using a connection pool, and by extension when using an :class:`.Engine` created via :func:`.create_engine`, the pooled connections **are not shared with a forked process**. TCP connections are represented as file descriptors, which usually work across process boundaries, meaning this will cause concurrent access to the file descriptor on behalf of two or more entirely independent Python interpreter states. There are two approaches to dealing with this. The first is to either create a new :class:`.Engine` within the child process, or, upon an existing :class:`.Engine`, call :meth:`.Engine.dispose` before the child process uses any connections. This will remove all existing connections from the pool so that it makes all new ones. Below is a simple version using ``multiprocessing.Process``, but this idea should be adapted to the style of forking in use:: from multiprocessing import Process eng = create_engine("...") def run_in_process(): eng.dispose() with eng.connect() as conn: conn.execute("...") p = Process(target=run_in_process) p.start() The next approach is to instrument the :class:`.Pool` itself with events so that connections are automatically invalidated in the subprocess. This is a little more magical but probably more foolproof:: from sqlalchemy import event from sqlalchemy import exc import os eng = create_engine("...") @event.listens_for(eng, "connect") def connect(dbapi_connection, connection_record): connection_record.info['pid'] = os.getpid() @event.listens_for(eng, "checkout") def checkout(dbapi_connection, connection_record, connection_proxy): pid = os.getpid() if connection_record.info['pid'] != pid: connection_record.connection = connection_proxy.connection = None raise exc.DisconnectionError( "Connection record belongs to pid %s, " "attempting to check out in pid %s" % (connection_record.info['pid'], pid) ) Above, we use an approach similar to that described in :ref:`pool_disconnects_pessimistic` to treat a DBAPI connection that originated in a different parent process as an "invalid" connection, coercing the pool to recycle the connection record to make a new connection. API Documentation - Available Pool Implementations --------------------------------------------------- .. autoclass:: sqlalchemy.pool.Pool .. automethod:: __init__ .. automethod:: connect .. automethod:: dispose .. automethod:: recreate .. automethod:: unique_connection .. autoclass:: sqlalchemy.pool.QueuePool .. automethod:: __init__ .. automethod:: connect .. automethod:: unique_connection .. autoclass:: SingletonThreadPool .. automethod:: __init__ .. autoclass:: AssertionPool .. autoclass:: NullPool .. autoclass:: StaticPool .. autoclass:: _ConnectionFairy :members: .. autoattribute:: _connection_record .. autoclass:: _ConnectionRecord :members: Pooling Plain DB-API Connections -------------------------------- Any :pep:`249` DB-API module can be "proxied" through the connection pool transparently. Usage of the DB-API is exactly as before, except the ``connect()`` method will consult the pool. Below we illustrate this with ``psycopg2``:: import sqlalchemy.pool as pool import psycopg2 as psycopg psycopg = pool.manage(psycopg) # then connect normally connection = psycopg.connect(database='test', user='scott', password='tiger') This produces a :class:`_DBProxy` object which supports the same ``connect()`` function as the original DB-API module. Upon connection, a connection proxy object is returned, which delegates its calls to a real DB-API connection object. This connection object is stored persistently within a connection pool (an instance of :class:`.Pool`) that corresponds to the exact connection arguments sent to the ``connect()`` function. The connection proxy supports all of the methods on the original connection object, most of which are proxied via ``__getattr__()``. The ``close()`` method will return the connection to the pool, and the ``cursor()`` method will return a proxied cursor object. Both the connection proxy and the cursor proxy will also return the underlying connection to the pool after they have both been garbage collected, which is detected via weakref callbacks (``__del__`` is not used). Additionally, when connections are returned to the pool, a ``rollback()`` is issued on the connection unconditionally. This is to release any locks still held by the connection that may have resulted from normal activity. By default, the ``connect()`` method will return the same connection that is already checked out in the current thread. This allows a particular connection to be used in a given thread without needing to pass it around between functions. To disable this behavior, specify ``use_threadlocal=False`` to the ``manage()`` function. .. autofunction:: sqlalchemy.pool.manage .. autofunction:: sqlalchemy.pool.clear_managers SQLAlchemy-1.0.11/doc/build/core/connections.rst0000664000175000017500000007107112636375552022516 0ustar classicclassic00000000000000.. _connections_toplevel: ===================================== Working with Engines and Connections ===================================== .. module:: sqlalchemy.engine This section details direct usage of the :class:`.Engine`, :class:`.Connection`, and related objects. It's important to note that when using the SQLAlchemy ORM, these objects are not generally accessed; instead, the :class:`.Session` object is used as the interface to the database. However, for applications that are built around direct usage of textual SQL statements and/or SQL expression constructs without involvement by the ORM's higher level management services, the :class:`.Engine` and :class:`.Connection` are king (and queen?) - read on.
Basic Usage =========== Recall from :doc:`/core/engines` that an :class:`.Engine` is created via the :func:`.create_engine` call:: engine = create_engine('mysql://scott:tiger@localhost/test') The typical usage of :func:`.create_engine()` is once per particular database URL, held globally for the lifetime of a single application process. A single :class:`.Engine` manages many individual DBAPI connections on behalf of the process and is intended to be called upon in a concurrent fashion. The :class:`.Engine` is **not** synonymous with the DBAPI ``connect`` function, which represents just one connection resource - the :class:`.Engine` is most efficient when created just once at the module level of an application, not per-object or per-function call. For a multiple-process application that uses the ``os.fork`` system call, or for example the Python ``multiprocessing`` module, it's usually required that a separate :class:`.Engine` be used for each child process. This is because the :class:`.Engine` maintains a reference to a connection pool that ultimately references DBAPI connections - these tend to not be portable across process boundaries. An :class:`.Engine` that is configured not to use pooling (which is achieved via the usage of :class:`.NullPool`) does not have this requirement. The engine can be used directly to issue SQL to the database. The most generic way is to first procure a connection resource, which you get via the :meth:`.Engine.connect` method:: connection = engine.connect() result = connection.execute("select username from users") for row in result: print("username:", row['username']) connection.close() The connection is an instance of :class:`.Connection`, which is a **proxy** object for an actual DBAPI connection. The DBAPI connection is retrieved from the connection pool at the point at which :class:`.Connection` is created. The returned result is an instance of :class:`.ResultProxy`, which references a DBAPI cursor and provides a largely compatible interface with that of the DBAPI cursor. The DBAPI cursor will be closed by the :class:`.ResultProxy` when all of its result rows (if any) are exhausted. A :class:`.ResultProxy` that returns no rows, such as that of an UPDATE statement (without any returned rows), releases cursor resources immediately upon construction. When the :meth:`~.Connection.close` method is called, the referenced DBAPI connection is :term:`released` to the connection pool. From the perspective of the database itself, nothing is actually "closed", assuming pooling is in use. The pooling mechanism issues a ``rollback()`` call on the DBAPI connection so that any transactional state or locks are removed, and the connection is ready for its next usage. The above procedure can be performed in a shorthand way by using the :meth:`~.Engine.execute` method of :class:`.Engine` itself:: result = engine.execute("select username from users") for row in result: print("username:", row['username']) Where above, the :meth:`~.Engine.execute` method acquires a new :class:`.Connection` on its own, executes the statement with that object, and returns the :class:`.ResultProxy`. In this case, the :class:`.ResultProxy` contains a special flag known as ``close_with_result``, which indicates that when its underlying DBAPI cursor is closed, the :class:`.Connection` object itself is also closed, which again returns the DBAPI connection to the connection pool, releasing transactional resources.
If the :class:`.ResultProxy` potentially has rows remaining, it can be instructed to close out its resources explicitly:: result.close() If the :class:`.ResultProxy` has pending rows remaining and is dereferenced by the application without being closed, Python garbage collection will ultimately close out the cursor as well as trigger a return of the pooled DBAPI connection resource to the pool (SQLAlchemy achieves this by the usage of weakref callbacks - *never* the ``__del__`` method) - however it's never a good idea to rely upon Python garbage collection to manage resources. Our example above illustrated the execution of a textual SQL string. The :meth:`~.Connection.execute` method can of course accommodate more than that, including the variety of SQL expression constructs described in :ref:`sqlexpression_toplevel`. Using Transactions ================== .. note:: This section describes how to use transactions when working directly with :class:`.Engine` and :class:`.Connection` objects. When using the SQLAlchemy ORM, the public API for transaction control is via the :class:`.Session` object, which makes usage of the :class:`.Transaction` object internally. See :ref:`unitofwork_transaction` for further information. The :class:`~sqlalchemy.engine.Connection` object provides a :meth:`~.Connection.begin` method which returns a :class:`.Transaction` object. This object is usually used within a try/except clause so that it is guaranteed to invoke :meth:`.Transaction.rollback` or :meth:`.Transaction.commit`:: connection = engine.connect() trans = connection.begin() try: r1 = connection.execute(table1.select()) connection.execute(table1.insert(), col1=7, col2='this is some data') trans.commit() except: trans.rollback() raise The above block can be created more succinctly using context managers, either given an :class:`.Engine`:: # runs a transaction with engine.begin() as connection: r1 = connection.execute(table1.select()) connection.execute(table1.insert(), col1=7, col2='this is some data') Or from the :class:`.Connection`, in which case the :class:`.Transaction` object is available as well:: with connection.begin() as trans: r1 = connection.execute(table1.select()) connection.execute(table1.insert(), col1=7, col2='this is some data') .. _connections_nested_transactions: Nesting of Transaction Blocks ------------------------------ The :class:`.Transaction` object also handles "nested" behavior by keeping track of the outermost begin/commit pair. In this example, two functions both issue a transaction on a :class:`.Connection`, but only the outermost :class:`.Transaction` object actually takes effect when it is committed. .. 
sourcecode:: python+sql # method_a starts a transaction and calls method_b def method_a(connection): trans = connection.begin() # open a transaction try: method_b(connection) trans.commit() # transaction is committed here except: trans.rollback() # this rolls back the transaction unconditionally raise # method_b also starts a transaction def method_b(connection): trans = connection.begin() # open a transaction - this runs in the context of method_a's transaction try: connection.execute("insert into mytable values ('bat', 'lala')") connection.execute(mytable.insert(), col1='bat', col2='lala') trans.commit() # transaction is not committed yet except: trans.rollback() # this rolls back the transaction unconditionally raise # open a Connection and call method_a conn = engine.connect() method_a(conn) conn.close() Above, ``method_a`` is called first, which calls ``connection.begin()``. Then it calls ``method_b``. When ``method_b`` calls ``connection.begin()``, it just increments a counter that is decremented when it calls ``commit()``. If either ``method_a`` or ``method_b`` calls ``rollback()``, the whole transaction is rolled back. The transaction is not committed until ``method_a`` calls the ``commit()`` method. This "nesting" behavior allows the creation of functions which "guarantee" that a transaction will be used if one was not already available, but will automatically participate in an enclosing transaction if one exists. .. index:: single: thread safety; transactions .. _autocommit: Understanding Autocommit ======================== The previous transaction example illustrates how to use :class:`.Transaction` so that several executions can take part in the same transaction. What happens when we issue an INSERT, UPDATE or DELETE call without using :class:`.Transaction`? While some DBAPI implementations provide various special "non-transactional" modes, the core behavior of DBAPI per PEP-0249 is that a *transaction is always in progress*, providing only ``rollback()`` and ``commit()`` methods but no ``begin()``. SQLAlchemy assumes this is the case for any given DBAPI. Given this requirement, SQLAlchemy implements its own "autocommit" feature which works completely consistently across all backends. This is achieved by detecting statements which represent data-changing operations, i.e. INSERT, UPDATE, DELETE, as well as data definition language (DDL) statements such as CREATE TABLE, ALTER TABLE, and then issuing a COMMIT automatically if no transaction is in progress. The detection is based on the presence of the ``autocommit=True`` execution option on the statement. If the statement is a text-only statement and the flag is not set, a regular expression is used to detect INSERT, UPDATE, DELETE, as well as a variety of other commands for a particular backend:: conn = engine.connect() conn.execute("INSERT INTO users VALUES (1, 'john')") # autocommits The "autocommit" feature is only in effect when no :class:`.Transaction` has otherwise been declared. This means the feature is not generally used with the ORM, as the :class:`.Session` object by default always maintains an ongoing :class:`.Transaction`. Full control of the "autocommit" behavior is available using the generative :meth:`.Connection.execution_options` method provided on :class:`.Connection`, :class:`.Engine`, :class:`.Executable`, using the "autocommit" flag which will turn on or off the autocommit for the selected scope. 
For example, a :func:`.text` construct representing a stored procedure that commits might use it so that a SELECT statement will issue a COMMIT:: engine.execute(text("SELECT my_mutating_procedure()").execution_options(autocommit=True)) .. _dbengine_implicit: Connectionless Execution, Implicit Execution ============================================= Recall from the first section we mentioned executing with and without explicit usage of :class:`.Connection`. "Connectionless" execution refers to the usage of the ``execute()`` method on an object which is not a :class:`.Connection`. This was illustrated using the :meth:`~.Engine.execute` method of :class:`.Engine`:: result = engine.execute("select username from users") for row in result: print("username:", row['username']) In addition to "connectionless" execution, it is also possible to use the :meth:`~.Executable.execute` method of any :class:`.Executable` construct, which is a marker for SQL expression objects that support execution. The SQL expression object itself references an :class:`.Engine` or :class:`.Connection` known as the **bind**, which it uses in order to provide so-called "implicit" execution services. Given a table as below:: from sqlalchemy import MetaData, Table, Column, Integer, String meta = MetaData() users_table = Table('users', meta, Column('id', Integer, primary_key=True), Column('name', String(50)) ) Explicit execution delivers the SQL text or constructed SQL expression to the :meth:`~.Connection.execute` method of :class:`~sqlalchemy.engine.Connection`: .. sourcecode:: python+sql engine = create_engine('sqlite:///file.db') connection = engine.connect() result = connection.execute(users_table.select()) for row in result: # .... connection.close() Explicit, connectionless execution delivers the expression to the :meth:`~.Engine.execute` method of :class:`~sqlalchemy.engine.Engine`: .. sourcecode:: python+sql engine = create_engine('sqlite:///file.db') result = engine.execute(users_table.select()) for row in result: # .... result.close() Implicit execution is also connectionless, and makes usage of the :meth:`~.Executable.execute` method on the expression itself. This method is provided as part of the :class:`.Executable` class, which refers to a SQL statement that is sufficient for being invoked against the database. The method makes usage of the assumption that either an :class:`~sqlalchemy.engine.Engine` or :class:`~sqlalchemy.engine.Connection` has been **bound** to the expression object. By "bound" we mean that the special attribute :attr:`.MetaData.bind` has been used to associate a series of :class:`.Table` objects and all SQL constructs derived from them with a specific engine:: engine = create_engine('sqlite:///file.db') meta.bind = engine result = users_table.select().execute() for row in result: # .... result.close() Above, we associate an :class:`.Engine` with a :class:`.MetaData` object using the special attribute :attr:`.MetaData.bind`. The :func:`.select` construct produced from the :class:`.Table` object has a method :meth:`~.Executable.execute`, which will search for an :class:`.Engine` that's "bound" to the :class:`.Table`. Overall, the usage of "bound metadata" has three general effects: * SQL statement objects gain an :meth:`.Executable.execute` method which automatically locates a "bind" with which to execute themselves.
* The ORM :class:`.Session` object supports using "bound metadata" in order to establish which :class:`.Engine` should be used to invoke SQL statements on behalf of a particular mapped class, though the :class:`.Session` also features its own explicit system of establishing complex :class:`.Engine`/ mapped class configurations. * The :meth:`.MetaData.create_all`, :meth:`.MetaData.drop_all`, :meth:`.Table.create`, :meth:`.Table.drop`, and "autoload" features all make usage of the bound :class:`.Engine` automatically without the need to pass it explicitly. .. note:: The concepts of "bound metadata" and "implicit execution" are not emphasized in modern SQLAlchemy. While they offer some convenience, they are no longer required by any API and are never necessary. In applications where multiple :class:`.Engine` objects are present, each one logically associated with a certain set of tables (i.e. *vertical sharding*), the "bound metadata" technique can be used so that individual :class:`.Table` can refer to the appropriate :class:`.Engine` automatically; in particular this is supported within the ORM via the :class:`.Session` object as a means to associate :class:`.Table` objects with an appropriate :class:`.Engine`, as an alternative to using the bind arguments accepted directly by the :class:`.Session`. However, the "implicit execution" technique is not at all appropriate for use with the ORM, as it bypasses the transactional context maintained by the :class:`.Session`. Overall, in the *vast majority* of cases, "bound metadata" and "implicit execution" are **not useful**. While "bound metadata" has a marginal level of usefulness with regards to ORM configuration, "implicit execution" is a very old usage pattern that in most cases is more confusing than it is helpful, and its usage is discouraged. Both patterns seem to encourage the overuse of expedient "short cuts" in application design which lead to problems later on. Modern SQLAlchemy usage, especially the ORM, places a heavy stress on working within the context of a transaction at all times; the "implicit execution" concept makes the job of associating statement execution with a particular transaction much more difficult. The :meth:`.Executable.execute` method on a particular SQL statement usually implies that the execution is not part of any particular transaction, which is usually not the desired effect. In both "connectionless" examples, the :class:`~sqlalchemy.engine.Connection` is created behind the scenes; the :class:`~sqlalchemy.engine.ResultProxy` returned by the ``execute()`` call references the :class:`~sqlalchemy.engine.Connection` used to issue the SQL statement. When the :class:`.ResultProxy` is closed, the underlying :class:`.Connection` is closed for us, resulting in the DBAPI connection being returned to the pool with transactional resources removed. .. _engine_disposal: Engine Disposal =============== The :class:`.Engine` refers to a connection pool, which means under normal circumstances, there are open database connections present while the :class:`.Engine` object is still resident in memory. When an :class:`.Engine` is garbage collected, its connection pool is no longer referred to by that :class:`.Engine`, and assuming none of its connections are still checked out, the pool and its connections will also be garbage collected, which has the effect of closing out the actual database connections as well. 
Otherwise, the :class:`.Engine` will hold onto open database connections, assuming it uses the default pool implementation of :class:`.QueuePool`. The :class:`.Engine` is intended to normally be a permanent fixture established up-front and maintained throughout the lifespan of an application. It is **not** intended to be created and disposed on a per-connection basis; it is instead a registry that maintains both a pool of connections as well as configurational information about the database and DBAPI in use, as well as some degree of internal caching of per-database resources. However, there are many cases where it is desirable that all connection resources referred to by the :class:`.Engine` be completely closed out. It's generally not a good idea to rely on Python garbage collection to accomplish this; instead, the :class:`.Engine` can be explicitly disposed using the :meth:`.Engine.dispose` method. This disposes of the engine's underlying connection pool and replaces it with a new one that's empty. Provided that the :class:`.Engine` is discarded at this point and no longer used, all **checked-in** connections which it refers to will also be fully closed. Valid use cases for calling :meth:`.Engine.dispose` include: * When a program wants to release any remaining checked-in connections held by the connection pool and expects to no longer be connected to that database at all for any future operations. * When a program uses multiprocessing or ``fork()``, and an :class:`.Engine` object is copied to the child process, :meth:`.Engine.dispose` should be called so that the engine creates brand new database connections local to that fork. Database connections generally do **not** travel across process boundaries. * Within test suites or multitenancy scenarios where many ad-hoc, short-lived :class:`.Engine` objects may be created and disposed. Connections that are **checked out** are **not** discarded when the engine is disposed or garbage collected, as these connections are still strongly referenced elsewhere by the application. However, after :meth:`.Engine.dispose` is called, those connections are no longer associated with that :class:`.Engine`; when they are closed, they will be returned to their now-orphaned connection pool which will ultimately be garbage collected, once all connections which refer to it are also no longer referenced anywhere. Since this process is not easy to control, it is strongly recommended that :meth:`.Engine.dispose` be called only after all checked out connections are checked in or otherwise de-associated from their pool. An alternative for applications that are negatively impacted by the :class:`.Engine` object's use of connection pooling is to disable pooling entirely. This typically incurs only a modest performance impact upon the use of new connections, and means that when a connection is checked in, it is entirely closed out and is not held in memory. See :ref:`pool_switching` for guidelines on how to disable pooling. .. _threadlocal_strategy: Using the Threadlocal Execution Strategy ======================================== The "threadlocal" engine strategy is an optional feature which can be used by non-ORM applications to associate transactions with the current thread, such that all parts of the application can participate in that transaction implicitly without the need to explicitly reference a :class:`.Connection`. .. note:: The "threadlocal" feature is generally discouraged.
It's designed for a particular pattern of usage which is generally considered to be a legacy pattern. It has **no impact** on the "thread safety" of SQLAlchemy components or one's application. It also should not be used when using an ORM :class:`~sqlalchemy.orm.session.Session` object, as the :class:`~sqlalchemy.orm.session.Session` itself represents an ongoing transaction and itself handles the job of maintaining connection and transactional resources. Enabling ``threadlocal`` is achieved as follows:: db = create_engine('mysql://localhost/test', strategy='threadlocal') The above :class:`.Engine` will now acquire a :class:`.Connection` using connection resources derived from a thread-local variable whenever :meth:`.Engine.execute` or :meth:`.Engine.contextual_connect` is called. This connection resource is maintained as long as it is referenced, which allows multiple points of an application to share a transaction while using connectionless execution:: def call_operation1(): db.execute("insert into users values (?, ?)", 1, "john") def call_operation2(): users.update(users.c.user_id == 5).execute(name='ed') db.begin() try: call_operation1() call_operation2() db.commit() except: db.rollback() Explicit execution can be mixed with connectionless execution by using the :meth:`.Engine.connect` method to acquire a :class:`.Connection` that is not part of the threadlocal scope:: db.begin() conn = db.connect() try: conn.execute(log_table.insert(), message="Operation started") call_operation1() call_operation2() db.commit() conn.execute(log_table.insert(), message="Operation succeeded") except: db.rollback() conn.execute(log_table.insert(), message="Operation failed") finally: conn.close() To access the :class:`.Connection` that is bound to the threadlocal scope, call :meth:`.Engine.contextual_connect`:: conn = db.contextual_connect() call_operation3(conn) conn.close() Calling :meth:`~.Connection.close` on the "contextual" connection does not :term:`release` its resources until all other usages of that resource are closed as well, and until any ongoing transactions have been rolled back or committed. .. _dbapi_connections: Working with Raw DBAPI Connections ================================== There are some cases where SQLAlchemy does not provide a genericized way of accessing some :term:`DBAPI` functions, such as calling stored procedures as well as dealing with multiple result sets. In these cases, it's just as expedient to deal with the raw DBAPI connection directly. The most common way to access the raw DBAPI connection is to get it from an already present :class:`.Connection` object directly. It is accessible via the :attr:`.Connection.connection` attribute:: connection = engine.connect() dbapi_conn = connection.connection The DBAPI connection here is actually "proxied" in terms of the originating connection pool, however this is an implementation detail that in most cases can be ignored. As this DBAPI connection is still contained within the scope of an owning :class:`.Connection` object, it is best to make use of the :class:`.Connection` object for most features such as transaction control as well as calling the :meth:`.Connection.close` method; if these operations are performed on the DBAPI connection directly, the owning :class:`.Connection` will not be aware of these changes in state.
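A brief sketch of that division of labor follows - assuming the ``engine`` and "users" table from the earlier examples, with the SQL itself being illustrative only - where cursor-level work is done on the raw DBAPI connection while transaction control and cleanup remain on the owning :class:`.Connection`::

    connection = engine.connect()
    trans = connection.begin()

    # drop down to the raw DBAPI connection for cursor-level work
    dbapi_conn = connection.connection
    cursor = dbapi_conn.cursor()
    cursor.execute("select count(*) from users")
    row_count = cursor.fetchone()[0]
    cursor.close()

    # transaction control and cleanup stay on the owning Connection
    trans.commit()
    connection.close()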
To overcome the limitations imposed by the DBAPI connection that is maintained by an owning :class:`.Connection`, a DBAPI connection is also available without the need to procure a :class:`.Connection` first, using the :meth:`.Engine.raw_connection` method of :class:`.Engine`:: dbapi_conn = engine.raw_connection() This DBAPI connection is again a "proxied" form as was the case before. The purpose of this proxying is now apparent, as when we call the ``.close()`` method of this connection, the DBAPI connection is typically not actually closed, but instead :term:`released` back to the engine's connection pool:: dbapi_conn.close() While SQLAlchemy may in the future add built-in patterns for more DBAPI use cases, there are diminishing returns, as these cases tend to be rarely needed and vary highly depending on the type of DBAPI in use; in any case, the direct DBAPI calling pattern is always available for those cases where it is needed. Some recipes for DBAPI connection use follow. .. _stored_procedures: Calling Stored Procedures ------------------------- For stored procedures with special syntactical or parameter concerns, the DBAPI-level ``callproc()`` method, part of the PEP 249 DBAPI specification, may be used:: connection = engine.raw_connection() try: cursor = connection.cursor() cursor.callproc("my_procedure", ['x', 'y', 'z']) results = list(cursor.fetchall()) cursor.close() connection.commit() finally: connection.close() Multiple Result Sets -------------------- Multiple result set support is available from a raw DBAPI cursor using the ``nextset()`` method, also part of the PEP 249 DBAPI specification:: connection = engine.raw_connection() try: cursor = connection.cursor() cursor.execute("select * from table1; select * from table2") results_one = cursor.fetchall() cursor.nextset() results_two = cursor.fetchall() cursor.close() finally: connection.close() Registering New Dialects ======================== The :func:`.create_engine` function call locates the given dialect using setuptools entry points. These entry points can be established for third party dialects within the setup.py script. For example, to create a new dialect "foodialect://", the steps are as follows: 1. Create a package called ``foodialect``. 2. The package should have a module containing the dialect class, which is typically a subclass of :class:`sqlalchemy.engine.default.DefaultDialect`. In this example let's say it's called ``FooDialect`` and its module is accessed via ``foodialect.dialect``. 3. The entry point can be established in setup.py as follows:: entry_points=""" [sqlalchemy.dialects] foodialect = foodialect.dialect:FooDialect """ If the dialect is providing support for a particular DBAPI on top of an existing SQLAlchemy-supported database, the name can be given including a database qualification. For example, if ``FooDialect`` were in fact a MySQL dialect, the entry point could be established like this:: entry_points=""" [sqlalchemy.dialects] mysql.foodialect = foodialect.dialect:FooDialect """ The above entrypoint would then be accessed as ``create_engine("mysql+foodialect://")``. Registering Dialects In-Process ------------------------------- SQLAlchemy also allows a dialect to be registered within the current process, bypassing the need for separate installation. Use the ``register()`` function as follows:: from sqlalchemy.dialects import registry registry.register("mysql.foodialect", "myapp.dialect", "MyMySQLDialect") The above will respond to ``create_engine("mysql+foodialect://")`` and load the ``MyMySQLDialect`` class from the ``myapp.dialect`` module. ..
versionadded:: 0.8 Connection / Engine API ======================= .. autoclass:: Connection :members: .. autoclass:: Connectable :members: .. autoclass:: Engine :members: .. autoclass:: ExceptionContext :members: .. autoclass:: NestedTransaction :members: .. autoclass:: ResultProxy :members: :private-members: _soft_close .. autoclass:: RowProxy :members: .. autoclass:: Transaction :members: .. autoclass:: TwoPhaseTransaction :members: SQLAlchemy-1.0.11/doc/build/core/compiler.rst0000664000175000017500000000027612636375552022005 0ustar classicclassic00000000000000.. _sqlalchemy.ext.compiler_toplevel: Custom SQL Constructs and Compilation Extension =============================================== .. automodule:: sqlalchemy.ext.compiler :members: SQLAlchemy-1.0.11/doc/build/core/events.rst0000664000175000017500000000142012636375552021467 0ustar classicclassic00000000000000.. _core_event_toplevel: Core Events ============ This section describes the event interfaces provided in SQLAlchemy Core. For an introduction to the event listening API, see :ref:`event_toplevel`. ORM events are described in :ref:`orm_event_toplevel`. .. autoclass:: sqlalchemy.event.base.Events :members: Connection Pool Events ----------------------- .. autoclass:: sqlalchemy.events.PoolEvents :members: SQL Execution and Connection Events ------------------------------------ .. autoclass:: sqlalchemy.events.ConnectionEvents :members: .. autoclass:: sqlalchemy.events.DialectEvents :members: Schema Events ----------------------- .. autoclass:: sqlalchemy.events.DDLEvents :members: .. autoclass:: sqlalchemy.events.SchemaEventTarget :members: SQLAlchemy-1.0.11/doc/build/core/sqlelement.rst0000664000175000017500000000463512636375552022347 0ustar classicclassic00000000000000Column Elements and Expressions =============================== .. module:: sqlalchemy.sql.expression The most fundamental part of the SQL expression API are the "column elements", which allow for basic SQL expression support. The core of all SQL expression constructs is the :class:`.ClauseElement`, which is the base for several sub-branches. The :class:`.ColumnElement` class is the fundamental unit used to construct any kind of typed SQL expression. .. autofunction:: and_ .. autofunction:: asc .. autofunction:: between .. autofunction:: bindparam .. autofunction:: case .. autofunction:: cast .. autofunction:: sqlalchemy.sql.expression.column .. autofunction:: collate .. autofunction:: desc .. autofunction:: distinct .. autofunction:: extract .. autofunction:: false .. autodata:: func .. autofunction:: funcfilter .. autofunction:: label .. autofunction:: literal .. autofunction:: literal_column .. autofunction:: not_ .. autofunction:: null .. autofunction:: nullsfirst .. autofunction:: nullslast .. autofunction:: or_ .. autofunction:: outparam .. autofunction:: over .. autofunction:: text .. autofunction:: true .. autofunction:: tuple_ .. autofunction:: type_coerce .. autoclass:: BinaryExpression :members: .. autoclass:: BindParameter :members: .. autoclass:: Case :members: .. autoclass:: Cast :members: .. autoclass:: ClauseElement :members: .. autoclass:: ClauseList :members: .. autoclass:: ColumnClause :members: .. autoclass:: ColumnCollection :members: .. autoclass:: ColumnElement :members: :inherited-members: :undoc-members: .. autoclass:: sqlalchemy.sql.operators.ColumnOperators :members: :special-members: :inherited-members: .. autoclass:: sqlalchemy.sql.base.DialectKWArgs :members: .. autoclass:: Extract :members: .. 
autoclass:: sqlalchemy.sql.elements.False_ :members: .. autoclass:: FunctionFilter :members: .. autoclass:: Label :members: .. autoclass:: sqlalchemy.sql.elements.Null :members: .. autoclass:: Over :members: .. autoclass:: TextClause :members: .. autoclass:: Tuple :members: .. autoclass:: sqlalchemy.sql.elements.True_ :members: .. autoclass:: sqlalchemy.sql.operators.custom_op :members: .. autoclass:: sqlalchemy.sql.operators.Operators :members: :special-members: .. autoclass:: sqlalchemy.sql.elements.quoted_name .. autoclass:: UnaryExpression :members: SQLAlchemy-1.0.11/doc/build/core/engines.rst0000664000175000017500000003030612636375552021620 0ustar classicclassic00000000000000.. _engines_toplevel: ==================== Engine Configuration ==================== The :class:`.Engine` is the starting point for any SQLAlchemy application. It's "home base" for the actual database and its :term:`DBAPI`, delivered to the SQLAlchemy application through a connection pool and a :class:`.Dialect`, which describes how to talk to a specific kind of database/DBAPI combination. The general structure can be illustrated as follows: .. image:: sqla_engine_arch.png Where above, an :class:`.Engine` references both a :class:`.Dialect` and a :class:`.Pool`, which together interpret the DBAPI's module functions as well as the behavior of the database. Creating an engine is just a matter of issuing a single call, :func:`.create_engine()`:: from sqlalchemy import create_engine engine = create_engine('postgresql://scott:tiger@localhost:5432/mydatabase') The above engine creates a :class:`.Dialect` object tailored towards PostgreSQL, as well as a :class:`.Pool` object which will establish a DBAPI connection at ``localhost:5432`` when a connection request is first received. Note that the :class:`.Engine` and its underlying :class:`.Pool` do **not** establish the first actual DBAPI connection until the :meth:`.Engine.connect` method is called, or an operation which is dependent on this method such as :meth:`.Engine.execute` is invoked. In this way, :class:`.Engine` and :class:`.Pool` can be said to have a *lazy initialization* behavior. The :class:`.Engine`, once created, can either be used directly to interact with the database, or can be passed to a :class:`.Session` object to work with the ORM. This section covers the details of configuring an :class:`.Engine`. The next section, :ref:`connections_toplevel`, will detail the usage API of the :class:`.Engine` and similar, typically for non-ORM applications. .. _supported_dbapis: Supported Databases ==================== SQLAlchemy includes many :class:`.Dialect` implementations for various backends. Dialects for the most common databases are included with SQLAlchemy; a handful of others require an additional install of a separate dialect. See the section :ref:`dialect_toplevel` for information on the various backends available. .. _database_urls: Database Urls ============= The :func:`.create_engine` function produces an :class:`.Engine` object based on a URL. These URLs follow RFC-1738, and usually can include username, password, hostname, database name as well as optional keyword arguments for additional configuration. In some cases a file path is accepted, and in others a "data source name" replaces the "host" and "database" portions.
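The :func:`~sqlalchemy.engine.url.make_url` helper, listed in :ref:`create_engine_args` below, parses such a URL string into its component parts; a small sketch (the credentials shown here are placeholders)::

    from sqlalchemy.engine.url import make_url

    url = make_url('postgresql://scott:tiger@localhost:5432/mydatabase')

    print url.drivername    # 'postgresql'
    print url.host          # 'localhost'
    print url.database      # 'mydatabase'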
The typical form of a database URL is:: dialect+driver://username:password@host:port/database Dialect names include the identifying name of the SQLAlchemy dialect, a name such as ``sqlite``, ``mysql``, ``postgresql``, ``oracle``, or ``mssql``. The drivername is the name of the DBAPI to be used to connect to the database, in all lowercase letters. If not specified, a "default" DBAPI will be imported if available - this default is typically the most widely known driver available for that backend. Examples for common connection styles follow below. For a full index of detailed information on all included dialects as well as links to third-party dialects, see :ref:`dialect_toplevel`. Postgresql ---------- The Postgresql dialect uses psycopg2 as the default DBAPI. pg8000 is also available as a pure-Python substitute:: # default engine = create_engine('postgresql://scott:tiger@localhost/mydatabase') # psycopg2 engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/mydatabase') # pg8000 engine = create_engine('postgresql+pg8000://scott:tiger@localhost/mydatabase') More notes on connecting to Postgresql at :ref:`postgresql_toplevel`. MySQL ----- The MySQL dialect uses mysql-python as the default DBAPI. There are many MySQL DBAPIs available, including MySQL-connector-python and OurSQL:: # default engine = create_engine('mysql://scott:tiger@localhost/foo') # mysql-python engine = create_engine('mysql+mysqldb://scott:tiger@localhost/foo') # MySQL-connector-python engine = create_engine('mysql+mysqlconnector://scott:tiger@localhost/foo') # OurSQL engine = create_engine('mysql+oursql://scott:tiger@localhost/foo') More notes on connecting to MySQL at :ref:`mysql_toplevel`. Oracle ------ The Oracle dialect uses cx_oracle as the default DBAPI:: engine = create_engine('oracle://scott:tiger@127.0.0.1:1521/sidname') engine = create_engine('oracle+cx_oracle://scott:tiger@tnsname') More notes on connecting to Oracle at :ref:`oracle_toplevel`. Microsoft SQL Server -------------------- The SQL Server dialect uses pyodbc as the default DBAPI. pymssql is also available:: # pyodbc engine = create_engine('mssql+pyodbc://scott:tiger@mydsn') # pymssql engine = create_engine('mssql+pymssql://scott:tiger@hostname:port/dbname') More notes on connecting to SQL Server at :ref:`mssql_toplevel`. SQLite ------ SQLite connects to file-based databases, using the Python built-in module ``sqlite3`` by default. As SQLite connects to local files, the URL format is slightly different. The "file" portion of the URL is the filename of the database. For a relative file path, this requires three slashes:: # sqlite://<nohostname>/<path> # where <path> is relative: engine = create_engine('sqlite:///foo.db') And for an absolute file path, the three slashes are followed by the absolute path:: #Unix/Mac - 4 initial slashes in total engine = create_engine('sqlite:////absolute/path/to/foo.db') #Windows engine = create_engine('sqlite:///C:\\path\\to\\foo.db') #Windows alternative using raw string engine = create_engine(r'sqlite:///C:\path\to\foo.db') To use a SQLite ``:memory:`` database, specify an empty URL:: engine = create_engine('sqlite://') More notes on connecting to SQLite at :ref:`sqlite_toplevel`. Others ------ See :ref:`dialect_toplevel`, the top-level page for all additional dialect documentation. .. _create_engine_args: Engine Creation API =================== .. autofunction:: sqlalchemy.create_engine .. autofunction:: sqlalchemy.engine_from_config .. autofunction:: sqlalchemy.engine.url.make_url ..
autoclass:: sqlalchemy.engine.url.URL :members: Pooling ======= The :class:`.Engine` will ask the connection pool for a connection when the ``connect()`` or ``execute()`` methods are called. The default connection pool, :class:`~.QueuePool`, will open connections to the database on an as-needed basis. As concurrent statements are executed, :class:`.QueuePool` will grow its pool of connections to a default size of five, and will allow a default "overflow" of ten. Since the :class:`.Engine` is essentially "home base" for the connection pool, it follows that you should keep a single :class:`.Engine` per database established within an application, rather than creating a new one for each connection. .. note:: :class:`.QueuePool` is not used by default for SQLite engines. See :ref:`sqlite_toplevel` for details on SQLite connection pool usage. For more information on connection pooling, see :ref:`pooling_toplevel`. .. _custom_dbapi_args: Custom DBAPI connect() arguments ================================= Custom arguments to be used when issuing the ``connect()`` call to the underlying DBAPI may be passed in three distinct ways. String-based arguments can be passed directly from the URL string as query arguments: .. sourcecode:: python+sql db = create_engine('postgresql://scott:tiger@localhost/test?argument1=foo&argument2=bar') If SQLAlchemy's database connector is aware of a particular query argument, it may convert its type from string to its proper type. :func:`~sqlalchemy.create_engine` also takes an argument ``connect_args`` which is an additional dictionary that will be passed to ``connect()``. This can be used when arguments of a type other than string are required, and SQLAlchemy's database connector has no type conversion logic present for that parameter: .. sourcecode:: python+sql db = create_engine('postgresql://scott:tiger@localhost/test', connect_args={'argument1': 17, 'argument2': 'bar'}) The most customizable connection method of all is to pass a ``creator`` argument, which specifies a callable that returns a DBAPI connection: .. sourcecode:: python+sql def connect(): return psycopg.connect(user='scott', host='localhost') db = create_engine('postgresql://', creator=connect) .. _dbengine_logging: Configuring Logging ==================== Python's standard ``logging`` module is used to implement informational and debug log output with SQLAlchemy. This allows SQLAlchemy's logging to integrate in a standard way with other applications and libraries. The ``echo`` and ``echo_pool`` flags that are present on :func:`~sqlalchemy.create_engine`, as well as the ``echo_uow`` flag used on :class:`~sqlalchemy.orm.session.Session`, all interact with regular loggers. This section assumes familiarity with the ``logging`` module. All logging performed by SQLAlchemy exists underneath the ``sqlalchemy`` namespace, as used by ``logging.getLogger('sqlalchemy')``. When logging has been configured (i.e. such as via ``logging.basicConfig()``), the general namespace of SA loggers that can be turned on is as follows: * ``sqlalchemy.engine`` - controls SQL echoing. Set to ``logging.INFO`` for SQL query output, ``logging.DEBUG`` for query + result set output. * ``sqlalchemy.dialects`` - controls custom logging for SQL dialects. See the documentation of individual dialects for details. * ``sqlalchemy.pool`` - controls connection pool logging. Set to ``logging.INFO`` or lower to log connection pool checkouts/checkins. * ``sqlalchemy.orm`` - controls logging of various ORM functions.
Set to ``logging.INFO`` for information on mapper configurations. For example, to log SQL queries using Python logging instead of the ``echo=True`` flag:: import logging logging.basicConfig() logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO) By default, the log level is set to ``logging.WARN`` within the entire ``sqlalchemy`` namespace so that no log operations occur, even within an application that has logging enabled otherwise. The ``echo`` flags present as keyword arguments to :func:`~sqlalchemy.create_engine` and others as well as the ``echo`` property on :class:`~sqlalchemy.engine.Engine`, when set to ``True``, will first attempt to ensure that logging is enabled. Unfortunately, the ``logging`` module provides no way of determining if output has already been configured (note we are referring to whether a logging configuration has been set up, not just that the logging level is set). For this reason, any ``echo=True`` flags will result in a call to ``logging.basicConfig()`` using ``sys.stdout`` as the destination. It also sets up a default format using the level name, timestamp, and logger name. Note that this configuration has the effect of being applied **in addition** to any existing logger configurations. Therefore, **when using Python logging, ensure all echo flags are set to False at all times**, to avoid getting duplicate log lines. The logger name of an instance such as an :class:`~sqlalchemy.engine.Engine` or :class:`~sqlalchemy.pool.Pool` defaults to using a truncated hex identifier string. To set this to a specific name, use the "logging_name" and "pool_logging_name" keyword arguments with :func:`sqlalchemy.create_engine`. .. note:: The SQLAlchemy :class:`.Engine` conserves Python function call overhead by only emitting log statements when the current logging level is detected as ``logging.INFO`` or ``logging.DEBUG``. It only checks this level when a new connection is procured from the connection pool. Therefore when changing the logging configuration for an already-running application, any :class:`.Connection` that's currently active, or more commonly a :class:`~.orm.session.Session` object that's active in a transaction, won't log any SQL according to the new configuration until a new :class:`.Connection` is procured (in the case of :class:`~.orm.session.Session`, this is after the current transaction ends and a new one begins). SQLAlchemy-1.0.11/doc/build/core/functions.rst0000664000175000017500000000153112636375552022176 0ustar classicclassic00000000000000.. _functions_toplevel: .. _generic_functions: ========================= SQL and Generic Functions ========================= .. module:: sqlalchemy.sql.expression SQL functions which are known to SQLAlchemy with regards to database-specific rendering, return types and argument behavior. Generic functions are invoked like all SQL functions, using the :attr:`func` attribute:: select([func.count()]).select_from(sometable) Note that any name not known to :attr:`func` generates the function name as is - there is no restriction on what SQL functions can be called, known or unknown to SQLAlchemy, built-in or user defined. The section here only describes those functions where SQLAlchemy already knows what argument and return types are in use. ..
automodule:: sqlalchemy.sql.functions :members: :undoc-members: :exclude-members: func SQLAlchemy-1.0.11/doc/build/core/serializer.rst0000664000175000017500000000021312636375552022333 0ustar classicclassic00000000000000Expression Serializer Extension =============================== .. automodule:: sqlalchemy.ext.serializer :members: :undoc-members: SQLAlchemy-1.0.11/doc/build/core/expression_api.rst0000664000175000017500000000062012636375552023214 0ustar classicclassic00000000000000.. _expression_api_toplevel: SQL Statements and Expressions API ================================== .. module:: sqlalchemy.sql.expression This section presents the API reference for the SQL Expression Language. For a full introduction to its usage, see :ref:`sqlexpression_toplevel`. .. toctree:: :maxdepth: 1 sqlelement selectable dml functions compiler serializer SQLAlchemy-1.0.11/doc/build/core/type_api.rst0000664000175000017500000000041512636375552022000 0ustar classicclassic00000000000000.. module:: sqlalchemy.types .. _types_api: Base Type API -------------- .. autoclass:: TypeEngine :members: .. autoclass:: Concatenable :members: :inherited-members: .. autoclass:: NullType .. autoclass:: Variant :members: with_variant, __init__ SQLAlchemy-1.0.11/doc/build/core/reflection.rst0000664000175000017500000001637712636375552022326 0ustar classicclassic00000000000000.. module:: sqlalchemy.schema .. _metadata_reflection_toplevel: .. _metadata_reflection: Reflecting Database Objects =========================== A :class:`~sqlalchemy.schema.Table` object can be instructed to load information about itself from the corresponding database schema object already existing within the database. This process is called *reflection*. In the simplest case you need only specify the table name, a :class:`~sqlalchemy.schema.MetaData` object, and the ``autoload=True`` flag. If the :class:`~sqlalchemy.schema.MetaData` is not persistently bound, also add the ``autoload_with`` argument:: >>> messages = Table('messages', meta, autoload=True, autoload_with=engine) >>> [c.name for c in messages.columns] ['message_id', 'message_name', 'date'] The above operation will use the given engine to query the database for information about the ``messages`` table, and will then generate :class:`~sqlalchemy.schema.Column`, :class:`~sqlalchemy.schema.ForeignKey`, and other objects corresponding to this information as though the :class:`~sqlalchemy.schema.Table` object were hand-constructed in Python. When tables are reflected, if a given table references another one via foreign key, a second :class:`~sqlalchemy.schema.Table` object representing the referenced table is created within the :class:`~sqlalchemy.schema.MetaData` object. Below, assume the table ``shopping_cart_items`` references a table named ``shopping_carts``. Reflecting the ``shopping_cart_items`` table has the effect that the ``shopping_carts`` table will also be loaded:: >>> shopping_cart_items = Table('shopping_cart_items', meta, autoload=True, autoload_with=engine) >>> 'shopping_carts' in meta.tables True The :class:`~sqlalchemy.schema.MetaData` has an interesting "singleton-like" behavior such that if you requested both tables individually, :class:`~sqlalchemy.schema.MetaData` will ensure that exactly one :class:`~sqlalchemy.schema.Table` object is created for each distinct table name. The :class:`~sqlalchemy.schema.Table` constructor actually returns the already-existing :class:`~sqlalchemy.schema.Table` object if one already exists with the given name.
As in the example below, we can access the already generated ``shopping_carts`` table just by naming it:: shopping_carts = Table('shopping_carts', meta) Of course, it's a good idea to use ``autoload=True`` with the above table regardless. This is so that the table's attributes will be loaded if they have not been already. The autoload operation only occurs for the table if it hasn't already been loaded; once loaded, new calls to :class:`~sqlalchemy.schema.Table` with the same name will not re-issue any reflection queries. Overriding Reflected Columns ----------------------------- Individual columns can be overridden with explicit values when reflecting tables; this is handy for specifying custom datatypes, constraints such as primary keys that may not be configured within the database, etc.:: >>> mytable = Table('mytable', meta, ... Column('id', Integer, primary_key=True), # override reflected 'id' to have primary key ... Column('mydata', Unicode(50)), # override reflected 'mydata' to be Unicode ... autoload=True) Reflecting Views ----------------- The reflection system can also reflect views. Basic usage is the same as that of a table:: my_view = Table("some_view", metadata, autoload=True) Above, ``my_view`` is a :class:`~sqlalchemy.schema.Table` object with :class:`~sqlalchemy.schema.Column` objects representing the names and types of each column within the view "some_view". Usually, it's desired to have at least a primary key constraint when reflecting a view, if not foreign keys as well. View reflection doesn't extrapolate these constraints. Use the "override" technique for this, specifying explicitly those columns which are part of the primary key or have foreign key constraints:: my_view = Table("some_view", metadata, Column("view_id", Integer, primary_key=True), Column("related_thing", Integer, ForeignKey("othertable.thing_id")), autoload=True ) Reflecting All Tables at Once ----------------------------- The :class:`~sqlalchemy.schema.MetaData` object can also get a listing of tables and reflect the full set. This is achieved by using the :func:`~sqlalchemy.schema.MetaData.reflect` method. After calling it, all located tables are present within the :class:`~sqlalchemy.schema.MetaData` object's dictionary of tables:: meta = MetaData() meta.reflect(bind=someengine) users_table = meta.tables['users'] addresses_table = meta.tables['addresses'] ``metadata.reflect()`` also provides a handy way to clear or delete all the rows in a database:: meta = MetaData() meta.reflect(bind=someengine) for table in reversed(meta.sorted_tables): someengine.execute(table.delete()) .. _metadata_reflection_inspector: Fine Grained Reflection with Inspector -------------------------------------- A low level interface which provides a backend-agnostic system of loading lists of schema, table, column, and constraint descriptions from a given database is also available. This is known as the "Inspector":: from sqlalchemy import create_engine from sqlalchemy.engine import reflection engine = create_engine('...') insp = reflection.Inspector.from_engine(engine) print insp.get_table_names() .. autoclass:: sqlalchemy.engine.reflection.Inspector :members: :undoc-members: Limitations of Reflection ------------------------- It's important to note that the reflection process recreates :class:`.Table` metadata using only information which is represented in the relational database. This process by definition cannot restore aspects of a schema that aren't actually stored in the database.
State which is not available from reflection includes but is not limited to: * Client side defaults, either Python functions or SQL expressions defined using the ``default`` keyword of :class:`.Column` (note this is separate from ``server_default``, which specifically is what's available via reflection). * Column information, e.g. data that might have been placed into the :attr:`.Column.info` dictionary * The value of the ``.quote`` setting for :class:`.Column` or :class:`.Table` * The association of a particular :class:`.Sequence` with a given :class:`.Column` The relational database also in many cases reports on table metadata in a different format than what was specified in SQLAlchemy. The :class:`.Table` objects returned from reflection cannot always be relied upon to produce the identical DDL as the original Python-defined :class:`.Table` objects. Areas where this occurs include server defaults, column-associated sequences and various idiosyncrasies regarding constraints and datatypes. Server side defaults may be returned with cast directives (typically Postgresql will include a ``::`` cast) or different quoting patterns than originally specified. Another category of limitation includes schema structures for which reflection is only partially or not yet defined. Recent improvements to reflection allow things like views, indexes and foreign key options to be reflected. As of this writing, structures like CHECK constraints, table comments, and triggers are not reflected. SQLAlchemy-1.0.11/doc/build/core/exceptions.rst0000664000175000017500000000011612636375552022345 0ustar classicclassic00000000000000Core Exceptions =============== .. automodule:: sqlalchemy.exc :members: SQLAlchemy-1.0.11/doc/build/core/type_basics.rst0000664000175000017500000001130412636375552022472 0ustar classicclassic00000000000000Column and Data Types ===================== .. module:: sqlalchemy.types SQLAlchemy provides abstractions for most common database data types, and a mechanism for specifying your own custom data types. The methods and attributes of type objects are rarely used directly. Type objects are supplied to :class:`~sqlalchemy.schema.Table` definitions and can be supplied as type hints to `functions` for occasions where the database driver returns an incorrect type. .. code-block:: pycon >>> users = Table('users', metadata, ... Column('id', Integer, primary_key=True), ... Column('login', String(32)) ... ) SQLAlchemy will use the ``Integer`` and ``String(32)`` type information when issuing a ``CREATE TABLE`` statement and will use it again when reading back rows ``SELECTed`` from the database. Functions that accept a type (such as :func:`~sqlalchemy.schema.Column`) will typically accept a type class or instance; ``Integer`` is equivalent to ``Integer()`` with no construction arguments in this case. .. _types_generic: Generic Types ------------- Generic types specify a column that can read, write and store a particular type of Python data. SQLAlchemy will choose the best database column type available on the target database when issuing a ``CREATE TABLE`` statement. For complete control over which column type is emitted in ``CREATE TABLE``, such as ``VARCHAR``, see `SQL Standard Types`_ and the other sections of this chapter. .. autoclass:: BigInteger :members: .. autoclass:: Boolean :members: .. autoclass:: Date :members: .. autoclass:: DateTime :members: .. autoclass:: Enum :members: __init__, create, drop .. autoclass:: Float :members: .. autoclass:: Integer :members: ..
autoclass:: Interval :members: .. autoclass:: LargeBinary :members: .. autoclass:: MatchType :members: .. autoclass:: Numeric :members: .. autoclass:: PickleType :members: .. autoclass:: SchemaType :members: :undoc-members: .. autoclass:: SmallInteger :members: .. autoclass:: String :members: .. autoclass:: Text :members: .. autoclass:: Time :members: .. autoclass:: Unicode :members: .. autoclass:: UnicodeText :members: .. _types_sqlstandard: SQL Standard Types ------------------ The SQL standard types always create database column types of the same name when ``CREATE TABLE`` is issued. Some types may not be supported on all databases. .. autoclass:: BIGINT .. autoclass:: BINARY .. autoclass:: BLOB .. autoclass:: BOOLEAN .. autoclass:: CHAR .. autoclass:: CLOB .. autoclass:: DATE .. autoclass:: DATETIME .. autoclass:: DECIMAL .. autoclass:: FLOAT .. autoclass:: INT .. autoclass:: sqlalchemy.types.INTEGER .. autoclass:: NCHAR .. autoclass:: NVARCHAR .. autoclass:: NUMERIC .. autoclass:: REAL .. autoclass:: SMALLINT .. autoclass:: TEXT .. autoclass:: TIME .. autoclass:: TIMESTAMP .. autoclass:: VARBINARY .. autoclass:: VARCHAR .. _types_vendor: Vendor-Specific Types --------------------- Database-specific types are also available for import from each database's dialect module. See the :ref:`dialect_toplevel` reference for the database you're interested in. For example, MySQL has a ``BIGINT`` type and PostgreSQL has an ``INET`` type. To use these, import them from the module explicitly:: from sqlalchemy.dialects import mysql table = Table('foo', metadata, Column('id', mysql.BIGINT), Column('enumerates', mysql.ENUM('a', 'b', 'c')) ) Or some PostgreSQL types:: from sqlalchemy.dialects import postgresql table = Table('foo', metadata, Column('ipaddress', postgresql.INET), Column('elements', postgresql.ARRAY(String)) ) Each dialect provides the full set of typenames supported by that backend within its `__all__` collection, so that a simple `import *` or similar will import all supported types as implemented for that backend:: from sqlalchemy.dialects.postgresql import * t = Table('mytable', metadata, Column('id', INTEGER, primary_key=True), Column('name', VARCHAR(300)), Column('inetaddr', INET) ) Where above, the INTEGER and VARCHAR types are ultimately from sqlalchemy.types, and INET is specific to the Postgresql dialect. Some dialect level types have the same name as the SQL standard type, but also provide additional arguments. For example, MySQL implements the full range of character and string types including additional arguments such as `collation` and `charset`:: from sqlalchemy.dialects.mysql import VARCHAR, TEXT table = Table('foo', meta, Column('col1', VARCHAR(200, collation='binary')), Column('col2', TEXT(charset='latin1')) ) SQLAlchemy-1.0.11/doc/build/core/types.rst0000664000175000017500000000021712636375552021332 0ustar classicclassic00000000000000.. _types_toplevel: Column and Data Types ===================== .. toctree:: :maxdepth: 2 type_basics custom_types type_api SQLAlchemy-1.0.11/doc/build/core/defaults.rst0000664000175000017500000004045412636375552022004 0ustar classicclassic00000000000000.. module:: sqlalchemy.schema .. _metadata_defaults_toplevel: .. _metadata_defaults: Column Insert/Update Defaults ============================== SQLAlchemy provides a very rich featureset regarding column level events which take place during INSERT and UPDATE statements. 
Options include: * Scalar values used as defaults during INSERT and UPDATE operations * Python functions which execute upon INSERT and UPDATE operations * SQL expressions which are embedded in INSERT statements (or in some cases execute beforehand) * SQL expressions which are embedded in UPDATE statements * Server side default values used during INSERT * Markers for server-side triggers used during UPDATE The general rule for all insert/update defaults is that they only take effect if no value for a particular column is passed as an ``execute()`` parameter; otherwise, the given value is used. Scalar Defaults --------------- The simplest kind of default is a scalar value used as the default value of a column:: Table("mytable", meta, Column("somecolumn", Integer, default=12) ) Above, the value "12" will be bound as the column value during an INSERT if no other value is supplied. A scalar value may also be associated with an UPDATE statement, though this is not very common (as UPDATE statements are usually looking for dynamic defaults):: Table("mytable", meta, Column("somecolumn", Integer, onupdate=25) ) Python-Executed Functions ------------------------- The :paramref:`.Column.default` and :paramref:`.Column.onupdate` keyword arguments also accept Python functions. These functions are invoked at the time of insert or update if no other value for that column is supplied, and the value returned is used for the column's value. The example below illustrates a crude "sequence" that assigns an incrementing counter to a primary key column:: # a function which counts upwards i = 0 def mydefault(): global i i += 1 return i t = Table("mytable", meta, Column('id', Integer, primary_key=True, default=mydefault), ) It should be noted that for real "incrementing sequence" behavior, the built-in capabilities of the database should normally be used, which may include sequence objects or other autoincrementing capabilities. For primary key columns, SQLAlchemy will in most cases use these capabilities automatically. See the API documentation for :class:`~sqlalchemy.schema.Column` including the :paramref:`.Column.autoincrement` flag, as well as the section on :class:`~sqlalchemy.schema.Sequence` later in this chapter for background on standard primary key generation techniques. To illustrate ``onupdate``, we assign the Python ``datetime`` function ``now`` to the :paramref:`.Column.onupdate` attribute:: import datetime t = Table("mytable", meta, Column('id', Integer, primary_key=True), # define 'last_updated' to be populated with datetime.now() Column('last_updated', DateTime, onupdate=datetime.datetime.now), ) When an update statement executes and no value is passed for ``last_updated``, the ``datetime.datetime.now()`` Python function is executed and its return value is used as the value for ``last_updated``. Notice that we provide ``now`` as the function itself without calling it (i.e. there are no parentheses following) - SQLAlchemy will execute the function at the time the statement executes. Context-Sensitive Default Functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Python functions used by :paramref:`.Column.default` and :paramref:`.Column.onupdate` may also make use of the current statement's context in order to determine a value. The `context` of a statement is an internal SQLAlchemy object which contains all information about the statement being executed, including its source expression, the parameters associated with it and the cursor.
The typical use case for this context with regards to default generation is to have access to the other values being inserted or updated on the row. To access the context, provide a function that accepts a single ``context`` argument:: def mydefault(context): return context.current_parameters['counter'] + 12 t = Table('mytable', meta, Column('counter', Integer), Column('counter_plus_twelve', Integer, default=mydefault, onupdate=mydefault) ) Above we illustrate a default function which will execute for all INSERT and UPDATE statements where a value for ``counter_plus_twelve`` was otherwise not provided; the value used will be whatever value is present in the execution for the ``counter`` column, plus the number 12. While the context object passed to the default function has many attributes, the ``current_parameters`` member is a special member provided only during the execution of a default function for the purposes of deriving defaults from its existing values. For a single statement that is executing many sets of bind parameters, the user-defined function is called for each set of parameters, and ``current_parameters`` will be provided with each individual parameter set for each execution. SQL Expressions --------------- The "default" and "onupdate" keywords may also be passed SQL expressions, including select statements or direct function calls:: t = Table("mytable", meta, Column('id', Integer, primary_key=True), # define 'create_date' to default to now() Column('create_date', DateTime, default=func.now()), # define 'key' to pull its default from the 'keyvalues' table Column('key', String(20), default=keyvalues.select(keyvalues.c.type == 'type1', limit=1)), # define 'last_modified' to use the current_timestamp SQL function on update Column('last_modified', DateTime, onupdate=func.utc_timestamp()) ) Above, the ``create_date`` column will be populated with the result of the ``now()`` SQL function (which, depending on backend, compiles into ``NOW()`` or ``CURRENT_TIMESTAMP`` in most cases) during an INSERT statement, and the ``key`` column with the result of a SELECT subquery from another table. The ``last_modified`` column will be populated with the value of ``UTC_TIMESTAMP()``, a function specific to MySQL, when an UPDATE statement is emitted for this table. Note that when using ``func`` functions, unlike when using Python ``datetime`` functions, we *do* call the function, i.e. with parentheses "()" - this is because what we want in this case is the return value of the function, which is the SQL expression construct that will be rendered into the INSERT or UPDATE statement. The above SQL functions are usually executed "inline" with the INSERT or UPDATE statement being executed, meaning, a single statement is executed which embeds the given expressions or subqueries within the VALUES or SET clause of the statement. In some cases, however, the function is "pre-executed" in a SELECT statement of its own beforehand. This happens when all of the following are true: * the column is a primary key column * the database dialect does not support a usable ``cursor.lastrowid`` accessor (or equivalent); this currently includes PostgreSQL, Oracle, and Firebird, as well as some MySQL dialects. * the dialect does not support the "RETURNING" clause or similar, or the ``implicit_returning`` flag is set to ``False`` for the dialect. Dialects which support RETURNING currently include Postgresql, Oracle, Firebird, and MS-SQL. * the statement is a single execution, i.e.
only supplies one set of parameters and doesn't use "executemany" behavior * the ``inline=True`` flag is not set on the :class:`~sqlalchemy.sql.expression.Insert()` or :class:`~sqlalchemy.sql.expression.Update()` construct, and the statement has not defined an explicit `returning()` clause. Whether or not the default generation clause "pre-executes" is not something that normally needs to be considered, unless it is being addressed for performance reasons. When the statement is executed with a single set of parameters (that is, it is not an "executemany" style execution), the returned :class:`~sqlalchemy.engine.ResultProxy` will contain a collection accessible via :meth:`.ResultProxy.postfetch_cols` which contains a list of all :class:`~sqlalchemy.schema.Column` objects which had an inline-executed default. Similarly, all parameters which were bound to the statement, including all Python and SQL expressions which were pre-executed, are present in the :meth:`.ResultProxy.last_inserted_params` or :meth:`.ResultProxy.last_updated_params` collections on :class:`~sqlalchemy.engine.ResultProxy`. The :attr:`.ResultProxy.inserted_primary_key` collection contains a list of primary key values for the row inserted (a list so that single-column and composite-column primary keys are represented in the same format). .. _server_defaults: Server Side Defaults -------------------- A variant on the SQL expression default is the :paramref:`.Column.server_default`, which gets placed in the CREATE TABLE statement during a :meth:`.Table.create` operation: .. sourcecode:: python+sql t = Table('test', meta, Column('abc', String(20), server_default='abc'), Column('created_at', DateTime, server_default=text("sysdate")) ) A create call for the above table will produce:: CREATE TABLE test ( abc varchar(20) default 'abc', created_at datetime default sysdate ) The behavior of :paramref:`.Column.server_default` is similar to that of a regular SQL default; if it's placed on a primary key column for a database which doesn't have a way to "postfetch" the ID, and the statement is not "inlined", the SQL expression is pre-executed; otherwise, SQLAlchemy lets the default fire off on the database side normally. .. _triggered_columns: Triggered Columns ------------------ Columns with values set by a database trigger or other external process may be called out using :class:`.FetchedValue` as a marker:: t = Table('test', meta, Column('abc', String(20), server_default=FetchedValue()), Column('def', String(20), server_onupdate=FetchedValue()) ) .. versionchanged:: 0.8.0b2,0.7.10 The ``for_update`` argument on :class:`.FetchedValue` is set automatically when specified as the ``server_onupdate`` argument. If using an older version, specify the onupdate above as ``server_onupdate=FetchedValue(for_update=True)``. These markers do not emit a "default" clause when the table is created, however they do set the same internal flags as a static ``server_default`` clause, providing hints to higher-level tools that a "post-fetch" of these rows should be performed after an insert or update. .. note:: It's generally not appropriate to use :class:`.FetchedValue` in conjunction with a primary key column, particularly when using the ORM or any other scenario where the :attr:`.ResultProxy.inserted_primary_key` attribute is required. This is because the "post-fetch" operation requires that the primary key value already be available, so that the row can be selected on its primary key.
For a server-generated primary key value, all databases provide special accessors or other techniques in order to acquire the "last inserted primary key" column of a table. These mechanisms aren't affected by the presence of :class:`.FetchedValue`. For special situations where triggers are used to generate primary key values, and the database in use does not support the ``RETURNING`` clause, it may be necessary to forego the usage of the trigger and instead apply the SQL expression or function as a "pre execute" expression:: t = Table('test', meta, Column('abc', MyType, default=func.generate_new_value(), primary_key=True) ) Where above, when :meth:`.Table.insert` is used, the ``func.generate_new_value()`` expression will be pre-executed in the context of a scalar ``SELECT`` statement, and the new value will be applied to the subsequent ``INSERT``, while at the same time being made available to the :attr:`.ResultProxy.inserted_primary_key` attribute. Defining Sequences ------------------- SQLAlchemy represents database sequences using the :class:`~sqlalchemy.schema.Sequence` object, which is considered to be a special case of "column default". It only has an effect on databases which have explicit support for sequences, which currently includes Postgresql, Oracle, and Firebird. The :class:`~sqlalchemy.schema.Sequence` object is otherwise ignored. The :class:`~sqlalchemy.schema.Sequence` may be placed on any column as a "default" generator to be used during INSERT operations, and can also be configured to fire off during UPDATE operations if desired. It is most commonly used in conjunction with a single integer primary key column:: table = Table("cartitems", meta, Column("cart_id", Integer, Sequence('cart_id_seq'), primary_key=True), Column("description", String(40)), Column("createdate", DateTime()) ) Where above, the table "cartitems" is associated with a sequence named "cart_id_seq". When INSERT statements take place for "cartitems", and no value is passed for the "cart_id" column, the "cart_id_seq" sequence will be used to generate a value. When the :class:`~sqlalchemy.schema.Sequence` is associated with a table, CREATE and DROP statements issued for that table will also issue CREATE/DROP for the sequence object as well, thus "bundling" the sequence object with its parent table. The :class:`~sqlalchemy.schema.Sequence` object also implements special functionality to accommodate Postgresql's SERIAL datatype. The SERIAL type in PG automatically generates a sequence that is used implicitly during inserts. This means that if a :class:`~sqlalchemy.schema.Table` object defines a :class:`~sqlalchemy.schema.Sequence` on its primary key column so that it works with Oracle and Firebird, the :class:`~sqlalchemy.schema.Sequence` would get in the way of the "implicit" sequence that PG would normally use. For this use case, add the flag ``optional=True`` to the :class:`~sqlalchemy.schema.Sequence` object - this indicates that the :class:`~sqlalchemy.schema.Sequence` should only be used if the database provides no other option for generating primary key identifiers. 
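For example - a sketch of this flag in use - the "cartitems" table above could mark its sequence as optional, so that Postgresql falls back to its implicit SERIAL sequence while Oracle and Firebird continue to use ``cart_id_seq`` explicitly::

    table = Table("cartitems", meta,
        # optional=True: the sequence fires only on backends which
        # have no other means of generating primary key identifiers
        Column("cart_id", Integer,
               Sequence('cart_id_seq', optional=True), primary_key=True),
        Column("description", String(40)),
        Column("createdate", DateTime())
    )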
The :class:`~sqlalchemy.schema.Sequence` object also has the ability to be executed standalone like a SQL expression, which has the effect of calling its "next value" function:: seq = Sequence('some_sequence') nextid = connection.execute(seq) Associating a Sequence as the Server Side Default ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When we associate a :class:`.Sequence` with a :class:`.Column` as above, this association is an **in-Python only** association. The CREATE TABLE that would be generated for our :class:`.Table` would not refer to this sequence. If we want the sequence to be used as a server-side default, meaning it takes place even if we emit INSERT commands to the table from the SQL commandline, we can use the :paramref:`.Column.server_default` parameter in conjunction with the value-generation function of the sequence, available from the :meth:`.Sequence.next_value` method:: cart_id_seq = Sequence('cart_id_seq') table = Table("cartitems", meta, Column( "cart_id", Integer, cart_id_seq, server_default=cart_id_seq.next_value(), primary_key=True), Column("description", String(40)), Column("createdate", DateTime()) ) The above metadata will generate a CREATE TABLE statement on Postgresql as:: CREATE TABLE cartitems ( cart_id INTEGER DEFAULT nextval('cart_id_seq') NOT NULL, description VARCHAR(40), createdate TIMESTAMP WITHOUT TIME ZONE, PRIMARY KEY (cart_id) ) We place the :class:`.Sequence` also as a Python-side default above, that is, it is mentioned twice in the :class:`.Column` definition. Depending on the backend in use, this may not be strictly necessary, for example on the Postgresql backend the Core will use ``RETURNING`` to access the newly generated primary key value in any case. However, for the best compatibility, :class:`.Sequence` was originally intended to be a Python-side directive first and foremost so it's probably a good idea to specify it in this way as well. Default Objects API ------------------- .. autoclass:: ColumnDefault .. autoclass:: DefaultClause .. autoclass:: DefaultGenerator .. autoclass:: FetchedValue .. autoclass:: PassiveDefault .. autoclass:: Sequence :members: SQLAlchemy-1.0.11/doc/build/core/selectable.rst0000664000175000017500000000307612636375552022277 0ustar classicclassic00000000000000Selectables, Tables, FROM objects ================================= The term "selectable" refers to any object that rows can be selected from; in SQLAlchemy, these objects descend from :class:`.FromClause` and their distinguishing feature is their :attr:`.FromClause.c` attribute, which is a namespace of all the columns contained within the FROM clause (these elements are themselves :class:`.ColumnElement` subclasses). .. module:: sqlalchemy.sql.expression .. autofunction:: alias .. autofunction:: except_ .. autofunction:: except_all .. autofunction:: exists .. autofunction:: intersect .. autofunction:: intersect_all .. autofunction:: join .. autofunction:: outerjoin .. autofunction:: select .. autofunction:: subquery .. autofunction:: sqlalchemy.sql.expression.table .. autofunction:: union .. autofunction:: union_all .. autoclass:: Alias :members: :inherited-members: .. autoclass:: CompoundSelect :members: :inherited-members: .. autoclass:: CTE :members: :inherited-members: .. autoclass:: Executable :members: .. autoclass:: FromClause :members: .. autoclass:: GenerativeSelect :members: :inherited-members: .. autoclass:: HasPrefixes :members: .. autoclass:: HasSuffixes :members: .. autoclass:: Join :members: :inherited-members: .. 
autoclass:: ScalarSelect :members: .. autoclass:: Select :members: :inherited-members: .. autoclass:: Selectable :members: .. autoclass:: SelectBase :members: .. autoclass:: TableClause :members: :inherited-members: .. autoclass:: TextAsFrom :members: SQLAlchemy-1.0.11/doc/build/core/index.rst0000664000175000017500000000070512636375552021277 0ustar classicclassic00000000000000.. _core_toplevel: SQLAlchemy Core =============== The breadth of SQLAlchemy’s SQL rendering engine, DBAPI integration, transaction integration, and schema description services is documented here. In contrast to the ORM’s domain-centric mode of usage, the SQL Expression Language provides a schema-centric usage paradigm. .. toctree:: :maxdepth: 2 tutorial expression_api schema types engines_connections api_basics SQLAlchemy-1.0.11/doc/build/core/schema.rst0000664000175000017500000000324612636375552021433 0ustar classicclassic00000000000000.. _schema_toplevel: ========================== Schema Definition Language ========================== .. module:: sqlalchemy.schema This section references SQLAlchemy **schema metadata**, a comprehensive system of describing and inspecting database schemas. The core of SQLAlchemy's query and object mapping operations is supported by *database metadata*, which is composed of Python objects that describe tables and other schema-level objects. These objects are at the core of three major types of operations - issuing CREATE and DROP statements (known as *DDL*), constructing SQL queries, and expressing information about structures that already exist within the database. Database metadata can be expressed by explicitly naming the various components and their properties, using constructs such as :class:`~sqlalchemy.schema.Table`, :class:`~sqlalchemy.schema.Column`, :class:`~sqlalchemy.schema.ForeignKey` and :class:`~sqlalchemy.schema.Sequence`, all of which are imported from the ``sqlalchemy.schema`` package. It can also be generated by SQLAlchemy using a process called *reflection*, which means you start with a single object such as :class:`~sqlalchemy.schema.Table`, assign it a name, and then instruct SQLAlchemy to load all the additional information related to that name from a particular engine source. A key feature of SQLAlchemy's database metadata constructs is that they are designed to be used in a *declarative* style which closely resembles that of real DDL. They are therefore most intuitive to those who have some background in creating real schema generation scripts. .. toctree:: :maxdepth: 2 metadata reflection defaults constraints ddl SQLAlchemy-1.0.11/doc/build/core/ddl.rst0000664000175000017500000002147212636375552020737 0ustar classicclassic00000000000000.. _metadata_ddl_toplevel: .. _metadata_ddl: .. module:: sqlalchemy.schema Customizing DDL =============== In the preceding sections we've discussed a variety of schema constructs including :class:`~sqlalchemy.schema.Table`, :class:`~sqlalchemy.schema.ForeignKeyConstraint`, :class:`~sqlalchemy.schema.CheckConstraint`, and :class:`~sqlalchemy.schema.Sequence`. Throughout, we've relied upon the ``create()`` and :func:`~sqlalchemy.schema.MetaData.create_all` methods of :class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.MetaData` in order to issue data definition language (DDL) for all constructs. When issued, a pre-determined order of operations is invoked, and the DDL to create each table is emitted unconditionally, including all constraints and other objects associated with it.
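This pre-determined ordering can also be inspected on its own. As a minimal sketch (the ``parent``/``child`` table names are illustrative only), the :func:`.sort_tables` helper documented later in this section returns :class:`.Table` objects in the same parent-first order that CREATE uses::

    from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey
    from sqlalchemy.schema import sort_tables

    metadata = MetaData()
    parent = Table('parent', metadata,
                   Column('id', Integer, primary_key=True))
    child = Table('child', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('parent_id', Integer, ForeignKey('parent.id')))

    # tables are returned in dependency order, parents before dependents
    print [t.name for t in sort_tables(metadata.tables.values())]
    # ['parent', 'child']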
For more complex scenarios where database-specific DDL is required, SQLAlchemy offers two techniques which can be used to add any DDL based on any condition, either accompanying the standard generation of tables or by itself. Custom DDL ---------- Custom DDL phrases are most easily achieved using the :class:`~sqlalchemy.schema.DDL` construct. This construct works like all the other DDL elements except it accepts a string which is the text to be emitted: .. sourcecode:: python+sql event.listen( metadata, "after_create", DDL("ALTER TABLE users ADD CONSTRAINT " "cst_user_name_length " " CHECK (length(user_name) >= 8)") ) A more comprehensive method of creating libraries of DDL constructs is to use custom compilation - see :ref:`sqlalchemy.ext.compiler_toplevel` for details. .. _schema_ddl_sequences: Controlling DDL Sequences ------------------------- The :class:`~.schema.DDL` construct introduced previously also has the ability to be invoked conditionally based on inspection of the database. This feature is available using the :meth:`.DDLElement.execute_if` method. For example, if we wanted to create a trigger but only on the Postgresql backend, we could invoke this as:: mytable = Table( 'mytable', metadata, Column('id', Integer, primary_key=True), Column('data', String(50)) ) trigger = DDL( "CREATE TRIGGER dt_ins BEFORE INSERT ON mytable " "FOR EACH ROW BEGIN SET NEW.data='ins'; END" ) event.listen( mytable, 'after_create', trigger.execute_if(dialect='postgresql') ) The :paramref:`.DDLElement.execute_if.dialect` keyword also accepts a tuple of string dialect names:: event.listen( mytable, "after_create", trigger.execute_if(dialect=('postgresql', 'mysql')) ) event.listen( mytable, "before_drop", trigger.execute_if(dialect=('postgresql', 'mysql')) ) The :meth:`.DDLElement.execute_if` method can also work against a callable function that will receive the database connection in use. In the example below, we use this to conditionally create a CHECK constraint, first looking within the Postgresql catalogs to see if it exists: .. sourcecode:: python+sql def should_create(ddl, target, connection, **kw): row = connection.execute( "select conname from pg_constraint where conname='%s'" % ddl.element.name).scalar() return not bool(row) def should_drop(ddl, target, connection, **kw): return not should_create(ddl, target, connection, **kw) event.listen( users, "after_create", DDL( "ALTER TABLE users ADD CONSTRAINT " "cst_user_name_length CHECK (length(user_name) >= 8)" ).execute_if(callable_=should_create) ) event.listen( users, "before_drop", DDL( "ALTER TABLE users DROP CONSTRAINT cst_user_name_length" ).execute_if(callable_=should_drop) ) {sql}users.create(engine) CREATE TABLE users ( user_id SERIAL NOT NULL, user_name VARCHAR(40) NOT NULL, PRIMARY KEY (user_id) ) select conname from pg_constraint where conname='cst_user_name_length' ALTER TABLE users ADD CONSTRAINT cst_user_name_length CHECK (length(user_name) >= 8){stop} {sql}users.drop(engine) select conname from pg_constraint where conname='cst_user_name_length' ALTER TABLE users DROP CONSTRAINT cst_user_name_length DROP TABLE users{stop} Using the built-in DDLElement Classes -------------------------------------- The ``sqlalchemy.schema`` package contains SQL expression constructs that provide DDL expressions. For example, to produce a ``CREATE TABLE`` statement: .. 
sourcecode:: python+sql from sqlalchemy.schema import CreateTable {sql}engine.execute(CreateTable(mytable)) CREATE TABLE mytable ( col1 INTEGER, col2 INTEGER, col3 INTEGER, col4 INTEGER, col5 INTEGER, col6 INTEGER ){stop} Above, the :class:`~sqlalchemy.schema.CreateTable` construct works like any other expression construct (such as ``select()``, ``table.insert()``, etc.). All of SQLAlchemy's DDL-oriented constructs are subclasses of the :class:`.DDLElement` base class; this is the base of all the objects corresponding to CREATE and DROP as well as ALTER, not only in SQLAlchemy but in Alembic Migrations as well. A full reference of available constructs is in :ref:`schema_api_ddl`. User-defined DDL constructs may also be created as subclasses of :class:`.DDLElement` itself. The documentation in :ref:`sqlalchemy.ext.compiler_toplevel` has several examples of this. The event-driven DDL system described in the previous section :ref:`schema_ddl_sequences` is available with other :class:`.DDLElement` objects as well. However, when dealing with the built-in constructs such as :class:`.CreateIndex`, :class:`.CreateSequence`, etc., the event system is of **limited** use, as methods like :meth:`.Table.create` and :meth:`.MetaData.create_all` will invoke these constructs unconditionally. In a future SQLAlchemy release, the DDL event system, including conditional execution, will be taken into account for the built-in constructs that currently invoke in all cases. We can illustrate an event-driven example with the :class:`.AddConstraint` and :class:`.DropConstraint` constructs, as the event-driven system will work for CHECK and UNIQUE constraints, using these as we did in our previous example of :meth:`.DDLElement.execute_if`: .. sourcecode:: python+sql def should_create(ddl, target, connection, **kw): row = connection.execute( "select conname from pg_constraint where conname='%s'" % ddl.element.name).scalar() return not bool(row) def should_drop(ddl, target, connection, **kw): return not should_create(ddl, target, connection, **kw) event.listen( users, "after_create", AddConstraint(constraint).execute_if(callable_=should_create) ) event.listen( users, "before_drop", DropConstraint(constraint).execute_if(callable_=should_drop) ) {sql}users.create(engine) CREATE TABLE users ( user_id SERIAL NOT NULL, user_name VARCHAR(40) NOT NULL, PRIMARY KEY (user_id) ) select conname from pg_constraint where conname='cst_user_name_length' ALTER TABLE users ADD CONSTRAINT cst_user_name_length CHECK (length(user_name) >= 8){stop} {sql}users.drop(engine) select conname from pg_constraint where conname='cst_user_name_length' ALTER TABLE users DROP CONSTRAINT cst_user_name_length DROP TABLE users{stop} While the above example is against the built-in :class:`.AddConstraint` and :class:`.DropConstraint` objects, the main usefulness of DDL events for now remains focused on the use of the :class:`.DDL` construct itself, as well as with user-defined subclasses of :class:`.DDLElement` that aren't already part of the :meth:`.MetaData.create_all`, :meth:`.Table.create`, and corresponding "drop" processes. .. _schema_api_ddl: DDL Expression Constructs API ----------------------------- .. autofunction:: sort_tables .. autofunction:: sort_tables_and_constraints .. autoclass:: DDLElement :members: :undoc-members: .. autoclass:: DDL :members: :undoc-members: .. autoclass:: _CreateDropBase .. autoclass:: CreateTable :members: :undoc-members: .. autoclass:: DropTable :members: :undoc-members: ..
autoclass:: CreateColumn :members: :undoc-members: .. autoclass:: CreateSequence :members: :undoc-members: .. autoclass:: DropSequence :members: :undoc-members: .. autoclass:: CreateIndex :members: :undoc-members: .. autoclass:: DropIndex :members: :undoc-members: .. autoclass:: AddConstraint :members: :undoc-members: .. autoclass:: DropConstraint :members: :undoc-members: .. autoclass:: CreateSchema :members: :undoc-members: .. autoclass:: DropSchema :members: :undoc-members: SQLAlchemy-1.0.11/doc/build/core/interfaces.rst0000664000175000017500000000137012636375552022312 0ustar classicclassic00000000000000.. _dep_interfaces_core_toplevel: Deprecated Event Interfaces ============================ .. module:: sqlalchemy.interfaces This section describes the class-based core event interface introduced in SQLAlchemy 0.5. The ORM analogue is described at :ref:`dep_interfaces_orm_toplevel`. .. deprecated:: 0.7 The new event system described in :ref:`event_toplevel` replaces the extension/proxy/listener system, providing a consistent interface to all events without the need for subclassing. Execution, Connection and Cursor Events --------------------------------------- .. autoclass:: ConnectionProxy :members: :undoc-members: Connection Pool Events ---------------------- .. autoclass:: PoolListener :members: :undoc-members: SQLAlchemy-1.0.11/doc/build/core/custom_types.rst0000664000175000017500000004470412636375552022725 0ustar classicclassic00000000000000.. module:: sqlalchemy.types .. _types_custom: Custom Types ------------ A variety of methods exist to redefine the behavior of existing types as well as to provide new ones. Overriding Type Compilation ~~~~~~~~~~~~~~~~~~~~~~~~~~~ A frequent need is to force the "string" version of a type, that is, the one rendered in a CREATE TABLE statement or other SQL function like CAST, to be changed. For example, an application may want to force the rendering of ``BINARY`` for all platforms except for one, in which it wants ``BLOB`` to be rendered. Usage of an existing generic type, in this case :class:`.LargeBinary`, is preferred for most use cases. But to control types more accurately, a compilation directive that is per-dialect can be associated with any type:: from sqlalchemy.ext.compiler import compiles from sqlalchemy.types import BINARY @compiles(BINARY, "sqlite") def compile_binary_sqlite(type_, compiler, **kw): return "BLOB" The above code allows the usage of :class:`.types.BINARY`, which will produce the string ``BINARY`` against all backends except SQLite, in which case it will produce ``BLOB``. See the section :ref:`type_compilation_extension`, a subsection of :ref:`sqlalchemy.ext.compiler_toplevel`, for additional examples. .. _types_typedecorator: Augmenting Existing Types ~~~~~~~~~~~~~~~~~~~~~~~~~ The :class:`.TypeDecorator` allows the creation of custom types which add bind-parameter and result-processing behavior to an existing type object. It is used when additional in-Python marshaling of data to and from the database is required. .. note:: The bind- and result-processing of :class:`.TypeDecorator` is *in addition* to the processing already performed by the hosted type, which is customized by SQLAlchemy on a per-DBAPI basis to perform processing specific to that DBAPI. To change the DBAPI-level processing for an existing type, see the section :ref:`replacing_processors`. .. autoclass:: TypeDecorator :members: :inherited-members: TypeDecorator Recipes ~~~~~~~~~~~~~~~~~~~~~ A few key :class:`.TypeDecorator` recipes follow. ..
_coerce_to_unicode: Coercing Encoded Strings to Unicode ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A common source of confusion regarding the :class:`.Unicode` type is that it is intended to deal *only* with Python ``unicode`` objects on the Python side, meaning values passed to it as bind parameters must be of the form ``u'some string'`` if using Python 2 and not 3. The encoding/decoding functions it performs are only to suit what the DBAPI in use requires, and are primarily a private implementation detail. The use case of a type that can safely receive Python bytestrings, that is, strings that contain non-ASCII characters and are not ``u''`` objects in Python 2, can be achieved using a :class:`.TypeDecorator` which coerces as needed:: from sqlalchemy.types import TypeDecorator, Unicode class CoerceUTF8(TypeDecorator): """Safely coerce Python bytestrings to Unicode before passing off to the database.""" impl = Unicode def process_bind_param(self, value, dialect): if isinstance(value, str): value = value.decode('utf-8') return value Rounding Numerics ^^^^^^^^^^^^^^^^^ Some database connectors, like those of SQL Server, choke if a Decimal is passed with too many decimal places. Here's a recipe that rounds them down:: from sqlalchemy.types import TypeDecorator, Numeric from decimal import Decimal class SafeNumeric(TypeDecorator): """Adds quantization to Numeric.""" impl = Numeric def __init__(self, *arg, **kw): TypeDecorator.__init__(self, *arg, **kw) self.quantize_int = -self.impl.scale self.quantize = Decimal(10) ** self.quantize_int def process_bind_param(self, value, dialect): if isinstance(value, Decimal) and \ value.as_tuple()[2] < self.quantize_int: value = value.quantize(self.quantize) return value .. _custom_guid_type: Backend-agnostic GUID Type ^^^^^^^^^^^^^^^^^^^^^^^^^^ Receives and returns Python ``uuid.UUID`` objects. Uses the PG UUID type when using Postgresql, CHAR(32) on other backends, storing them in stringified hex format. Can be modified to store binary in CHAR(16) if desired:: from sqlalchemy.types import TypeDecorator, CHAR from sqlalchemy.dialects.postgresql import UUID import uuid class GUID(TypeDecorator): """Platform-independent GUID type. Uses Postgresql's UUID type, otherwise uses CHAR(32), storing as stringified hex values. """ impl = CHAR def load_dialect_impl(self, dialect): if dialect.name == 'postgresql': return dialect.type_descriptor(UUID()) else: return dialect.type_descriptor(CHAR(32)) def process_bind_param(self, value, dialect): if value is None: return value elif dialect.name == 'postgresql': return str(value) else: if not isinstance(value, uuid.UUID): return "%.32x" % uuid.UUID(value).int else: # value is already a uuid.UUID; render its 32-character hex form return "%.32x" % value.int def process_result_value(self, value, dialect): if value is None: return value else: return uuid.UUID(value) Marshal JSON Strings ^^^^^^^^^^^^^^^^^^^^^ This type marshals Python data structures to/from JSON using the builtin ``json`` module; ``simplejson`` may be substituted if desired:: from sqlalchemy.types import TypeDecorator, VARCHAR import json class JSONEncodedDict(TypeDecorator): """Represents an immutable structure as a json-encoded string.
Usage:: JSONEncodedDict(255) """ impl = VARCHAR def process_bind_param(self, value, dialect): if value is not None: value = json.dumps(value) return value def process_result_value(self, value, dialect): if value is not None: value = json.loads(value) return value Note that the ORM by default will not detect "mutability" on such a type - meaning, in-place changes to values will not be detected and will not be flushed. Without further steps, you instead would need to replace the existing value with a new one on each parent object to detect changes. Note that there's nothing wrong with this, as many applications may not require that the values are ever mutated once created. For those which do have this requirement, support for mutability is best applied using the ``sqlalchemy.ext.mutable`` extension - see the example in :ref:`mutable_toplevel`. .. _replacing_processors: Replacing the Bind/Result Processing of Existing Types ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Most augmentation of type behavior at the bind/result level is achieved using :class:`.TypeDecorator`. For the rare scenario where the specific processing applied by SQLAlchemy at the DBAPI level needs to be replaced, the SQLAlchemy type can be subclassed directly, and the ``bind_processor()`` or ``result_processor()`` methods can be overridden. Doing so requires that the ``adapt()`` method also be overridden. This method is the mechanism by which SQLAlchemy produces DBAPI-specific type behavior during statement execution. Overriding it allows a copy of the custom type to be used in lieu of a DBAPI-specific type. Below we subclass the :class:`.types.TIME` type to have custom result processing behavior. The ``process()`` function will receive ``value`` from the DBAPI cursor directly:: class MySpecialTime(TIME): def __init__(self, special_argument): super(MySpecialTime, self).__init__() self.special_argument = special_argument def result_processor(self, dialect, coltype): import datetime time = datetime.time def process(value): if value is not None: microseconds = value.microseconds seconds = value.seconds minutes = seconds / 60 return time( minutes / 60, minutes % 60, seconds - minutes * 60, microseconds) else: return None return process def adapt(self, impltype): return MySpecialTime(self.special_argument) .. _types_sql_value_processing: Applying SQL-level Bind/Result Processing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As seen in the sections :ref:`types_typedecorator` and :ref:`replacing_processors`, SQLAlchemy allows Python functions to be invoked both when parameters are sent to a statement, as well as when result rows are loaded from the database, to apply transformations to the values as they are sent to or from the database. It is also possible to define SQL-level transformations as well. The rationale here is when only the relational database contains a particular series of functions that are necessary to coerce incoming and outgoing data between an application and persistence format. Examples include using database-defined encryption/decryption functions, as well as stored procedures that handle geographic data. The Postgis extension to Postgresql includes an extensive array of SQL functions that are necessary for coercing data into particular formats. 
Any :class:`.TypeEngine`, :class:`.UserDefinedType` or :class:`.TypeDecorator` subclass can include implementations of :meth:`.TypeEngine.bind_expression` and/or :meth:`.TypeEngine.column_expression`, which when defined to return a non-``None`` value should return a :class:`.ColumnElement` expression to be injected into the SQL statement, either surrounding bound parameters or a column expression. For example, to build a ``Geometry`` type which will apply the Postgis function ``ST_GeomFromText`` to all outgoing values and the function ``ST_AsText`` to all incoming data, we can create our own subclass of :class:`.UserDefinedType` which provides these methods in conjunction with :data:`~.sqlalchemy.sql.expression.func`:: from sqlalchemy import func from sqlalchemy.types import UserDefinedType class Geometry(UserDefinedType): def get_col_spec(self): return "GEOMETRY" def bind_expression(self, bindvalue): return func.ST_GeomFromText(bindvalue, type_=self) def column_expression(self, col): return func.ST_AsText(col, type_=self) We can apply the ``Geometry`` type into :class:`.Table` metadata and use it in a :func:`.select` construct:: geometry = Table('geometry', metadata, Column('geom_id', Integer, primary_key=True), Column('geom_data', Geometry) ) print select([geometry]).where( geometry.c.geom_data == 'LINESTRING(189412 252431,189631 259122)') The resulting SQL embeds both functions as appropriate. ``ST_AsText`` is applied to the columns clause so that the return value is run through the function before passing into a result set, and ``ST_GeomFromText`` is run on the bound parameter so that the passed-in value is converted:: SELECT geometry.geom_id, ST_AsText(geometry.geom_data) AS geom_data_1 FROM geometry WHERE geometry.geom_data = ST_GeomFromText(:geom_data_2) The :meth:`.TypeEngine.column_expression` method interacts with the mechanics of the compiler such that the SQL expression does not interfere with the labeling of the wrapped expression. 
For example, if we render a :func:`.select` against a :func:`.label` of our expression, the string label is moved to the outside of the wrapped expression:: print select([geometry.c.geom_data.label('my_data')]) Output:: SELECT ST_AsText(geometry.geom_data) AS my_data FROM geometry For an example of subclassing a built-in type directly, we subclass :class:`.postgresql.BYTEA` to provide a ``PGPString``, which will make use of the Postgresql ``pgcrypto`` extension to encrypt/decrypt values transparently:: from sqlalchemy import create_engine, String, select, func, \ MetaData, Table, Column, type_coerce from sqlalchemy.dialects.postgresql import BYTEA class PGPString(BYTEA): def __init__(self, passphrase, length=None): super(PGPString, self).__init__(length) self.passphrase = passphrase def bind_expression(self, bindvalue): # convert the bind's type from PGPString to # String, so that it's passed to psycopg2 as is without # a dbapi.Binary wrapper bindvalue = type_coerce(bindvalue, String) return func.pgp_sym_encrypt(bindvalue, self.passphrase) def column_expression(self, col): return func.pgp_sym_decrypt(col, self.passphrase) metadata = MetaData() message = Table('message', metadata, Column('username', String(50)), Column('message', PGPString("this is my passphrase", length=1000)), ) engine = create_engine("postgresql://scott:tiger@localhost/test", echo=True) with engine.begin() as conn: metadata.create_all(conn) conn.execute(message.insert(), username="some user", message="this is my message") print conn.scalar( select([message.c.message]).\ where(message.c.username == "some user") ) The ``pgp_sym_encrypt`` and ``pgp_sym_decrypt`` functions are applied to the INSERT and SELECT statements:: INSERT INTO message (username, message) VALUES (%(username)s, pgp_sym_encrypt(%(message)s, %(pgp_sym_encrypt_1)s)) {'username': 'some user', 'message': 'this is my message', 'pgp_sym_encrypt_1': 'this is my passphrase'} SELECT pgp_sym_decrypt(message.message, %(pgp_sym_decrypt_1)s) AS message_1 FROM message WHERE message.username = %(username_1)s {'pgp_sym_decrypt_1': 'this is my passphrase', 'username_1': 'some user'} .. versionadded:: 0.8 Added the :meth:`.TypeEngine.bind_expression` and :meth:`.TypeEngine.column_expression` methods. See also: :ref:`examples_postgis` .. _types_operators: Redefining and Creating New Operators ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SQLAlchemy Core defines a fixed set of expression operators available to all column expressions. Some of these operations have the effect of overloading Python's built-in operators; examples of such operators include :meth:`.ColumnOperators.__eq__` (``table.c.somecolumn == 'foo'``), :meth:`.ColumnOperators.__invert__` (``~table.c.flag``), and :meth:`.ColumnOperators.__add__` (``table.c.x + table.c.y``). Other operators are exposed as explicit methods on column expressions, such as :meth:`.ColumnOperators.in_` (``table.c.value.in_(['x', 'y'])``) and :meth:`.ColumnOperators.like` (``table.c.value.like('%ed%')``). The Core expression constructs in all cases consult the type of the expression in order to determine the behavior of existing operators, as well as to locate additional operators that aren't part of the built-in set. The :class:`.TypeEngine` base class defines a root "comparison" implementation :class:`.TypeEngine.Comparator`, and many specific types provide their own sub-implementations of this class.
User-defined :class:`.TypeEngine.Comparator` implementations can be built directly into a simple subclass of a particular type in order to override or define new operations. Below, we create a :class:`.Integer` subclass which overrides the :meth:`.ColumnOperators.__add__` operator:: from sqlalchemy import Integer class MyInt(Integer): class comparator_factory(Integer.Comparator): def __add__(self, other): return self.op("goofy")(other) The above configuration creates a new class ``MyInt``, which establishes the :attr:`.TypeEngine.comparator_factory` attribute as referring to a new class, subclassing the :class:`.TypeEngine.Comparator` class associated with the :class:`.Integer` type. Usage:: >>> sometable = Table("sometable", metadata, Column("data", MyInt)) >>> print sometable.c.data + 5 sometable.data goofy :data_1 The implementation for :meth:`.ColumnOperators.__add__` is consulted by an owning SQL expression, by instantiating the :class:`.TypeEngine.Comparator` with itself as the ``expr`` attribute. The mechanics of the expression system are such that operations continue recursively until an expression object produces a new SQL expression construct. Above, we could just as well have said ``self.expr.op("goofy")(other)`` instead of ``self.op("goofy")(other)``. New methods added to a :class:`.TypeEngine.Comparator` are exposed on an owning SQL expression using a ``__getattr__`` scheme, which exposes methods added to :class:`.TypeEngine.Comparator` onto the owning :class:`.ColumnElement`. For example, to add a ``log()`` function to integers:: from sqlalchemy import Integer, func class MyInt(Integer): class comparator_factory(Integer.Comparator): def log(self, other): return func.log(self.expr, other) Using the above type:: >>> print sometable.c.data.log(5) log(:log_1, :log_2) Unary operations are also possible. For example, to add an implementation of the Postgresql factorial operator, we combine the :class:`.UnaryExpression` construct along with a :class:`.custom_op` to produce the factorial expression:: from sqlalchemy import Integer from sqlalchemy.sql.expression import UnaryExpression from sqlalchemy.sql import operators class MyInteger(Integer): class comparator_factory(Integer.Comparator): def factorial(self): return UnaryExpression(self.expr, modifier=operators.custom_op("!"), type_=MyInteger) Using the above type:: >>> from sqlalchemy.sql import column >>> print column('x', MyInteger).factorial() x ! See also: :attr:`.TypeEngine.comparator_factory` .. versionadded:: 0.8 The expression system was enhanced to support customization of operators on a per-type level. Creating New Types ~~~~~~~~~~~~~~~~~~ The :class:`.UserDefinedType` class is provided as a simple base class for defining entirely new database types. Use this to represent native database types not known by SQLAlchemy. If only Python translation behavior is needed, use :class:`.TypeDecorator` instead. .. autoclass:: UserDefinedType :members: SQLAlchemy-1.0.11/doc/build/core/metadata.rst0000664000175000017500000002736412636375552021762 0ustar classicclassic00000000000000.. _metadata_toplevel: .. _metadata_describing_toplevel: .. _metadata_describing: ================================== Describing Databases with MetaData ================================== .. module:: sqlalchemy.schema This section discusses the fundamental :class:`.Table`, :class:`.Column` and :class:`.MetaData` objects. 
A collection of metadata entities is stored in an object aptly named :class:`~sqlalchemy.schema.MetaData`:: from sqlalchemy import * metadata = MetaData() :class:`~sqlalchemy.schema.MetaData` is a container object that keeps together many different features of a database (or multiple databases) being described. To represent a table, use the :class:`~sqlalchemy.schema.Table` class. Its two primary arguments are the table name, then the :class:`~sqlalchemy.schema.MetaData` object which it will be associated with. The remaining positional arguments are mostly :class:`~sqlalchemy.schema.Column` objects describing each column:: user = Table('user', metadata, Column('user_id', Integer, primary_key=True), Column('user_name', String(16), nullable=False), Column('email_address', String(60)), Column('password', String(20), nullable=False) ) Above, a table called ``user`` is described, which contains four columns. The primary key of the table consists of the ``user_id`` column. Multiple columns may be assigned the ``primary_key=True`` flag which denotes a multi-column primary key, known as a *composite* primary key. Note also that each column describes its datatype using objects corresponding to genericized types, such as :class:`~sqlalchemy.types.Integer` and :class:`~sqlalchemy.types.String`. SQLAlchemy features dozens of types of varying levels of specificity as well as the ability to create custom types. Documentation on the type system can be found at :ref:`types`. Accessing Tables and Columns ---------------------------- The :class:`~sqlalchemy.schema.MetaData` object contains all of the schema constructs we've associated with it. It supports a few methods of accessing these table objects, such as the ``sorted_tables`` accessor which returns a list of each :class:`~sqlalchemy.schema.Table` object in order of foreign key dependency (that is, each table is preceded by all tables which it references):: >>> for t in metadata.sorted_tables: ... print t.name user user_preference invoice invoice_item In most cases, individual :class:`~sqlalchemy.schema.Table` objects have been explicitly declared, and these objects are typically accessed directly as module-level variables in an application. Once a :class:`~sqlalchemy.schema.Table` has been defined, it has a full set of accessors which allow inspection of its properties. Given the following :class:`~sqlalchemy.schema.Table` definition:: employees = Table('employees', metadata, Column('employee_id', Integer, primary_key=True), Column('employee_name', String(60), nullable=False), Column('employee_dept', Integer, ForeignKey("departments.department_id")) ) Note the :class:`~sqlalchemy.schema.ForeignKey` object used in this table - this construct defines a reference to a remote table, and is fully described in :ref:`metadata_foreignkeys`. 
Methods of accessing information about this table include:: # access the column "employee_id": employees.columns.employee_id # or just employees.c.employee_id # via string employees.c['employee_id'] # iterate through all columns for c in employees.c: print c # get the table's primary key columns for primary_key in employees.primary_key: print primary_key # get the table's foreign key objects: for fkey in employees.foreign_keys: print fkey # access the table's MetaData: employees.metadata # access the table's bound Engine or Connection, if its MetaData is bound: employees.bind # access a column's name, type, nullable, primary key, foreign key employees.c.employee_id.name employees.c.employee_id.type employees.c.employee_id.nullable employees.c.employee_id.primary_key employees.c.employee_dept.foreign_keys # get the "key" of a column, which defaults to its name, but can # be any user-defined string: employees.c.employee_name.key # access a column's table: employees.c.employee_id.table is employees # get the table related by a foreign key list(employees.c.employee_dept.foreign_keys)[0].column.table Creating and Dropping Database Tables ------------------------------------- Once you've defined some :class:`~sqlalchemy.schema.Table` objects, assuming you're working with a brand new database, one thing you might want to do is issue CREATE statements for those tables and their related constructs (as an aside, it's also quite possible that you *don't* want to do this, if you already have some preferred methodology such as tools included with your database or an existing scripting system - if that's the case, feel free to skip this section - SQLAlchemy has no requirement that it be used to create your tables). The usual way to issue CREATE is to use :func:`~sqlalchemy.schema.MetaData.create_all` on the :class:`~sqlalchemy.schema.MetaData` object. This method will issue queries that first check for the existence of each individual table, and if not found will issue the CREATE statements: .. sourcecode:: python+sql engine = create_engine('sqlite:///:memory:') metadata = MetaData() user = Table('user', metadata, Column('user_id', Integer, primary_key=True), Column('user_name', String(16), nullable=False), Column('email_address', String(60), key='email'), Column('password', String(20), nullable=False) ) user_prefs = Table('user_prefs', metadata, Column('pref_id', Integer, primary_key=True), Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False), Column('pref_name', String(40), nullable=False), Column('pref_value', String(100)) ) {sql}metadata.create_all(engine) PRAGMA table_info(user){} CREATE TABLE user( user_id INTEGER NOT NULL PRIMARY KEY, user_name VARCHAR(16) NOT NULL, email_address VARCHAR(60), password VARCHAR(20) NOT NULL ) PRAGMA table_info(user_prefs){} CREATE TABLE user_prefs( pref_id INTEGER NOT NULL PRIMARY KEY, user_id INTEGER NOT NULL REFERENCES user(user_id), pref_name VARCHAR(40) NOT NULL, pref_value VARCHAR(100) ) :func:`~sqlalchemy.schema.MetaData.create_all` creates foreign key constraints between tables usually inline with the table definition itself, and for this reason it also generates the tables in order of their dependency. There are options to change this behavior such that ``ALTER TABLE`` is used instead. Dropping all tables is similarly achieved using the :func:`~sqlalchemy.schema.MetaData.drop_all` method.
This method does the exact opposite of :func:`~sqlalchemy.schema.MetaData.create_all` - the presence of each table is checked first, and tables are dropped in reverse order of dependency. Creating and dropping individual tables can be done via the ``create()`` and ``drop()`` methods of :class:`~sqlalchemy.schema.Table`. These methods by default issue the CREATE or DROP regardless of the table being present: .. sourcecode:: python+sql engine = create_engine('sqlite:///:memory:') meta = MetaData() employees = Table('employees', meta, Column('employee_id', Integer, primary_key=True), Column('employee_name', String(60), nullable=False, key='name'), Column('employee_dept', Integer, ForeignKey("departments.department_id")) ) {sql}employees.create(engine) CREATE TABLE employees( employee_id SERIAL NOT NULL PRIMARY KEY, employee_name VARCHAR(60) NOT NULL, employee_dept INTEGER REFERENCES departments(department_id) ) {} ``drop()`` method: .. sourcecode:: python+sql {sql}employees.drop(engine) DROP TABLE employees {} To enable the "check first for the table existing" logic, add the ``checkfirst=True`` argument to ``create()`` or ``drop()``:: employees.create(engine, checkfirst=True) employees.drop(engine, checkfirst=False) .. _schema_migrations: Altering Schemas through Migrations ----------------------------------- While SQLAlchemy directly supports emitting CREATE and DROP statements for schema constructs, the ability to alter those constructs, usually via the ALTER statement as well as other database-specific constructs, is outside of the scope of SQLAlchemy itself. While it's easy enough to emit ALTER statements and similar by hand, such as by passing a string to :meth:`.Connection.execute` or by using the :class:`.DDL` construct, it's a common practice to automate the maintenance of database schemas in relation to application code using schema migration tools. There are two major migration tools available for SQLAlchemy: * `Alembic `_ - Written by the author of SQLAlchemy, Alembic features a highly customizable environment and a minimalistic usage pattern, supporting such features as transactional DDL, automatic generation of "candidate" migrations, an "offline" mode which generates SQL scripts, and support for branch resolution. * `SQLAlchemy-Migrate `_ - The original migration tool for SQLAlchemy, SQLAlchemy-Migrate is widely used and continues under active development. SQLAlchemy-Migrate includes features such as SQL script generation, ORM class generation, ORM model comparison, and extensive support for SQLite migrations. Specifying the Schema Name --------------------------- Some databases support the concept of multiple schemas. A :class:`~sqlalchemy.schema.Table` can reference this by specifying the ``schema`` keyword argument:: financial_info = Table('financial_info', meta, Column('id', Integer, primary_key=True), Column('value', String(100), nullable=False), schema='remote_banks' ) Within the :class:`~sqlalchemy.schema.MetaData` collection, this table will be identified by the combination of ``financial_info`` and ``remote_banks``. If another table called ``financial_info`` is referenced without the ``remote_banks`` schema, it will refer to a different :class:`~sqlalchemy.schema.Table`. :class:`~sqlalchemy.schema.ForeignKey` objects can specify references to columns in this table using the form ``remote_banks.financial_info.id``. The ``schema`` argument should be used for any name qualifiers required, including Oracle's "owner" attribute and similar. 
It also can accommodate a dotted name for longer schemes:: schema="dbo.scott" Backend-Specific Options ------------------------ :class:`~sqlalchemy.schema.Table` supports database-specific options. For example, MySQL has different table backend types, including "MyISAM" and "InnoDB". This can be expressed with :class:`~sqlalchemy.schema.Table` using ``mysql_engine``:: addresses = Table('engine_email_addresses', meta, Column('address_id', Integer, primary_key=True), Column('remote_user_id', Integer, ForeignKey(users.c.user_id)), Column('email_address', String(20)), mysql_engine='InnoDB' ) Other backends may support table-level options as well - these would be described in the individual documentation sections for each dialect. Column, Table, MetaData API --------------------------- .. autoclass:: Column :members: :inherited-members: .. autoclass:: MetaData :members: .. autoclass:: SchemaItem :members: .. autoclass:: Table :members: :inherited-members: .. autoclass:: ThreadLocalMetaData :members: SQLAlchemy-1.0.11/doc/build/core/sqla_engine_arch.png0000664000175000017500000006703612636375552023440 0ustar classicclassic00000000000000‰PNG  IHDR«¹švHLiCCPICC ProfilexÕYgTTÏ’ï{'Á0 HfÈ9KÎ9JÎ*0ä ’AA’ ˆ`@$)Q (*¢ (Š F ‚"‚ˆ$…½þïíy»ßöËö9Ó÷7UÕÕ5·ªCÕÀ>G † а5Ö£8»¸Rp¯0&°pP½"Ãu­­-ÀÿÚ¾h›ùTj[×ÿ*ö?3HÞ>‘^@ÖÛÓ;Ò+ÁW€õ¼Â#¢@­"ôDZQáFßE0sb ‚ßlc¿ßxa{þÂô/{[}0lШÔ?ˆ‚ãå‡è!€e õ€ìŒ`-/ª7숌dHHØ6îA°¨ç¿éñû7L¥zþ£“Jõûÿþ-ÈHdbƒ€Èð`jü¯/ÿ—]Hp4ò¾~5&¤'„[nû†ùÌxS Ì‘'òÙ þå3Dâð u°ChÛX2ÔÓÒêÖò0²E02²ÒÛÆÈ;ƒ|ã¬íÿГüõ-L@èy>‘†õ”RͶ}FЛ#¢m,ˆàÎÈ;C#M&øÛ;ý‘ùæícð‡þF¦¿e`¦€(Óí¹˜Ÿó…™oÛ€Ì+s |@4ˆ@úP ,€>0øÓK_@E81/i‡ #Â1a¦ü‘ÓÿŠÑ¯q~ȸÿ®‘¼Ùèæü=™ó¯Îàà¿t*2Ç6oÛºH÷€ÔÍùWb[ß/kdëegeüµ -Œ–G+¢õКh-´*  YÑ@ ½­‚ÖEk£Õž*0“ˆf¿¿6nëiö)‹WsôG¸Û¿Ýó/8þ’øçûXÍ]ŸûkQ>qÈ:@?,<>"ÀÏ?Š¢‹¬\IŠi¨—´$E^VNv›ýÿ¦mïY¿]²ýµA¬ƒÿ¢ùÉ ¸ˆÄ”È¿hg¸Ä5nß¿hÂHÜÑwp/Ï+:"æ·>ôöð€‰PvÀ€(òžåP:À˜+`\€?þH F€Xp¤€ p§A(•บÁuк@xƒçà5SàXßÁA8ˆ‘!vˆ‚$ yHÒ‚ ! Èr< ?(І@iÐQè$T•CµÐe¨ ê‚îAO —Ð84 }…ÖaL€™anX–U`]ض‡÷Â~ð>8N‡sà¸n€[à.øüƒ?ÁË(€¢C±¢øPR(”>Ê åŠòEE ’PY¨|TªÕŽêG=E¡æPkh,šŒ¦ ¥85A; ½ÐûÐIèltúº݃~ŠG/ 71D F£†1Å8cü0±˜ L>¦s Ó‹yŽ™Â|Çb±¬X¬2Öë‚ ÄîÇfcÏb›°Ø'Ø ì2‡cÇIà4qV8*. —+Ä5ànã†pS¸U:^y#WšPšTš|š:š[4C4h6hi…hÕh­h½iãisi«hÛii§h7ð$¼^oħà ðø^üü?* ]]2]Ý%º»tãtk&‚8AŸ°‡MÈ!Ô: / KD"Q˜¨Ct%FsˆµÄ;ÄQâ*=™^šÞ”Þ›þ }1} ýý<-ƒƒ.ƒCC>ÆA†9FZFaF}F*cc1cãã2‰L’#Y‘BHÙ¤:Ò=Ò ŽI˜ÉÉ›)©’éÓE 듽Èiä*r/yŠË,ÂlÊÈ|”ù"ó#æ&–,Ž,q,Å,7YÆXQ¬Â¬¦¬Á¬¹¬Í¬Ã¬ë;¸wèîðÙqdG㎡+lœl:l>lYlMlÏÙÖÙ)ì†ìAì'د³¿å@sˆsØpÄrœãèå˜ãdæTçôâÌâlæ|Ås‰sÙríçªäàZææá6æç.ä¾Ã=ÇÃÊ£ÃÈ“Çs‹g–—Ì«ÅÀ›Ç{›÷#……¢K ¦Pz( |\|&|Ñ|å|ø6øEøøSù›øß àT|òºyw ¬|%D+¤"ä/tF¨_hEXDØI8SøºðŒ›ˆ©H‚H½ÈQ¢¨¶è>Ñ ÑgbX1± ±³bÅaqEqñbñA XBI"@â¬ÄIŒ¤ªd¨d…äˆAJW*Fª^j\šUÚB:Uúºô¼Œ Œ«Ì ™~™MYEÙ`Ù*Ù×rLrfr©rír_åÅå½ä‹åŸ)Œ*´*,î”Øé³óÜΊdÅ]Š™ŠÝŠ?•”•"”•f••=”K”GT˜U¬U²UîªbTõTªv¨®©)©E©5«}Q—RR¯SŸÑÑðѨҘÐäפj–kŽiQ´<´Ê´Æ´ù´©ÚÚïut¼uªu>èŠéê6èÎëÉêEè]Ó[ÑWÓOÔï4@d<2d2t0,25â7ò3ª7Z0V4ÞoÜi‚1179a2bÊmêeZkº`¦l–hÖcN0·3/2o!naѾ Þe¶ëÔ®7–B–¡–×­€•©Õ)«·Ö"Öû¬oØ`m¬mŠm¦målØöÛ‘íÜíêì¾ÛëÙçÚ¿vuˆvèvdpÜãXë¸âdàtÒiÌYÆ9Ñù ‡K€K«+ÎÕѵÚuy·áîÓ»§ö(îÉØ3¼WdoÜÞ{nnÁn7ÝÜ©îW<0Nu?¨VÔ ê²§©g‰ç‚—¾×¯OÞ:ÞyÞ³>š>'}>øjúžôñÓô;å7ë¯íŸï? 
P°hX¸dT´ìÜBâÒÊÚÆö$\"<#|lŸÚ¾Óû"Ì#ª#¡È½‘­QÌÈåp Z4úPôxŒVLqÌj¬cì•8R\hÜ@¼xü‘ø F ç÷£÷{íï>Àw åÀx¢nby”ä™Ô}Pà`úÁ©dãä )ø” ”‡©²©'S¿¥9¥µ§s§'§O2>TŸAŸ‘1’©žYz}8àð£# G lfygÝ?*{4ÿèl¯ìûÇäŽÛÊñÍy”«”{î8öxèñáÚ'.œ$L89qjש–£¾;ýºý·ïjÞí¸§v¯í¾Êýë”´ (\{¨øðÚ#¥G-ƒÊƒ­U·?ÑxrkH{¨ë©ÁÓ¾g¦Ï<·|þdØaøÅÈž‘±Þ/f^¿\|ójãuòÌ›¬·ŒoóG¹F+Þ‰½kS»9n0>ðÞîýë ¯‰O“‘“?¦Ò§‰Óùx?ÔÎÈÏtÌÍ>þ¸ûãÔ§ðOsŸIŸKæEç¯~Ñù2°à¼0µ±¸õ5{‰}©æÛÎoÝËÖË£ßC¾o¬d­²¯^XSYë_wZÿ°û÷£à§ØÏöMóÍ7[![[áÔ꯻ éa__¾Ö 9„ ’;<ßù;§ø%¤+"ƒ`Gè¼¥‚Üí¦0w±5¸\šxZün:‚ ёޖÁ•уäÏJÎ`îe%íØÍV;ȩÁ•Éý’W–’Ä÷L@B0Mè­ˆšhØw {É«Ò,2ñ²ãò ÍŠ\JÊ_TíÕ:4ø5kÍëØê^Óg3ˆ3|i¬brÆtÙÜÞ¢~ׄÙZ×&ÄöŒ]—ý´#ÞIÌÙÈÅÍ5bwÚžã{‹ÝÊÝ«=.Pkè‹òfad±ákú’Ç7Ýe¡ï ß®,¬Î¬½_¿1ñcúççÍo[[¿üoë¢dPóè.L&Ö'MÃBó¶_M—Mˆ!ºÓ›0(3r“èH›df7–<Ö6»2‡?g9×[^J߀^ÐXèð=Q13ñ‰gR¼Òþ2×äPòV …;'”¤”cUºÕèÔí4Š5ǵÅtBt›õÖ ´ ÓŒ:Œ_˜Ìš®šc-H»x,E¬d¬l”l•ì쥄9œó—Qׇ»Û÷”ïMvÛí®èÁà1Kíñ,óÚïíà#ã‹÷òëô/ˆ ´ ’ Æ¿¹Zn±O8D¼Š¼u,Ú/F7–3ö[Ü£øš„Ôý»ìL$$N'Ý>X™âœª•&N“>èYF{fÙáCG‚²¬*f³go›Ì¹›Ûxü䉘“»OiçQNƒÓïòoŸ)+H)¤é ” K¦ÎöŸ«-=ZZn]¡PÉR¹Zõúü­êŠšÔ žµzuüõ¨ú‰†;«Óš¼/\lF7O^é½zþZz‹÷uƒVÁ6lÛ\ûó·:jožº•t;¨Ó¥Ë¸[ùŽhG/}Ü·Ú?wêÞÛûÃî=x48øäñÓ'ÃC/ž¾zöæùèðØÈû“/§_}xýñÍÜÛ…Ñ¥wßÇÖÞCB“–SñÓ5^Î’>Z~:1÷r^üKÚÂÄW‹¥Ë ß›VÖnlýÙ øãiè|e‡¦A·ab°JØE\+M­ ž ¿@×CÈ'Ò2ð1l’°L¬dqfUCVÇlÉì§9.rörr¯ó2Sdù,ùÃr/ /‰²‰i‹ûIdK^’‘þ)Ë'g$ﯵ³Añ¡Òœ AUTÍDÝ[ã f‰V£v½Î9Ý\½$ý gC#acZã“^ÓsfQæFÌc»ê,#¬”¬Ö­oÚ$ÙªÙ.Û]²rrxçXädïLïÜï’âªæº´»aß^Þ½#nÇÝM= j‚§ªçŠW‹w”¼Ï¢o“_ˆ¿¸ÿL@u w%èmpQˆc(9t0ìh¸É>̾;)‘Z‘?¢Ú¢ãbccã‚âEâ'J‘Ha=0”˜“druðNr6+B©‹i7Ó³9fd|ɼq8óˆmOÖǣײ“™ç°æŒç69¡w’áä›Sµy1§õóIù£gê â Š˜‹Æ‘S3ñ¬Å9Žs3¥-eéåvB•ÃU—ÎgU{×h_à¼ð½v¨®©>»!à¢q£pºiòÒË•ÍiW¼¯š]Sk‘½.ÒÊÛÆÖÎ|ƒÔAºÉ|‹ó¶P§|—A·ËÈžÜÞ¦¾¡þµ{”û{Œ?<<¸óñÔPÙ3¿a­⯤ÞìÍû:‘8-5 >-}ÁÕ]Î]]ú¶íÿßµ¥í3«@r <À!ò7ÒFά‰Ø«ø€wà”=ðÏù!§Ð!¤ª"4<Ó‚ý É&ÛüqBAœH–hyA‰PtÉ¿Àô°¼ ƒOÀWáðO?ÊŽ*@u¡>!g‘:]‚¾^È"yY¦ó+€uÆæ`û°›8e\î"î#/M5²[‰Ñ†Ð^¦]Áká3ðƒtt>t— X•p“ÈE<@|G¯O_Ë@fHbøÌèÆø”dNêCr™>²ù93•y‘%•õòÛ«leìfì+5œ.\D®î$5ž5ÞvJ"Ÿ>?ÿ…@µ`¬™° $2&Ú-Vƒìtq’>RvÒ†2j²rrâò B;E%”ä•ÕUäTÅÔ„Ô45Eµd´ÕtÌt÷èEéç4­››:še™w[ü´4°:i=a«bw 9ýtJ·\©»ûöJ¹å{@Ô0ÏwÞ¶>ý~:þmºA!{BÂDGÞ‹ÞË÷,!ç€UóÁw)—Ò2yfê!fdWä„×9I:5yºíLn¡±þYʹ­²ñŠÞªÆê‚ ‡ê¢\õ/É4ó^ejÁ·bÚ±Ä[ìÝú=Ô¾Œ»Í÷ß?dtx’ÿŒc¸æ¥Âë£jcW&ø§2?¼ÿ¨0—0ߺ0¿DY6[‰\+ܸñsì×þâ n¤Þ°©4Ù!u¦8p T!5„°1B¢´Šƒò fÄ÷_a2¬;Ãüþ.¼€bCé¡BP…¨>ÔZm‡NGòóY$+·Arñ›˜oX)¬¶ûÇÛƒ+Á½¥¡ÐxÓÔÒ,ЪЦÐàÙð>ø+t(:{ºjLð#<"ª«¬7~…!„á£ãÉ4ÏOÆ‘ ™e˜ï²x#ùhõ6<Ûöƒš›œÝ\YÜö<|<‹¼½”"¾(~kA&ÁïBcÂE:D›ÄªÅK%J$‹¥ÎIWÉ4ʶËÝ—UXVdP’T6WqSVËU¯×¸¯9§MÒQÓõÑËÓï3Ø0’75©7ýl.o·«×ŠÝzŸÍ€˜ýa‡9'çvWáÝy{1nqîóTÏ o/ŸI¿@ÿ…À”`æú0ƒðñˆ”(ÁèØèx„g'i\KiM‹?¤‰=Ü—•ž­ŸƒÊí?‘uÊò49ÿeAi‘o‰ÔÙåқ噕¶çy«ç.ܨ;Ú`ÙHnz{¹öJì5“뜭‹í:ªoêôé6í‘éc½ Ý›{ðêáƒÁ›O.?x.5üáEÙ+§7ø·×ßyÓ¾¯›4™ý:³þ1uõ9y~c!|qbÉþ[çw‰•ü5°î·ñð§âfñÿ£- N ‚TšŒµÒ@1hC`ñ½$dŠT„2‘д³À°7| n…'Q$”²âËQOÑ8´:]‡žÀp#«=3„eÂÚaOc_âxq¾¸&Ü:REÉ£™¤U¢=Bû¯ˆÏÁÏÒÒU艄¯Ä@â4RߘacØ`Ì& n3y’éÈíÌa,,Ÿ•ÂæÈ.ËAϱÀ9ÂÕÍÝÌSÍ[J)æ;Ë_)Ð(Ø!tOxDdV ç’P—t—:,}MfZŽKÞIáÌÎ×J‚Êá*·ÔHê½Z‚ÚGt–ô<õ‡ ­ŒL¬L‡Í½,–,C­¦lÜmGíݦœ7]ïÙÛåîAE{Ö{;ùbýn„‰‡–!ws‘ÈÅè[±¹ñ^û5Y“–“_¤ÞJ¯ÍÈ?œå-ŸCÎ]=1zªÿtó™²ÂãÅ©gcJƒË}*=λ׸ÕRëí.Ê51\úÜ•¾ˆ€””€ÄjIÉé>}äçç»s UÓ f÷¹I'¥&@‘ÌÆÇÇ;ÖeD@D ¼X-¯5§|‹@Œð R©tóòò E+iìzŒd½Ügƒ"Õ„hBB‚§<ç1ëÅ®“» Úr_h@D ÒX­tU®‹@Ù0±ê¨¹¹¹>|8Ö®][Ø»Z¶©*¶@£ffÍš…–-[ Y´þ0V®ˆ€Ä:‰ÕX¯!åOb˜…* Uö R¨æää`Íš5xþù瑚šÃ%¨xYëß¿?233]T©²·‰÷÷¶V¼R«D" ™ÀÞ–¬"—Re V«&T³³³…j«V­Â–¶"þ=ŠRþX 5cÃÔ³jD䊀”'«å©¶”Wˆ1&Té²W•‰B•={ÖëcY®ÙÙ½{·«–•VãV+EÕ«"P! 
H¬VÈjU¡D rL¨šX¥P’Xÿ`)±hM¨²nlÜj°ðòX& õLb¹v”7ˆqֳ꫚••å„’zV£Wy¬Zötshë‡=«ª“èÕ‰R(9‰Õ’³Ó"P© Pø˜ø±1«6 €½zô“‰ UÇ`}˜PeNüuœ)U(>‰Õâ3Ó" ¿ ø±;Š"Z $Š%²‚y~¡Ê:ñ×Säs£E@D t$VKÇOw‹@¥'`BÈzWM°Vz0Q`=ª¬ Ö‹ý ˆb–”´ˆ€”˜€Äj‰ÑéF#`‚•âˆc$iÕ³jt"ïZ=˜XU]D¾”¢ˆ@ÙX-;–ŠI*% !³Ö‹G‘$½ÇÁDªõ¨ZýD/GJYD@JN@bµäìt§ˆÀo(†LQ(ÑÊD€Õ¿^¢—¥," ¥# ±Z:~º[DÀGÀzSÍõ]Òa¨¢]IŠ€”9‰Õ2GªE r ÚŸ­b³”û« ØØ¬3åJD 4‰ÕÐltED@D@D@D Ê$V£\J^D@D@D@D 4‰ÕÐltED@D@D@D Ê$V£\J^D@D@D@D 4‰ÕÐltED@D@D@D Ê$V£\J^D ü2–¾†~½»!--m¯zýh2³óK”].Î+ðÉÒ-È)Q ºID@D (ª%ˆ€ˆ@y&P“Õ«WáÔ«îÄàÖ_° sßúnwÚ̺ ýÚÖGb1 ¸'Ο±5+Û[ÂË»9®˜(¸ˆ€ˆ@‘¨gµH˜HD üˆ‡`àÐ!2lþzéÈËþ‹×§#77³_º½;Äõ¼^vï ¬ÍÈ5hî–…xìº3еs{¤ý®76 åŒJ " 1M@=«1]=Êœˆ@ÙHǮݹžúÌG~B–Ì[€œ¬vhT;¿Lÿλê;œ|ådô®½te×ÄÃçµÇôcÏÀ-uÄä[ïA¸á‚cQð蛕B)«Õ²«Å$•ƒÀ6æØßõ¸¸ÐŸpöw­¼“•X-ï5¨ü‹€‘ÀBL8¾?.Od_h²wmC÷q÷¢[ãÍxpì[ö÷cÜIýѰzžÞ¹§>ÿVõ^ƒ{WTŤ[ÏǨ^mPmPGlœù:^ød9å _JMÀ/,ýÇÅïêØÂ–Öõ‹U;670n¿¿ÿ80\¬œK¬ÆJM(" a&ƒ‹¦Ü‹¡íšxZ5ñ5ë¢M›6h\e9–oÝVMš ¹z5TMŠÇ¡G‚É?aÓö|lÁtoѵ«%"Ñ®S²r‘ï „9ËŠ^D êLdšË ñØÎÍ=¿]÷»<6ãÇüB¹Á¦ùºŒÃïgÇEõ•‡HúK¬F’¶Òˆ"î8ìð#гCo2Uââï}RÛY€r‚½eË–8ì°ÃлwoôèÑ£p( W¿0÷ ×¢æ)T8‰ÕPdä/"PÉx½¡zO¤†ºTÉ ©¸"P.˜H¥kÂÔ\Š3ö*Rœ~ðÁøâ‹/Ügùž={bôèѸæškÜøNŽõ¤(¥e¯j´iYƒgy(´i«U«ö»è)Lûöíë„+}FF–,Y‚o¿ýO<ñ®»î:zè¡2dˆ³ø&˜)TyÌ4ÊB´J¬þ®zä!" " "P^ ˜Pµ^TŠSöÒrâÓôéÓñÚk¯¹O÷ Àý÷ßïÆlr)*…)Ve7&Ô9¶–†\)`ûõëç„+YRèøá‡¸ï¾û0xð`œqÆ®·–÷’?+ï3·¤\%V+ûÓ¨ò‹€ˆ€ˆ@!`B•Õ¬}Þ~öÙgñôÓO£K—.¸êª«\¯ Ç`R¤RLÉìŸ{I)äi9IŒ³ÒÒÒp '¸!o½õÎ?ÿ|×ËzÑE¹•L¨2f U Ø’V‰Õý׮Ѐˆ€ˆ€”Á„*Çbr æÕW_í&ñ¯^½Üg~Š.™’ xe¯+m½zõбcGüñ¸øâ‹ñé§Ÿâ?ÿù³f`ÆzXyoqŒú¹‹CKaE@D@D@b’ŪÿÓ¿ ÕiÓ¦aÁ‚˜2e †îz%T˶ 9éŒë¶vëÖ /¿ü²çz÷Ýw»Ulò–õt³žŠk$V‹KLáE@D@D@b’€ V #Š$ÎjñÅÝgÿC9Ä}ÂŽÉŒWLQ´r½V¹`ï*צåÄ,Å`Xïwq«Äjy@T ¨¬LYïõªr;gþsÂLäp<+뀛?Y7Öó-±¹ºPJ" " " 1@€â‡Bˆ®ÍüçXUŠUúŸwÞyøå—_Üq d·BgÌÉ›[ÏZP¬ú{W‹ @=«Å%¦ð" A lÛ¶ œ zíµ×âÁ Fž" ".&Xm€-nOÿaÆá¯ý+žzê)×Ó®»íƒ*¢'ü±ÀÉÇ{,Ž9æ·4OD3 ÄD (Jm…{ó(–è?~üx¬X±>ú(^zé%œxâ‰8餓ÜßI²Z¡’$s®µÊqªÜ–vìØ±3f >ûì3'Rý½ª Ëú0[ÔU$V+Ô#£Âˆ@ø pVíŒ3ðꫯºL‚¥Èõ%YK/X\ò+¾ ~øá÷ùíã?v»ñ}ôÑîåAñZ«V­âE¨Ð"P˜ø¡Ë¿«ìѣẠGu¸KÕW_}…7Þx<ð€Û2”ýû÷wËYU<•$aÍ"Ì›73gÎÄ{g ¸Æ\¿¶iÓ¦¨Q£†{X¯ª Vöªªg5¬U£ÈE r`ãÿÍ7ß8qJºhÑ¢ 8Ûöˆ#Žp‹mOž<9d8]£g)prƒõ~³Çû¸ãŽÃ)§œâ^,Á¶X´ûäŠ@y#À¶Ê„]¬ô§árUœøÃ¯ §ëׯw;0=÷Üs˜4iÚµkç¶íÞ½;;ì0‰WßÀv…¨Üµê¿ÿý¯Ûõ‹Û±N:ÕíZů9©~ÁÏ: å«‹–õâkþÁ\õ¬£"?¨äøk˜½r&r¸üH0ÃeJ¸]!{îèö—Þºu+6oÞ,¸ü"@€‚ô‚ .pŸ<9~lÙ²e…©²ÇûùçŸw–=¬&\Y®…˜tP Pø˜¥02qD×Ī‹Ï:-÷²oݺ5FíÛ·ãÇÄ÷ßï¶_]¸p¡ë!lß¾=:uê„Î;£mÛ¶N UôÝ®6mÚäÚŠSZ²X¼x±úäÀíV'L˜€F9qÊÀø.f¬GÕꃂ5P´»/Ð/xì¡t."Pá ìØ±ï¼óލœ(Å SÁ ¦#F¸Jè²Ñçò$;wît ~°{äYÍš5s[Nœ8ÑõÍš5 o¾ù¦{áXNXßcFKáÊgØãJáªÓ’ÜòDÀÄ*óLAd¢5T8T‰ímÆ ‘ššêö·ç'nþ°ã×¥K—bÉ’%îÇ;ÏÙÎqQöÂ2<ÿÖZ´háÜÆ»ÞØXEaÎ…5kÖ8»jÕ*×Np¼)ËËÞN–-ÍÛJ•½§tëÖ­ëÚûE)g0‘øã!Týøý%Vý4t,•Œ—áØ-~Þÿý÷Ýd„`ø š½pÜšã¾(fø2àç6ì2±K€=Cü¬Éaìyå•WÜ3ì-1CáúÌ3Ï8˱}&\¹Û„«Q’«üâ‡ÇÖsgÇEÉ7š‰0 OÁvøá‡ï3›+œXʯM{Î1›7n{$ùÃ[²Íäߦ¤¤8ËcúÑÒ_A8cž=µœÊÞIZúÑ¥m8C k½–üÆv˜"Ô,;˜Wÿ9*Û|ŠLö*ÓrXíA„#<Ò o¥L›y -i;ÀºðׇՕ¹Æú@®ÄêéºT0üõlŸ÷9öˆ I0Ó¦MŒ5ÊÍšågŸÀ_ÑlÜCÙÀ°Áâ—_ø„ª—C=Ôm‡xã7ºÏœMë‡Ì—ÛÓO?í,_¨~áÊ—–ŒÄ"`â‡~fK’gj•fÛFŽì9¤hd;J?ŽíäP¨-[¶8¡ÈZþMÑ¥Àe/-E$Ï–BÓâ0—q™?ýØžRÐR<ò˜ùâ¹_ÜRhs¼(1óK—b”½£<çß2E2…4ÃY\t-nÍeÙ~¿`5–Åq%V‹C«‡ &HøÉ” Šƒ@S–ðq÷œƒâÙ{J‘Êž€P†“ Ø{JÛµk×PÁ‚ú ¤ äE© Ö/-…+Ÿ®ì}5ÃÞ®KIË—Ÿ .IÃÞõh W{Z>å–-{~Ê6ÖÈÅfÏG8Þqdc"6T‰(ZMœQlò8Ô¹½›-ÏŒÓòm~L“Æ_/ÁŽùÎ1ËëŸvN—ç´Ÿ‹4ÿYyJ“”ÄjièÅð½|8ŠòGÃE(WY l8˜yò·F …G¢`l 9¹†â”"•c­‚6\ì5嚃´­Zµ l¿~,—•ÏÆh4ˆûÍd%»HþVþú …?Rh§L™‚¹sç Wö™¡p}â‰'œå¬_¿peïL8Œ½ä‚µeLÏ®‡#íʧ¿â±óy¢±óÊʧ(åf§LÙѲåõØØxû-y ÓZonÔ3]2` 8] ,–‰rç5³á(2—(âD Ô×_=äŒ|~.bÏÅ?ñr}¼²0Vn–™5ËÊåax.9OGæöì™È(jºuëæ† ÜtÓM˜3gN¡påÄ 3üÌùøã;Ëñm&\¹KPY Wk«ØNÙ1];·¶Îò%·t¬}¢kÏ ]ž“µ]·k¥KMwWþ¿Q;6·8å—X-­kr¿¥X5k¼¹,ï“)6Þ4Ö¸›kblÙ°û­…w7–ò?Ž‹âÌ}öžr&?÷3\øš¢‚3øýc°‚…/ŽŸ•™ed¹)T)X8¡‡½qœ=Ë|Ñr–M ÐsWÊû†%sãÌO’ƒFËYÍ«ÆE¹yÝ/XyOq 'fÑÞ|óÍøî»ï …+Ç=›á3øØc9ËçŒ=ô*0tèÐ W¶M4tÙnñ9±öËÎù™ÕÂØsd®» ÿŠEÀž û[æÍöƒÓÚ2º¼N׌D«‘n«á&ÁøÙX³·Fž :w.E³víÚBQªF=|•b>Sàñ»ï¾ëfW²á7ÃÞÎü‹ê²çŒ;H±õco-T{qÞÏ™¬ì9¥x4h{ù†)í9ËaÖÄ*…*—w¡xâD VöürÆ*Å*ŸK%¥ÍCe¼ßÏš³t«W¯îXÛD ž³L¬Z•æ¹ëÑ£‡Ûíç–[nq ‚¿øâ‹nUÿ:®®ÜÎ’–9üÂÕÿ7p 
:³6Ì\>3ü‘¥vì@äÊæº='æò‡0‡I¨– _ÅR|{ß Å¿WwÄ P³là)`hÙ“Åå5^xá·nZ e¹Âg…ŸÀÙ›hbÒƒÕ“½ŠâçŸ.œ ÅCBýààâÕì=¥íÕ«W©Dñòeù§Ë—-­ (+;ŬÄê¨íz XåÐ TŠUºäouau㯯¢¥:·¬¤åÎ5ß~û-L¸._¾¼ð¦ôôt<òÈ#ÎrV2…+×qçF2¯)-2'Wòöÿ8 PåpŠWòæu†cýX]•5þ(¢½í¶Ûܶ¼&\ý“ú¸¾ãÃ?ì,…+w ¢pÐãÏç%˜PU;VÖµVôøøüØð»‹ÏëÉž+ó—+á" ±.²Qˆ×z Š60\Çþ2Ñ!À—,ëºÙPõÁp}ô‘¨üÌj‹SŠöNY*Ç'F˘PeÙL8™Ø`9éÏUŠ'–Ï„ªÄjéjÌĪ V2¦8¥¥`õ÷®²^ìÙã}á4üáD{ûí·»[&\W®\Y˜,…ëC=ä,'÷ù…+óØŽñoˆ={|®d¢C€ïÖ Ÿ%>w¡Ú±èäP©Vt«¤†ÙpP°A7¡ªF>ú•k¼‰6ò¬'žÓp,çÛo¿í>ñÏœ9s¿[œsÌ1N r¢×¼Œc‚•¢Èÿ£?ý¬G•Ï%ËnBUâ£ä5Èç‡|ù<ÑÚ'­Ö³Ês« IÓ»woÐ Wníh†;þ<øàƒÎr»Kþ£xíÓ§{Nøã†C«þgËî—¬Z>k|žì©µc‘É…R©Ì$V+Hí[Cnb€<Å*»VAŠZ®ŠÁ: ¥˜°ºáX> TnsZ”-NùyŸKQôÅš¡2ÑÄçÌÿ¬Ñß/V­ü #¡Zúš &XÉ›ÏEª U´VW¥O¹x10Ý#Ž8ÂÙ;gÏ.ãÊÉ‚f¸]¥¿Ç•?ʸÝkçÎÕŽ¤(¹lÃøeÄ~ño™Ï ÿžYÇ2"N«á¤¡¸M Ð¥eCB±Êž,62v=BÙQ2>làY\êçcoæ>×AåXÔPuÂmüìó~°-N}QÇÄ¡ ºE4æg=~öéßzcXv‰ÕÒWŸ‰Uº´ÖëEa6P¨F[T0}öšÒÞyçøê«¯ …+'P™a«m@ÀU8ɇK2Ñ!À÷ˆ}áß3E«ýóÙ“pX 7áÅφƒ–B•B€ -xúËDžù?ÿüóøñÇá_P=0'Ü9ˆ½§©]ºt ¼óç þ–‰UPþ›=§æÆ|áb4ƒÆØ\²¶z0J—þfy=– óÓ·o_g§M›†/¾øÂý½LŸ>ëׯ/Ì*¿Dp“ N“‰¶e«tù~á߯~pF§.*kª«¨æMØçV6.´ô—‰<rçZ¨†âÁ¶8åø¼ò¾Jƒ‰ L~Ñd/6ÿËMÏcàQòóPì­.Ìe8 [òÔÂw'óÆ¿ Žqå:®Ÿ|ò‰Û€€^°—•†Ãd¢CÀ„ª½[ô7z¨Ì©J¬V Ú·Ä„õ°V "–«¢°G±I“&®—ˆ/Úâ¸ãŽs õsÁ|~ªeQØþr°GÏ …’ÿ™ô'n… MÀ¸Ó <<Kl]a¾¹¢À!‡‚ÿû¿ÿs=®\ËõÍ7ߌ­ŒV¢ÜX*Å*zU+QåÇHQ%Vc¤"Ê*´ö Øz¶Ê*~ÅS<ÜAŠkÜr‚…+wtâ²BÑø“„iôjØ/R£—‹’§ìoÃXŠÖfÍš¹ ‰%Uw–†ß#|§Ø»Åê¨4qê^(‰ÕâÐ*aýˆ5,å Û6‹ÝºuCÇŽ@õ×M…-ðo+ï‚©¢×Oy*ÿnÔ“Ý«LmWtI+õP4/™räo I(·¥Âe5T˜…+° $%$`~W_†J3 ·±^¬>üuÄc7‰ÕpVü" " ¥" AT*|¥¾YüKP”’€Äj)êvðX [Å," " " "PJ«¥¨ÛE@D@D@D@ÂG@b5Ll·lÙ¦˜­ˆ€ˆ€ˆ€T•F¬FbénmúÊ+¯¸…ß»víZyž"•TD@D@D@ÂD BŠUîÃÎíá̬\¹Òí?½páB·¥ßêÕ«íR™¸sæÌÁÅ_ì®>餓ÜN+Üñ£òš]øðú¿ ÇÁí‘––¶¯}~Ù´%YìdÉÓÐûîYX½#»ò¢UÉE@"G k!®ïß÷iÇÎĽ3¾GF֞ݜ–ΜŠÞÝÙ·;ó|²l rü ÝÆÙ˜Ð·N¿a6íÈ*,CÆÒ×Яw·}ïzýh2³ó‘µb&úq1>X´yßø cÐT|rS€3Î8/¿ü2Z´hájð”SNÁ?þñ·;ÅÓo¼?ýéO¨Y³f‰kø×_Å3Ï<ƒÇ?þøc‰ã©˜7 }á\¬è: 7Ÿx8êð'ÑovbýNhQ«âJPð”NGãâÝ­P§j…üU"ºED ¼r°fÕ*t=óJœÖ¿-’ârñ¿Ï^Ä—žˆåÏâêѽ³åX½²?®»gZ7®†ì+ðö]×cܵðî´3Ѷ~ —Å¥Ÿ¾·ÿ‚uó_Ây§@ÿZIHô®äd`õêU8õª;1¸æ›÷ IDATucÄlÃÜ·þƒÇÝ€6³îÂa9[°jÕ2ìö:@üÚ7¼åVì"[J,V³²²pþùçãË/¿D½zõðÀ K—.`⥗^ŠY³f{£?üðÃèÙ³'víÚ…n¸íÛ·Çý÷ߎé¤hìܹ³‹cöìÙX¼x1>ýôSpן»ï¾ 4pÛ»ÝvÛmxòÉ'ÝV•ôïÞ½»ÛW=¨¦Ž}ûöÅ´iÓ0~üx|ÿý÷>|¸ËÃòå˱yóf—·jÕªáæ›oÆÈ‘#qì±Ç[¬ò3?÷§~ì±ÇðÎ;︲ÆVuÆNnòórÑé°#1lè 4Kf“ü›‰¯â5øðÜõwacãæXóÞ3xó‡qÙ¿qåØAhP£*~û.îœz^ßz(ÎúÃ¡ÈØš‚Ó.ä­éØ•‰¼ìuxîÖ{CÞŸ¹ü[ÜsëåxòÃè:b<&\zz´¨‹Ä’(dË·\JI`‡WêN½û£ß€ƒQ»jôé‹6Éçâ¢k^Áè>‘‚í@÷ÞпÚ4ñ:@ ú uÝ LŸ¼»2ò_ˆÇ:¼}ãËxê_°áéçñÅ÷«¼6©“ì‡÷@ 8Û7FUä£_Ç8<0ã,^ŸŽCRˆ=¿R²W¡EÀØ_ŠÙ:t(>ùä'<Û´iƒÑ£Gcݺu ÿgŸ}æÄëÿøGüùÏÆÏ?ÿŒŒŒ ¼öÚk¸ë®»0iÒ$×ëyúé§cýúõàgúûî»Ï‰[ Ú/¾ø=ôvî܉1cƸk7Þx#Ž>úhœyæ™ HݰaN=õT 4<ò¾úê+ÜqÇ8á„P»vmŒ7uêÔÁ½÷Þëü(¨iØÛÊx×®]ëö:.J¿ûî;L˜0AŸù‹Ëféú…^ÝÏw?øâû9ßcõÖÝÈݽ _=÷*n¹òZTr6.û˼rëxnÎ lZòFóX˜v &ý©'ý×]¸÷ö—°dã.l]ý!\¼Ûv¦‡¼?cÃ\\0æ\¼4WO½í<‰³Î½ 6ìPs響€{3ª ª×ù’˜X5j7@¿@î®ÅØ”™…¼O Î_ˆÙÿƒo¾ý/>}÷YLžö5?·3ÕIò„*±ôKügCFžs.Î:³ {ïklÈØíË@:víö†Žåæ#¿ Kæ-@NV;4ªŒ„¿¥}ÑëPÊ9õ¬nܸ+V¬À³Ï>‹Þ½{ƒ“‰Î=÷\¤§§;ÿ—^z =zô@¯^½Ü§rž;»wïvŸÍÙÓÚ¡CŒ1ì¡¥x¤à¥ lÕª,Xàü7mÚŽ¥p0`† âz5ßzë-ÄÇÇ#33gŸ}6Ú¶mëâ}ÿý÷1lØ0·ûqÇçzN9NµÿþN³®¸gyÓ¦Mñᇺ^ÝZµj­BöÖ>ýôÓ.ÞŸ~ú)h˜ýyR¸Ûþèû ‰k&Ô#‘–¥‘X£ rž» g?_ñ¾Í‹Ÿœ…¿ö‰Cö®LŒœôÎ>m'ôLJ÷¾Ž;2°bÉëXÕcî9,iR=ëe`ØYï¢ ÏëYÈM@Rvâ«„¼éG/âÛµqû¸spd‡Ò*œp9æ,?íÔBõË¡\("À-E«U÷^&¹Hpßå½ÏüYÏãïS¿G‡jU±rÑÏØ¸~#zíÞŽŒ‚|ïÓ}.~|íFlì&º¤¶@í“ÎCöÙ/᧠ǣeÝ=C€…˜p|\žÈŠíÛ6tw/º±÷u}óÆ`ü¢ùË/¿„1…ðFw`xKTùb/‘XÍËÛ3°¼eË–¨Zµ*6lè>÷ó39 Å ‡Ðò³¿_´¥¦¦"))É}â7nåvä‘G¢I“&à§zÆGAL!ˉR¤Œ‹†=¢ü¬ÿÁ¸Þ\K‹Ã;ì07T€ñ2_t™W+`šfœpÞßrì5~÷Ýwp¶ûä@nfê3à ØË>¥Q3$Ç/Á.¯AnÓ©êx=Õ[bX»¬JÈÄO³f£ÖÁCѤ^mT¯–äõ„§zC>¼1®|+ÄaÖŒ q?³+Ûw¿‚sGÎ@UvIäîÆZïű|ËnääxbÕ§œ‹^…½ò¼ ¼Þ{kÏ4ÞÍ@»óñÜýcÑ©q2ò¼wÖ¦ïgàäK®Ã—§õE³NÙxãñ ÈXvŽúê_ˆËÙuÞ—¢·¾YŠ#Û6tãV½– M¹CÛ5ñâÍC|ͺ®§qJudÆ€XÝ[pí%™×ǤÖ =c¯ïñÞŒ¿_tâçfän_…kóкm*j¸){ãØ{ä¥qåpLownõž¥5}CêöªG%«ÁfºsüéGáÆ˜úÉQXîOZX®Mˆüñ˜1c†ËSà Yö¼r(€õúò‡ôë×Ïõâò|†c\ÿùÏbÑ¢E®\îÀ1±2L×Ên—åd/ôöíÛÝà%K–¸áêÀ!Ñ2‹Öý?/üFQhY‰Ìœ==ó®³ô·Ìíù;ŒGÛî‘èM¦ûäǕغq!ûóTìØàý¡z#¿~÷·êó°ÃFmFÜÚÿb}fÚÔÙ ÞÀõ“_ÃÚm™žŽ ¥+"P^ ${_±x 
–yvÑÂï1ãž+pÁ=K1êÆqPCoL)¼KVa±×î.Y¸Kù/½îlÌN-«ãë'ÂæQÇaôqG{‡aÈУpÚØ+Eÿö†'y_]sȯOUÜûŽ*±&T¹4#ß'–Ç÷ çšð}Ã÷ß?á~Ç(~®£Îw<ßíìÄZ¶l~øá7a›seÂf¼ºëÞ¾'÷ì:ÊFýñ<úèåÈù`*>øi-2½aÈ+>{ ö< ÓÒ0ôÌIøè—tìXö&ž6 ~¼ï|˽÷Óò ἕËä ‰ž3ãYÜ0a4éÔçMz‹Ò3]‡OÆêïðÀå§¡‹çŸ6ôLL{ý{ìäåuÒ|öÌ-èÙuÏ’j—=ø&6ìÚÓñ“±b6n½°阆3/ž†oVn-K¢½«ÑWÛžœýÏYÿ“'OvÈí·ßîÆ›þë_ÿ•W^é{`9V•cZù q|¨¿Ç“Qžû’qŸòŸ{î97 €ñò<æ˜cÜÐ FŽKåÄ.ƽmÛ67™ŠBšéŒ5ÊMº:묳ÜJ™ô7QGñ[œžVæ‹—–ãw¸ø?{[ù‰ ü\ÊÃqõIÈya2Îx±Ê>cVÚOÀ»OŽ@}ï÷f5¯;Õ:+½sþkzÌükùT\9îX\™vÕ·"/±R’¸Š€•< ƒÞÔ=b,îºègüã‚qcvŽ·ŒL:ιóÖÔûe·Ë"`7Ëóמ‡7{­”÷)(»ÙawùSúá¨ïÍß”ä…Èyçò¼7‹ý?ùÈl;7†­[·®³vŸ¹þtØ{J±Ê岸¾ëÔ©S]¯°‰f»§¤® àP®9û¸×Ûú /”4º r_ª×kŠ{V Z¦ºû<;q¨é­¹J³=s ®t#:!NlŸ…[oyÏ¿ë%yím4ý-¶P÷órR­úhÛ±®7t`Ïøgµï¿A“#"P<Þ8úz^[µŸ¦ ‰5ëyB$Tˆ4öVø½IDН LMõ¾õ†0q‰5½w`èë!n“w´ä¦cÁü<ô=½‰÷ƒÆë_9 c†\Šm;v!+c36µïíÆ$ÇÇ{(+½±ÉqÞ²hU¼Õ%‚…ó^^qÛ€SF‚æõ=1Y­ :ôêÄì×¼O÷:q¶àá3Gb¶Èò6—X·¡‡÷1©ýÿ„ †ŸÉç«=Ÿ~'^ˆ Æ÷Bõ,oR`ÁK8gä«ÞdoÚrž7yÝF¬Ü’é& WóübÕ”X¬¨@~¡z °E¹^šø¸V+4Wø÷¿ÿíV (ä‘+ÐrÈ{ƒeŠO VûQxâžZøjÎBü°` ÆþóAœ4›é[Q H±rüt‘*ˆ€ˆ€””ß4{EÞ–ï?ÆkÛ3qn½šXóùý˜òÐ&Œ¿åßèß½%vϾÇ?è­TäuƺþØßöã]ñé}!ÃqHôâ_½^ÚÜ==¸9;EA~g4Þ5\}Ž<û\:´š$-Ãÿ ¹ßÍîÈFcŒ¾æ ŒÉõ–ƒüeÞ¼l*¦4è+j¯GAÛ³ðÐ]§¡•›€œŽù Ö¢s¯VÞW€½e()‰pÞËCʬÜülOÉáVÀópÆÏñ´2Å'W5‡ôõ68÷<\þ+pþIƒÑÊû<¡‰±Åg©;D@D@ÂK`κeX´| .˜Ùï=î­ ~=– úŽîÜ qÛ‘¾­!îz(Z×Êħw¼‰]9;‘íéBo…2ï¿ lÍð&†mÙ:œ7Ÿû§i⫟½å϶,Å3<„#Ú¢¶·sÚ¦ÍõÑþ°î8´]C¬þüeÌËÜÙ¹X:ã4Œ½âìJiþÃr°+? õÒº aí·ø5³*Ú´k]?OÇäë^Çš­±?9l=«á}<Š;ÇÏÒ„£Gµø¹Ñû#˜„šž•X%ÔÐÛâÅIó’·„™—Éü¼,ôówÜ1ñÏèÐ(9½Fâð¤KqÖÐéÞ´»\ Öùo>„kžé‡†{ûèÆOÁÉ%aæ Ñ3éÚ áºcdFZuÙŽ § B•Ü]ØÑä8ê¤ÔDo©›Ä6#ðà—Ü&ñÉ)hÔ¨6®»bâ´@“ÚñÞÒZ]‘•PiMªãá/{ —Šºg~ƒ=)¹+}Ò½ÍvR5Gj“zÞ7ù8÷Î1|­·ü'7m„¤¬¿cS^]´nZÿú¼7V®ñ&]y÷ÖmØ-S¢†·űãoÇaǯu«yS›·(%VËÛ߇ò+" " "eÞÄàz©ž ¸*ÞŽœ­Ú£~*7zˆß³TcÝúÞšß{–)kÕºMáqèpÉÞ'Ïx+µò†­&xñì1Þ ¿ Z¡£—¯‹Ö[݈cN뢾· 8—„LJmƒºMÓ<¿8Ä{ËkÙˆÔjµ MÇznVN`7/`L‰Õ˜®eND@D@D ü XôIÂß„ª+ÿ؉Êá8Λ4ì»l<â½ÝÎöš=BuϹ·=¹·êR0CáüJ°Ð±ág=6r£\ˆ€ˆ€ˆ€ˆ€ˆ€€Äª†E ¬ ps ’Ð0€’³Ó"p@=ö˜ÛCû¨£ŽrË™õìÙó€÷(€ˆÀ¾¸¥¶Œˆ@å% ±Zyë^%3n!<þ|·õïþóжjÕ 'œpF~ýú…9Š^Ê/mÛ¶áõ×_Ç[o½…Å‹—x{ìòK@90«FB®”1à6Á~³bÅ ·Ãw9kÒ¤‰­'žx" òÖÐó¶–ÊL`ûöíxå•WðÒK/áã?Fnnn!Žädo+JI@oÇJYí*t$$%%á¶ÛnÃÖ­[1{öl|ñÅعsgaÒëׯÇý÷ßïlݺuÝ0ö¸><컬fB"exã7ðüóÏãí·ßF¨OþyyÜòGFD 2X­Œµ®2GŒë‘G‰Q£F¡Zµj˜;w.Þ}÷]÷RÞ¸qca>¶lÙ‚'Ÿ|ÒÙš5kbĈ®×õØcEJŠ·Ó‰ŒT ™™™˜9s&^xá¼ùæ›ày0Ó¹sg 8]»vÅ?ÿùÏ`Aä'"P H¬V‚JVcƒ·ü:t¨ëAMLLt½­¯½öh9<À {š¦OŸî,ï2dˆ®Ç<7nlÁäŠ@¹"••å~¨Q r,ªÿ+ƒ¿ ¦øÃÀI‰µk×v_&6oÞì¢cJF@bµ‚Wx·²‰Pü¹s{Œ8Võ®»îÂwß}çÆê½úê«øùçŸ óËO¢ï¼ó޳\p›”Å¡œ¤•––VN"‹8nûý÷ßw=¨üQÆISÁÌ¡‡ŠSN9'tZ´h]»v91»cÇ7±Š/2Ñ#ÀvŒuàv<òŽyî·ÑË™R®,$V+@Mû ;ö7, ˆåºþº°ú1× Ö£GÐN™2 .t•MþûßÿZpuÏ>ûÌÙ‰'¢{÷î…´>øàÂp:hàØÒ?üÐ TþøJOOšŽ;âÔSOu¶S§Nn2"'T±–†#fø7$=~þþz‰^Ž”re# ±ZjÜ5&tCm·VŠÓE!}Xí/Ó:tÀ•W^éìªU«À>…ëçŸÿ$“9sæ€vÒ¤Ihß¾½®ìuåZ®–æþÒ‰ôµÀ•"~EN/ÚõÍRŸ~ú©¨ÂâíçÞ®];׃J‘Ú¥Kÿ¥Âc–Å~àùÝh—±0ƒ•ðÀ_v¬ú¨„B‹,±EøáHÚzŠ$³jTÂAºhqZÐe#_ܺà'Ñ‹/¾ØÙM›6aÆŒN¸~ðÁ…=PÌÉ¢E‹pË-·8›šš .‡Eá:`À€ˆþ`1Az ·hôª(ì™:[”¸Š†uüå—_:Ê¥¦¸ºE0õ…ù‰Ÿ•_ŠjXþÍðoG˺•ZxÂY;V’6,<9R¬•€Äj«q6ð´Ö¸°‘çyÿþý#*Z*ÖgõêÕ…õÀú°º)QdÞM 4À¸qãœåX>.–ÎW.÷㟬Âtï¹çgy'fQ¸6 \ \†½k41<¦̆+ýʯ=W® ÖËÊ|óÍ7N ¾øâ‹à³Ì4oÞcÆŒqõˆ#Ž$¤ËÁüÒòï†m'ò\íXHla»À:&ÖëÃê†õ$#‘" ±)ÒHÇ^VlPh9㜠ÇArb '.ìÞ½ÛõÊq|­‰Œd±Â%Á†›8-… —§ªQ£¸€y­ZµÐ¬Y3WV'VG¥Á¸O;í4gYŸ³fÍr•ëUúgN³7öÑGu–÷Œ9Ò ×cŽ9Æå¯´ùàý~Aj"•.‡,ع _°ó²HWqì×Ég†Ï˜ =³^ìœÜKóìqÉ5Îâ§]¾|¹K/ð?nrqòÉ';ÊÝÙ˜^qåÑËÁ6ŒmÙÍ7ßìÚ1.seí' r—=oÅMOá÷ g2·w†µcÕ«WwíDÓ¦M]=X8†µºCˆ‰ÕHPŽ@Öx°11ˆ‡н*\‰–=x6ô|©É”Œlò¶*w®“JËew(\YV'ty뫬 _,Çwœ³¬ÏO>ùÄsåìkÏ{cMpð¹àÒ@ìqå½ì-©1±j‚.qsƒµk׊ْƯûŠF€Ï•þxiÙ²eásfÏ›?Œ… åþøã`ï)ŸnwÌð¹á ~~âçz–N°°ò³¼Ù߇ý]qŸW–‡Ï0tóÛ1«jÇDwÿ×Éœ¼M¬²c»b?ºÙŽY7ÃÙû†±Z½í?]ÒX-¿˜¸Û k@üy64ֳ‡_Ö#Ák2%#@æ´öÀ¼É—b•ÜynB–õb/b¦fõV²”ƒßÅ4¸.+íÝwß ~²µ Z~ÁÁúçbì´¼‡Ëhqœ+-ǼÕØª ª|¶Ö¬Yãv%*N|EMWáBà§rûAÊ¿w3¬gëm5¿@÷—_~)üAã_BÍŽ»­ñ9¡@åsæOî$Çü›°¿æ×Ú+{¾˜ú±|Töƒ›Ïœ=‹%I·²ßCæäJæl¯hM°²£e»faì]Ž6¬²×…ÊœÀÞ–,øuù–3llhí2 kŒüBÕ†H¬–¼’M¬²÷ V6ôÖØûÅ*ë&R†uÞ»wog9ùê§Ÿ~*®óæÍ+ÌŸ>úÈÙ¿ýíoèÕ«Wá-®2p C‘`b„*Eý(T9ÁF&røŒñÇ­> &0ÌÏÜ¥K— Ô~øÁ¼÷qù•€»°Q ²ÇœÏz8Œå“ye|6ùƒÏŸ3–Í~òãóÆ0v=yª 
qwòe[Æ6‹œi­ã±µsÖîñ>ˆ‰ÕHPŽ@þƆ<okÀy ëQ5¡Ê†^=%¯ ²eÏÆÛrrfcÏ´ ì¥åòYÜîÒVèÖ­Ûï@™P¥Ëg‰‰"‚=_z®~‡+büLÎz ±g”íi¹cš}âç†Á ¿pˆ*Ç8ó9Ž„ñçן¦ùÓÏÄ8Ÿ9kÃX>™’°2lŸh­½¢KÞlÇLÀZÖ‡ŒDŠ€Äj¤HG 68l°é²‘1±Às60ltø+™ =x†¥µpÈb…K‚ 6ùcr§hµlàiéÏ0>Ú Z·nK.¹ÄÙ_ýÕmùÊ•ØÃêï‘›?>ho¼ñF¤¥¥ ×¾}ûºò°&TM¬ÚxB=WÑ«eÖ­ <ÖͺuëÜ.35{öì ™cûpì±Çº¥¦¸å)Ï#i˜_ûáߟ!¬ô§h›µö‹®LéøÛ'kÃè²í¢e=ð=B? Ë:‘HXå¤a3Öè[ãoàM¬2¬D…+¾kÜM¬Òµ†ÞyºVtyÝWüËþŽÆãüóÏwvëÖ­n+ǹr›Wκ6³Ü›>mÚ4gy? Ór-W>C|¾(tÙ{O¡¤çÊÈEÞeÐnٲŭñú믃=èÁ EȈ#\*{R9©&ZÆþ6èòo‰;æg‰ÏÛ/>_öƒ[bµô5FÞfÉÜÚ,kÏìœa¬Nèʈ@$H¬F‚rÒ`£ÁÆÛ&éoPØà˜@µ†žáÍF ‹2eé ‡tIDAT ?ccÏÆœ ;]ª<§esc J:u0vìXgù)Ÿ‚••KbQÈšaoìƒ>è,Ç3}ôÑn,#×ÔäóE±*a´"ë’?ë‹ã’¹]/ÿÆ ŸK®ÁOü'œp‚[¹"0L´Îù·Á¿3Ì« $þ ñœeâóeeS;f´Jæ’¹µIÆß\²çûÃÚ/ž[}”,5Ý%Å' ±Z|f1{‡56Ì 6*<6¡jz`Cïë¿b0æÖ°Óµcr·FÝz»^ì„¢p?Û ì1å W.‰åß­hûöíà§eZöÒQ°rbŸ9™È`êC=ô»„ù,<Ø­ÍËå¦8«? ÿFhìo‡mýø7dí–‰U†£ŸLÙ scom™µ[~«§²IY±ˆÀþ H¬îŸO¹ºêo<¬q±F ½[¡x.S6üìý ºÿ˜)Y½”Mª‘‹…½YœN{ÿý÷»OÊÜžâ•“µÌP(qWZöÒÊDždpBŒYåŠS.ØÏ5—ÙKæ^#ŸÃ§èÏŸµ]l¯ø÷DÃcµ_æXÒþvÊŽ­N켤që¾ÊE ðy±ç¨¸$V‹K,ÆÃÛƒa ¹[¶ÍŸçþc».·dü€ÁŽë¡d©ÄÆ], 'XõéÓ\‹³É)\9.rÑ¢E.“ 6TW”ª‹õÃÙû\2ŒcŠ9™Ž3û#=Yª´Å÷ÿÍX[eniãÖýE'Àz 1·èw*¤ìKÀž!ÿßö¾!BŸI¬†fS®¯> Áù`~åºÐQ̼ýú³ÌϽ¢s9¬¶mÛbüøñn-×·ÞzËM¶âê2Ñ!@±Ú±cG¤¤¤öB–ç¿wû[27:T•ªˆ@´H¬F‹|„Ó ÖÈó‹p¶”\!@!ıƒ-Z´p“vÒÓÓ!±½ÊåxaZ«—ò,T£GQ)—ï2ž›-/e¨Èù,‹ºØ;å²"“RÙD@"BÀD‘¹IT‰„$ z‰F*@ÁÊâ™Hânާ—‰òæ\›$iõQ’\H¬–„šîØçó2EQ ¢è¬‹ÀóèåL)‹@x Pù'¶ò¸Y³fxä‘G°mÛ¶ð&®Ø pÕÛ'Øj8Á~\Þä@b5y‰€ˆ€ˆ€”/þž;ŠTŠ$Ú1cÆà™gžq;ömܸ±|ªœå–½¨=ö˜ÛÖ›»à5hÐÀõ°²>ü?"Š[,‰ÕâSx˜"`B•®‰TsSSS1nÜ8üðÃ=z´[rÏ¿;_L¤œf†_o¾ýö[7gáî»ïÆ!CpÐA¹eôlS ¬%­«åôÁP¶E@D@D@ö P5!D—b•ã&9f•“?GŽ n=eÊ·AÆí·ßŽ… j“½‹}ÄÝ Ÿ~úi·eó_þòìÜ¹Ó ÕÎ;»­›9€bÕ/X‹ˆwƒV( 5Ý#" " "3(Ti¬g•âˆB•b‰–‚µQ£FNÀrsŒU«V¹Ýø~øa·YÆÀ1hÐ ôêÕ 5jÔˆ™rÅZF؃:þ||öÙgøàƒðóÏ?»uœÉ–kosíúõë£V­Znmg·Êºàë]µú*jù$V‹JJáD@D@D@b–€ !ûüOÄ- ¹!Fvv¶[ÿ™Kºñ:ýš4i‚;v€ãXß}÷]·± ‡¤¥¥9ÑÊÝß>ø`´lÙÒݳcÆ6lØàzŸ¹ùË×_í„*…&ù%''£K—.n=gŠSîZÈ Hèo‘0ëÀzW­ç›qG°J¬†±’µˆ@$ d`æõÃ1é…5HÏØ›îñÞŒ¿_t"ZÔª†Ì…Ïà¨?íÀCÓÿˆN©µ°§/foXw”µ×=-¦<…Sú„š‰ACÜ´÷4wûj,\›‡ÖmSQ#1a„•Åÿó?{õL¬æææ®=Ìp¬¼f"‹Ûs¹%ŠÕíÛ·ãý÷ßÇ{ï½çÄ,' ¥y–c0;uêä6AáÎpì¡­½°999 (]¾|9þ÷¿ÿ9qÊÝ9D‚"Ÿâ“b“eå0<7®£fy"•L†çìÑfXþpð(ŽPåC#±Ö?E."9زh5Vö<wžÜ«&bÛªÙxð¦«qä’íxgê™h]§#.¼8 ëT ­ÜÝXè}"LÌÈB¾÷ÉËû°:l°+¿~€³OßçÞ8 m=A,#"~?b•½¨Kt)8ù Û/f)¢(R)¶(ØÌR Q¸Úyff¦±sçÎu“ˆ¯³W–¢ŒŸÀ9&–=µ°<§­]»¶nÜIŽÇÌK¤ Å9óGáÍ%»8–tÓ¦MàÓõë×cíÚµX³fs·nÝêòÆüY(¹e3Å'E?ým8…'-ÏÍ )NM°’‹õ®Z8ëñ.®P%3‰ÕH=9JGD üò€îzbpßÁhR»*òsûâˆvMÑ÷œ[ñÁOCqzómØ”ž‰ü¼| w#Þ{ê>\y×Kؼ-ìèõÀ¶ªZ/xo6—ߌ³qï-ðÔGÑmøß0áïg£[‹:HŒÛŽÏžy§>…MÛcì?¯À… póØ)Xðã&L¼³:î¾ö ´J©^\¹~NJA*¿Pµu…)PM¤²¸ª]6<€‚Œ¢“‚••â”"–Ç¥fyÎ0vnáìÞeË–añâÅî:Óbº ËpLƒ÷2}Š9rì´üPÐSLÓŸñ˜xf<–?úñóÆ|P˜R”fdd¸ðÖ£i½šLß81-^oÚ´)ØKìÃc³ G‘I׬]cÞ-Y²l~ÑÊsú[Ùx?ó`ù §¢‰Õ¢’R8rA )!Þk÷4¶^‹‹ý†âìüɘ7o þPm1¹/ÃNÈÅö÷.Á¥7¬Ç×Þˆîu6ãá¿_‡':tÀÄ!¾fqËw¸èä¿`]ß1¸æÖöøñþk0v\¦?=xÿŸõœ|Õ$tÞý®˜z!ê7}Gœ: /,ý{uBJR ÕrñÔ(“…€‰1 ,Š* W3ILOQV|&&)úÌš¥k•® `;æu¦ÎÂÐeúvî“Çìñ´0Žyµ<Óµc–‹–ÆŽMø1 ËEqÈ^\•d`ÂÐ\ó£K˰~?‹ÓÝÂÐõ Ur4ÁJžÁ,…,Ãð>KÇòï SÄÿ|­rïP0ˆi{_NÌf\bCÔ½ >÷z²½F}ûŽ\Äy«)íOÇÕ÷7Ç>mP-w5¾oq-Þþi .è×¼°tK?zŸ,Š?:;ÔÇàVÀ£®ÂÜE‡ãÇ›fbÀÅ÷áœÑÑ0i(rÒw ³N ~(j¼ÙÃûxbµÚží #Ôˆ@ØPQlQ¼Q™1qdbËzM¤²·Ò,E$k °ä¹_ŒšX5!j.Óö Pž›e¼Ràȹ¼×Ží>Ë?Ëf‚/ð8P4òœaü÷’‡ùÑõ[‹—®ß’™ßšX¥Ë4ÌZ«åÃüéú…ª¥ouSTWbµ¨¤ND |ÈMÇ‚ùyè{zTOZë•Áë¡ðþ¯šT ù3&OØ‚Y9رñWôê5à¾!ª9™Þ'¶‚é8g䫨š‡‚¼ÝX»n#VnJÇ"o8Aï6©¨]³’ªÖÀ »¹IÞÕe³•¹HˆS¯jù|`”ëòJ€ÂŒ†bËoL”™X¥0¥ˆbO õ¤šJ¬R@³&.M¤šP¥kÇvÍħ¹Ì£Ó-®1!jå¦Ë²Ò˜(4¿@טР%TéO¡In<6~æšp5AjÖüéò>¿e>h‹k$V‹KLáE@bœ_T{Ã-ߌ׶gâÜz5Q¥P‰fà®ÀcNÁ´Ç‡£k³j˜õÃñ¦w«ÿ•‘—³mÏÂCwæÆž&zÂwþ‚µèÜ£9¾÷V ˆ¯Êž âÈÃ/Ÿ¿Í­û¡kžCŽ?–Ç¥ì‰@"`BˆÉš 3¿à¢Àò÷žR¬RŒšX5aj=ªæš?]¤þcó£øôÛ¹_œúýU@ÿPÆÊÇëvL7КX t¹ðÜ«›85^tí˜×-Ei Ÿ?ãoy U¶ýùK¬îŽ®‰€”;sÖ-âåK°5![V‹ñ—\%ƒþ£;7C­&bó°+}#úõ8}=ÿ›‰ûÈFíCröQ« [wEÂÚYø5óOÜ£¾ú&_·ÓÞ˜„GVÁMÏÌÄ.ÍÑfÇ—wùU8yÚk8¸¡‡¬ [3¼ÉÞ;§Š%YîH*Ã"P> ˜ˆ3¡ÆsLQ—XtEh°sŠN†5Qèš(õ»&NégÇ&NéGc®‰Ó@7}+›_øù­Ì¡\ŠN?ãb‚”ç<6×üÍõ‹PúÙ¹¹Îâ¥kù£[R#±ZRrºOD æ$yB1ëÅIóRØ¿šŸ—…>cþŽ;&þ%#g«çÙÖ³‰ÉèzÊiÞ¶‹—àðý -‡áØñÊwcÎ)× 
descriptor
descriptors
    In Python, a descriptor is an object attribute whose access behavior
    has been overridden by methods in the descriptor protocol.
    Those methods are ``__get__()``, ``__set__()``, and ``__delete__()``.
    If any of those methods are defined for an object, it is said
    to be a descriptor.

    In SQLAlchemy, descriptors are used heavily in order to provide
    attribute behavior on mapped classes.   When a class is mapped as
    such::

        class MyClass(Base):
            __tablename__ = 'foo'

            id = Column(Integer, primary_key=True)
            data = Column(String)

    The ``MyClass`` class will be :term:`mapped` when its definition
    is complete, at which point the ``id`` and ``data`` attributes,
    starting out as :class:`.Column` objects, will be replaced
    by the :term:`instrumentation` system with instances
    of :class:`.InstrumentedAttribute`, which are descriptors that
    provide the above mentioned ``__get__()``, ``__set__()`` and
    ``__delete__()`` methods.   The :class:`.InstrumentedAttribute`
    will generate a SQL expression when used at the class level::

        >>> print MyClass.data == 5
        data = :data_1

    and at the instance level, keeps track of changes to values,
    and also :term:`lazy loads` unloaded attributes
    from the database::

        >>> m1 = MyClass()
        >>> m1.id = 5
        >>> m1.data = "some data"

        >>> from sqlalchemy import inspect
        >>> inspect(m1).attrs.data.history.added
        "some data"

DDL
    An acronym for *Data Definition Language*.  DDL is the subset
    of SQL that relational databases use to configure tables, constraints,
    and other permanent objects within a database schema.  SQLAlchemy
    provides a rich API for constructing and emitting DDL expressions.
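    As a quick illustration (a minimal sketch; the ``user`` table and
    the in-memory SQLite URL are illustrative, not part of the original
    text), table metadata can be used to emit ``CREATE TABLE`` DDL::

        from sqlalchemy import MetaData, Table, Column, Integer, String
        from sqlalchemy import create_engine

        metadata = MetaData()
        user = Table('user', metadata,
                     Column('id', Integer, primary_key=True),
                     Column('name', String(50)))

        engine = create_engine('sqlite://')

        # emits CREATE TABLE user (...) to the database
        metadata.create_all(engine)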
    .. seealso::

        :ref:`metadata_toplevel`

        `DDL (via Wikipedia) `_

discriminator
    A result-set column which is used during :term:`polymorphic` loading
    to determine what kind of mapped class should be applied to a
    particular incoming result row.   In SQLAlchemy, the classes are
    always part of a hierarchy mapping using inheritance mapping.

    .. seealso::

        :ref:`inheritance_toplevel`

instrumentation
instrumented
instrumenting
    Instrumentation refers to the process of augmenting the functionality
    and attribute set of a particular class.   Ideally, the
    behavior of the class should remain close to a regular class, except
    that additional behaviors and features are made available.  The
    SQLAlchemy :term:`mapping` process, among other things, adds
    database-enabled :term:`descriptors` to a mapped
    class which each represent a particular database column
    or relationship to a related class.

identity map
    A mapping between Python objects and their database identities.
    The identity map is a collection that's associated with an
    ORM :term:`session` object, and maintains a single instance
    of every database object keyed to its identity.   The advantage
    to this pattern is that all operations which occur for a particular
    database identity are transparently coordinated onto a single
    object instance.  When using an identity map in conjunction with
    an :term:`isolated` transaction, having a reference
    to an object that's known to have a particular primary key can be
    considered from a practical standpoint to be a proxy to the actual
    database row.

    .. seealso::

        Martin Fowler - Identity Map - http://martinfowler.com/eaaCatalog/identityMap.html

lazy load
lazy loads
lazy loading
    In object relational mapping, a "lazy load" refers to an
    attribute that does not contain its database-side value
    for some period of time, typically when the object is
    first loaded.  Instead, the attribute receives a *memoization*
    that causes it to go out to the database and load its data
    when it's first used.   Using this pattern, the complexity and time
    spent within object fetches can sometimes be reduced, in that
    attributes for related tables don't need to be addressed
    immediately.

    .. seealso::

        `Lazy Load (on Martin Fowler) `_

        :term:`N plus one problem`

        :doc:`orm/loading_relationships`

mapping
mapped
    We say a class is "mapped" when it has been passed through the
    :func:`.orm.mapper` function.   This process associates the
    class with a database table or other :term:`selectable`
    construct, so that instances of it can be persisted
    using a :class:`.Session` as well as loaded using a
    :class:`.Query`.

N plus one problem
    The N plus one problem is a common side effect of the
    :term:`lazy load` pattern, whereby an application wishes
    to iterate through a related attribute or collection on
    each member of a result set of objects, where that
    attribute or collection is set to be loaded via the lazy
    load pattern.   The net result is that a SELECT statement
    is emitted to load the initial result set of parent objects;
    then, as the application iterates through each member,
    an additional SELECT statement is emitted for each member
    in order to load the related attribute or collection for
    that member.  The end result is that for a result set of
    N parent objects, there will be N + 1 SELECT statements emitted.
    The N plus one problem is alleviated using :term:`eager loading`;
    a short sketch follows below.

    .. seealso::

        :doc:`orm/loading_relationships`
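    For instance (a sketch, assuming a ``User`` class with a lazily
    loaded ``addresses`` collection; these names are illustrative),
    the first loop below emits one SELECT per ``User`` row, while the
    eager-loading variant emits a single statement::

        # N plus one: one SELECT for the users, then one SELECT
        # per user to load each .addresses collection
        for user in session.query(User):
            print user.addresses

        # alleviated via eager loading: one SELECT with a JOIN
        from sqlalchemy.orm import joinedload
        for user in session.query(User).options(joinedload(User.addresses)):
            print user.addresses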
polymorphic
polymorphically
    Refers to a function that handles several types at once.  In SQLAlchemy,
    the term is usually applied to the concept of an ORM mapped class
    whereby a query operation will return different subclasses
    based on information in the result set, typically by checking the
    value of a particular column in the result known as the
    :term:`discriminator`.

    Polymorphic loading in SQLAlchemy implies that one or a combination
    of three different schemes is used to map a hierarchy of classes;
    "joined", "single", and "concrete".   The section
    :ref:`inheritance_toplevel` describes inheritance mapping fully.

generative
    A term that SQLAlchemy uses to refer to what's normally known
    as :term:`method chaining`; see that term for details.

method chaining
    An object-oriented technique whereby the state of an object
    is constructed by calling methods on the object.   The
    object features any number of methods, each of which return
    a new object (or in some cases the same object) with
    additional state added to the object.

    The two SQLAlchemy objects that make the most use of
    method chaining are the :class:`~.expression.Select`
    object and the :class:`~.orm.query.Query` object.
    For example, a :class:`~.expression.Select` object can
    be assigned two expressions to its WHERE clause as well
    as an ORDER BY clause by calling upon the :meth:`~.Select.where`
    and :meth:`~.Select.order_by` methods::

        stmt = select([user.c.name]).\
                    where(user.c.id > 5).\
                    where(user.c.name.like('e%')).\
                    order_by(user.c.name)

    Each method call above returns a copy of the original
    :class:`~.expression.Select` object with additional qualifiers
    added.

    .. seealso::

        :term:`generative`

release
releases
released
    In the context of SQLAlchemy, the term "released"
    refers to the process of ending the usage of a particular
    database connection.    SQLAlchemy features the usage
    of connection pools, which allows configurability as to
    the lifespan of database connections.   When using a pooled
    connection, the process of "closing" it, i.e. invoking a
    statement like ``connection.close()``, may have the effect
    of the connection being returned to an existing pool,
    or it may have the effect of actually shutting down the
    underlying TCP/IP connection referred to by that connection -
    which one takes place depends on configuration as well
    as the current state of the pool.  So we used the term
    *released* instead, to mean "do whatever it is you do
    with connections when we're done using them".

    The term will sometimes be used in the phrase, "release
    transactional resources", to indicate more explicitly that
    what we are actually "releasing" is any transactional
    state which has accumulated upon the connection.  In most
    situations, the process of selecting from tables, emitting
    updates, etc. acquires :term:`isolated` state upon
    that connection as well as potential row or table locks.
    This state is all local to a particular transaction
    on the connection, and is released when we emit a rollback.
    An important feature of the connection pool is that when
    we return a connection to the pool, the ``connection.rollback()``
    method of the DBAPI is called as well, so that as the
    connection is set up to be used again, it's in a "clean"
    state with no references held to the previous series
    of operations.

    .. seealso::

        :ref:`pooling_toplevel`

DBAPI
    DBAPI is shorthand for the phrase "Python Database API
    Specification".  This is a widely used specification
    within Python to define common usage patterns for all
    database connection packages.   The DBAPI is a "low level"
    API which is typically the lowest level system used
    in a Python application to talk to a database.  SQLAlchemy's
    :term:`dialect` system is constructed around the
    operation of the DBAPI, providing individual dialect
    classes which service a specific DBAPI on top of a
    specific database engine; for example, the :func:`.create_engine`
    URL ``postgresql+psycopg2://@localhost/test``
    refers to the :mod:`psycopg2 <.postgresql.psycopg2>`
    DBAPI/dialect combination, whereas the URL
    ``mysql+mysqldb://@localhost/test`` refers to the
    :mod:`MySQL for Python <.mysql.mysqldb>` DBAPI/dialect
    combination (a short sketch follows below).

    .. seealso::

        `PEP 249 - Python Database API Specification v2.0 `_
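    As a sketch (the URLs are illustrative and assume the corresponding
    DBAPI packages are installed), the dialect/DBAPI combination in use
    is selected entirely by the URL passed to :func:`.create_engine`::

        from sqlalchemy import create_engine

        # Postgresql via the psycopg2 DBAPI
        pg_engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")

        # MySQL via the MySQL-Python DBAPI
        my_engine = create_engine("mysql+mysqldb://scott:tiger@localhost/test")

        # application code is otherwise unchanged; the dialect translates
        # to each DBAPI's calling conventions
        result = pg_engine.execute("select 1")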
domain model
    A domain model in problem solving and software engineering is a
    conceptual model of all the topics related to a specific problem.
    It describes the various entities, their attributes, roles, and
    relationships, plus the constraints that govern the problem domain.
    (via Wikipedia)

    .. seealso::

        `Domain Model (wikipedia) `_

unit of work
    This pattern is where the system transparently keeps
    track of changes to objects and periodically flushes all those
    pending changes out to the database.   SQLAlchemy's Session
    implements this pattern fully in a manner similar to that of
    Hibernate.

    .. seealso::

        `Unit of Work by Martin Fowler `_

        :doc:`orm/session`

expire
expires
expiring
    In the SQLAlchemy ORM, refers to when the data in a :term:`persistent`
    or sometimes :term:`detached` object is erased, such that when
    the object's attributes are next accessed, a :term:`lazy load` SQL
    query will be emitted in order to refresh the data for this object
    as stored in the current ongoing transaction.

    .. seealso::

        :ref:`session_expire`

Session
    The container or scope for ORM database operations. Sessions
    load instances from the database, track changes to mapped
    instances and persist changes in a single unit of work when
    flushed.

    .. seealso::

        :doc:`orm/session`

columns clause
    The portion of the ``SELECT`` statement which enumerates the
    SQL expressions to be returned in the result set.  The expressions
    follow the ``SELECT`` keyword directly and are a comma-separated
    list of individual expressions.

    E.g.:

    .. sourcecode:: sql

        SELECT user_account.name, user_account.email
        FROM user_account
        WHERE user_account.name = 'fred'

    Above, the list of columns ``user_account.name``,
    ``user_account.email`` is the columns clause of the ``SELECT``.

WHERE clause
    The portion of the ``SELECT`` statement which indicates criteria
    by which rows should be filtered.   It is a single SQL expression
    which follows the keyword ``WHERE``.

    .. sourcecode:: sql

        SELECT user_account.name, user_account.email
        FROM user_account
        WHERE user_account.name = 'fred' AND user_account.status = 'E'

    Above, the phrase ``WHERE user_account.name = 'fred' AND user_account.status = 'E'``
    comprises the WHERE clause of the ``SELECT``.

FROM clause
    The portion of the ``SELECT`` statement which indicates the initial
    source of rows.

    A simple ``SELECT`` will feature one or more table names in its
    FROM clause.  Multiple sources are separated by a comma:

    .. sourcecode:: sql

        SELECT user.name, address.email_address
        FROM user, address
        WHERE user.id=address.user_id

    The FROM clause is also where explicit joins are specified.  We can
    rewrite the above ``SELECT`` using a single ``FROM`` element which
    consists of a ``JOIN`` of the two tables:

    .. sourcecode:: sql

        SELECT user.name, address.email_address
        FROM user JOIN address ON user.id=address.user_id
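    As a brief aside (a sketch, assuming ``user`` and ``address``
    :class:`.Table` objects have been defined with the columns shown),
    the joined form of the above FROM clause can be rendered from the
    SQLAlchemy expression language using :meth:`.Table.join`::

        from sqlalchemy import select

        stmt = select([user.c.name, address.c.email_address]).\
                    select_from(
                        user.join(address, user.c.id == address.c.user_id))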
subquery
    Refers to a ``SELECT`` statement that is embedded within an
    enclosing ``SELECT``.

    A subquery comes in two general flavors, one known as a
    "scalar select" which specifically must return exactly one row
    and one column, and the other form which acts as a "derived table"
    and serves as a source of rows for the FROM clause of another select.
    A scalar select is eligible to be placed in the :term:`WHERE clause`,
    :term:`columns clause`, ORDER BY clause or HAVING clause of the
    enclosing select, whereas the derived table form is eligible to be
    placed in the FROM clause of the enclosing ``SELECT``.

    Examples:

    1. a scalar subquery placed in the :term:`columns clause` of an
       enclosing ``SELECT``.  The subquery in this example is a
       :term:`correlated subquery` because part of the rows which it
       selects from are given via the enclosing statement.

       .. sourcecode:: sql

          SELECT id, (SELECT name FROM address WHERE address.user_id=user.id)
          FROM user

    2. a scalar subquery placed in the :term:`WHERE clause` of an
       enclosing ``SELECT``.  The subquery in this example is not
       correlated as it selects a fixed result.

       .. sourcecode:: sql

          SELECT id, name FROM user
          WHERE status=(SELECT status_id FROM status_code WHERE code='C')

    3. a derived table subquery placed in the :term:`FROM clause` of an
       enclosing ``SELECT``.   Such a subquery is almost always given an
       alias name.

       .. sourcecode:: sql

          SELECT user.id, user.name, ad_subq.email_address
          FROM user JOIN
            (select user_id, email_address FROM address WHERE address_type='Q')
            AS ad_subq ON user.id = ad_subq.user_id

correlates
correlated subquery
correlated subqueries
    A :term:`subquery` is correlated if it depends on data in the
    enclosing ``SELECT``.

    Below, a subquery selects the aggregate value ``MIN(a.id)``
    from the ``email_address`` table, such that
    it will be invoked for each value of ``user_account.id``, correlating
    the value of this column against the ``email_address.user_account_id``
    column:

    .. sourcecode:: sql

        SELECT user_account.name, email_address.email
        FROM user_account
        JOIN email_address ON user_account.id=email_address.user_account_id
        WHERE email_address.id = (
            SELECT MIN(a.id) FROM email_address AS a
            WHERE a.user_account_id=user_account.id
        )

    The above subquery refers to the ``user_account`` table, which is not
    itself in the ``FROM`` clause of this nested query.   Instead, the
    ``user_account`` table is received from the enclosing query, where
    each row selected from ``user_account`` results in a distinct
    execution of the subquery.

    A correlated subquery is in most cases present in the
    :term:`WHERE clause` or :term:`columns clause` of the immediately
    enclosing ``SELECT`` statement, as well as in the ORDER BY or HAVING
    clause.

    In less common cases, a correlated subquery may be present in the
    :term:`FROM clause` of an enclosing ``SELECT``; in these cases the
    correlation is typically due to the enclosing ``SELECT`` itself being
    enclosed in the WHERE, ORDER BY, columns or HAVING clause of another
    ``SELECT``, such as:

    .. sourcecode:: sql

        SELECT parent.id FROM parent
        WHERE EXISTS (
            SELECT * FROM (
                SELECT child.id AS id, child.parent_id AS parent_id, child.pos AS pos
                FROM child
                WHERE child.parent_id = parent.id ORDER BY child.pos LIMIT 3)
            WHERE id = 7)

    Correlation from one ``SELECT`` directly to one which encloses the
    correlated query via its ``FROM`` clause is not possible, because
    the correlation can only proceed once the original source rows from
    the enclosing statement's FROM clause are available.
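    As a sketch (assuming ``user_account`` and ``email_address``
    :class:`.Table` objects with the columns shown), the correlated
    scalar subquery above can be spelled in the SQLAlchemy expression
    language using :meth:`.Select.as_scalar`; correlation against the
    enclosing statement is applied automatically::

        from sqlalchemy import select, func

        # scalar subquery; auto-correlates against the enclosing SELECT
        subq = select([func.min(email_address.c.id)]).\
                    where(email_address.c.user_account_id ==
                          user_account.c.id).\
                    as_scalar()

        stmt = select([user_account.c.name, email_address.c.email]).\
                    select_from(
                        user_account.join(
                            email_address,
                            user_account.c.id ==
                            email_address.c.user_account_id)).\
                    where(email_address.c.id == subq)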
ACID
ACID model
    An acronym for "Atomicity, Consistency, Isolation,
    Durability"; a set of properties that guarantee that
    database transactions are processed reliably.
    (via Wikipedia)

    .. seealso::

        :term:`atomicity`

        :term:`consistency`

        :term:`isolation`

        :term:`durability`

        http://en.wikipedia.org/wiki/ACID_Model

atomicity
    Atomicity is one of the components of the :term:`ACID` model,
    and requires that each transaction is "all or nothing":
    if one part of the transaction fails, the entire transaction fails,
    and the database state is left unchanged.  An atomic system must
    guarantee atomicity in each and every situation, including power
    failures, errors, and crashes.
    (via Wikipedia)

    .. seealso::

        :term:`ACID`

        http://en.wikipedia.org/wiki/Atomicity_(database_systems)

consistency
    Consistency is one of the components of the :term:`ACID` model,
    and ensures that any transaction will
    bring the database from one valid state to another. Any data written
    to the database must be valid according to all defined rules,
    including but not limited to :term:`constraints`, cascades, triggers,
    and any combination thereof.
    (via Wikipedia)

    .. seealso::

        :term:`ACID`

        http://en.wikipedia.org/wiki/Consistency_(database_systems)

isolation
isolated
    The isolation property of the :term:`ACID` model
    ensures that the concurrent execution of
    transactions results in a system state that would be
    obtained if transactions were executed serially, i.e. one
    after the other.  Each transaction must execute in total
    isolation i.e. if T1 and T2 execute concurrently then each
    should remain independent of the other.
    (via Wikipedia)

    .. seealso::

        :term:`ACID`

        http://en.wikipedia.org/wiki/Isolation_(database_systems)

durability
    Durability is a property of the :term:`ACID` model
    which means that once a transaction has been committed,
    it will remain so, even in the event of power loss, crashes,
    or errors. In a relational database, for instance, once a
    group of SQL statements execute, the results need to be stored
    permanently (even if the database crashes immediately
    thereafter).
    (via Wikipedia)

    .. seealso::

        :term:`ACID`

        http://en.wikipedia.org/wiki/Durability_(database_systems)

RETURNING
    This is a non-SQL standard clause provided in various forms by
    certain backends, which provides the service of returning a result
    set upon execution of an INSERT, UPDATE or DELETE statement.  Any set
    of columns from the matched rows can be returned, as though they were
    produced from a SELECT statement.

    The RETURNING clause provides both a dramatic performance boost to
    common update/select scenarios, including retrieval of inline- or
    default- generated primary key values and defaults at the moment they
    were created, as well as a way to get at server-generated
    default values in an atomic way.

    An example of RETURNING, idiomatic to Postgresql, looks like::

        INSERT INTO user_account (name) VALUES ('new name') RETURNING id, timestamp

    Above, the INSERT statement will provide upon execution a result set
    which includes the values of the columns ``user_account.id`` and
    ``user_account.timestamp``, which above should have been generated as
    default values as they are not included otherwise (but note any series
    of columns or SQL expressions can be placed into RETURNING, not just
    default-value columns).

    The backends that currently support
    RETURNING or a similar construct are Postgresql, SQL Server, Oracle,
    and Firebird.    The Postgresql and Firebird implementations are
    generally full featured, whereas the implementations of SQL Server
    and Oracle have caveats. On SQL Server, the clause is known as
    "OUTPUT INSERTED" for INSERT and UPDATE statements and
    "OUTPUT DELETED" for DELETE statements; the key caveat is that
    triggers are not supported in conjunction with this keyword.  On
    Oracle, it is known as "RETURNING...INTO", and requires that the
    value be placed into an OUT parameter, meaning not only is the syntax
    awkward, but it can also only be used for one row at a time.

    SQLAlchemy's :meth:`.UpdateBase.returning` system provides a layer
    of abstraction on top of the RETURNING systems of these backends to
    provide a consistent interface for returning columns.  The ORM also
    includes many optimizations that make use of RETURNING when available.
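    As a short sketch (assuming a ``user_account`` :class:`.Table` and a
    backend that supports RETURNING, such as Postgresql), the
    :meth:`.UpdateBase.returning` method renders the clause on an
    INSERT construct::

        stmt = user_account.insert().\
                    values(name='new name').\
                    returning(user_account.c.id, user_account.c.timestamp)

        # the INSERT now returns a result set with the two columns
        result = engine.execute(stmt)
        new_id, created_at = result.fetchone()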
one to many
    A style of :func:`~sqlalchemy.orm.relationship` which links
    the primary key of the parent mapper's table to the foreign
    key of a related table.   Each unique parent object can
    then refer to zero or more unique related objects.

    The related objects in turn will have an implicit or
    explicit :term:`many to one` relationship to their parent
    object.

    An example one to many schema (which, note, is identical
    to the :term:`many to one` schema):

    .. sourcecode:: sql

        CREATE TABLE department (
            id INTEGER PRIMARY KEY,
            name VARCHAR(30)
        )

        CREATE TABLE employee (
            id INTEGER PRIMARY KEY,
            name VARCHAR(30),
            dep_id INTEGER REFERENCES department(id)
        )

    The relationship from ``department`` to ``employee`` is
    one to many, since many employee records can be associated with a
    single department.  A SQLAlchemy mapping might look like::

        class Department(Base):
            __tablename__ = 'department'
            id = Column(Integer, primary_key=True)
            name = Column(String(30))
            employees = relationship("Employee")

        class Employee(Base):
            __tablename__ = 'employee'
            id = Column(Integer, primary_key=True)
            name = Column(String(30))
            dep_id = Column(Integer, ForeignKey('department.id'))

    .. seealso::

        :term:`relationship`

        :term:`many to one`

        :term:`backref`

many to one
    A style of :func:`~sqlalchemy.orm.relationship` which links
    a foreign key in the parent mapper's table
    to the primary key of a related table.   Each parent object can
    then refer to exactly zero or one related object.

    The related objects in turn will have an implicit or
    explicit :term:`one to many` relationship to any number
    of parent objects that refer to them.

    An example many to one schema (which, note, is identical
    to the :term:`one to many` schema):

    .. sourcecode:: sql

        CREATE TABLE department (
            id INTEGER PRIMARY KEY,
            name VARCHAR(30)
        )

        CREATE TABLE employee (
            id INTEGER PRIMARY KEY,
            name VARCHAR(30),
            dep_id INTEGER REFERENCES department(id)
        )

    The relationship from ``employee`` to ``department`` is
    many to one, since many employee records can be associated with a
    single department.  A SQLAlchemy mapping might look like::

        class Department(Base):
            __tablename__ = 'department'
            id = Column(Integer, primary_key=True)
            name = Column(String(30))

        class Employee(Base):
            __tablename__ = 'employee'
            id = Column(Integer, primary_key=True)
            name = Column(String(30))
            dep_id = Column(Integer, ForeignKey('department.id'))
            department = relationship("Department")

    .. seealso::

        :term:`relationship`

        :term:`one to many`

        :term:`backref`

backref
bidirectional relationship
    An extension to the :term:`relationship` system whereby two
    distinct :func:`~sqlalchemy.orm.relationship` objects can be
    mutually associated with each other, such that they coordinate
    in memory as changes occur to either side.   The most common
    way these two relationships are constructed is by using
    the :func:`~sqlalchemy.orm.relationship` function explicitly
    for one side and specifying the ``backref`` keyword to it so that
    the other :func:`~sqlalchemy.orm.relationship` is created
    automatically.  We can illustrate this against the example we've
    used in :term:`one to many` as follows::

        class Department(Base):
            __tablename__ = 'department'
            id = Column(Integer, primary_key=True)
            name = Column(String(30))
            employees = relationship("Employee", backref="department")

        class Employee(Base):
            __tablename__ = 'employee'
            id = Column(Integer, primary_key=True)
            name = Column(String(30))
            dep_id = Column(Integer, ForeignKey('department.id'))

    A backref can be applied to any relationship, including one to many,
    many to one, and :term:`many to many` (see the sketch below).

    .. seealso::

        :term:`relationship`

        :term:`one to many`

        :term:`many to one`

        :term:`many to many`
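    For instance (a small usage sketch employing the classes above; the
    assertion illustrates the in-memory coordination), appending to one
    side of the bidirectional relationship populates the other::

        dept = Department(name="Engineering")
        emp = Employee(name="emp1")

        # appending to Department.employees also sets Employee.department
        dept.employees.append(emp)
        assert emp.department is dept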
many to many
    A style of :func:`sqlalchemy.orm.relationship` which links two tables
    together via an intermediary table in the middle.   Using this
    configuration, any number of rows on the left side may refer to any
    number of rows on the right, and vice versa.

    A schema where employees can be associated with projects:

    .. sourcecode:: sql

        CREATE TABLE employee (
            id INTEGER PRIMARY KEY,
            name VARCHAR(30)
        )

        CREATE TABLE project (
            id INTEGER PRIMARY KEY,
            name VARCHAR(30)
        )

        CREATE TABLE employee_project (
            employee_id INTEGER PRIMARY KEY,
            project_id INTEGER PRIMARY KEY,
            FOREIGN KEY (employee_id) REFERENCES employee(id),
            FOREIGN KEY (project_id) REFERENCES project(id)
        )

    Above, the ``employee_project`` table is the many-to-many table,
    which naturally forms a composite primary key consisting
    of the primary key from each related table.

    In SQLAlchemy, the :func:`sqlalchemy.orm.relationship` function
    can represent this style of relationship in a mostly
    transparent fashion, where the many-to-many table is
    specified using plain table metadata::

        class Employee(Base):
            __tablename__ = 'employee'

            id = Column(Integer, primary_key=True)
            name = Column(String(30))

            projects = relationship(
                "Project",
                secondary=Table('employee_project', Base.metadata,
                            Column("employee_id", Integer,
                                        ForeignKey('employee.id'),
                                        primary_key=True),
                            Column("project_id", Integer,
                                        ForeignKey('project.id'),
                                        primary_key=True)
                        ),
                backref="employees"
                )

        class Project(Base):
            __tablename__ = 'project'

            id = Column(Integer, primary_key=True)
            name = Column(String(30))

    Above, the ``Employee.projects`` and back-referencing
    ``Project.employees`` collections are defined::

        proj = Project(name="Client A")

        emp1 = Employee(name="emp1")
        emp2 = Employee(name="emp2")

        proj.employees.extend([emp1, emp2])

    .. seealso::

        :term:`association relationship`

        :term:`relationship`

        :term:`one to many`

        :term:`many to one`

relationship
relationships
    A connecting unit between two mapped classes, corresponding
    to some relationship between the two tables in the database.

    The relationship is defined using the SQLAlchemy function
    :func:`~sqlalchemy.orm.relationship`.   Once created, SQLAlchemy
    inspects the arguments and underlying mappings involved
    in order to classify the relationship as one of three types:
    :term:`one to many`, :term:`many to one`, or :term:`many to many`.
    With this classification, the relationship construct
    handles the task of persisting the appropriate linkages
    in the database in response to in-memory object associations,
    as well as the job of loading object references and collections
    into memory based on the current linkages in the
    database.

    .. seealso::

        :ref:`relationship_config_toplevel`
association relationship
    A two-tiered :term:`relationship` which links two tables
    together using an association table in the middle.  The
    association relationship differs from a :term:`many to many`
    relationship in that the many-to-many table is mapped
    by a full class, rather than invisibly handled by the
    :func:`sqlalchemy.orm.relationship` construct as in the case
    with many-to-many, so that additional attributes are
    explicitly available.

    For example, if we wanted to associate employees with
    projects, also storing the specific role for that employee
    with the project, the relational schema might look like:

    .. sourcecode:: sql

        CREATE TABLE employee (
            id INTEGER PRIMARY KEY,
            name VARCHAR(30)
        )

        CREATE TABLE project (
            id INTEGER PRIMARY KEY,
            name VARCHAR(30)
        )

        CREATE TABLE employee_project (
            employee_id INTEGER PRIMARY KEY,
            project_id INTEGER PRIMARY KEY,
            role_name VARCHAR(30),
            FOREIGN KEY (employee_id) REFERENCES employee(id),
            FOREIGN KEY (project_id) REFERENCES project(id)
        )

    A SQLAlchemy declarative mapping for the above might look like::

        class Employee(Base):
            __tablename__ = 'employee'

            id = Column(Integer, primary_key=True)
            name = Column(String(30))

        class Project(Base):
            __tablename__ = 'project'

            id = Column(Integer, primary_key=True)
            name = Column(String(30))

        class EmployeeProject(Base):
            __tablename__ = 'employee_project'

            employee_id = Column(Integer, ForeignKey('employee.id'),
                                    primary_key=True)
            project_id = Column(Integer, ForeignKey('project.id'),
                                    primary_key=True)
            role_name = Column(String(30))

            project = relationship("Project", backref="project_employees")
            employee = relationship("Employee", backref="employee_projects")

    Employees can be added to a project given a role name::

        proj = Project(name="Client A")

        emp1 = Employee(name="emp1")
        emp2 = Employee(name="emp2")

        proj.project_employees.extend([
            EmployeeProject(employee=emp1, role_name="tech lead"),
            EmployeeProject(employee=emp2, role_name="account executive")
        ])

    .. seealso::

        :term:`many to many`

constraint
constraints
constrained
    Rules established within a relational database that ensure
    the validity and consistency of data.   Common
    forms of constraint include :term:`primary key constraint`,
    :term:`foreign key constraint`, and :term:`check constraint`.

candidate key
    A :term:`relational algebra` term referring to an attribute or set
    of attributes that form a uniquely identifying key for a
    row.  A row may have more than one candidate key, each of which
    is suitable for use as the primary key of that row.
    The primary key of a table is always a candidate key.

    .. seealso::

        :term:`primary key`

        http://en.wikipedia.org/wiki/Candidate_key

primary key
primary key constraint
    A :term:`constraint` that uniquely defines the characteristics
    of each :term:`row`.  The primary key has to consist of
    characteristics that cannot be duplicated by any other row.
    The primary key may consist of a single attribute or
    multiple attributes in combination.
    (via Wikipedia)

    The primary key of a table is typically, though not always,
    defined within the ``CREATE TABLE`` :term:`DDL`:

    .. sourcecode:: sql

        CREATE TABLE employee (
             emp_id INTEGER,
             emp_name VARCHAR(30),
             dep_id INTEGER,
             PRIMARY KEY (emp_id)
        )

    .. seealso::

        http://en.wikipedia.org/wiki/Primary_Key

foreign key constraint
    A referential constraint between two tables.  A foreign key is a
    field or set of fields in a relational table that matches a
    :term:`candidate key` of another table.
    The foreign key can be used to cross-reference tables.
    (via Wikipedia)

    A foreign key constraint can be added to a table in standard
    SQL using :term:`DDL` like the following:

    .. sourcecode:: sql

        ALTER TABLE employee ADD CONSTRAINT dep_id_fk
        FOREIGN KEY (dep_id) REFERENCES department (id)

    .. seealso::

        http://en.wikipedia.org/wiki/Foreign_key_constraint

check constraint
    A check constraint is a
    condition that defines valid data when adding or updating an
    entry in a table of a relational database. A check constraint
    is applied to each row in the table.
    (via Wikipedia)

    A check constraint can be added to a table in standard
    SQL using :term:`DDL` like the following (a SQLAlchemy version
    follows below):

    .. sourcecode:: sql

        ALTER TABLE distributors ADD CONSTRAINT zipchk CHECK (char_length(zipcode) = 5);

    .. seealso::

        http://en.wikipedia.org/wiki/Check_constraint
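    As a brief sketch (the ``distributors`` table here mirrors the DDL
    above and is illustrative), the same condition can be declared in
    SQLAlchemy table metadata using :class:`.CheckConstraint`::

        from sqlalchemy import Table, Column, String, MetaData, CheckConstraint

        metadata = MetaData()
        distributors = Table('distributors', metadata,
                Column('zipcode', String(10)),
                CheckConstraint('char_length(zipcode) = 5', name='zipchk'))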
unique constraint
unique key index
    A unique key index can uniquely identify each row of data
    values in a database table. A unique key index comprises a single
    column or a set of columns in a single database table. No two
    distinct rows or data records in a database table can have the same
    data value (or combination of data values) in those unique key index
    columns if NULL values are not used. Depending on its design, a
    database table may have many unique key indexes but at most one
    primary key index.
    (via Wikipedia)

    .. seealso::

        http://en.wikipedia.org/wiki/Unique_key#Defining_unique_keys

transient
    This describes one of the four major object states which
    an object can have within a :term:`session`; a transient object
    is a new object that doesn't have any database identity
    and has not been associated with a session yet.  When the
    object is added to the session, it moves to the
    :term:`pending` state.

    .. seealso::

        :ref:`session_object_states`

pending
    This describes one of the four major object states which
    an object can have within a :term:`session`; a pending object
    is a new object that doesn't have any database identity,
    but has been recently associated with a session.   When
    the session emits a flush and the row is inserted, the
    object moves to the :term:`persistent` state.

    .. seealso::

        :ref:`session_object_states`

persistent
    This describes one of the four major object states which
    an object can have within a :term:`session`; a persistent object
    is an object that has a database identity (i.e. a primary key)
    and is currently associated with a session.   Any object
    that was previously :term:`pending` and has now been inserted
    is in the persistent state, as is any object that's
    been loaded by the session from the database.   When a
    persistent object is removed from a session, it is known
    as :term:`detached`.

    .. seealso::

        :ref:`session_object_states`

detached
    This describes one of the four major object states which
    an object can have within a :term:`session`; a detached object
    is an object that has a database identity (i.e. a primary key)
    but is not associated with any session.  An object that
    was previously :term:`persistent` and was removed from its
    session either because it was expunged, or the owning
    session was closed, moves into the detached state.
    The detached state is generally used when objects are
    being moved between sessions or when being moved to/from
    an external object cache.  A sketch illustrating all four
    states follows below.

    .. seealso::

        :ref:`session_object_states`
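    To make the four states concrete (a sketch; ``MyClass`` and the
    session setup are assumed from earlier examples), the
    :func:`.inspect` function reports the current state of an instance::

        from sqlalchemy import inspect

        obj = MyClass(data='x')
        assert inspect(obj).transient       # no identity, no session

        session.add(obj)
        assert inspect(obj).pending         # in session, not yet flushed

        session.commit()
        assert inspect(obj).persistent      # has identity, in session

        session.close()
        assert inspect(obj).detached        # has identity, no session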
EÞׯ„ԄðPËmß°"Ÿ_ª9òäB>›‘¡¿|†è@~ávˆlK†{[ZýÁZþQF¶FúBÖ‘1zÛygdŒµýyJR ¾%‚ ˆ¼È/Úð¯Ê`ªÙ¶ÏèyKT¬­‚|':ÎÎÁHDA“IöNt¾ùúü‘ð‘éo˜)(Æt{,fÄçü!æÛs@Æ‚•€9~ D!u8@ü©¥€? "-qH[4ÓCzD }"Lù£§ÿ£_ý~ÿÝ"ø º±ÿŒù{4 2æ_›AÀÁåTdŒí¶íÙE{¥ÿkÌ¿Ûö~ÍF¶QvVöÇß9¡…ÑòhE´Z­…V4+šH¡w¢Uкhm´:Ò¦ ŒÀ$b9àï·í‡µøÇ•D$ª9"­Û¿Ýûo+pü¥ôÏ÷ÿ˜zðl@„ƒˆb‡x!!H’‡T -Ȳ€l!È €Â¡Xh”åB…P)tª‡.AסNè>ôz C³ÐWhFÁ˜憅aXÖ…Ía{Ø€÷ÀIp&œ—ÀUðy¸î„Â/à1ø¼Œ(:+Š%…RA飬P®(T*•ƒ*FU¡šPí¨>Ô3Ôjµ†Æ¢Éh Z ‰S´Ú½‚ÎC—¢Ï¡[ÑÝègèqôzCÄpa$0jSŒ3&ÉÂcj1W1=˜˜)Ìw,ËŠÁ*cM°.Ø`ì^lö¶{û;]Æápì8 œ&Î GÅÅà²p'qçq·qƒ¸)Ü*  /<+M8M:M1MÍ-šAš4´Œ´B´j´V´¾´‰´´5´í´´S´x^¯‰·ÇãÓð%ø&|þ ~‰ŽŽŽŸN•Ά.ˆ.•®„î"Ý=ºqº5Aœ Op#Äò u„;„—„%"‘(LÔ!ºcˆùÄzâ]â(q•žL/MoJïK¿Ÿ¾Œ¾•~~ž–AˆA—Áƒ!‰¡˜á2ÃÃ#-£0£>#•1…±Œñ:ã0ã2‰L’#Y‘ÂHy¤Ò}Ò ŽI˜ÉÉ—)“©šé.ÓE ë“}ÈäryŠË,ÂlÊÌœË|ù1ó ËNG––2–›,c¬(VaVSÖPÖÖÖ!ÖõÜ;twøí8´£iÇàŽ6N66?¶¶f¶lëìvCöö£ìרßr 9Ä9l8â9NsôpÌq2sªsúpæp¶p¾â‚¹Ä¹l¹örUsõs-sópsGrŸä¾Ë=ÇÃÊ£ÃÌSÄs‹g–—Ì«ÅÄ[Ä{›÷#……¢K ¥”Pº) |\|&|±|gøómð‹ð;ð§ó7ó¿À ¨ø  t ,ò îÜ'Ø(øJˆVHE(Pè„PŸÐаˆ°“p¶ð5á6S‘$‘F‘7¢DQmÑ=¢U¢ÏŰb*b!b§ÄžˆÃâŠââeâ°„’DÄ)‰§’IUÉpÉ*Éa)‚”®TœT£Ô¸4«´…tºô5éyAW™£2}2›²Š²¡²5²¯å˜äÌäÒåÚå¾Ê‹ËûÈ—É?W *)ìWhSXÜ)±Óoçé#ŠdÅ]ŠÙŠ]Š?•”•¢”š”f••½”Ë•‡U˜U¬UòTî©bTõT÷«v¨®©)©Å¨µ¨}Q—RQoPŸÑÑðӨјÐäפjžÑÓ¢hyiUjióiSµ«´ßëèøêÔê|ÐÓ Ö=¯;¯'«¥wUoE_M?YÿŽÊÀØ Çà±!“¡ƒa©á¨¿Q€Q£Ñ‚±¢ñ^ã;&s“£&æܦ>¦õ¦ fÊfÉfÝæs;óRó÷âQí»à]f»Žízc)dnyÍ X™Z³zk-b½Çú† ÖÆÚ¦ÌfÚVÎvŸmŸÙÎÓ®Áž}ýkQ‡X‡.GG7ÇzÇ'§B§1gçdç‡..A.m®8WG×Z×å݆»ïžrStËrrqOp¿ïÁáêqÓ“Á“êyÙ ãåäÕàõƒjE­¢.{›z—{/øèûœðùä«ã[ä;ë§éWè÷Á_Ó¿Ð&@3àXÀl v`qà\~PiÐb°IpEðJˆUH]ÈV¨ShsM˜WØõp¦ðð„ˆ§‘‘Y‘c{Ôöß³eU E»G·Å0#—ÃþXÑØ±ãqZqeq«ñŽñ—H á ý‰â‰‡?$%Ý‹Þë³·kß¾´}ãɺÉgR ï”®ýû3÷O¥§žKç…¤=J—M/Lÿ–á”ўəš9qÀø@c}VTÖp¶zvÅAôÁ ƒ):yh3Ç7çA®lnqî<Ÿ¼‡å—ÞÊ÷Ï\ TpúöHø‘¡£ÚGÏ’ “ 'Ží:ÖZD)Ê)úvÜóøýâÅ'ð'bOŒ•X”´3xÖûÜôùÖ/ž9 » øŽÌ¼ }¹ø*îÕÆëÔ7˜79oßrV½{×<¦4vsÜ`¼ÿ½Ýû×>Ÿ&£'LeN§‹?ð~¨Ÿ‘Ÿé˜5š}òq÷Ç©O‘Ÿ6æ²>“>—Ï‹Î_ù¢ó¥Áyaj1jqëkÞûRݷߺ–­—G¿‡}ßXÉYe_=·¦²Ö·î´þa#þîGÉO±Ÿí›æ›o¶Â¶¶"©QÔ_wRÃþþ|­Cr$wxžþwNñKIW DÁŽÐ!xJ¹ÛMaîaëp4‰´~øÝt¢#½-ƒ+£))œœÅÜÃJÚ±›­–}‘Sƒ+›û%¯,%…ï¹€„`†Ð[5ѱïö’W¤YdeÇå-Z¹”²”¿¨Ú«uhðkÔš×±Õ½ªÏf`øÒXÅä„é²¹½Eã® +²µ®M˜í »NûiG¼“˜³‘‹‡kÔî ·#îeg1Ù·çZT]tqÌØ˜8ŸxÛÝDɤ{¡½sû†“;SÎï?‘š––Ÿ)}€# “µ˜ýæà½CWr*sóò.ÈÏ/È?’ôpaEéÇ“‹cN„–xŸt,5-S/—<ÅušîôzÅtåã3—«Jª÷ÕxŸ5©•®c®Û8÷¾þAÃ¥ÆÒóéB›šµ/Š]"_Úl™¹üìJÛÕòÖÔk>m†×EÚiÚgnôuœ½™vËí¶òÆ;³]%wûõ{Ø{{Ÿô]¾W|?éÇC½~ÑGô–¿è~rþéñÁ”gÏ­_¨ ‡WFÆ_¾êzÝò¦êíÑÑÔwcîãfï'(“øÉ¯S/§o¨™Éž ù¸ë“ôanøså|ÈåÌÂðâù¯™K^ßt—…¾3|ÿ¹²°:³ö~ýýÆÄ韟7¿mmýò¿!¬‹’AÍ£;1ÙX;œ4 ÍÚ^|-]!ŽèIo ÌÈM¢#m’e˜=XŠXûÙPìÊœg¸ÞòpðÚPòùúð‚ÆB„ï‹Òˆ™‰çK<—â•”¹*‡’·R8¹sBIJ9^¥KNÝN£Ls\[L'L·EoÍ@Ë0èÃxÄdÖtÕkAÚÅc)b%c­`£d«d§`/å äÈéÄà œ¿¸Œº>ÚÝîvÆ=Õc·§¢ƒ×,µÛ»Òg¯¯ƒŸŒ?Þ*àN`YP|°Mˆd(.ô}ØÍð“Q‘{„£@Ô«èË1‡câtã9ã¿%QY’v’ZªW&PŽ.Ÿ:Õwº¾"·2üŒu•B5KõjÍë³·j«êÒÏy×ë5ð7¢'Îß½PÓ”Ñì{Ñà’` ºeòrÏ•³W3[}¯´ ^Ç^ŸkqãVGýÍc·Rn‡Üqé4îR¾+ÚÍÑCß ÷®öÍß›ºÿöÁÐÃþûú <}òìéÐàȳWÏß¼~?2ùrúÕ‡×ß̽]]z÷}lí=4!4i9•8]÷áå,é£å§£s/çÅ¿d,L|µXº±¬ð½yUaíÆ†ÑáÍ ?þ—†>Á§PvhôuLV »ˆk£I¡5Ásáèº ÅÄ`zC>†M–‰•,άÊbÈê¸#˜-•ý8ÇήQîu^fŠ,Ÿ%„@¾à¡á%Q61mñ‰<É‹RÃÒ?eùäŒärvžW|¤4§BPU3Q÷ÕØ¯Y®Õ¤Ý¨sZ·@/E?ÄÀÙPÇHؘÖxƤÇô´YŒ¹‘³ÅØ®Ë(+%«uë›6)¶j¶ËvíC„Þ9–:Ù;Ó;÷¹¤¹ª¹.í>ïàÎë>ìqÄÓÔ òê &y«z¯ø´úÆøÉû-ú7„ŠÎÕû†PBÞ†–†9†“Ã"r#Mö`öÜJ‹ÖŠþs=6!N1n1¾)!$Q$q"©‰Ö}ƒÉù)&ûQûï¦æ!±"”¾˜q33ç€c–@Ö—ì³Ùæðä|̽š—zØ<Ÿ5¼àü‘¸£z… …oŽÕÅ×/&žh,I°JÔ0! 
¢c!mäüèÀš€½*€ðx@yýÿœrŠ`þ p ¬Š Ð@òL ö‚$›¼Žä³ âD²D3ÈJ†J +Hø¦‡¥à]p|¾À?Qü(ST$ªÕ‰ú„œEzèpt9úz #Šäe9˜Ì¬Ö›íÅnâ”qQ¸ ¸4"4þ4µÈn%FF{‰v¯…ÏÂÐqÐùÑ]"` TÂM"qñ½>}=™!…á3£ã3’9©ÉezÉäÌTæE– VvÖK;lw¬²U²›±¯pÔqºp¹º¹SxÔxÖxÛ)É|úüþZÁx!3aAHdL´K¬Ùé$ý¤ì¤ eÔB\^XAh§ˆ¢„’¼²ºŠœª˜šº€† ¦¨–Œ¶šŽ™®›^Œ~A“á Ñº‰°©£YŽy—ÅOK«Bë [»ÃÈé§ëTá¼åJÝÝë.åQìQ#¼ßùÚúõè^Ö és _ˆ<%}?vOà­sˆ˜3œW•qD§tlòøõ'ËôOQNoUŽWõÔ4Õ–œ;Ð{ÞµIÿ¢L ï¦V|¦ÛA¼Å~G¢K¿›Ú›u¯åÁûGìO‹Ÿs Õ½Tx}cTmìòÿTö‡÷æ’æÛæ—(Ëf+Ñk'7nüûµ@ÿŠEx02àFø†Ód‡ðL à0¨A8„a°1B¢ä%@EP âû¯0V„á}H~^@±¡ôPa¨“¨^ÔZm‡ÎDòóY$+·Arñ›˜oX)l¶ûÇsÕãÞÒPh|iêihUhÓhûñlx?üe:=]-&Uˆ5HÖ›A¿ÂÆðчq‚@šgJ$ãÈ'™e˜ï±ø"ùhí6<Û]öýš›œ]\9Üö<|<‹¼=”R¾~kA&ÁïBcÂD:D›ÅjÅ+$Ê%ˤNK×È4ɶË=UXVdP’T6WñPU+PoÔx 9§MÒQÓõÓ+Òï5Ø0’77i4ýl.o‘°«ÇŠÝzM¿˜ýA‡9'çvWáÝEîÏyj ÷„¯ßd@pàBpZ(sXc„AäxTZŒ`l|l¢@Òó}S´÷¯¥µe$ÐÎÆìÍÉÌÓÏGôÍ9fyœ\ü²¤¢Ô¿\êÔrÅÍ3ÙÕ¶gykçÎÝhÈ=oÙDn~{©þrüU“kœm‹í;jo¸ã×eÚ-ÓËzº?÷ðÕ£‡7Ÿ^zõBjèÃHå+§7ø·×ÞùŒÓ¾o˜4™ý>³þ1}õ9u~c!rqbÉþÛï+Åk`=`ãÑOÅͲ?þG#Ü; p„i2BÖ~Èe  ‚Ä÷’)Âe#P?´³À°/|nƒ'Q$”²âÏ ž¡qh t4º=áFV{fË„µÃǾÄñâüq͸u„E)¢™¤U¢=Dû¯ˆÏÇÏÒÒUèÉ„¯Ä`â4ÂoÌ0D0l0æ‘H·™¼Étävæ –OÈÊNcsd—å çXàæêâná©å­ ”ñâ¯hìº/<,2+‰sI¨KzJ”¾*3-Ç%ï¤pbçk%AåH•[j$õ -AíC:KzÞúC†VFý&V¦Cæ>K–áVS6ž¶£öžÓNΛ®GÜDÜ;=½¨hïF_'lÀ È‘ÐñðJän.½{+¾ Ñg¯F2kÊrêHú­Ìú¬âƒI9yòùä‚Õ££ÇúŽ·œ¨›üÑó“"rc|û¹n>ê‹Ò—Õ…ë‹1_¥¿Î.•³Z†—/~w_Á¯\Yu_î]X·]_Ý8õCûÇøÏŒMÍ®-¯mÿGû+ œ%R ‚B?Žnm- #\e!?nmmTmmý¬F’ ä?;¡¿ÿ¯ØVÆ"œ{yë6êÕÏLÝ~þ{ù/×Q‚D4²ûœ IDATxì]|Å×=)/½’)$zïEº ˆHUŠ(E:JUzA¥(MAAý¬ 6ÄDš€ -¡$@zß½&,—dƒ!ÊÝrçÌfÏÜ;wfmÌ AA@þ3¶ÿ¹)@A@4„TåAA@Š !ÕbRŠA@„TåA@Š !ÕbRŠA@„TåA@Š !ÕbRŠA@„TåA@Š {#åtéÒû÷ïGvv¶–Ü××›7oF™2e´k¹/øÈó!ò~÷#ÂÝÆo¾ù&–,Y‚„„üôÓO Òx/¿ÿlŒì¨tîÜ9dddä•agg§*9È}ÁGžùûP/y?Èû‘®»…˜L¯\¹‚&MšàÇDhh¨zÔ­ ‘ªÕœ)‚€ Ü#Ìž=£G†··w-R-¹)‚€ @3ÿº»»ÃÆÆ¦@8 9*ñœ)›x%‚€ ÷"…*ãbˆTÙ E?gv/*mA@ CÀ©*¯¾Â “û‚€ ‚À݈€Q‹­¡%57 Pzzº–ÕÑÑÑjYYYàÒÕÉÉÉjmµ ;8’ÛÎ8ñ‘q²µ54ιƒ[,¢ ‚€ pç!`Ôbkè îççW¤—ýñãÇѰaCT¬XQû………aÇŽÈÌÌÌCræÌ™¨Q£†v¿R¥J(W®þüóϼµ°=zôÀŠ+œœœ—Çò¤ÿþˆŒŒDTTTÞ­óçÏ£qãÆ8}út^œåɆ ðÊ+¯hÏ–÷Jòú·ß~Cƒ ò0(_¾<^ýu¤¤¤hbÄÆÆjmaüô?öBc7o£FÒò3æúð÷ßk}Àí,C}9A@°Ž€Q‹­!Mõ³Ï>ƒ¿¿¿õš¬ÄöéÓG{¡4ÎÎÎXºt)ˆ¯¾ú ááá`2ܲe /^ŒFid;qâDtíÚUÛT¢jÕª`r¼páBÉZVÃDÁk†˜LNœ8Ò¥KÃÞÞ^KÏ$Ëp~!)) ©©©ùÝ.‘x–}øðáhÙ²% OOO Ÿ  ..N#KîDn˼yóP¡Bm`sôèQ̘1xð`­í'OžÄ®]»´Eɬñs`l8 ‘oNNN‰´I*Aà^GÀ¦ª_È[`ñññš§0gõêÕ5Mrîܹ𦕖–†þùßÿ=6n܈îÝ»kdQ¹re¬]»V#î)S¦àÒ¥KZ5lÍ/lÚ´ >>>š¦·råJ\¾|ÙjRÞ ƒ5bÖôôZà7ß|£×̓€‹/æÕ¹páBT«VM#ü_ýU#hÖ 'L˜€Õ«W£víÚ`ûúü¡#""4Í—‰ž_|ñ…fÒåÿúë/Œ;6¯MJHŽcBåô\6üqLš4 ¯½öÚuéYóf¶^½zèÙ³§†/“.cÄ¿^xŒ=P°e@i¼ªN9 ‚€ ÜF-¶†Hµ("°ÆÅó‚?ü0˜LY[bmõÝwßÕöçŸÖŠc³­‹‹‹vÎë~X`ÖØ:&ßÂk¿lú|ùå—ñË/¿€µOËÀ„ʦR&êçž{L–‡†ƒƒöíÛ§ÕÉš!ËÄdÉe0i-_¾\“½}ûöèׯŸ¦ 3A}òÉ'X³f &Ož¬™ª;uê„:uêh×lfe­™ÛÊĨHzÚ´išæ©—Íº¬…sÙysÉ®®®èÕ«—F–¼„I™”Ùœqbâä¼¼ïX’˜˜¨*—Éç<¨¹löÚ.hp¢%”ÿA@ D€-¶üN-,"U£^O\*›yÛ´i£Í‰¶k×kĬ1r›ÂÈôÌó½*¨öñ=ΣlúæºXkU6þùçøòË/5M™ÍÞ,“6cÉm›:u*–-[¦ ˜ÈyPÀù-Ë××%ç‚€ Å‹€!RU„b¤jž+­[·®6ŸW¶lYmΔµ7ÖêØÌDÅd wBbOà½{÷jZ k޼& E@ªn&‘>úH›‹d/Þ÷Þ{O›eg%&UKaÒTeðØLÊeèÓ)òRfÖõë×ãÛo¿Å÷4÷ËD6dÈä” êÈùTÙú¸#F`ÕªU˜>}:ºuë///u[;òÞ‘œ—ç…•ÉV%à›š•ã™àYÛgoêÖ­[ksªÜe&çÁ@«V­4Bå¹U¾æ:YsÎo£ê“£ ‚@áµØ"Õ«»–‚_î¬mñ%ïìÏäuìØ1M³ªU«–F¸L*lBeX&€;w⡇‘#G0lØ0P¸ÄÝ»wkNOÑÑÑš,{sZ6ý²)—¿ œw˜<Ùùˆµ?LZì1Ë)ÏQ²)˜,“úi‰¯þÇÎl.e‚Ј”µlöÀÕ“°Êc­ ¾÷ÄOhš*2Ï‘²§®>°llZ^·n6Ë5—Ń…Y³fzok&nEÞÜN.OoÖ弬á³ÃÏ]³§5“ª5™õrȹ ‚€1ŒZl¯Ù$ (W™G H’w‹—¶¼ôÒKš©•=z™@˜8Ù¨eË–Úœ!›2Y›cG¾ÏdYªT)­ &ž;dm•µEÖÌTà8Ö^Y›e™”I• †=x™¤xΑ“;*5oÞ\‹c‚aÂg홽v­&,Ö´ÙüËs§¬åñ|,›˜Y[7²™2—˄Ȥ̚/›ƒõ¨êå:x³‡ùóçkZ-ËËš%/—á9Z5_Ê顪¼l‚þàƒП<¬õa̘1¡ò€…çfÊ«/CÎA@nDÀ¨ÅÖÐWjX»SZÜUÝÃfIåʤÉäÁšÿAð}6×2ñ0™ñ¼$_³“/koʼ©jà²øÇÄÊeñ¹ ¬³s{ÅÆÄÄä}LÖ^™€YÃc-™Ó1a2épl‚U×|d’ç4&“)On&e6Y3Y2Iêóp\½Ú=еF^‡Êš7ÏZ \kÝ\–Ò6g5¯ÌÈØ[N3vìÕË–‰çX¹^¼¨¶s{ùdz)YánM‰A@(öa¡ÿé÷T•ù± :§Q÷™Ðɪ¸‚›yëîêå*J-ìùûàƒjNZì¨ÄZnam1‚SQd´‚€ Å‹[AyŠŽ­CšjAȽë`'£W_}U35ó¦¬íJA@¸³0j±5DªìõÄ»±RBÁ°ÖÉs£ú%1绂€ w †•Œz=E™:õi­Åéïßiçj•ÍÙA@þ7X›z³WœÒ"U£^O–‚1Y*´/J0DªF¼~ùœ U)ïïË?&Qþf(¯ñá=tyƒ{Þàf.Jã$­ ‚€  `ŽbNâ½ð.|üéOÞî–7bcbå 4Z•F-¶†–Ôè 
¶v®'TE¦¼ãïîÃ{ß2™òÉy+Bµãµr$NA@¸•07ñn}Û·o×¶Ô7ožö™LÞÕ5Yþ)­U¯ü]§zmŸ¿ZQØîü–¤ÊBó7Õç½{™PyK=!Ô@–[‚€ ·õQþtgçεýÞÕÖ¯¬²µ•5Zæ5}°Ü.VOnˆT òz⊭‘*ï·Ëùš5k¦í·«g|½r.‚€ ”4¬äñGVø+j¼×<+‚–¤jI¬Fd4DªÌÜùE¨j>•÷îUêõñãÇQ½zuÑPóOâA@þgðçHÏž=«í‚ǼÅü¥4UKmµ0‹­j„!RU‰ó;*be–ד*k«l§-5?ä$^Aà…ó¯FáŸ"Uæ1KBeù ²Øêå7Dª…y=1©ê½~Y8µlF_™œ ‚€ ·ÌYüSæ_æ3KÓoA[}{ -©)h*W¬4U®T¯­Z ¥¯XÎA@Û½–Ê<¦4Õ›á0C¤šß:UE¨ê(¤z;<"ƒ ‚@QàiKþ)MUqš:òfa[UŸ!RU‰­¹RfuÅìzbå{A@Û&Sþ)Ósš’» ‹­JÃGCsªF¼ž˜@•Ú¬ˆUHUµœ ‚€ ÜŽ( •¹‹Ö¸«ÄÖ©*€”z¦W÷ä(‚€ Ü®Xò–â³›‘צÊÌ]P`ÔÓ) Ê#÷A@Ûæ/æ-ŠËø¨F,¶œÞ©ê .è\ Ãi,*(ŸÜA@þ×è9ÌR–]§jY¹\ ‚€ w…YlU[ yÿõzR…ÊQA@¸0DªFשêM¾úó{Xi³ ‚À€ž¯” X,Ñuª–)A,ãåúD =± iä`†£‡ŽºÕ"víצ–}ö’Ó-Ó__þñ÷G£Ïk?âB¢…¼údY 8²÷0µ1,ù‘ O áÒo•Èm.Æ~³ £µ;Ž"ù:Œ×Ù„yÕ~øtÿ%¤ç:3jÇþ±Mê×Äeß ÞóbífŠJ8¸­Z¾ƒCÑIn?;ôôÿMÖ¾Ö‡Wûó©ëq"19ü|À!öä×hwß|üu÷þÍ´JòÜm]§jÈükÔëénQÚssD}·ÝG-Fè#1­kK”uÏÂÑŸ6a‚8š´o ©øSQˆñKC¶%oÚz£Ó ðóƒƒíMè¾Á€Þ‰Ø¸ù „»ë`ÆÅý?ãTÝǰ¨G=8˜€ìtÝù>íŠR›¾E·ºap²ÓeѦ%Åâ|B:Ék)°.¢ÑopÌÛ´-ÃáU©;žJ££C>…ê³å<+ GΜ‰´»M…“)ÑQ8}r6ÿ6Í#ÂÑ™ëNÇ_-Æñ#áA–ƒ¬ÛPAŠ#í9¼9`>ZŒ~¥}œ]è³Ó N™ÉˆŠ:ƒ~S¡Y¹ØjpåÌ.L|~&b]B°l`MC8À¯)ži0 Ë¿¨‡{·„Ÿ+=ÿˆ€!MÕ¨×Ó”E²ß dÂêñ«àöÀTÌ}zîo\Õª7@§'fãÍI­±wÙ»ø3*¶.ÀžOßÅœÑÝP­r$†L}GãH˰ÉÄå q%5“4ÒJNí‚áõQ­Rú=µ¿ž¾Œ\å,;7,@½šÕHciƒ©¯…è¿¿ÄØ^óphÿt<½è}œ¼’ªi> VsV&êDÖC«&­Ðºuk´nû>;5²RqâB"2ˆáó«OO¥ÉQ`åÄ^¨Ar‡‘¦»˜4ݤäSXûôc8þ÷~<6h~þ÷ ’.Ç"î21™¿Ïí݆‰ ê 2iU]ŸZ€Ý§ãµv¤G}‡Ùó×ཷ^DÝÚ5¨-ý°þ§HÍ¢³bñÕÚ™¨[³ª¦•=µ`#ibdN·§Ý˸QVÈ15§aÓ¦_Mi5¹þÁ–."9)G»æ8«m måYHò¼µ|j¶múMÅW‡c‘‘“Ž]báªÕX0ª»Öö!sÖã_M3¤ÓÏaÛª‰¨S£2ºÁ›¤E§fähåÍ^´m|uëŒÀ7G.]í?ÊC!ê»×ðº©nOS”¡g'·ÿ›£aóÖ¸¯ õ#õåÃ}ŸÁôšY8›œB²ša;´:?/ù?ì=wØ r…•ÿïiŒZl ‘ê=¤4¾H¤Ÿú œ¿ˆ®›¡l€7ììH“°ƒ£«'švîƒ ™ßc賓gÓ wïÿ!%üAÌœ:'è¥ÛîÕ/qöb ¾zg /½È/ý†=㇇0}Á ;²’kÇ$bÿ†ç0rÚ§èüÔT¼<¡!6-Ž÷٠ѣmáìÖ ÍëW†§£ý se餦Ûyðç 3’pâ—°7½2Ê–ö)aO¾õ]³¤&ck¡xñx&¼´ {bÅ„ùøñL*¶ê çZèÞ¡>B¼‘½Ëÿ9‹Øc_ Û€gÝf–¼¾u®Áã-^ÅèD¤'ŸÇƵó1rÃi 7 ×Û™3þ§.%ãØÇÏ`ÜœŸÐ㙹xméTœ[3ë¾9„D½]ײw’€'çLAý+ðÏ9™=ø¶ùÜN \µ•éfs>m8GÄ{o‘<ãvš0yþ<4Oy#:ÎÇ_§/âø®·°rö$ ïŒùóFáäºÙºøkÄ&^Ä'ã»bì;ñøäç1­A:^Òüu Ô¾×§cè¨_вKS„ø9ãš"û?}^÷×A°·+² =;ç‘ÎÄ! 
YiôLúæ¥S¿aÏ_i(ãì{®À€[Åx4éì;~i×™Ò-•ë{£[Cæ_£»óßë Kû šÑº‚¼a²WfÉ\dL®^ð±åoÚ -‘âcû?ŠH_"rŽ£ãô£¸Ø×I‰WÈ4›ƒ¿Ý€'Û`Õ›ƒÐ¼¢Z…ëž‚?6Àþç·á¾§–c`·æðslƒÌ¸D¤z… m»êpÙ‰v‰T®7ç9ºGWGË7œ‘k5#351Íf¢a¨'¢xÉz}'E•¼I7{Tœ0+*4C“p_¤‹EÚìCHËvE³ûÛ ±ËN<ø`”ñrÂñ,{øeÚâôwkp:ì ,ü(j…¸£qy|×b ~ý÷•ÊFJ²f?7½†"»Âe¼Õs7‘jªWèi+‚кqy8eEá¯øü@4†5 ÊÔÊÿ™]Ù-Rg-¶î=ƒû*øc×ÚuhØíiÔßÿ¶k ϯ ÄÀ¶ä¼“ÜËÆ?ŽÕK£CüÐr,ví ×óIH¼*F÷쌊~Ê8ƒ‡Ÿ;ŒS­“0}Û¿ðò2ômS n¨Œ³«Úà›ÿ¢y‹t¤$ú`úÛóðHÓ ðõtºŠ= Ÿ‰øcɈìèG{ßyv”‚~c»·Ä³&´QΜÌ4Äǔź֑ðp1Á”ÑÎUëdâÃQè×´"ÜM×?3”B‚  !`ÔbkˆTz= ö‚@.¿"&>™y‘^Z×0I¿p {ÉÛÜÕv‰6x¤s5ù¸Ãɉ^òõ”ñ 9õÔ£9¿\oÌÌÔl2~ˆ?&׿ì4œ=‹Óã4SqÃòÁðpu£ƒ ºŒ™,GšC=± 8;+Zj2Úc2f÷n /“ ìí²p`ëR<³j7Ž]ê‹à®ïƒë‹OE2)ª`vˆÇš~1ú iš4¿w.¦.L´9™ƒÉŽ4Ágg«ˆÃ–â3°ÇTmð$B|=(‚Ë!Ô>±Iô©)/vbz ÃÊÀÓÅ)ž^°É¾LdÃäè€CoôǬÑñHJÏDbìÔmc«Óô”D׎fÂ<Û)­ïŽÅëá|3¬û>¨ ÇC°¹Ú ëm`z¢µP5°\aF²¦Ñg±2`& êDFÀßÃŽ$[ÅÚuÈìû¢.¹"65/ï‚Ušu  ÑÐMÕn> õ‡·' f®g‘ɨîG"ÇÜPسã@ørÊPŒy:Z”ó…­ƒ=RþÝ^ÃàÀ¿qhWÑ“d5†«îÙätœFŽmìý)Aø¯"ÕüÖ©þ×Ê%ÿ݇€CPt¡9¿w¾ÞªÃÙÝ!¯‘ÿüº“L—Q)¤ÎxÙàŸ ÉÈÐLy¤³$] aUÒ² ¥àfg^‚9ü ¬^Ò ¡ôB6eÅáࡳ¨Z7)Ú:Ðí%Ã?~K嚢&{>ågÆ£[>¨V¥&ÜMZÞH—.˜¶j#ÕM_§È¯¾ú¡H=›ËY±»1dÚ40ãÚÔEiÇÛz½šù…L?úGܪ ¶pöËÄßôÒÎ++1”°¦#µA+–HÊV¿ÜˆËJÆs&a­ÿ#XüV;Ô tÂö±í°…¸‡ïÒÓìÔô!„M^Š÷Vÿˆ¼úà¹PK!6"²)¸ Tò©LØ‘°šh6&¤Ñ™¹J's¾9 ¤ÆŸÓúÌߎÒ#Ï­zmÂKÁÞdÆ™ÃûaS®&‡º¤™žÇK[¡jå꘿ÓuB÷aâ„M8I/ç*21§ÿSøáßXÔm(Wèýã0§›™âÛ bDUt™r׎F`/2Sˆ ßuSyn0™ÜQó‘^ؽâ4¨^ FoE“æUqtÝ+Øs6^Äò<0°lP®]€ÝÖ m»õAfNe<ܪ"ÜœžSæFÔ†Ó±ŒYV>ÝÕ*Ö@§1¯¡ûìa¨ZŠ–8ß^þÃ:5@dµ&ÿY-,Ò !e›`úšIØñÚD4¨ZµZ<÷ûF¢O½²pãL4ËêDƒËI‚ª÷?€c.ƒGCÆžJH1Ò—h×"èv#:5íª^]0Z1ñG°äp&<¼\i:@_žvWþò0ºNÕ†æ ÷áááøöÛošWŸð§røã®éä}—’’‚¤¤$òöKÀåË—qéÒ% :{öì¹!ßu…ÈÅ]ˆ€É ˆ9…èØÍ´çZª4BB‚PŠHÓ†&ö’/F#1ÇDÚØEÄÑÜ©§‚K—‚sÎa ®Ðe—Ž1÷W…Cz<¢ÏœE<½tܼDÄIŽ(6¤ÆŸÂÙK ȶ#-¥ iL¥\aKó®§OŸEºÊ“³TîaLk8©Î$øºëú®¢‘íÁe›“–€s±Éðð‡ ‘·?Yk¿Ròiƒß•OQgt Ö¼Ô‘4Îغz l­µuÉÀ»}«b]ÄÌéQ t ž"S¾‹‰´ÆŒdœŠÂ…¸Ø;.4¢ðvMVŠÖ_j“æŒtýãž»‹˜¢Çú­èZ¯œí {v¨'u9ç•IýK™nðó |ÎÀÁ'Þ‚æŸUÇ/õGµ2î:,¯—S®î^ÂÂÂ0iÒ$TªT ^^^ððð€››\\È_ƒü ìíÉ"BÖ£þª F½ž /IRÜØÀ–ЗwG™°Ü1/«¹¶”‚´ ß`ÐÀÏ¡”„MxH?…UãÃÖ‹0ÃÙ–FØÁÉÝå+•ÒÖ¬òƒG ¶ð&‡Ï@ÊLÎAyeÛ;#´\y2×ZjF6p¡:iyìõÁƾÁÁyqùÕgró)BZpó E¥Rœ‡êÕ*ö†iެ;„ÂÝÿªiº–ÇÅ3€´;_2YR.Ò0óÚarC°*˜J´1¹"8XC°÷D¹JÕ4k¯ú£öö¦¹N®H's®Tü?kk×Úbçäಳ_ y˜Óu~mH>H†ÚSö(†ÊÁnÚË$Û dþA&ûòžÔ§äfÊí³«eÛ:Üå"Q†:Ó†ú)¯?ôíQ‚èŽö¾u0ýÙzè÷Ön4ˆ(çBž ŒòÊ¢þ¥>Qýë`‡äƒ˜þüvL\4åhP“×'yeʉ p ¶Ø®\¹…ù"ÕkÅÊ™ PØV9uæ—Ó¨·eùúƒ±°m$:Ô. Vä8ðKÚz1äè“÷öÎM››Á’Pu÷ œæ_ßµÌ<{-䪺fγx`¡Ïe-eË¢/N#TËD7ym­ Žþõ0ïÅtÒœµ¹çku;¢ñ¢xÕ¿|h©’6²¬—n—×™–7ó¹¦AMµGgáåÀKð ÝŸx¦ÕiäÙɧH£ÑYé¶è»à]4j×ëLÉFKt÷²Nõ^êí»¥­&4ïÞf{G8YÙ¸ániæíÜ“outïLó¯´d桲Ä&„·í‚0[- 2n 3ÒV{—P´l“ƒeFrß|{¯´iMK¡hÉëÛzóeJλ£[Cšª¬S½{”Û«edîu½jú¼½»w¤!kó5_±ëÚmïètÕèºèb¸ b#/ñ@„ÿEµ%ÞN©°D0Dª…ÙKTb©LA@(aŒ®S5Dª%,»T'‚€ ÜVµØš‘uª·Uߊ0‚€ %Œ€Ñuª†HÕ¨×S ·QªA@n+ ‘ªQ¯§Ûªe"Œ ‚€ PLµØ"Õ¢ÈÄëèŠs-]Qê–´‚€ ‚ÀÍ PwµØ"Õü¼ž”ú£jŒ«BBŽ‚€ ·3z¾Òó™:gÙZl yÿõzºAÙA@[€!R•uª·º¤|A@Ûü,¶–2"UËLù]ëUe>çÝýªÌù•)ñ‚€ ‚À­@€ùIû„âÕÂõfYŸQ‹­¡9Õ¼ž” |ä }M„ÎYÃ=zô(22´¯7[Ê(ׂ€ ‚Àÿ #GŽÀÇÇçºÏ»Yò™®ÄשZ*+«Ë¿þú+ÒÒÒ”\rA@n vîÜ©};Õ?æ@Š úĢⳛÒ¦jÄ„ËB°­~lúm×®6n܈íÛ·k2¿% ‚€ Pܰ·hÑ"4lØžžžw1©2‡Y#ÕÂ,¶J¾ÿ<§ªØ]™|ùÈ„j2™ˆ:`üøñà5>íÛ·G­Zµàìì¬ê—£ ‚€ P"°‚xøðaüüóÏÚÇ›6mŠŠ+jœÄ¼¥•yLqšÌè:UC¤šŸ×ªþÇ)BeušÉ“îÑ£~øálݺñññšó’ÙÌŸ#– ‚€ ” L”ÞÞÞprrjåÊ•áááAßÔuÐA=±ê¹¥3b±åt†Hµ ¯'U1 kIª,8 \¾|yÍn˜˜ˆäädÍq)333\…`¹+$‚€ 'ŠŸ˜›ØzÊäéJßlvssƒ¿¿?¼¼¼4‚µ$U¥¥rþ¢C¤ZØ:U®˜…V¤Ê:::jšª"ONãîîŽÔÔÔ!Re¯§•+WjëNõ™ù\ ÍB°àLœL–Š09%¡²öª4X¥© ±2RA@Šæ&õSÜÄüĤÊ$Ê**“«žT9-s–%±f±U2"Õ‚¼ž”ЊTY[eÆWã9ŽÂª"T¥Å*RåôB¬ 59 ‚€ Ü,ÌKôü¤,©ŠX¹*‚Õ›¡ªrŠ"‡!R-Ìë‰+V¤jIŒj„À Ð*—Éi•y˜…¶Ì[”†HZA@A€Г¡"Hæ"=±2‰Zþø¾ÒTõep™Ylù¾ †HU%ÎïÈ•ë‰U¥SDË‚²ð¬òO™†¡*2UG•_Ž‚€ ‚@QP„¨ŽÌEz>bNÒ¬ºÖªÊ«ê.Èb«ÒðÑ©ñzR¤Ê…ò¹ºV£&Rõc2UJz"ÕŸs9A@¢" 'D=)be^RܤÎù¨ÈWŸ_ÕÍüe$"U#^OJJß%¸ÒN™8õ„Ê×B¦FºJÒ‚€ ÅEê¨øI‘«"QuTéøx³Á©õzR1i*ÖWÊ׊@‰ªë›^ò ‚€ ! 
x‰Ó¨s>ê –¯9p\~ÁˆÅ–óڱݲýõ¤iy®¿…Õ«*ä(‚€ p" “›¯Îùhí¼ ˆÎ;§íÂÄ bAÁ©õz*¨"uωZ‹Séå(‚€ Ü Š8õy­Åéïÿ×sCæ_£^OF„±Ö kqFÊ’4‚€ ‚Àí„@þd”F½žtYäTA@¸kxóÍ7‘Ph{ ‘j¡¥HA@Aà.F`É’%¸råJ¡-4DªF½ž ­M‚€ w ¬¥òj–‚¡9U#ëT «Hî ‚€ w;†HU­Se/`vZRs¬¾¾¾Ø¼ysÞ×kä¾à#χü}Èû!wçy?Þ]ü0pà@xxx:&0´¤F•Âët222Ô¥¶Á®Z·#÷y>äïC½ ø½ ïy?Þ-üÀæ_ww÷¼õ­ê9·<‰T-3˵ ‚€ \CÀ£Òµär&‚€ ù! ¤š2/‚€ !Õ"&ÉA@üR͉A@Šˆ€j“ä‚€ ‚@~©æ‡ŒÄ ‚€ ED@Hµˆ€IrA@A ?„TóCFâA@""`h›BÙ~P¶”íeûAÙ~P¶d~¹×¶_äO¾ñjxG¥Ÿ~ú AAAÒ¬¡•dûAÙ~P¶”íÕ›D¶”íï¥í'™Lù“oMš4Á?þˆÐÐPõ§`õhˆT­æ”HA@AàA`öìÙ=z4¼½½ l±jðÈMA@Ašù×Ȇú†•xN•MÀA@{þì›M¡M7Dªì¤¢ŸS+´TI ‚€ ܃"Uåõwâ#MA@`ÔbkhIÍÍ♞ž®eutt´ZDVVøg6›áäädHµ¶ZÐÉmW81A@n?ŒZl iª~~~°µ5”TCâøñãhذ!*V¬¨ý°cÇdffæ!5sæLÔ¨QC»_©R%”+Wþù'”VÜ£G¬X±ÉÉÉyy,Oú÷ïÈÈHDEEåÝ:þ<7nŒÓ§OçÅYžlذ¯¼òŠ6ñly¯$¯Ù=»~ýúy0N?ÿü³6Ð(I9¤.A@‚PÜTp*ÀS~öÙg ,¬¬¼û}úôÑHõÓO?ÅW_}…|Ì#:&ÃeË–áÙgŸÅ×_Ï?ÿÕªUC×®]qèÐ!äää€ÉñÂ… y$›WøÕ&[&¥þù'NœÈ#"n8“,kÀù…¤¤$¤¦¦æw»Dâ¿ùæ :;vÄ–-[òpzüñÇqòäIM{/A¤A@bCÀ©êúVs||¼æ)ÌÄY½zuM“œ;w.X KKKÓHðûï¿ÇÆѽ{wT¨P•+WÆÚµkáïï)S¦àÒ¥KZ5lÍ/lÚ´ >>>hРV®\‰Ë—/[MÊ»a°FÌõ¿þúëHIIÑÒ1©5J«›/^Ôâ¹Î… j$ߨQ#üúë¯Aóà &`õêÕ¨]»¶f_ÿã?´cDD„¦ù2ÑOœ8_|ñEžI÷¯¿þÂØ±cóÚ¤„ät<Ø6l˜VkÜŒ/,fRåAÒøYv–‘x0ÁZ<0ïôÁƒŽ«W¯žÖNÖ Òð• rA@0†€a‹-‘H±ÒÍD æàà`ó¬Y³Ìd‡6“Vh&ÍÓLÄæ·ÞzK»OÄqC½Dæððp3™nÍM›65O›6ÍL;YÜŽ#ˆ°Ío¿ý¶yçÎf"3i«Z:ÒRµº‰Ìo¼ñ†V°ùµ×^ÓÊ&"4ÑšÌÏ<󌙴i-ÍóÏ?oNLL4ћ˖-kþøãÍÓ§O7Ù™>l¦%Ef"O3‘¸™Ëã4¥K—ÖÚHD«•Mªå!7³zè!s¿~ýÌDÚÚ5ÿGƒ ­N"B3™Äóâù„q"â7ÇÅÅ™‰$ÍD¾æ_~ùÅ\³fM­Î3gΘÉ|m&s¹™HßüÃ?˜¿ýö[sÕªUÍD¦æO>ùD“™ÈÖLd{]Ùr!‚€ psœ={ÖLÊN¡™ iªF½ž˜ïyî•M¾mÚ´Ñ´©víÚçLY%Ò‚³³³–ÆÚ­›››¶› <_J¤£™˜YS¥Vâ÷ßÏÓ9/DZ7yòdtèÐDlàyZÞb5fžË>|8xàÍ4ÍÎB¬­îÙ³GÓFÛ·oqãÆifâ­[·jšç[ºt)~øaŒ94p@ï޽ѷo_m¾˜5FÖY[gM‘µM6g0žžžyMb­—ÍÔl°·¿ÞWŒÈ^ÃèÃ?Ôp4h&ëæÍ›µ<,#ÏM»¸¸hørûið‚N:áÑGÕÚÃøš5kòÕÞó‘A@CµØ^ÿFϧh£^Oœ ‘´M,Z´¼­“ϛΛ7­[·ÖÌ ù‘&Ïs2Y¶À–͸LLLÜL’<‡ÊõÝwß}y-`Rå4UªTÑÊdòâ9\&õ]»vi„Ì꼃ƒø«ÉÊ„Å$h2™´²ht¢É¬&©XöÒe’d"ã¼Ês™ë ÑÈ´[¹Ml¾Õ“§———&7 BÉÜËmPÍÒL¬ìLÅÒ†µûLà\›¯ùÇus¹,?2ØÌÌ&uÆŽÉqdy$‚€ ”†HUбøÅþâ‹/jÎ7d"Õ²°“ÓºuëpàÀ°ÈÏr<ÊÌ™3µ¹IÖYsä­ 80Z,òG}¤Í]6oÞ¼\çèÑ£Ú5Ï«ºººjyÕL:ª þ™Q5â׎Қ•wòúõë5Ââxž×ä9S=)ª²ù¾*[7bĬZµJ#ænݺITTYdªÕæœy§Ü6žk4i’6¿ÌÚ®Â^ÉËíá \?çc ;&xÖ”ùÇ_“ ‚€ ðß`‹-ûï°bUP0dþ-¨Ë{­ZµÒ4;vèáý™ Ž;ÖøjÕª…ºuëj³ •kX“¥yQÐÜ#Ž9¢9î(SéîÝ»5§§èèhMe`NË›rù«ìœÓ³gOMcc-Oyõ2á±ÔÔ©S5-”·YdS0Y&õÓËÏŽRLò{÷îÕ´E&h&8Ö„©éÓ[+ƒï?ñÄ`MõË/¿D¯^½ HSåe"d-žµQv¤b³8›–¹Ô«ú“£{O³ʤùÒK/iæ\vÎRƒUÍ?ke±ùœ5e&uî|ÖX%‚€ üwŒZl iª†½žHn6W2°©•=z™˜8™0Z¶l ~ñó~ñó<'ßg²,Uª”Öêwß}åË—×´UrÀÑLÆ Ö`Y{eŒeR!{Dz6Ìs¥˜¸˜°X›å8&@&|ÖžÙk×Z`-5m6ÿ²ÖÌdÆó±ì™Ë£‘Í”¹\&g6á²æË$§7ïªzÖ¬/^¬ý8ž5Sž¿eó9¯ÛeyÙCš5{Ö<É1Ëê·üxî˜Më<Èàvó`æÕW_-ôk J9 ‚€ Œ€²œŠ¸‡È¦Ð‰7Öî”WX|Ÿµ.ž£d ŒI“M´lå¿ô9ð}&&&36[ò5y·jäËó¡\Ž>pYücbå²ø\&&%6yÆÄÄh*:“k™¬½2³ÆÈŸíátL˜L’\Ë©®ùÈ$Ïix^UÉÍ0±ÉšÉ’ËÕçá2¸&zµ{›ŒÙŠ—ÌXš¤•̬I2Yªe>Ü.6‰«Ákäüã}—ù—±²¬›ËcKË̲°ÜœÖ™«ºå(‚€ `0òaáMŒØ¦ `ˆT *  {Н‘ZKËiÔ}&4E²*ÎZž’ˆÓËU”ú˜$yý)¯oeS.k¹…µ¥°º »_ù$­ ‚@Ñ`‹'ûó°õ± pKIµ ŠïÖ{¬u²é•M·¼I„ò"¾[Û+íAà^@À¨ÅÖ©õz2,k]–ÁZœeš;åšÛÂf]6ùêÍÓ·«üÖ´hkqE•ßZŸZ‹+j¹’þæ°Ö§ÖâŠZºµ>µWÔr%½ PX{Æ­ÅG]ª CŽJF½žT¡êÈ\êÌò\ŸFß-GžÿdS6ÿî„ ÈÔ9­[keßêû\¥WqêZŽ%€êO®YK?—|?H%‹€zÖ¹Vu^”羨Ò"U£^OªrE&ü"åsý‘ÏÕ V®òʱä°ö€qkÚú#K¦×¾¥ŸK¾¯þKÒÏÿ=É{§"p³Ï½e{Zl ‘ªeáù]+’ä#¿põ?&æüV‘l~åJü­C@=pê¨'RöækýO)K$ý|ëú¥¸KVý«ŽÒÏŰ”w;" žwu4òÜ«´–í1j±5DªFÖ©*BUDÊ$ª~¼_-o-¸oß>í+5¼nU¥·üN¼æN¸Óܼœ‡×ÈòZY^ÄGv¶b‚åŸzàTÿ©>æ£ôóíÿôW?Ó*òþžyÉ—znî ï†÷Êí‚¶zîù¦Þo¼äÑòý¦–[åQmàwœ‘`ÈQɈד%™ò†üãïòÖ{ìŽÌ;ÿð^¼¼–ÓR`#ÂJš[÷#÷/ âÍ&,X m*ÁóÄL¸ÜoüS/Qéç[ß'·¢†ÿÒÏü Dþƒü=ߊž‘2o%ê¹çÝúx;ZË÷›Rôäªä +Éuªü‚ea™ÉÕK–G¯¼ oÓ7dÈmg Þ5ImŽ •ãí‡÷o¼±}ûvmw,þDžûN™OTŸóºbéçÛ¯Ht³ýpÿ±Fª~JCUG&UéçÛ§¿nV#ýÌ}-Ï7‹°ä»àçY½ËøÈüÅ<¦4U½ÌYlõéŠe’“I•‰W?§Ê/aŽ—pw Àÿ¸OõšªôóÝÑ¿ªÒÏ 9ÞK¨çž¿¦¸ÌÒ¨ÄÖ©òK–Êü«×V…TïžÇR=tÜ"Eª'¤z÷ô1·DúùîêOi1ø¹WZ*“ªÒTõÆñF‚!Mõ³Ï>Ó¾§jY "TuTšª2ë²Ì+×wêeËs zRåxéç;«/ ’Vú¹ täÞÝŠs–žT§©cQó ‘*{=å¸RfuÅìzb•—m~¨Ýyñê¡S&±HÜy}hDbég#(Iš» ~ŸéçRõœ¦ÚjdgANkˆTU¡™@•Ú¬ˆUHµ Äî¬{ܧª_Õ§®ÿk?g%' 1= 6¶Îðôäµb–Ød!>64xƒ£ ü<Ý`{c"ËLr}ÜÊ~¾ q$‹ P"0¡ªgŸÖÞiùYl-4äýkÄëI ¡4V>J¸{PýÊõàý·~NÀ® иq#Ô¤¥;5jTB¹Ççà›cñȼúø¤Ÿû 7FÓ:uP¯^=Ô«Q ÷YŽ=’¡I’~£› 
[binary PNG image data omitted]
SQLAlchemy-1.0.11/doc/_images/sqla_engine_arch.png [binary PNG image data omitted]
SQLAlchemy-1.0.11/lib/sqlalchemy/util/langhelpers.py
# util/langhelpers.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Routines to help with the creation, loading and introspection of
modules, classes, hierarchies, attributes, functions, and methods.

"""
import itertools
import inspect
import operator
import re
import sys
import types
import warnings
from functools import update_wrapper
from .. import exc
import hashlib
from . import compat
from . import _collections


def md5_hex(x):
    if compat.py3k:
        x = x.encode('utf-8')
    m = hashlib.md5()
    m.update(x)
    return m.hexdigest()


class safe_reraise(object):
    """Reraise an exception after invoking some handler code.

    Stores the existing exception info before invoking so that it is
    maintained across a potential coroutine context switch.

    e.g.::

        try:
            sess.commit()
        except:
            with safe_reraise():
                sess.rollback()

    """

    def __enter__(self):
        self._exc_info = sys.exc_info()

    def __exit__(self, type_, value, traceback):
        # see #2703 for notes
        if type_ is None:
            exc_type, exc_value, exc_tb = self._exc_info
            self._exc_info = None   # remove potential circular references
            compat.reraise(exc_type, exc_value, exc_tb)
        else:
            self._exc_info = None   # remove potential circular references
            compat.reraise(type_, value, traceback)
""" ret = [] for x in slc.start, slc.stop, slc.step: if hasattr(x, '__index__'): x = x.__index__() ret.append(x) return tuple(ret) def _unique_symbols(used, *bases): used = set(used) for base in bases: pool = itertools.chain((base,), compat.itertools_imap(lambda i: base + str(i), range(1000))) for sym in pool: if sym not in used: used.add(sym) yield sym break else: raise NameError("exhausted namespace for symbol base %s" % base) def map_bits(fn, n): """Call the given function given each nonzero bit from n.""" while n: b = n & (~n + 1) yield fn(b) n ^= b def decorator(target): """A signature-matching decorator factory.""" def decorate(fn): if not inspect.isfunction(fn): raise Exception("not a decoratable function") spec = compat.inspect_getfullargspec(fn) names = tuple(spec[0]) + spec[1:3] + (fn.__name__,) targ_name, fn_name = _unique_symbols(names, 'target', 'fn') metadata = dict(target=targ_name, fn=fn_name) metadata.update(format_argspec_plus(spec, grouped=False)) metadata['name'] = fn.__name__ code = """\ def %(name)s(%(args)s): return %(target)s(%(fn)s, %(apply_kw)s) """ % metadata decorated = _exec_code_in_env(code, {targ_name: target, fn_name: fn}, fn.__name__) decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__ decorated.__wrapped__ = fn return update_wrapper(decorated, fn) return update_wrapper(decorate, target) def _exec_code_in_env(code, env, fn_name): exec(code, env) return env[fn_name] def public_factory(target, location): """Produce a wrapping function for the given cls or classmethod. Rationale here is so that the __init__ method of the class can serve as documentation for the function. """ if isinstance(target, type): fn = target.__init__ callable_ = target doc = "Construct a new :class:`.%s` object. \n\n"\ "This constructor is mirrored as a public API function; "\ "see :func:`~%s` "\ "for a full usage and argument description." % ( target.__name__, location, ) else: fn = callable_ = target doc = "This function is mirrored; see :func:`~%s` "\ "for a description of arguments." % location location_name = location.split(".")[-1] spec = compat.inspect_getfullargspec(fn) del spec[0][0] metadata = format_argspec_plus(spec, grouped=False) metadata['name'] = location_name code = """\ def %(name)s(%(args)s): return cls(%(apply_kw)s) """ % metadata env = {'cls': callable_, 'symbol': symbol} exec(code, env) decorated = env[location_name] decorated.__doc__ = fn.__doc__ decorated.__module__ = "sqlalchemy" + location.rsplit(".", 1)[0] if compat.py2k or hasattr(fn, '__func__'): fn.__func__.__doc__ = doc else: fn.__doc__ = doc return decorated class PluginLoader(object): def __init__(self, group, auto_fn=None): self.group = group self.impls = {} self.auto_fn = auto_fn def load(self, name): if name in self.impls: return self.impls[name]() if self.auto_fn: loader = self.auto_fn(name) if loader: self.impls[name] = loader return loader() try: import pkg_resources except ImportError: pass else: for impl in pkg_resources.iter_entry_points( self.group, name): self.impls[name] = impl.load return impl.load() raise exc.NoSuchModuleError( "Can't load plugin: %s:%s" % (self.group, name)) def register(self, name, modulepath, objname): def load(): mod = compat.import_(modulepath) for token in modulepath.split(".")[1:]: mod = getattr(mod, token) return getattr(mod, objname) self.impls[name] = load def get_cls_kwargs(cls, _set=None): """Return the full set of inherited kwargs for the given `cls`. Probes a class's __init__ method, collecting all named arguments. 
def public_factory(target, location):
    """Produce a wrapping function for the given cls or classmethod.

    Rationale here is so that the __init__ method of the class
    can serve as documentation for the function.

    """
    if isinstance(target, type):
        fn = target.__init__
        callable_ = target
        doc = "Construct a new :class:`.%s` object. \n\n"\
            "This constructor is mirrored as a public API function; "\
            "see :func:`~%s` "\
            "for a full usage and argument description." % (
                target.__name__, location, )
    else:
        fn = callable_ = target
        doc = "This function is mirrored; see :func:`~%s` "\
            "for a description of arguments." % location

    location_name = location.split(".")[-1]
    spec = compat.inspect_getfullargspec(fn)
    del spec[0][0]
    metadata = format_argspec_plus(spec, grouped=False)
    metadata['name'] = location_name
    code = """\
def %(name)s(%(args)s):
    return cls(%(apply_kw)s)
""" % metadata
    env = {'cls': callable_, 'symbol': symbol}
    exec(code, env)
    decorated = env[location_name]
    decorated.__doc__ = fn.__doc__
    decorated.__module__ = "sqlalchemy" + location.rsplit(".", 1)[0]
    if compat.py2k or hasattr(fn, '__func__'):
        fn.__func__.__doc__ = doc
    else:
        fn.__doc__ = doc
    return decorated


class PluginLoader(object):

    def __init__(self, group, auto_fn=None):
        self.group = group
        self.impls = {}
        self.auto_fn = auto_fn

    def load(self, name):
        if name in self.impls:
            return self.impls[name]()

        if self.auto_fn:
            loader = self.auto_fn(name)
            if loader:
                self.impls[name] = loader
                return loader()

        try:
            import pkg_resources
        except ImportError:
            pass
        else:
            for impl in pkg_resources.iter_entry_points(
                    self.group, name):
                self.impls[name] = impl.load
                return impl.load()

        raise exc.NoSuchModuleError(
            "Can't load plugin: %s:%s" % (self.group, name))

    def register(self, name, modulepath, objname):
        def load():
            mod = compat.import_(modulepath)
            for token in modulepath.split(".")[1:]:
                mod = getattr(mod, token)
            return getattr(mod, objname)
        self.impls[name] = load
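# Usage sketch for PluginLoader (names below are illustrative; this is the
# same mechanism behind loading dialects by name).  register() stores a
# lazy import, and load() additionally falls back to setuptools entry
# points so third parties can plug in:
#
#     loaders = PluginLoader("sqlalchemy.dialects")
#     loaders.register(
#         "sqlite", "sqlalchemy.dialects.sqlite.pysqlite",
#         "SQLiteDialect_pysqlite")
#     dialect_cls = loaders.load("sqlite")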
def get_cls_kwargs(cls, _set=None):
    """Return the full set of inherited kwargs for the given `cls`.

    Probes a class's __init__ method, collecting all named arguments.  If
    the __init__ defines a \**kwargs catch-all, then the constructor is
    presumed to pass along unrecognized keywords to its base classes, and
    the collection process is repeated recursively on each of the bases.

    Uses a subset of inspect.getargspec() to cut down on method overhead.
    No anonymous tuple arguments please !

    """
    toplevel = _set is None
    if toplevel:
        _set = set()

    ctr = cls.__dict__.get('__init__', False)

    has_init = ctr and isinstance(ctr, types.FunctionType) and \
        isinstance(ctr.__code__, types.CodeType)

    if has_init:
        names, has_kw = inspect_func_args(ctr)
        _set.update(names)

        if not has_kw and not toplevel:
            return None

    if not has_init or has_kw:
        for c in cls.__bases__:
            if get_cls_kwargs(c, _set) is None:
                break

    _set.discard('self')
    return _set


try:
    # TODO: who doesn't have this constant?
    from inspect import CO_VARKEYWORDS

    def inspect_func_args(fn):
        co = fn.__code__
        nargs = co.co_argcount
        names = co.co_varnames
        args = list(names[:nargs])
        has_kw = bool(co.co_flags & CO_VARKEYWORDS)
        return args, has_kw

except ImportError:
    def inspect_func_args(fn):
        names, _, has_kw, _ = inspect.getargspec(fn)
        return names, bool(has_kw)
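# A short sketch of get_cls_kwargs() (the classes below are hypothetical):
# the **kw catch-all on the subclass's __init__ signals that unrecognized
# keywords flow to the base class, so the base's named arguments are
# collected as well.

class _GetClsKwargsBase(object):
    def __init__(self, echo=False):
        pass


class _GetClsKwargsSub(_GetClsKwargsBase):
    def __init__(self, pool_size=5, **kw):
        _GetClsKwargsBase.__init__(self, **kw)

# get_cls_kwargs(_GetClsKwargsSub) == set(['pool_size', 'echo'])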
""" if to_inspect is None: to_inspect = [obj] else: to_inspect = _collections.to_list(to_inspect) missing = object() pos_args = [] kw_args = _collections.OrderedDict() vargs = None for i, insp in enumerate(to_inspect): try: (_args, _vargs, vkw, defaults) = \ compat.inspect_getargspec(insp.__init__) except TypeError: continue else: default_len = defaults and len(defaults) or 0 if i == 0: if _vargs: vargs = _vargs if default_len: pos_args.extend(_args[1:-default_len]) else: pos_args.extend(_args[1:]) else: kw_args.update([ (arg, missing) for arg in _args[1:-default_len] ]) if default_len: kw_args.update([ (arg, default) for arg, default in zip(_args[-default_len:], defaults) ]) output = [] output.extend(repr(getattr(obj, arg, None)) for arg in pos_args) if vargs is not None and hasattr(obj, vargs): output.extend([repr(val) for val in getattr(obj, vargs)]) for arg, defval in kw_args.items(): if arg in omit_kwarg: continue try: val = getattr(obj, arg, missing) if val is not missing and val != defval: output.append('%s=%r' % (arg, val)) except Exception: pass if additional_kw: for arg, defval in additional_kw: try: val = getattr(obj, arg, missing) if val is not missing and val != defval: output.append('%s=%r' % (arg, val)) except Exception: pass return "%s(%s)" % (obj.__class__.__name__, ", ".join(output)) class portable_instancemethod(object): """Turn an instancemethod into a (parent, name) pair to produce a serializable callable. """ __slots__ = 'target', 'name', '__weakref__' def __getstate__(self): return {'target': self.target, 'name': self.name} def __setstate__(self, state): self.target = state['target'] self.name = state['name'] def __init__(self, meth): self.target = meth.__self__ self.name = meth.__name__ def __call__(self, *arg, **kw): return getattr(self.target, self.name)(*arg, **kw) def class_hierarchy(cls): """Return an unordered sequence of all classes related to cls. Traverses diamond hierarchies. Fibs slightly: subclasses of builtin types are not returned. Thus class_hierarchy(class A(object)) returns (A, object), not A plus every class systemwide that derives from object. Old-style classes are discarded and hierarchies rooted on them will not be descended. """ if compat.py2k: if isinstance(cls, types.ClassType): return list() hier = set([cls]) process = list(cls.__mro__) while process: c = process.pop() if compat.py2k: if isinstance(c, types.ClassType): continue bases = (_ for _ in c.__bases__ if _ not in hier and not isinstance(_, types.ClassType)) else: bases = (_ for _ in c.__bases__ if _ not in hier) for b in bases: process.append(b) hier.add(b) if compat.py3k: if c.__module__ == 'builtins' or not hasattr(c, '__subclasses__'): continue else: if c.__module__ == '__builtin__' or not hasattr( c, '__subclasses__'): continue for s in [_ for _ in c.__subclasses__() if _ not in hier]: process.append(s) hier.add(s) return list(hier) def iterate_attributes(cls): """iterate all the keys and attributes associated with a class, without using getattr(). Does not use getattr() so that class-sensitive descriptors (i.e. property.__get__()) are not called. 
""" keys = dir(cls) for key in keys: for c in cls.__mro__: if key in c.__dict__: yield (key, c.__dict__[key]) break def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None, name='self.proxy', from_instance=None): """Automates delegation of __specials__ for a proxying type.""" if only: dunders = only else: if skip is None: skip = ('__slots__', '__del__', '__getattribute__', '__metaclass__', '__getstate__', '__setstate__') dunders = [m for m in dir(from_cls) if (m.startswith('__') and m.endswith('__') and not hasattr(into_cls, m) and m not in skip)] for method in dunders: try: fn = getattr(from_cls, method) if not hasattr(fn, '__call__'): continue fn = getattr(fn, 'im_func', fn) except AttributeError: continue try: spec = compat.inspect_getargspec(fn) fn_args = inspect.formatargspec(spec[0]) d_args = inspect.formatargspec(spec[0][1:]) except TypeError: fn_args = '(self, *args, **kw)' d_args = '(*args, **kw)' py = ("def %(method)s%(fn_args)s: " "return %(name)s.%(method)s%(d_args)s" % locals()) env = from_instance is not None and {name: from_instance} or {} compat.exec_(py, env) try: env[method].__defaults__ = fn.__defaults__ except AttributeError: pass setattr(into_cls, method, env[method]) def methods_equivalent(meth1, meth2): """Return True if the two methods are the same implementation.""" return getattr(meth1, '__func__', meth1) is getattr( meth2, '__func__', meth2) def as_interface(obj, cls=None, methods=None, required=None): """Ensure basic interface compliance for an instance or dict of callables. Checks that ``obj`` implements public methods of ``cls`` or has members listed in ``methods``. If ``required`` is not supplied, implementing at least one interface method is sufficient. Methods present on ``obj`` that are not in the interface are ignored. If ``obj`` is a dict and ``dict`` does not meet the interface requirements, the keys of the dictionary are inspected. Keys present in ``obj`` that are not in the interface will raise TypeErrors. Raises TypeError if ``obj`` does not meet the interface criteria. In all passing cases, an object with callable members is returned. In the simple case, ``obj`` is returned as-is; if dict processing kicks in then an anonymous class is returned. obj A type, instance, or dictionary of callables. cls Optional, a type. All public methods of cls are considered the interface. An ``obj`` instance of cls will always pass, ignoring ``required``.. methods Optional, a sequence of method names to consider as the interface. required Optional, a sequence of mandatory implementations. If omitted, an ``obj`` that provides at least one interface method is considered sufficient. As a convenience, required may be a type, in which case all public methods of the type are required. """ if not cls and not methods: raise TypeError('a class or collection of method names are required') if isinstance(cls, type) and isinstance(obj, cls): return obj interface = set(methods or [m for m in dir(cls) if not m.startswith('_')]) implemented = set(dir(obj)) complies = operator.ge if isinstance(required, type): required = interface elif not required: required = set() complies = operator.gt else: required = set(required) if complies(implemented.intersection(interface), required): return obj # No dict duck typing here. 
def methods_equivalent(meth1, meth2):
    """Return True if the two methods are the same implementation."""

    return getattr(meth1, '__func__', meth1) is getattr(
        meth2, '__func__', meth2)


def as_interface(obj, cls=None, methods=None, required=None):
    """Ensure basic interface compliance for an instance or dict of
    callables.

    Checks that ``obj`` implements public methods of ``cls`` or has members
    listed in ``methods``. If ``required`` is not supplied, implementing at
    least one interface method is sufficient.  Methods present on ``obj``
    that are not in the interface are ignored.

    If ``obj`` is a dict and ``dict`` does not meet the interface
    requirements, the keys of the dictionary are inspected. Keys present in
    ``obj`` that are not in the interface will raise TypeErrors.

    Raises TypeError if ``obj`` does not meet the interface criteria.

    In all passing cases, an object with callable members is returned.  In
    the simple case, ``obj`` is returned as-is; if dict processing kicks in
    then an anonymous class is returned.

    obj
      A type, instance, or dictionary of callables.
    cls
      Optional, a type.  All public methods of cls are considered the
      interface.  An ``obj`` instance of cls will always pass, ignoring
      ``required``.
    methods
      Optional, a sequence of method names to consider as the interface.
    required
      Optional, a sequence of mandatory implementations. If omitted, an
      ``obj`` that provides at least one interface method is considered
      sufficient.  As a convenience, required may be a type, in which case
      all public methods of the type are required.

    """
    if not cls and not methods:
        raise TypeError('a class or collection of method names are required')

    if isinstance(cls, type) and isinstance(obj, cls):
        return obj

    interface = set(methods or [m
                                for m in dir(cls) if not m.startswith('_')])
    implemented = set(dir(obj))

    complies = operator.ge
    if isinstance(required, type):
        required = interface
    elif not required:
        required = set()
        complies = operator.gt
    else:
        required = set(required)

    if complies(implemented.intersection(interface), required):
        return obj

    # No dict duck typing here.
    if not isinstance(obj, dict):
        qualifier = complies is operator.gt and 'any of' or 'all of'
        raise TypeError("%r does not implement %s: %s" % (
            obj, qualifier, ', '.join(interface)))

    class AnonymousInterface(object):
        """A callable-holding shell."""

    if cls:
        AnonymousInterface.__name__ = 'Anonymous' + cls.__name__
    found = set()

    for method, impl in dictlike_iteritems(obj):
        if method not in interface:
            raise TypeError("%r: unknown in this interface" % method)
        if not compat.callable(impl):
            raise TypeError("%r=%r is not callable" % (method, impl))
        setattr(AnonymousInterface, method, staticmethod(impl))
        found.add(method)

    if complies(found, required):
        return AnonymousInterface

    raise TypeError("dictionary does not contain required keys %s" %
                    ', '.join(required - found))


class memoized_property(object):
    """A read-only @property that is only evaluated once."""

    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        if obj is None:
            return self
        obj.__dict__[self.__name__] = result = self.fget(obj)
        return result

    def _reset(self, obj):
        memoized_property.reset(obj, self.__name__)

    @classmethod
    def reset(cls, obj, name):
        obj.__dict__.pop(name, None)


def memoized_instancemethod(fn):
    """Decorate a method to memoize its return value.

    Best applied to no-arg methods: memoization is not sensitive to
    argument values, and will always return the same value even when
    called with different arguments.

    """

    def oneshot(self, *args, **kw):
        result = fn(self, *args, **kw)
        memo = lambda *a, **kw: result
        memo.__name__ = fn.__name__
        memo.__doc__ = fn.__doc__
        self.__dict__[fn.__name__] = memo
        return result

    return update_wrapper(oneshot, fn)


class group_expirable_memoized_property(object):
    """A family of @memoized_properties that can be expired in tandem."""

    def __init__(self, attributes=()):
        self.attributes = []
        if attributes:
            self.attributes.extend(attributes)

    def expire_instance(self, instance):
        """Expire all memoized properties for *instance*."""
        stash = instance.__dict__
        for attribute in self.attributes:
            stash.pop(attribute, None)

    def __call__(self, fn):
        self.attributes.append(fn.__name__)
        return memoized_property(fn)

    def method(self, fn):
        self.attributes.append(fn.__name__)
        return memoized_instancemethod(fn)
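# A minimal sketch of memoized_property (hypothetical class): the first
# access runs fget and stores the result in the instance __dict__, which,
# because this is a non-data descriptor, shadows it on subsequent reads.

class _MemoConfig(object):
    @memoized_property
    def settings(self):
        # expensive computation happens once per instance
        return {"echo": False}

# c = _MemoConfig()
# c.settings is c.settings                  -> True (cached)
# memoized_property.reset(c, 'settings')    -> recomputes on next access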
""" __slots__ = () def _fallback_getattr(self, key): raise AttributeError(key) def __getattr__(self, key): if key.startswith('_memoized'): raise AttributeError(key) elif hasattr(self, '_memoized_attr_%s' % key): value = getattr(self, '_memoized_attr_%s' % key)() setattr(self, key, value) return value elif hasattr(self, '_memoized_method_%s' % key): fn = getattr(self, '_memoized_method_%s' % key) def oneshot(*args, **kw): result = fn(*args, **kw) memo = lambda *a, **kw: result memo.__name__ = fn.__name__ memo.__doc__ = fn.__doc__ setattr(self, key, memo) return result oneshot.__doc__ = fn.__doc__ return oneshot else: return self._fallback_getattr(key) def dependency_for(modulename): def decorate(obj): # TODO: would be nice to improve on this import silliness, # unfortunately importlib doesn't work that great either tokens = modulename.split(".") mod = compat.import_( ".".join(tokens[0:-1]), globals(), locals(), tokens[-1]) mod = getattr(mod, tokens[-1]) setattr(mod, obj.__name__, obj) return obj return decorate class dependencies(object): """Apply imported dependencies as arguments to a function. E.g.:: @util.dependencies( "sqlalchemy.sql.widget", "sqlalchemy.engine.default" ); def some_func(self, widget, default, arg1, arg2, **kw): # ... Rationale is so that the impact of a dependency cycle can be associated directly with the few functions that cause the cycle, and not pollute the module-level namespace. """ def __init__(self, *deps): self.import_deps = [] for dep in deps: tokens = dep.split(".") self.import_deps.append( dependencies._importlater( ".".join(tokens[0:-1]), tokens[-1] ) ) def __call__(self, fn): import_deps = self.import_deps spec = compat.inspect_getfullargspec(fn) spec_zero = list(spec[0]) hasself = spec_zero[0] in ('self', 'cls') for i in range(len(import_deps)): spec[0][i + (1 if hasself else 0)] = "import_deps[%r]" % i inner_spec = format_argspec_plus(spec, grouped=False) for impname in import_deps: del spec_zero[1 if hasself else 0] spec[0][:] = spec_zero outer_spec = format_argspec_plus(spec, grouped=False) code = 'lambda %(args)s: fn(%(apply_kw)s)' % { "args": outer_spec['args'], "apply_kw": inner_spec['apply_kw'] } decorated = eval(code, locals()) decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__ return update_wrapper(decorated, fn) @classmethod def resolve_all(cls, path): for m in list(dependencies._unresolved): if m._full_path.startswith(path): m._resolve() _unresolved = set() _by_key = {} class _importlater(object): _unresolved = set() _by_key = {} def __new__(cls, path, addtl): key = path + "." + addtl if key in dependencies._by_key: return dependencies._by_key[key] else: dependencies._by_key[key] = imp = object.__new__(cls) return imp def __init__(self, path, addtl): self._il_path = path self._il_addtl = addtl dependencies._unresolved.add(self) @property def _full_path(self): return self._il_path + "." 
def dependency_for(modulename):
    def decorate(obj):
        # TODO: would be nice to improve on this import silliness,
        # unfortunately importlib doesn't work that great either
        tokens = modulename.split(".")
        mod = compat.import_(
            ".".join(tokens[0:-1]), globals(), locals(), tokens[-1])
        mod = getattr(mod, tokens[-1])
        setattr(mod, obj.__name__, obj)
        return obj
    return decorate


class dependencies(object):
    """Apply imported dependencies as arguments to a function.

    E.g.::

        @util.dependencies(
            "sqlalchemy.sql.widget",
            "sqlalchemy.engine.default"
        )
        def some_func(self, widget, default, arg1, arg2, **kw):
            # ...

    Rationale is so that the impact of a dependency cycle can be
    associated directly with the few functions that cause the cycle,
    and not pollute the module-level namespace.

    """

    def __init__(self, *deps):
        self.import_deps = []
        for dep in deps:
            tokens = dep.split(".")
            self.import_deps.append(
                dependencies._importlater(
                    ".".join(tokens[0:-1]),
                    tokens[-1]
                )
            )

    def __call__(self, fn):
        import_deps = self.import_deps
        spec = compat.inspect_getfullargspec(fn)

        spec_zero = list(spec[0])
        hasself = spec_zero[0] in ('self', 'cls')

        for i in range(len(import_deps)):
            spec[0][i + (1 if hasself else 0)] = "import_deps[%r]" % i

        inner_spec = format_argspec_plus(spec, grouped=False)

        for impname in import_deps:
            del spec_zero[1 if hasself else 0]
        spec[0][:] = spec_zero

        outer_spec = format_argspec_plus(spec, grouped=False)

        code = 'lambda %(args)s: fn(%(apply_kw)s)' % {
            "args": outer_spec['args'],
            "apply_kw": inner_spec['apply_kw']
        }

        decorated = eval(code, locals())
        decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__
        return update_wrapper(decorated, fn)

    @classmethod
    def resolve_all(cls, path):
        for m in list(dependencies._unresolved):
            if m._full_path.startswith(path):
                m._resolve()

    _unresolved = set()
    _by_key = {}

    class _importlater(object):
        _unresolved = set()

        _by_key = {}

        def __new__(cls, path, addtl):
            key = path + "." + addtl
            if key in dependencies._by_key:
                return dependencies._by_key[key]
            else:
                dependencies._by_key[key] = imp = object.__new__(cls)
                return imp

        def __init__(self, path, addtl):
            self._il_path = path
            self._il_addtl = addtl
            dependencies._unresolved.add(self)

        @property
        def _full_path(self):
            return self._il_path + "." + self._il_addtl

        @memoized_property
        def module(self):
            if self in dependencies._unresolved:
                raise ImportError(
                    "importlater.resolve_all() hasn't "
                    "been called (this is %s %s)"
                    % (self._il_path, self._il_addtl))

            return getattr(self._initial_import, self._il_addtl)

        def _resolve(self):
            dependencies._unresolved.discard(self)
            self._initial_import = compat.import_(
                self._il_path, globals(), locals(),
                [self._il_addtl])

        def __getattr__(self, key):
            if key == 'module':
                raise ImportError("Could not resolve module %s"
                                  % self._full_path)
            try:
                attr = getattr(self.module, key)
            except AttributeError:
                raise AttributeError(
                    "Module %s has no attribute '%s'" %
                    (self._full_path, key)
                )
            self.__dict__[key] = attr
            return attr


# from paste.deploy.converters
def asbool(obj):
    if isinstance(obj, compat.string_types):
        obj = obj.strip().lower()
        if obj in ['true', 'yes', 'on', 'y', 't', '1']:
            return True
        elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
            return False
        else:
            raise ValueError("String is not true/false: %r" % obj)
    return bool(obj)


def bool_or_str(*text):
    """Return a callable that will evaluate a string as
    boolean, or one of a set of "alternate" string values.

    """
    def bool_or_value(obj):
        if obj in text:
            return obj
        else:
            return asbool(obj)
    return bool_or_value


def asint(value):
    """Coerce to integer."""

    if value is None:
        return value
    return int(value)


def coerce_kw_type(kw, key, type_, flexi_bool=True):
    """If 'key' is present in dict 'kw', coerce its value to type 'type\_'
    if necessary.  If 'flexi_bool' is True, the string '0' is considered
    false when coercing to boolean.
    """

    if key in kw and not isinstance(kw[key], type_) and kw[key] is not None:
        if type_ is bool and flexi_bool:
            kw[key] = asbool(kw[key])
        else:
            kw[key] = type_(kw[key])
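# Sketch of the coercion helpers (hypothetical option dict): values parsed
# from a database URL's query string arrive as strings, so they get
# normalized with coerce_kw_type() before use:
#
#     opts = {'echo': 'false', 'pool_size': '5'}
#     coerce_kw_type(opts, 'echo', bool)       # {'echo': False, ...}
#     coerce_kw_type(opts, 'pool_size', int)   # {..., 'pool_size': 5}
#     asbool("on")     # True
#     asbool("junk")   # raises ValueError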
""" if hasattr(specimen, '__emulates__'): # canonicalize set vs sets.Set to a standard: the builtin set if (specimen.__emulates__ is not None and issubclass(specimen.__emulates__, set)): return set else: return specimen.__emulates__ isa = isinstance(specimen, type) and issubclass or isinstance if isa(specimen, list): return list elif isa(specimen, set): return set elif isa(specimen, dict): return dict if hasattr(specimen, 'append'): return list elif hasattr(specimen, 'add'): return set elif hasattr(specimen, 'set'): return dict else: return default def assert_arg_type(arg, argtype, name): if isinstance(arg, argtype): return arg else: if isinstance(argtype, tuple): raise exc.ArgumentError( "Argument '%s' is expected to be one of type %s, got '%s'" % (name, ' or '.join("'%s'" % a for a in argtype), type(arg))) else: raise exc.ArgumentError( "Argument '%s' is expected to be of type '%s', got '%s'" % (name, argtype, type(arg))) def dictlike_iteritems(dictlike): """Return a (key, value) iterator for almost any dict-like object.""" if compat.py3k: if hasattr(dictlike, 'items'): return list(dictlike.items()) else: if hasattr(dictlike, 'iteritems'): return dictlike.iteritems() elif hasattr(dictlike, 'items'): return iter(dictlike.items()) getter = getattr(dictlike, '__getitem__', getattr(dictlike, 'get', None)) if getter is None: raise TypeError( "Object '%r' is not dict-like" % dictlike) if hasattr(dictlike, 'iterkeys'): def iterator(): for key in dictlike.iterkeys(): yield key, getter(key) return iterator() elif hasattr(dictlike, 'keys'): return iter((key, getter(key)) for key in dictlike.keys()) else: raise TypeError( "Object '%r' is not dict-like" % dictlike) class classproperty(property): """A decorator that behaves like @property except that operates on classes rather than instances. The decorator is currently special when using the declarative module, but note that the :class:`~.sqlalchemy.ext.declarative.declared_attr` decorator should be used for this purpose with declarative. """ def __init__(self, fget, *arg, **kw): super(classproperty, self).__init__(fget, *arg, **kw) self.__doc__ = fget.__doc__ def __get__(desc, self, cls): return desc.fget(cls) class hybridproperty(object): def __init__(self, func): self.func = func def __get__(self, instance, owner): if instance is None: clsval = self.func(owner) clsval.__doc__ = self.func.__doc__ return clsval else: return self.func(instance) class hybridmethod(object): """Decorate a function as cls- or instance- level.""" def __init__(self, func): self.func = func def __get__(self, instance, owner): if instance is None: return self.func.__get__(owner, owner.__class__) else: return self.func.__get__(instance, owner) class _symbol(int): def __new__(self, name, doc=None, canonical=None): """Construct a new named symbol.""" assert isinstance(name, compat.string_types) if canonical is None: canonical = hash(name) v = int.__new__(_symbol, canonical) v.name = name if doc: v.__doc__ = doc return v def __reduce__(self): return symbol, (self.name, "x", int(self)) def __str__(self): return repr(self) def __repr__(self): return "symbol(%r)" % self.name _symbol.__name__ = 'symbol' class symbol(object): """A constant symbol. >>> symbol('foo') is symbol('foo') True >>> symbol('foo') A slight refinement of the MAGICCOOKIE=object() pattern. The primary advantage of symbol() is its repr(). They are also singletons. Repeated calls of symbol('name') will all return the same instance. The optional ``doc`` argument assigns to ``__doc__``. 
This is strictly so that Sphinx autoattr picks up the docstring we want (it doesn't appear to pick up the in-module docstring if the datamember is in a different module - autoattribute also blows up completely). If Sphinx fixes/improves this then we would no longer need ``doc`` here. """ symbols = {} _lock = compat.threading.Lock() def __new__(cls, name, doc=None, canonical=None): cls._lock.acquire() try: sym = cls.symbols.get(name) if sym is None: cls.symbols[name] = sym = _symbol(name, doc, canonical) return sym finally: symbol._lock.release() _creation_order = 1 def set_creation_order(instance): """Assign a '_creation_order' sequence to the given instance. This allows multiple instances to be sorted in order of creation (typically within a single thread; the counter is not particularly threadsafe). """ global _creation_order instance._creation_order = _creation_order _creation_order += 1 def warn_exception(func, *args, **kwargs): """executes the given function, catches all exceptions and converts to a warning. """ try: return func(*args, **kwargs) except Exception: warn("%s('%s') ignored" % sys.exc_info()[0:2]) def ellipses_string(value, len_=25): try: if len(value) > len_: return "%s..." % value[0:len_] else: return value except TypeError: return value class _hash_limit_string(compat.text_type): """A string subclass that can only be hashed on a maximum amount of unique values. This is used for warnings so that we can send out parameterized warnings without the __warningregistry__ of the module, or the non-overridable "once" registry within warnings.py, overloading memory, """ def __new__(cls, value, num, args): interpolated = (value % args) + \ (" (this warning may be suppressed after %d occurrences)" % num) self = super(_hash_limit_string, cls).__new__(cls, interpolated) self._hash = hash("%s_%d" % (value, hash(interpolated) % num)) return self def __hash__(self): return self._hash def __eq__(self, other): return hash(self) == hash(other) def warn(msg): """Issue a warning. If msg is a string, :class:`.exc.SAWarning` is used as the category. """ warnings.warn(msg, exc.SAWarning, stacklevel=2) def warn_limited(msg, args): """Issue a warning with a paramterized string, limiting the number of registrations. """ if args: msg = _hash_limit_string(msg, 10, args) warnings.warn(msg, exc.SAWarning, stacklevel=2) def only_once(fn): """Decorate the given function to be a no-op after it is called exactly once.""" once = [fn] def go(*arg, **kw): if once: once_fn = once.pop() return once_fn(*arg, **kw) return go _SQLA_RE = re.compile(r'sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py') _UNITTEST_RE = re.compile(r'unit(?:2|test2?/)') def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE): """Chop extraneous lines off beginning and end of a traceback. :param tb: a list of traceback lines as returned by ``traceback.format_stack()`` :param exclude_prefix: a regular expression object matching lines to skip at beginning of ``tb`` :param exclude_suffix: a regular expression object matching lines to skip at end of ``tb`` """ start = 0 end = len(tb) - 1 while start <= end and exclude_prefix.search(tb[start]): start += 1 while start <= end and exclude_suffix.search(tb[end]): end -= 1 return tb[start:end + 1] NoneType = type(None) def attrsetter(attrname): code = \ "def set(obj, value):"\ " obj.%s = value" % attrname env = locals().copy() exec(code, env) return env['set'] class EnsureKWArgType(type): """Apply translation of functions to accept **kw arguments if they don't already. 
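A hypothetical sketch of the effect, assuming ``with_metaclass`` from
the ``compat`` module (``ensure_kwarg`` is a regular expression matched
against attribute names)::

    class Target(with_metaclass(EnsureKWArgType, object)):
        ensure_kwarg = 'render_'

        def render_thing(self, value):
            return value

    # render_thing() is re-wrapped so that unknown keyword
    # arguments are accepted and discarded:
    Target().render_thing(5, some_flag=True)   # returns 5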
""" def __init__(cls, clsname, bases, clsdict): fn_reg = cls.ensure_kwarg if fn_reg: for key in clsdict: m = re.match(fn_reg, key) if m: fn = clsdict[key] spec = compat.inspect_getargspec(fn) if not spec.keywords: clsdict[key] = wrapped = cls._wrap_w_kw(fn) setattr(cls, key, wrapped) super(EnsureKWArgType, cls).__init__(clsname, bases, clsdict) def _wrap_w_kw(self, fn): def wrap(*arg, **kw): return fn(*arg) return update_wrapper(wrap, fn) SQLAlchemy-1.0.11/lib/sqlalchemy/util/compat.py0000664000175000017500000001527312636375552022372 0ustar classicclassic00000000000000# util/compat.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Handle Python version/platform incompatibilities.""" import sys try: import threading except ImportError: import dummy_threading as threading py36 = sys.version_info >= (3, 6) py33 = sys.version_info >= (3, 3) py32 = sys.version_info >= (3, 2) py3k = sys.version_info >= (3, 0) py2k = sys.version_info < (3, 0) py265 = sys.version_info >= (2, 6, 5) jython = sys.platform.startswith('java') pypy = hasattr(sys, 'pypy_version_info') win32 = sys.platform.startswith('win') cpython = not pypy and not jython # TODO: something better for this ? import collections next = next if py3k: import pickle else: try: import cPickle as pickle except ImportError: import pickle # work around http://bugs.python.org/issue2646 if py265: safe_kwarg = lambda arg: arg else: safe_kwarg = str ArgSpec = collections.namedtuple("ArgSpec", ["args", "varargs", "keywords", "defaults"]) if py3k: import builtins from inspect import getfullargspec as inspect_getfullargspec from urllib.parse import (quote_plus, unquote_plus, parse_qsl, quote, unquote) import configparser from io import StringIO from io import BytesIO as byte_buffer def inspect_getargspec(func): return ArgSpec( *inspect_getfullargspec(func)[0:4] ) string_types = str, binary_type = bytes text_type = str int_types = int, iterbytes = iter def u(s): return s def ue(s): return s def b(s): return s.encode("latin-1") if py32: callable = callable else: def callable(fn): return hasattr(fn, '__call__') def cmp(a, b): return (a > b) - (a < b) from functools import reduce print_ = getattr(builtins, "print") import_ = getattr(builtins, '__import__') import itertools itertools_filterfalse = itertools.filterfalse itertools_filter = filter itertools_imap = map from itertools import zip_longest import base64 def b64encode(x): return base64.b64encode(x).decode('ascii') def b64decode(x): return base64.b64decode(x.encode('ascii')) else: from inspect import getargspec as inspect_getfullargspec inspect_getargspec = inspect_getfullargspec from urllib import quote_plus, unquote_plus, quote, unquote from urlparse import parse_qsl import ConfigParser as configparser from StringIO import StringIO from cStringIO import StringIO as byte_buffer string_types = basestring, binary_type = str text_type = unicode int_types = int, long def iterbytes(buf): return (ord(byte) for byte in buf) def u(s): # this differs from what six does, which doesn't support non-ASCII # strings - we only use u() with # literal source strings, and all our source files with non-ascii # in them (all are tests) are utf-8 encoded. 
return unicode(s, "utf-8") def ue(s): return unicode(s, "unicode_escape") def b(s): return s def import_(*args): if len(args) == 4: args = args[0:3] + ([str(arg) for arg in args[3]],) return __import__(*args) callable = callable cmp = cmp reduce = reduce import base64 b64encode = base64.b64encode b64decode = base64.b64decode def print_(*args, **kwargs): fp = kwargs.pop("file", sys.stdout) if fp is None: return for arg in enumerate(args): if not isinstance(arg, basestring): arg = str(arg) fp.write(arg) import itertools itertools_filterfalse = itertools.ifilterfalse itertools_filter = itertools.ifilter itertools_imap = itertools.imap from itertools import izip_longest as zip_longest import time if win32 or jython: time_func = time.clock else: time_func = time.time from collections import namedtuple from operator import attrgetter as dottedgetter if py3k: def reraise(tp, value, tb=None, cause=None): if cause is not None: value.__cause__ = cause if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value def raise_from_cause(exception, exc_info=None): if exc_info is None: exc_info = sys.exc_info() exc_type, exc_value, exc_tb = exc_info reraise(type(exception), exception, tb=exc_tb, cause=exc_value) else: exec("def reraise(tp, value, tb=None, cause=None):\n" " raise tp, value, tb\n") def raise_from_cause(exception, exc_info=None): # not as nice as that of Py3K, but at least preserves # the code line where the issue occurred if exc_info is None: exc_info = sys.exc_info() exc_type, exc_value, exc_tb = exc_info reraise(type(exception), exception, tb=exc_tb) if py3k: exec_ = getattr(builtins, 'exec') else: def exec_(func_text, globals_, lcl=None): if lcl is None: exec('exec func_text in globals_') else: exec('exec func_text in globals_, lcl') def with_metaclass(meta, *bases): """Create a base class with a metaclass. Drops the middle class upon creation. Source: http://lucumr.pocoo.org/2013/5/21/porting-to-python-3-redux/ """ class metaclass(meta): __call__ = type.__call__ __init__ = type.__init__ def __new__(cls, name, this_bases, d): if this_bases is None: return type.__new__(cls, name, (), d) return meta(name, bases, d) return metaclass('temporary_class', None, {}) from contextlib import contextmanager try: from contextlib import nested except ImportError: # removed in py3k, credit to mitsuhiko for # workaround @contextmanager def nested(*managers): exits = [] vars = [] exc = (None, None, None) try: for mgr in managers: exit = mgr.__exit__ enter = mgr.__enter__ vars.append(enter()) exits.append(exit) yield vars except: exc = sys.exc_info() finally: while exits: exit = exits.pop() try: if exit(*exc): exc = (None, None, None) except: exc = sys.exc_info() if exc != (None, None, None): reraise(exc[0], exc[1], exc[2]) SQLAlchemy-1.0.11/lib/sqlalchemy/util/deprecations.py0000664000175000017500000001046312636375552023563 0ustar classicclassic00000000000000# util/deprecations.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Helpers related to deprecation of functions, methods, classes, other functionality.""" from .. 
import exc import warnings import re from .langhelpers import decorator def warn_deprecated(msg, stacklevel=3): warnings.warn(msg, exc.SADeprecationWarning, stacklevel=stacklevel) def warn_pending_deprecation(msg, stacklevel=3): warnings.warn(msg, exc.SAPendingDeprecationWarning, stacklevel=stacklevel) def deprecated(version, message=None, add_deprecation_to_docstring=True): """Decorates a function and issues a deprecation warning on use. :param message: If provided, issue message in the warning. A sensible default is used if not provided. :param add_deprecation_to_docstring: Default True. If False, the wrapped function's __doc__ is left as-is. If True, the 'message' is prepended to the docs if provided, or sensible default if message is omitted. """ if add_deprecation_to_docstring: header = ".. deprecated:: %s %s" % \ (version, (message or '')) else: header = None if message is None: message = "Call to deprecated function %(func)s" def decorate(fn): return _decorate_with_warning( fn, exc.SADeprecationWarning, message % dict(func=fn.__name__), header) return decorate def pending_deprecation(version, message=None, add_deprecation_to_docstring=True): """Decorates a function and issues a pending deprecation warning on use. :param version: An approximate future version at which point the pending deprecation will become deprecated. Not used in messaging. :param message: If provided, issue message in the warning. A sensible default is used if not provided. :param add_deprecation_to_docstring: Default True. If False, the wrapped function's __doc__ is left as-is. If True, the 'message' is prepended to the docs if provided, or sensible default if message is omitted. """ if add_deprecation_to_docstring: header = ".. deprecated:: %s (pending) %s" % \ (version, (message or '')) else: header = None if message is None: message = "Call to deprecated function %(func)s" def decorate(fn): return _decorate_with_warning( fn, exc.SAPendingDeprecationWarning, message % dict(func=fn.__name__), header) return decorate def _sanitize_restructured_text(text): def repl(m): type_, name = m.group(1, 2) if type_ in ("func", "meth"): name += "()" return name return re.sub(r'\:(\w+)\:`~?\.?(.+?)`', repl, text) def _decorate_with_warning(func, wtype, message, docstring_header=None): """Wrap a function with a warnings.warn and augmented docstring.""" message = _sanitize_restructured_text(message) @decorator def warned(fn, *args, **kwargs): warnings.warn(message, wtype, stacklevel=3) return fn(*args, **kwargs) doc = func.__doc__ is not None and func.__doc__ or '' if docstring_header is not None: docstring_header %= dict(func=func.__name__) doc = inject_docstring_text(doc, docstring_header, 1) decorated = warned(func) decorated.__doc__ = doc return decorated import textwrap def _dedent_docstring(text): split_text = text.split("\n", 1) if len(split_text) == 1: return text else: firstline, remaining = split_text if not firstline.startswith(" "): return firstline + "\n" + textwrap.dedent(remaining) else: return textwrap.dedent(text) def inject_docstring_text(doctext, injecttext, pos): doctext = _dedent_docstring(doctext or "") lines = doctext.split('\n') injectlines = textwrap.dedent(injecttext).split("\n") if injectlines[0]: injectlines.insert(0, "") blanks = [num for num, line in enumerate(lines) if not line.strip()] blanks.insert(0, 0) inject_pos = blanks[min(pos, len(blanks) - 1)] lines = lines[0:inject_pos] + injectlines + lines[inject_pos:] return "\n".join(lines) 
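# A hypothetical usage sketch for the decorators above: ``@deprecated``
# emits SADeprecationWarning each time the wrapped function is called
# and, by default, prepends a ".. deprecated::" note to its docstring.
#
#     @deprecated("0.9", message="Use new_fn() instead.")
#     def old_fn():
#         """Do a thing."""
#
#     old_fn()   # warns "Use new_fn() instead.", then runs old_fn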
SQLAlchemy-1.0.11/lib/sqlalchemy/util/topological.py0000664000175000017500000000535212636375552023420 0ustar classicclassic00000000000000# util/topological.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Topological sorting algorithms.""" from ..exc import CircularDependencyError from .. import util __all__ = ['sort', 'sort_as_subsets', 'find_cycles'] def sort_as_subsets(tuples, allitems, deterministic_order=False): edges = util.defaultdict(set) for parent, child in tuples: edges[child].add(parent) Set = util.OrderedSet if deterministic_order else set todo = Set(allitems) while todo: output = Set() for node in todo: if todo.isdisjoint(edges[node]): output.add(node) if not output: raise CircularDependencyError( "Circular dependency detected.", find_cycles(tuples, allitems), _gen_edges(edges) ) todo.difference_update(output) yield output def sort(tuples, allitems, deterministic_order=False): """sort the given list of items by dependency. 'tuples' is a list of tuples representing a partial ordering. 'deterministic_order' keeps items within a dependency tier in list order. """ for set_ in sort_as_subsets(tuples, allitems, deterministic_order): for s in set_: yield s def find_cycles(tuples, allitems): # adapted from: # http://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html edges = util.defaultdict(set) for parent, child in tuples: edges[parent].add(child) nodes_to_test = set(edges) output = set() # we'd like to find all nodes that are # involved in cycles, so we do the full # pass through the whole thing for each # node in the original list. # we can go just through parent edge nodes. # if a node is only a child and never a parent, # by definition it can't be part of a cycle. same # if it's not in the edges at all. for node in nodes_to_test: stack = [node] todo = nodes_to_test.difference(stack) while stack: top = stack[-1] for node in edges[top]: if node in stack: cyc = stack[stack.index(node):] todo.difference_update(cyc) output.update(cyc) if node in todo: stack.append(node) todo.remove(node) break else: node = stack.pop() return output def _gen_edges(edges): return set([ (right, left) for left in edges for right in edges[left] ]) SQLAlchemy-1.0.11/lib/sqlalchemy/util/_collections.py0000664000175000017500000006630212636375552023563 0ustar classicclassic00000000000000# util/_collections.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Collection classes and helpers.""" from __future__ import absolute_import import weakref import operator from .compat import threading, itertools_filterfalse, string_types from . import py2k import types import collections EMPTY_SET = frozenset() class AbstractKeyedTuple(tuple): __slots__ = () def keys(self): """Return a list of string key names for this :class:`.KeyedTuple`. .. seealso:: :attr:`.KeyedTuple._fields` """ return list(self._fields) class KeyedTuple(AbstractKeyedTuple): """``tuple`` subclass that adds labeled names. E.g.:: >>> k = KeyedTuple([1, 2, 3], labels=["one", "two", "three"]) >>> k.one 1 >>> k.two 2 Result rows returned by :class:`.Query` that contain multiple ORM entities and/or column expressions make use of this class to return rows. 
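For example, assuming typical ``User`` and ``Address`` mappings
(illustrative only)::

    row = session.query(User, Address).first()
    row.User       # the User entity
    row.Address    # the Address entity
    row.keys()     # ['User', 'Address']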
The :class:`.KeyedTuple` exhibits similar behavior to the ``collections.namedtuple()`` construct provided in the Python standard library, however is architected very differently. Unlike ``collections.namedtuple()``, :class:`.KeyedTuple` is does not rely on creation of custom subtypes in order to represent a new series of keys, instead each :class:`.KeyedTuple` instance receives its list of keys in place. The subtype approach of ``collections.namedtuple()`` introduces significant complexity and performance overhead, which is not necessary for the :class:`.Query` object's use case. .. versionchanged:: 0.8 Compatibility methods with ``collections.namedtuple()`` have been added including :attr:`.KeyedTuple._fields` and :meth:`.KeyedTuple._asdict`. .. seealso:: :ref:`ormtutorial_querying` """ def __new__(cls, vals, labels=None): t = tuple.__new__(cls, vals) if labels: t.__dict__.update(zip(labels, vals)) else: labels = [] t.__dict__['_labels'] = labels return t @property def _fields(self): """Return a tuple of string key names for this :class:`.KeyedTuple`. This method provides compatibility with ``collections.namedtuple()``. .. versionadded:: 0.8 .. seealso:: :meth:`.KeyedTuple.keys` """ return tuple([l for l in self._labels if l is not None]) def __setattr__(self, key, value): raise AttributeError("Can't set attribute: %s" % key) def _asdict(self): """Return the contents of this :class:`.KeyedTuple` as a dictionary. This method provides compatibility with ``collections.namedtuple()``, with the exception that the dictionary returned is **not** ordered. .. versionadded:: 0.8 """ return dict((key, self.__dict__[key]) for key in self.keys()) class _LW(AbstractKeyedTuple): __slots__ = () def __new__(cls, vals): return tuple.__new__(cls, vals) def __reduce__(self): # for pickling, degrade down to the regular # KeyedTuple, thus avoiding anonymous class pickling # difficulties return KeyedTuple, (list(self), self._real_fields) def _asdict(self): """Return the contents of this :class:`.KeyedTuple` as a dictionary.""" d = dict(zip(self._real_fields, self)) d.pop(None, None) return d class ImmutableContainer(object): def _immutable(self, *arg, **kw): raise TypeError("%s object is immutable" % self.__class__.__name__) __delitem__ = __setitem__ = __setattr__ = _immutable class immutabledict(ImmutableContainer, dict): clear = pop = popitem = setdefault = \ update = ImmutableContainer._immutable def __new__(cls, *args): new = dict.__new__(cls) dict.__init__(new, *args) return new def __init__(self, *args): pass def __reduce__(self): return immutabledict, (dict(self), ) def union(self, d): if not d: return self elif not self: if isinstance(d, immutabledict): return d else: return immutabledict(d) else: d2 = immutabledict(self) dict.update(d2, d) return d2 def __repr__(self): return "immutabledict(%s)" % dict.__repr__(self) class Properties(object): """Provide a __getattr__/__setattr__ interface over a dict.""" __slots__ = '_data', def __init__(self, data): object.__setattr__(self, '_data', data) def __len__(self): return len(self._data) def __iter__(self): return iter(list(self._data.values())) def __add__(self, other): return list(self) + list(other) def __setitem__(self, key, object): self._data[key] = object def __getitem__(self, key): return self._data[key] def __delitem__(self, key): del self._data[key] def __setattr__(self, key, obj): self._data[key] = obj def __getstate__(self): return {'_data': self.__dict__['_data']} def __setstate__(self, state): self.__dict__['_data'] = state['_data'] def 
__getattr__(self, key): try: return self._data[key] except KeyError: raise AttributeError(key) def __contains__(self, key): return key in self._data def as_immutable(self): """Return an immutable proxy for this :class:`.Properties`.""" return ImmutableProperties(self._data) def update(self, value): self._data.update(value) def get(self, key, default=None): if key in self: return self[key] else: return default def keys(self): return list(self._data) def values(self): return list(self._data.values()) def items(self): return list(self._data.items()) def has_key(self, key): return key in self._data def clear(self): self._data.clear() class OrderedProperties(Properties): """Provide a __getattr__/__setattr__ interface with an OrderedDict as backing store.""" __slots__ = () def __init__(self): Properties.__init__(self, OrderedDict()) class ImmutableProperties(ImmutableContainer, Properties): """Provide immutable dict/object attribute to an underlying dictionary.""" __slots__ = () class OrderedDict(dict): """A dict that returns keys/values/items in the order they were added.""" __slots__ = '_list', def __reduce__(self): return OrderedDict, (self.items(),) def __init__(self, ____sequence=None, **kwargs): self._list = [] if ____sequence is None: if kwargs: self.update(**kwargs) else: self.update(____sequence, **kwargs) def clear(self): self._list = [] dict.clear(self) def copy(self): return self.__copy__() def __copy__(self): return OrderedDict(self) def sort(self, *arg, **kw): self._list.sort(*arg, **kw) def update(self, ____sequence=None, **kwargs): if ____sequence is not None: if hasattr(____sequence, 'keys'): for key in ____sequence.keys(): self.__setitem__(key, ____sequence[key]) else: for key, value in ____sequence: self[key] = value if kwargs: self.update(kwargs) def setdefault(self, key, value): if key not in self: self.__setitem__(key, value) return value else: return self.__getitem__(key) def __iter__(self): return iter(self._list) def keys(self): return list(self) def values(self): return [self[key] for key in self._list] def items(self): return [(key, self[key]) for key in self._list] if py2k: def itervalues(self): return iter(self.values()) def iterkeys(self): return iter(self) def iteritems(self): return iter(self.items()) def __setitem__(self, key, object): if key not in self: try: self._list.append(key) except AttributeError: # work around Python pickle loads() with # dict subclass (seems to ignore __setstate__?) 
self._list = [key] dict.__setitem__(self, key, object) def __delitem__(self, key): dict.__delitem__(self, key) self._list.remove(key) def pop(self, key, *default): present = key in self value = dict.pop(self, key, *default) if present: self._list.remove(key) return value def popitem(self): item = dict.popitem(self) self._list.remove(item[0]) return item class OrderedSet(set): def __init__(self, d=None): set.__init__(self) self._list = [] if d is not None: self._list = unique_list(d) set.update(self, self._list) else: self._list = [] def add(self, element): if element not in self: self._list.append(element) set.add(self, element) def remove(self, element): set.remove(self, element) self._list.remove(element) def insert(self, pos, element): if element not in self: self._list.insert(pos, element) set.add(self, element) def discard(self, element): if element in self: self._list.remove(element) set.remove(self, element) def clear(self): set.clear(self) self._list = [] def __getitem__(self, key): return self._list[key] def __iter__(self): return iter(self._list) def __add__(self, other): return self.union(other) def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self._list) __str__ = __repr__ def update(self, iterable): for e in iterable: if e not in self: self._list.append(e) set.add(self, e) return self __ior__ = update def union(self, other): result = self.__class__(self) result.update(other) return result __or__ = union def intersection(self, other): other = set(other) return self.__class__(a for a in self if a in other) __and__ = intersection def symmetric_difference(self, other): other = set(other) result = self.__class__(a for a in self if a not in other) result.update(a for a in other if a not in self) return result __xor__ = symmetric_difference def difference(self, other): other = set(other) return self.__class__(a for a in self if a not in other) __sub__ = difference def intersection_update(self, other): other = set(other) set.intersection_update(self, other) self._list = [a for a in self._list if a in other] return self __iand__ = intersection_update def symmetric_difference_update(self, other): set.symmetric_difference_update(self, other) self._list = [a for a in self._list if a in self] self._list += [a for a in other._list if a in self] return self __ixor__ = symmetric_difference_update def difference_update(self, other): set.difference_update(self, other) self._list = [a for a in self._list if a in self] return self __isub__ = difference_update class IdentitySet(object): """A set that considers only object id() for uniqueness. This strategy has edge cases for builtin types- it's possible to have two 'foo' strings in one of these sets, for example. Use sparingly. 
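For example (illustrative; the two strings below are equal but are
distinct objects)::

    s = IdentitySet()
    a = "foo"
    b = "".join(["f", "oo"])
    s.add(a)
    s.add(b)
    len(s)   # 2 - membership is based on id(), not equality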
""" _working_set = set def __init__(self, iterable=None): self._members = dict() if iterable: for o in iterable: self.add(o) def add(self, value): self._members[id(value)] = value def __contains__(self, value): return id(value) in self._members def remove(self, value): del self._members[id(value)] def discard(self, value): try: self.remove(value) except KeyError: pass def pop(self): try: pair = self._members.popitem() return pair[1] except KeyError: raise KeyError('pop from an empty set') def clear(self): self._members.clear() def __cmp__(self, other): raise TypeError('cannot compare sets using cmp()') def __eq__(self, other): if isinstance(other, IdentitySet): return self._members == other._members else: return False def __ne__(self, other): if isinstance(other, IdentitySet): return self._members != other._members else: return True def issubset(self, iterable): other = type(self)(iterable) if len(self) > len(other): return False for m in itertools_filterfalse(other._members.__contains__, iter(self._members.keys())): return False return True def __le__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.issubset(other) def __lt__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return len(self) < len(other) and self.issubset(other) def issuperset(self, iterable): other = type(self)(iterable) if len(self) < len(other): return False for m in itertools_filterfalse(self._members.__contains__, iter(other._members.keys())): return False return True def __ge__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.issuperset(other) def __gt__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return len(self) > len(other) and self.issuperset(other) def union(self, iterable): result = type(self)() # testlib.pragma exempt:__hash__ members = self._member_id_tuples() other = _iter_id(iterable) result._members.update(self._working_set(members).union(other)) return result def __or__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.union(other) def update(self, iterable): self._members = self.union(iterable)._members def __ior__(self, other): if not isinstance(other, IdentitySet): return NotImplemented self.update(other) return self def difference(self, iterable): result = type(self)() # testlib.pragma exempt:__hash__ members = self._member_id_tuples() other = _iter_id(iterable) result._members.update(self._working_set(members).difference(other)) return result def __sub__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.difference(other) def difference_update(self, iterable): self._members = self.difference(iterable)._members def __isub__(self, other): if not isinstance(other, IdentitySet): return NotImplemented self.difference_update(other) return self def intersection(self, iterable): result = type(self)() # testlib.pragma exempt:__hash__ members = self._member_id_tuples() other = _iter_id(iterable) result._members.update(self._working_set(members).intersection(other)) return result def __and__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.intersection(other) def intersection_update(self, iterable): self._members = self.intersection(iterable)._members def __iand__(self, other): if not isinstance(other, IdentitySet): return NotImplemented self.intersection_update(other) return self def symmetric_difference(self, iterable): result = type(self)() # testlib.pragma 
exempt:__hash__ members = self._member_id_tuples() other = _iter_id(iterable) result._members.update( self._working_set(members).symmetric_difference(other)) return result def _member_id_tuples(self): return ((id(v), v) for v in self._members.values()) def __xor__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.symmetric_difference(other) def symmetric_difference_update(self, iterable): self._members = self.symmetric_difference(iterable)._members def __ixor__(self, other): if not isinstance(other, IdentitySet): return NotImplemented self.symmetric_difference(other) return self def copy(self): return type(self)(iter(self._members.values())) __copy__ = copy def __len__(self): return len(self._members) def __iter__(self): return iter(self._members.values()) def __hash__(self): raise TypeError('set objects are unhashable') def __repr__(self): return '%s(%r)' % (type(self).__name__, list(self._members.values())) class WeakSequence(object): def __init__(self, __elements=()): self._storage = [ weakref.ref(element, self._remove) for element in __elements ] def append(self, item): self._storage.append(weakref.ref(item, self._remove)) def _remove(self, ref): self._storage.remove(ref) def __len__(self): return len(self._storage) def __iter__(self): return (obj for obj in (ref() for ref in self._storage) if obj is not None) def __getitem__(self, index): try: obj = self._storage[index] except KeyError: raise IndexError("Index %s out of range" % index) else: return obj() class OrderedIdentitySet(IdentitySet): class _working_set(OrderedSet): # a testing pragma: exempt the OIDS working set from the test suite's # "never call the user's __hash__" assertions. this is a big hammer, # but it's safe here: IDS operates on (id, instance) tuples in the # working set. __sa_hash_exempt__ = True def __init__(self, iterable=None): IdentitySet.__init__(self) self._members = OrderedDict() if iterable: for o in iterable: self.add(o) class PopulateDict(dict): """A dict which populates missing values via a creation function. Note the creation function takes a key, unlike collections.defaultdict. """ def __init__(self, creator): self.creator = creator def __missing__(self, key): self[key] = val = self.creator(key) return val # Define collections that are capable of storing # ColumnElement objects as hashable keys/elements. # At this point, these are mostly historical, things # used to be more complicated. column_set = set column_dict = dict ordered_column_set = OrderedSet populate_column_dict = PopulateDict _getters = PopulateDict(operator.itemgetter) _property_getters = PopulateDict( lambda idx: property(operator.itemgetter(idx))) def unique_list(seq, hashfunc=None): seen = set() seen_add = seen.add if not hashfunc: return [x for x in seq if x not in seen and not seen_add(x)] else: return [x for x in seq if hashfunc(x) not in seen and not seen_add(hashfunc(x))] class UniqueAppender(object): """Appends items to a collection ensuring uniqueness. Additional appends() of the same object are ignored. Membership is determined by identity (``is a``) not equality (``==``). 
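For example::

    target = []
    appender = UniqueAppender(target)
    item = object()
    appender.append(item)
    appender.append(item)   # ignored; same id() already seen
    len(target)             # 1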
""" def __init__(self, data, via=None): self.data = data self._unique = {} if via: self._data_appender = getattr(data, via) elif hasattr(data, 'append'): self._data_appender = data.append elif hasattr(data, 'add'): self._data_appender = data.add def append(self, item): id_ = id(item) if id_ not in self._unique: self._data_appender(item) self._unique[id_] = True def __iter__(self): return iter(self.data) def coerce_generator_arg(arg): if len(arg) == 1 and isinstance(arg[0], types.GeneratorType): return list(arg[0]) else: return arg def to_list(x, default=None): if x is None: return default if not isinstance(x, collections.Iterable) or isinstance(x, string_types): return [x] elif isinstance(x, list): return x else: return list(x) def has_intersection(set_, iterable): """return True if any items of set_ are present in iterable. Goes through special effort to ensure __hash__ is not called on items in iterable that don't support it. """ # TODO: optimize, write in C, etc. return bool( set_.intersection([i for i in iterable if i.__hash__]) ) def to_set(x): if x is None: return set() if not isinstance(x, set): return set(to_list(x)) else: return x def to_column_set(x): if x is None: return column_set() if not isinstance(x, column_set): return column_set(to_list(x)) else: return x def update_copy(d, _new=None, **kw): """Copy the given dict and update with the given values.""" d = d.copy() if _new: d.update(_new) d.update(**kw) return d def flatten_iterator(x): """Given an iterator of which further sub-elements may also be iterators, flatten the sub-elements into a single iterator. """ for elem in x: if not isinstance(elem, str) and hasattr(elem, '__iter__'): for y in flatten_iterator(elem): yield y else: yield elem class LRUCache(dict): """Dictionary with 'squishy' removal of least recently used items. Note that either get() or [] should be used here, but generally its not safe to do an "in" check first as the dictionary can change subsequent to that call. 
""" def __init__(self, capacity=100, threshold=.5): self.capacity = capacity self.threshold = threshold self._counter = 0 self._mutex = threading.Lock() def _inc_counter(self): self._counter += 1 return self._counter def get(self, key, default=None): item = dict.get(self, key, default) if item is not default: item[2] = self._inc_counter() return item[1] else: return default def __getitem__(self, key): item = dict.__getitem__(self, key) item[2] = self._inc_counter() return item[1] def values(self): return [i[1] for i in dict.values(self)] def setdefault(self, key, value): if key in self: return self[key] else: self[key] = value return value def __setitem__(self, key, value): item = dict.get(self, key) if item is None: item = [key, value, self._inc_counter()] dict.__setitem__(self, key, item) else: item[1] = value self._manage_size() def _manage_size(self): if not self._mutex.acquire(False): return try: while len(self) > self.capacity + self.capacity * self.threshold: by_counter = sorted(dict.values(self), key=operator.itemgetter(2), reverse=True) for item in by_counter[self.capacity:]: try: del self[item[0]] except KeyError: # deleted elsewhere; skip continue finally: self._mutex.release() _lw_tuples = LRUCache(100) def lightweight_named_tuple(name, fields): hash_ = (name, ) + tuple(fields) tp_cls = _lw_tuples.get(hash_) if tp_cls: return tp_cls tp_cls = type( name, (_LW,), dict([ (field, _property_getters[idx]) for idx, field in enumerate(fields) if field is not None ] + [('__slots__', ())]) ) tp_cls._real_fields = fields tp_cls._fields = tuple([f for f in fields if f is not None]) _lw_tuples[hash_] = tp_cls return tp_cls class ScopedRegistry(object): """A Registry that can store one or multiple instances of a single class on the basis of a "scope" function. The object implements ``__call__`` as the "getter", so by calling ``myregistry()`` the contained object is returned for the current scope. :param createfunc: a callable that returns a new object to be placed in the registry :param scopefunc: a callable that will return a key to store/retrieve an object. """ def __init__(self, createfunc, scopefunc): """Construct a new :class:`.ScopedRegistry`. :param createfunc: A creation function that will generate a new value for the current scope, if none is present. :param scopefunc: A function that returns a hashable token representing the current scope (such as, current thread identifier). """ self.createfunc = createfunc self.scopefunc = scopefunc self.registry = {} def __call__(self): key = self.scopefunc() try: return self.registry[key] except KeyError: return self.registry.setdefault(key, self.createfunc()) def has(self): """Return True if an object is present in the current scope.""" return self.scopefunc() in self.registry def set(self, obj): """Set the value for the current scope.""" self.registry[self.scopefunc()] = obj def clear(self): """Clear the current scope, if any.""" try: del self.registry[self.scopefunc()] except KeyError: pass class ThreadLocalRegistry(ScopedRegistry): """A :class:`.ScopedRegistry` that uses a ``threading.local()`` variable for storage. 
""" def __init__(self, createfunc): self.createfunc = createfunc self.registry = threading.local() def __call__(self): try: return self.registry.value except AttributeError: val = self.registry.value = self.createfunc() return val def has(self): return hasattr(self.registry, "value") def set(self, obj): self.registry.value = obj def clear(self): try: del self.registry.value except AttributeError: pass def _iter_id(iterable): """Generator: ((id(o), o) for o in iterable).""" for item in iterable: yield id(item), item SQLAlchemy-1.0.11/lib/sqlalchemy/util/__init__.py0000664000175000017500000000473012636375552022642 0ustar classicclassic00000000000000# util/__init__.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from .compat import callable, cmp, reduce, \ threading, py3k, py33, py36, py2k, jython, pypy, cpython, win32, \ pickle, dottedgetter, parse_qsl, namedtuple, next, reraise, \ raise_from_cause, text_type, safe_kwarg, string_types, int_types, \ binary_type, nested, \ quote_plus, with_metaclass, print_, itertools_filterfalse, u, ue, b,\ unquote_plus, unquote, b64decode, b64encode, byte_buffer, itertools_filter,\ iterbytes, StringIO, inspect_getargspec, zip_longest from ._collections import KeyedTuple, ImmutableContainer, immutabledict, \ Properties, OrderedProperties, ImmutableProperties, OrderedDict, \ OrderedSet, IdentitySet, OrderedIdentitySet, column_set, \ column_dict, ordered_column_set, populate_column_dict, unique_list, \ UniqueAppender, PopulateDict, EMPTY_SET, to_list, to_set, \ to_column_set, update_copy, flatten_iterator, has_intersection, \ LRUCache, ScopedRegistry, ThreadLocalRegistry, WeakSequence, \ coerce_generator_arg, lightweight_named_tuple from .langhelpers import iterate_attributes, class_hierarchy, \ portable_instancemethod, unbound_method_to_callable, \ getargspec_init, format_argspec_init, format_argspec_plus, \ get_func_kwargs, get_cls_kwargs, decorator, as_interface, \ memoized_property, memoized_instancemethod, md5_hex, \ group_expirable_memoized_property, dependencies, decode_slice, \ monkeypatch_proxied_specials, asbool, bool_or_str, coerce_kw_type,\ duck_type_collection, assert_arg_type, symbol, dictlike_iteritems,\ classproperty, set_creation_order, warn_exception, warn, NoneType,\ constructor_copy, methods_equivalent, chop_traceback, asint,\ generic_repr, counter, PluginLoader, hybridproperty, hybridmethod, \ safe_reraise,\ get_callable_argspec, only_once, attrsetter, ellipses_string, \ warn_limited, map_bits, MemoizedSlots, EnsureKWArgType from .deprecations import warn_deprecated, warn_pending_deprecation, \ deprecated, pending_deprecation, inject_docstring_text # things that used to be not always available, # but are now as of current support Python versions from collections import defaultdict from functools import partial from functools import update_wrapper from contextlib import contextmanager SQLAlchemy-1.0.11/lib/sqlalchemy/util/queue.py0000664000175000017500000001462412636375552022232 0ustar classicclassic00000000000000# util/queue.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """An adaptation of Py2.3/2.4's Queue module which supports reentrant behavior, using RLock instead of Lock for its mutex object. 
The Queue object is used exclusively by the sqlalchemy.pool.QueuePool class. This is to support the connection pool's usage of weakref callbacks to return connections to the underlying Queue, which can in extremely rare cases be invoked within the ``get()`` method of the Queue itself, producing a ``put()`` inside the ``get()`` and therefore a reentrant condition. """ from collections import deque from time import time as _time from .compat import threading __all__ = ['Empty', 'Full', 'Queue'] class Empty(Exception): "Exception raised by Queue.get(block=0)/get_nowait()." pass class Full(Exception): "Exception raised by Queue.put(block=0)/put_nowait()." pass class Queue: def __init__(self, maxsize=0): """Initialize a queue object with a given maximum size. If `maxsize` is <= 0, the queue size is infinite. """ self._init(maxsize) # mutex must be held whenever the queue is mutating. All methods # that acquire mutex must release it before returning. mutex # is shared between the two conditions, so acquiring and # releasing the conditions also acquires and releases mutex. self.mutex = threading.RLock() # Notify not_empty whenever an item is added to the queue; a # thread waiting to get is notified then. self.not_empty = threading.Condition(self.mutex) # Notify not_full whenever an item is removed from the queue; # a thread waiting to put is notified then. self.not_full = threading.Condition(self.mutex) def qsize(self): """Return the approximate size of the queue (not reliable!).""" self.mutex.acquire() n = self._qsize() self.mutex.release() return n def empty(self): """Return True if the queue is empty, False otherwise (not reliable!).""" self.mutex.acquire() n = self._empty() self.mutex.release() return n def full(self): """Return True if the queue is full, False otherwise (not reliable!).""" self.mutex.acquire() n = self._full() self.mutex.release() return n def put(self, item, block=True, timeout=None): """Put an item into the queue. If optional args `block` is True and `timeout` is None (the default), block if necessary until a free slot is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the ``Full`` exception if no free slot was available within that time. Otherwise (`block` is false), put an item on the queue if a free slot is immediately available, else raise the ``Full`` exception (`timeout` is ignored in that case). """ self.not_full.acquire() try: if not block: if self._full(): raise Full elif timeout is None: while self._full(): self.not_full.wait() else: if timeout < 0: raise ValueError("'timeout' must be a positive number") endtime = _time() + timeout while self._full(): remaining = endtime - _time() if remaining <= 0.0: raise Full self.not_full.wait(remaining) self._put(item) self.not_empty.notify() finally: self.not_full.release() def put_nowait(self, item): """Put an item into the queue without blocking. Only enqueue the item if a free slot is immediately available. Otherwise raise the ``Full`` exception. """ return self.put(item, False) def get(self, block=True, timeout=None): """Remove and return an item from the queue. If optional args `block` is True and `timeout` is None (the default), block if necessary until an item is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the ``Empty`` exception if no item was available within that time. Otherwise (`block` is false), return an item if one is immediately available, else raise the ``Empty`` exception (`timeout` is ignored in that case). 
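For example::

    q = Queue(maxsize=2)
    q.put("connection")
    q.get()               # -> "connection"
    q.get(timeout=0.1)    # queue now empty; raises Empty after
                          # roughly 0.1 seconds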
""" self.not_empty.acquire() try: if not block: if self._empty(): raise Empty elif timeout is None: while self._empty(): self.not_empty.wait() else: if timeout < 0: raise ValueError("'timeout' must be a positive number") endtime = _time() + timeout while self._empty(): remaining = endtime - _time() if remaining <= 0.0: raise Empty self.not_empty.wait(remaining) item = self._get() self.not_full.notify() return item finally: self.not_empty.release() def get_nowait(self): """Remove and return an item from the queue without blocking. Only get an item if one is immediately available. Otherwise raise the ``Empty`` exception. """ return self.get(False) # Override these methods to implement other queue organizations # (e.g. stack or priority queue). # These will only be called with appropriate locks held # Initialize the queue representation def _init(self, maxsize): self.maxsize = maxsize self.queue = deque() def _qsize(self): return len(self.queue) # Check whether the queue is empty def _empty(self): return not self.queue # Check whether the queue is full def _full(self): return self.maxsize > 0 and len(self.queue) == self.maxsize # Put a new item in the queue def _put(self, item): self.queue.append(item) # Get an item from the queue def _get(self): return self.queue.popleft() SQLAlchemy-1.0.11/lib/sqlalchemy/event/0000775000175000017500000000000012636376632020671 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/lib/sqlalchemy/event/api.py0000664000175000017500000001354612636375552022025 0ustar classicclassic00000000000000# event/api.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Public API functions for the event system. """ from __future__ import absolute_import from .. import util, exc from .base import _registrars from .registry import _EventKey CANCEL = util.symbol('CANCEL') NO_RETVAL = util.symbol('NO_RETVAL') def _event_key(target, identifier, fn): for evt_cls in _registrars[identifier]: tgt = evt_cls._accept_with(target) if tgt is not None: return _EventKey(target, identifier, fn, tgt) else: raise exc.InvalidRequestError("No such event '%s' for target '%s'" % (identifier, target)) def listen(target, identifier, fn, *args, **kw): """Register a listener function for the given target. e.g.:: from sqlalchemy import event from sqlalchemy.schema import UniqueConstraint def unique_constraint_name(const, table): const.name = "uq_%s_%s" % ( table.name, list(const.columns)[0].name ) event.listen( UniqueConstraint, "after_parent_attach", unique_constraint_name) A given function can also be invoked for only the first invocation of the event using the ``once`` argument:: def on_config(): do_config() event.listen(Mapper, "before_configure", on_config, once=True) .. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen` and :func:`.event.listens_for`. .. note:: The :func:`.listen` function cannot be called at the same time that the target event is being run. This has implications for thread safety, and also means an event cannot be added from inside the listener function for itself. The list of events to be run are present inside of a mutable collection that can't be changed during iteration. Event registration and removal is not intended to be a "high velocity" operation; it is a configurational operation. 
For systems that need to quickly associate and deassociate with events at high scale, use a mutable structure that is handled from inside of a single listener. .. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now used as the container for the list of events, which explicitly disallows collection mutation while the collection is being iterated. .. seealso:: :func:`.listens_for` :func:`.remove` """ _event_key(target, identifier, fn).listen(*args, **kw) def listens_for(target, identifier, *args, **kw): """Decorate a function as a listener for the given target + identifier. e.g.:: from sqlalchemy import event from sqlalchemy.schema import UniqueConstraint @event.listens_for(UniqueConstraint, "after_parent_attach") def unique_constraint_name(const, table): const.name = "uq_%s_%s" % ( table.name, list(const.columns)[0].name ) A given function can also be invoked for only the first invocation of the event using the ``once`` argument:: @event.listens_for(Mapper, "before_configure", once=True) def on_config(): do_config() .. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen` and :func:`.event.listens_for`. .. seealso:: :func:`.listen` - general description of event listening """ def decorate(fn): listen(target, identifier, fn, *args, **kw) return fn return decorate def remove(target, identifier, fn): """Remove an event listener. The arguments here should match exactly those which were sent to :func:`.listen`; all the event registration which proceeded as a result of this call will be reverted by calling :func:`.remove` with the same arguments. e.g.:: # if a function was registered like this... @event.listens_for(SomeMappedClass, "before_insert", propagate=True) def my_listener_function(*arg): pass # ... it's removed like this event.remove(SomeMappedClass, "before_insert", my_listener_function) Above, the listener function associated with ``SomeMappedClass`` was also propagated to subclasses of ``SomeMappedClass``; the :func:`.remove` function will revert all of these operations. .. versionadded:: 0.9.0 .. note:: The :func:`.remove` function cannot be called at the same time that the target event is being run. This has implications for thread safety, and also means an event cannot be removed from inside the listener function for itself. The list of events to be run are present inside of a mutable collection that can't be changed during iteration. Event registration and removal is not intended to be a "high velocity" operation; it is a configurational operation. For systems that need to quickly associate and deassociate with events at high scale, use a mutable structure that is handled from inside of a single listener. .. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now used as the container for the list of events, which explicitly disallows collection mutation while the collection is being iterated. .. seealso:: :func:`.listen` """ _event_key(target, identifier, fn).remove() def contains(target, identifier, fn): """Return True if the given target/ident/fn is set up to listen. .. versionadded:: 0.9.0 """ return _event_key(target, identifier, fn).contains() SQLAlchemy-1.0.11/lib/sqlalchemy/event/base.py0000664000175000017500000002250412636375552022160 0ustar classicclassic00000000000000# event/base.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Base implementation classes. 
The public-facing ``Events`` serves as the base class for an event interface; its public attributes represent different kinds of events. These attributes are mirrored onto a ``_Dispatch`` class, which serves as a container for collections of listener functions. These collections are represented both at the class level of a particular ``_Dispatch`` class as well as within instances of ``_Dispatch``. """ from __future__ import absolute_import import weakref from .. import util from .attr import _JoinedListener, \ _EmptyListener, _ClsLevelDispatch _registrars = util.defaultdict(list) def _is_event_name(name): return not name.startswith('_') and name != 'dispatch' class _UnpickleDispatch(object): """Serializable callable that re-generates an instance of :class:`_Dispatch` given a particular :class:`.Events` subclass. """ def __call__(self, _instance_cls): for cls in _instance_cls.__mro__: if 'dispatch' in cls.__dict__: return cls.__dict__['dispatch'].\ dispatch_cls._for_class(_instance_cls) else: raise AttributeError("No class with a 'dispatch' member present.") class _Dispatch(object): """Mirror the event listening definitions of an Events class with listener collections. Classes which define a "dispatch" member will return a non-instantiated :class:`._Dispatch` subclass when the member is accessed at the class level. When the "dispatch" member is accessed at the instance level of its owner, an instance of the :class:`._Dispatch` class is returned. A :class:`._Dispatch` class is generated for each :class:`.Events` class defined, by the :func:`._create_dispatcher_class` function. The original :class:`.Events` classes remain untouched. This decouples the construction of :class:`.Events` subclasses from the implementation used by the event internals, and allows inspecting tools like Sphinx to work in an unsurprising way against the public API. """ # in one ORM edge case, an attribute is added to _Dispatch, # so __dict__ is used in just that case and potentially others. __slots__ = '_parent', '_instance_cls', '__dict__', '_empty_listeners' _empty_listener_reg = weakref.WeakKeyDictionary() def __init__(self, parent, instance_cls=None): self._parent = parent self._instance_cls = instance_cls if instance_cls: try: self._empty_listeners = self._empty_listener_reg[instance_cls] except KeyError: self._empty_listeners = \ self._empty_listener_reg[instance_cls] = dict( (ls.name, _EmptyListener(ls, instance_cls)) for ls in parent._event_descriptors ) else: self._empty_listeners = {} def __getattr__(self, name): # assign EmptyListeners as attributes on demand # to reduce startup time for new dispatch objects try: ls = self._empty_listeners[name] except KeyError: raise AttributeError(name) else: setattr(self, ls.name, ls) return ls @property def _event_descriptors(self): for k in self._event_names: yield getattr(self, k) def _for_class(self, instance_cls): return self.__class__(self, instance_cls) def _for_instance(self, instance): instance_cls = instance.__class__ return self._for_class(instance_cls) @property def _listen(self): return self._events._listen def _join(self, other): """Create a 'join' of this :class:`._Dispatch` and another. This new dispatcher will dispatch events to both :class:`._Dispatch` objects. 
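A rough sketch of the intent (hypothetical objects)::

    joined = local_dispatch._join(parent_dispatch)
    # an event fired via ``joined`` consults the listener
    # collections of both constituent dispatchers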
""" if '_joined_dispatch_cls' not in self.__class__.__dict__: cls = type( "Joined%s" % self.__class__.__name__, (_JoinedDispatcher, ), {'__slots__': self._event_names} ) self.__class__._joined_dispatch_cls = cls return self._joined_dispatch_cls(self, other) def __reduce__(self): return _UnpickleDispatch(), (self._instance_cls, ) def _update(self, other, only_propagate=True): """Populate from the listeners in another :class:`_Dispatch` object.""" for ls in other._event_descriptors: if isinstance(ls, _EmptyListener): continue getattr(self, ls.name).\ for_modify(self)._update(ls, only_propagate=only_propagate) def _clear(self): for ls in self._event_descriptors: ls.for_modify(self).clear() class _EventMeta(type): """Intercept new Event subclasses and create associated _Dispatch classes.""" def __init__(cls, classname, bases, dict_): _create_dispatcher_class(cls, classname, bases, dict_) return type.__init__(cls, classname, bases, dict_) def _create_dispatcher_class(cls, classname, bases, dict_): """Create a :class:`._Dispatch` class corresponding to an :class:`.Events` class.""" # there's all kinds of ways to do this, # i.e. make a Dispatch class that shares the '_listen' method # of the Event class, this is the straight monkeypatch. if hasattr(cls, 'dispatch'): dispatch_base = cls.dispatch.__class__ else: dispatch_base = _Dispatch event_names = [k for k in dict_ if _is_event_name(k)] dispatch_cls = type("%sDispatch" % classname, (dispatch_base, ), {'__slots__': event_names}) dispatch_cls._event_names = event_names dispatch_inst = cls._set_dispatch(cls, dispatch_cls) for k in dispatch_cls._event_names: setattr(dispatch_inst, k, _ClsLevelDispatch(cls, dict_[k])) _registrars[k].append(cls) for super_ in dispatch_cls.__bases__: if issubclass(super_, _Dispatch) and super_ is not _Dispatch: for ls in super_._events.dispatch._event_descriptors: setattr(dispatch_inst, ls.name, ls) dispatch_cls._event_names.append(ls.name) if getattr(cls, '_dispatch_target', None): cls._dispatch_target.dispatch = dispatcher(cls) def _remove_dispatcher(cls): for k in cls.dispatch._event_names: _registrars[k].remove(cls) if not _registrars[k]: del _registrars[k] class Events(util.with_metaclass(_EventMeta, object)): """Define event listening functions for a particular target type.""" @staticmethod def _set_dispatch(cls, dispatch_cls): # this allows an Events subclass to define additional utility # methods made available to the target via # "self.dispatch._events." # @staticemethod to allow easy "super" calls while in a metaclass # constructor. cls.dispatch = dispatch_cls(None) dispatch_cls._events = cls return cls.dispatch @classmethod def _accept_with(cls, target): # Mapper, ClassManager, Session override this to # also accept classes, scoped_sessions, sessionmakers, etc. 
if hasattr(target, 'dispatch') and ( isinstance(target.dispatch, cls.dispatch.__class__) or ( isinstance(target.dispatch, type) and isinstance(target.dispatch, cls.dispatch.__class__) ) or ( isinstance(target.dispatch, _JoinedDispatcher) and isinstance(target.dispatch.parent, cls.dispatch.__class__) ) ): return target else: return None @classmethod def _listen(cls, event_key, propagate=False, insert=False, named=False): event_key.base_listen(propagate=propagate, insert=insert, named=named) @classmethod def _remove(cls, event_key): event_key.remove() @classmethod def _clear(cls): cls.dispatch._clear() class _JoinedDispatcher(object): """Represent a connection between two _Dispatch objects.""" __slots__ = 'local', 'parent', '_instance_cls' def __init__(self, local, parent): self.local = local self.parent = parent self._instance_cls = self.local._instance_cls def __getattr__(self, name): # assign _JoinedListeners as attributes on demand # to reduce startup time for new dispatch objects ls = getattr(self.local, name) jl = _JoinedListener(self.parent, ls.name, ls) setattr(self, ls.name, jl) return jl @property def _listen(self): return self.parent._listen class dispatcher(object): """Descriptor used by target classes to deliver the _Dispatch class at the class level and produce new _Dispatch instances for target instances. """ def __init__(self, events): self.dispatch_cls = events.dispatch self.events = events def __get__(self, obj, cls): if obj is None: return self.dispatch_cls obj.__dict__['dispatch'] = disp = self.dispatch_cls._for_instance(obj) return disp SQLAlchemy-1.0.11/lib/sqlalchemy/event/attr.py0000664000175000017500000002742512636375552022227 0ustar classicclassic00000000000000# event/attr.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Attribute implementation for _Dispatch classes. The various listener targets for a particular event class are represented as attributes, which refer to collections of listeners to be fired off. These collections can exist at the class level as well as at the instance level. An event is fired off using code like this:: some_object.dispatch.first_connect(arg1, arg2) Above, ``some_object.dispatch`` would be an instance of ``_Dispatch`` and ``first_connect`` is typically an instance of ``_ListenerCollection`` if event listeners are present, or ``_EmptyListener`` if none are present. The attribute mechanics here spend effort trying to ensure listener functions are available with a minimum of function call overhead, that unnecessary objects aren't created (i.e. many empty per-instance listener collections), as well as that everything is garbage collectable when owning references are lost. Other features such as "propagation" of listener functions across many ``_Dispatch`` instances, "joining" of multiple ``_Dispatch`` instances, as well as support for subclass propagation (e.g. events assigned to ``Pool`` vs. ``QueuePool``) are all implemented here. """ from __future__ import absolute_import, with_statement from .. import util from ..util import threading from . import registry from . 
import legacy from itertools import chain import weakref import collections class RefCollection(util.MemoizedSlots): __slots__ = 'ref', def _memoized_attr_ref(self): return weakref.ref(self, registry._collection_gced) class _ClsLevelDispatch(RefCollection): """Class-level events on :class:`._Dispatch` classes.""" __slots__ = ('name', 'arg_names', 'has_kw', 'legacy_signatures', '_clslevel', '__weakref__') def __init__(self, parent_dispatch_cls, fn): self.name = fn.__name__ argspec = util.inspect_getargspec(fn) self.arg_names = argspec.args[1:] self.has_kw = bool(argspec.keywords) self.legacy_signatures = list(reversed( sorted( getattr(fn, '_legacy_signatures', []), key=lambda s: s[0] ) )) fn.__doc__ = legacy._augment_fn_docs(self, parent_dispatch_cls, fn) self._clslevel = weakref.WeakKeyDictionary() def _adjust_fn_spec(self, fn, named): if named: fn = self._wrap_fn_for_kw(fn) if self.legacy_signatures: try: argspec = util.get_callable_argspec(fn, no_self=True) except TypeError: pass else: fn = legacy._wrap_fn_for_legacy(self, fn, argspec) return fn def _wrap_fn_for_kw(self, fn): def wrap_kw(*args, **kw): argdict = dict(zip(self.arg_names, args)) argdict.update(kw) return fn(**argdict) return wrap_kw def insert(self, event_key, propagate): target = event_key.dispatch_target assert isinstance(target, type), \ "Class-level Event targets must be classes." stack = [target] while stack: cls = stack.pop(0) stack.extend(cls.__subclasses__()) if cls is not target and cls not in self._clslevel: self.update_subclass(cls) else: if cls not in self._clslevel: self._clslevel[cls] = collections.deque() self._clslevel[cls].appendleft(event_key._listen_fn) registry._stored_in_collection(event_key, self) def append(self, event_key, propagate): target = event_key.dispatch_target assert isinstance(target, type), \ "Class-level Event targets must be classes." stack = [target] while stack: cls = stack.pop(0) stack.extend(cls.__subclasses__()) if cls is not target and cls not in self._clslevel: self.update_subclass(cls) else: if cls not in self._clslevel: self._clslevel[cls] = collections.deque() self._clslevel[cls].append(event_key._listen_fn) registry._stored_in_collection(event_key, self) def update_subclass(self, target): if target not in self._clslevel: self._clslevel[target] = collections.deque() clslevel = self._clslevel[target] for cls in target.__mro__[1:]: if cls in self._clslevel: clslevel.extend([ fn for fn in self._clslevel[cls] if fn not in clslevel ]) def remove(self, event_key): target = event_key.dispatch_target stack = [target] while stack: cls = stack.pop(0) stack.extend(cls.__subclasses__()) if cls in self._clslevel: self._clslevel[cls].remove(event_key._listen_fn) registry._removed_from_collection(event_key, self) def clear(self): """Clear all class level listeners""" to_clear = set() for dispatcher in self._clslevel.values(): to_clear.update(dispatcher) dispatcher.clear() registry._clear(self, to_clear) def for_modify(self, obj): """Return an event collection which can be modified. For _ClsLevelDispatch at the class level of a dispatcher, this returns self. """ return self class _InstanceLevelDispatch(RefCollection): __slots__ = () def _adjust_fn_spec(self, fn, named): return self.parent._adjust_fn_spec(fn, named) class _EmptyListener(_InstanceLevelDispatch): """Serves as a proxy interface to the events served by a _ClsLevelDispatch, when there are no instance-level events present. Is replaced by _ListenerCollection when instance-level events are added. 
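    In rough terms, assuming a hypothetical event name ``some_event``
    (illustrative only)::

        some_obj.dispatch.some_event        # an _EmptyListener proxy
        some_obj.dispatch.some_event(x)     # fires class-level listeners only

        event.listen(some_obj, 'some_event', fn)
        some_obj.dispatch.some_event        # now a _ListenerCollection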
""" propagate = frozenset() listeners = () __slots__ = 'parent', 'parent_listeners', 'name' def __init__(self, parent, target_cls): if target_cls not in parent._clslevel: parent.update_subclass(target_cls) self.parent = parent # _ClsLevelDispatch self.parent_listeners = parent._clslevel[target_cls] self.name = parent.name def for_modify(self, obj): """Return an event collection which can be modified. For _EmptyListener at the instance level of a dispatcher, this generates a new _ListenerCollection, applies it to the instance, and returns it. """ result = _ListenerCollection(self.parent, obj._instance_cls) if getattr(obj, self.name) is self: setattr(obj, self.name, result) else: assert isinstance(getattr(obj, self.name), _JoinedListener) return result def _needs_modify(self, *args, **kw): raise NotImplementedError("need to call for_modify()") exec_once = insert = append = remove = clear = _needs_modify def __call__(self, *args, **kw): """Execute this event.""" for fn in self.parent_listeners: fn(*args, **kw) def __len__(self): return len(self.parent_listeners) def __iter__(self): return iter(self.parent_listeners) def __bool__(self): return bool(self.parent_listeners) __nonzero__ = __bool__ class _CompoundListener(_InstanceLevelDispatch): __slots__ = '_exec_once_mutex', '_exec_once' def _memoized_attr__exec_once_mutex(self): return threading.Lock() def exec_once(self, *args, **kw): """Execute this event, but only if it has not been executed already for this collection.""" if not self._exec_once: with self._exec_once_mutex: if not self._exec_once: try: self(*args, **kw) finally: self._exec_once = True def __call__(self, *args, **kw): """Execute this event.""" for fn in self.parent_listeners: fn(*args, **kw) for fn in self.listeners: fn(*args, **kw) def __len__(self): return len(self.parent_listeners) + len(self.listeners) def __iter__(self): return chain(self.parent_listeners, self.listeners) def __bool__(self): return bool(self.listeners or self.parent_listeners) __nonzero__ = __bool__ class _ListenerCollection(_CompoundListener): """Instance-level attributes on instances of :class:`._Dispatch`. Represents a collection of listeners. As of 0.7.9, _ListenerCollection is only first created via the _EmptyListener.for_modify() method. """ __slots__ = ( 'parent_listeners', 'parent', 'name', 'listeners', 'propagate', '__weakref__') def __init__(self, parent, target_cls): if target_cls not in parent._clslevel: parent.update_subclass(target_cls) self._exec_once = False self.parent_listeners = parent._clslevel[target_cls] self.parent = parent self.name = parent.name self.listeners = collections.deque() self.propagate = set() def for_modify(self, obj): """Return an event collection which can be modified. For _ListenerCollection at the instance level of a dispatcher, this returns self. 
""" return self def _update(self, other, only_propagate=True): """Populate from the listeners in another :class:`_Dispatch` object.""" existing_listeners = self.listeners existing_listener_set = set(existing_listeners) self.propagate.update(other.propagate) other_listeners = [l for l in other.listeners if l not in existing_listener_set and not only_propagate or l in self.propagate ] existing_listeners.extend(other_listeners) to_associate = other.propagate.union(other_listeners) registry._stored_in_collection_multi(self, other, to_associate) def insert(self, event_key, propagate): if event_key.prepend_to_list(self, self.listeners): if propagate: self.propagate.add(event_key._listen_fn) def append(self, event_key, propagate): if event_key.append_to_list(self, self.listeners): if propagate: self.propagate.add(event_key._listen_fn) def remove(self, event_key): self.listeners.remove(event_key._listen_fn) self.propagate.discard(event_key._listen_fn) registry._removed_from_collection(event_key, self) def clear(self): registry._clear(self, self.listeners) self.propagate.clear() self.listeners.clear() class _JoinedListener(_CompoundListener): __slots__ = 'parent', 'name', 'local', 'parent_listeners' def __init__(self, parent, name, local): self._exec_once = False self.parent = parent self.name = name self.local = local self.parent_listeners = self.local @property def listeners(self): return getattr(self.parent, self.name) def _adjust_fn_spec(self, fn, named): return self.local._adjust_fn_spec(fn, named) def for_modify(self, obj): self.local = self.parent_listeners = self.local.for_modify(obj) return self def insert(self, event_key, propagate): self.local.insert(event_key, propagate) def append(self, event_key, propagate): self.local.append(event_key, propagate) def remove(self, event_key): self.local.remove(event_key) def clear(self): raise NotImplementedError() SQLAlchemy-1.0.11/lib/sqlalchemy/event/registry.py0000664000175000017500000001715212636375552023121 0ustar classicclassic00000000000000# event/registry.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Provides managed registration services on behalf of :func:`.listen` arguments. By "managed registration", we mean that event listening functions and other objects can be added to various collections in such a way that their membership in all those collections can be revoked at once, based on an equivalent :class:`._EventKey`. """ from __future__ import absolute_import import weakref import collections import types from .. 
import exc, util _key_to_collection = collections.defaultdict(dict) """ Given an original listen() argument, can locate all listener collections and the listener fn contained (target, identifier, fn) -> { ref(listenercollection) -> ref(listener_fn) ref(listenercollection) -> ref(listener_fn) ref(listenercollection) -> ref(listener_fn) } """ _collection_to_key = collections.defaultdict(dict) """ Given a _ListenerCollection or _ClsLevelListener, can locate all the original listen() arguments and the listener fn contained ref(listenercollection) -> { ref(listener_fn) -> (target, identifier, fn), ref(listener_fn) -> (target, identifier, fn), ref(listener_fn) -> (target, identifier, fn), } """ def _collection_gced(ref): # defaultdict, so can't get a KeyError if not _collection_to_key or ref not in _collection_to_key: return listener_to_key = _collection_to_key.pop(ref) for key in listener_to_key.values(): if key in _key_to_collection: # defaultdict, so can't get a KeyError dispatch_reg = _key_to_collection[key] dispatch_reg.pop(ref) if not dispatch_reg: _key_to_collection.pop(key) def _stored_in_collection(event_key, owner): key = event_key._key dispatch_reg = _key_to_collection[key] owner_ref = owner.ref listen_ref = weakref.ref(event_key._listen_fn) if owner_ref in dispatch_reg: return False dispatch_reg[owner_ref] = listen_ref listener_to_key = _collection_to_key[owner_ref] listener_to_key[listen_ref] = key return True def _removed_from_collection(event_key, owner): key = event_key._key dispatch_reg = _key_to_collection[key] listen_ref = weakref.ref(event_key._listen_fn) owner_ref = owner.ref dispatch_reg.pop(owner_ref, None) if not dispatch_reg: del _key_to_collection[key] if owner_ref in _collection_to_key: listener_to_key = _collection_to_key[owner_ref] listener_to_key.pop(listen_ref) def _stored_in_collection_multi(newowner, oldowner, elements): if not elements: return oldowner = oldowner.ref newowner = newowner.ref old_listener_to_key = _collection_to_key[oldowner] new_listener_to_key = _collection_to_key[newowner] for listen_fn in elements: listen_ref = weakref.ref(listen_fn) key = old_listener_to_key[listen_ref] dispatch_reg = _key_to_collection[key] if newowner in dispatch_reg: assert dispatch_reg[newowner] == listen_ref else: dispatch_reg[newowner] = listen_ref new_listener_to_key[listen_ref] = key def _clear(owner, elements): if not elements: return owner = owner.ref listener_to_key = _collection_to_key[owner] for listen_fn in elements: listen_ref = weakref.ref(listen_fn) key = listener_to_key[listen_ref] dispatch_reg = _key_to_collection[key] dispatch_reg.pop(owner, None) if not dispatch_reg: del _key_to_collection[key] class _EventKey(object): """Represent :func:`.listen` arguments. 
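    Conceptually, the deduplication key for a call such as
    ``listen(some_target, 'some_event', fn)`` is the tuple
    ``(id(some_target), 'some_event', id(fn))`` when ``fn`` is a plain
    function; see the ``_key`` property below (the event name here is
    illustrative only).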
""" __slots__ = ( 'target', 'identifier', 'fn', 'fn_key', 'fn_wrap', 'dispatch_target' ) def __init__(self, target, identifier, fn, dispatch_target, _fn_wrap=None): self.target = target self.identifier = identifier self.fn = fn if isinstance(fn, types.MethodType): self.fn_key = id(fn.__func__), id(fn.__self__) else: self.fn_key = id(fn) self.fn_wrap = _fn_wrap self.dispatch_target = dispatch_target @property def _key(self): return (id(self.target), self.identifier, self.fn_key) def with_wrapper(self, fn_wrap): if fn_wrap is self._listen_fn: return self else: return _EventKey( self.target, self.identifier, self.fn, self.dispatch_target, _fn_wrap=fn_wrap ) def with_dispatch_target(self, dispatch_target): if dispatch_target is self.dispatch_target: return self else: return _EventKey( self.target, self.identifier, self.fn, dispatch_target, _fn_wrap=self.fn_wrap ) def listen(self, *args, **kw): once = kw.pop("once", False) named = kw.pop("named", False) target, identifier, fn = \ self.dispatch_target, self.identifier, self._listen_fn dispatch_collection = getattr(target.dispatch, identifier) adjusted_fn = dispatch_collection._adjust_fn_spec(fn, named) self = self.with_wrapper(adjusted_fn) if once: self.with_wrapper( util.only_once(self._listen_fn)).listen(*args, **kw) else: self.dispatch_target.dispatch._listen(self, *args, **kw) def remove(self): key = self._key if key not in _key_to_collection: raise exc.InvalidRequestError( "No listeners found for event %s / %r / %s " % (self.target, self.identifier, self.fn) ) dispatch_reg = _key_to_collection.pop(key) for collection_ref, listener_ref in dispatch_reg.items(): collection = collection_ref() listener_fn = listener_ref() if collection is not None and listener_fn is not None: collection.remove(self.with_wrapper(listener_fn)) def contains(self): """Return True if this event key is registered to listen. """ return self._key in _key_to_collection def base_listen(self, propagate=False, insert=False, named=False): target, identifier, fn = \ self.dispatch_target, self.identifier, self._listen_fn dispatch_collection = getattr(target.dispatch, identifier) if insert: dispatch_collection.\ for_modify(target.dispatch).insert(self, propagate) else: dispatch_collection.\ for_modify(target.dispatch).append(self, propagate) @property def _listen_fn(self): return self.fn_wrap or self.fn def append_to_list(self, owner, list_): if _stored_in_collection(self, owner): list_.append(self._listen_fn) return True else: return False def remove_from_list(self, owner, list_): _removed_from_collection(self, owner) list_.remove(self._listen_fn) def prepend_to_list(self, owner, list_): if _stored_in_collection(self, owner): list_.appendleft(self._listen_fn) return True else: return False SQLAlchemy-1.0.11/lib/sqlalchemy/event/legacy.py0000664000175000017500000001326612636375552022517 0ustar classicclassic00000000000000# event/legacy.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Routines to handle adaption of legacy call signatures, generation of deprecation notes and docstrings. """ from .. 
import util def _legacy_signature(since, argnames, converter=None): def leg(fn): if not hasattr(fn, '_legacy_signatures'): fn._legacy_signatures = [] fn._legacy_signatures.append((since, argnames, converter)) return fn return leg def _wrap_fn_for_legacy(dispatch_collection, fn, argspec): for since, argnames, conv in dispatch_collection.legacy_signatures: if argnames[-1] == "**kw": has_kw = True argnames = argnames[0:-1] else: has_kw = False if len(argnames) == len(argspec.args) \ and has_kw is bool(argspec.keywords): if conv: assert not has_kw def wrap_leg(*args): return fn(*conv(*args)) else: def wrap_leg(*args, **kw): argdict = dict(zip(dispatch_collection.arg_names, args)) args = [argdict[name] for name in argnames] if has_kw: return fn(*args, **kw) else: return fn(*args) return wrap_leg else: return fn def _indent(text, indent): return "\n".join( indent + line for line in text.split("\n") ) def _standard_listen_example(dispatch_collection, sample_target, fn): example_kw_arg = _indent( "\n".join( "%(arg)s = kw['%(arg)s']" % {"arg": arg} for arg in dispatch_collection.arg_names[0:2] ), " ") if dispatch_collection.legacy_signatures: current_since = max(since for since, args, conv in dispatch_collection.legacy_signatures) else: current_since = None text = ( "from sqlalchemy import event\n\n" "# standard decorator style%(current_since)s\n" "@event.listens_for(%(sample_target)s, '%(event_name)s')\n" "def receive_%(event_name)s(" "%(named_event_arguments)s%(has_kw_arguments)s):\n" " \"listen for the '%(event_name)s' event\"\n" "\n # ... (event handling logic) ...\n" ) if len(dispatch_collection.arg_names) > 3: text += ( "\n# named argument style (new in 0.9)\n" "@event.listens_for(" "%(sample_target)s, '%(event_name)s', named=True)\n" "def receive_%(event_name)s(**kw):\n" " \"listen for the '%(event_name)s' event\"\n" "%(example_kw_arg)s\n" "\n # ... (event handling logic) ...\n" ) text %= { "current_since": " (arguments as of %s)" % current_since if current_since else "", "event_name": fn.__name__, "has_kw_arguments": ", **kw" if dispatch_collection.has_kw else "", "named_event_arguments": ", ".join(dispatch_collection.arg_names), "example_kw_arg": example_kw_arg, "sample_target": sample_target } return text def _legacy_listen_examples(dispatch_collection, sample_target, fn): text = "" for since, args, conv in dispatch_collection.legacy_signatures: text += ( "\n# legacy calling style (pre-%(since)s)\n" "@event.listens_for(%(sample_target)s, '%(event_name)s')\n" "def receive_%(event_name)s(" "%(named_event_arguments)s%(has_kw_arguments)s):\n" " \"listen for the '%(event_name)s' event\"\n" "\n # ... (event handling logic) ...\n" % { "since": since, "event_name": fn.__name__, "has_kw_arguments": " **kw" if dispatch_collection.has_kw else "", "named_event_arguments": ", ".join(args), "sample_target": sample_target } ) return text def _version_signature_changes(dispatch_collection): since, args, conv = dispatch_collection.legacy_signatures[0] return ( "\n.. versionchanged:: %(since)s\n" " The ``%(event_name)s`` event now accepts the \n" " arguments ``%(named_event_arguments)s%(has_kw_arguments)s``.\n" " Listener functions which accept the previous argument \n" " signature(s) listed above will be automatically \n" " adapted to the new signature." 
% { "since": since, "event_name": dispatch_collection.name, "named_event_arguments": ", ".join(dispatch_collection.arg_names), "has_kw_arguments": ", **kw" if dispatch_collection.has_kw else "" } ) def _augment_fn_docs(dispatch_collection, parent_dispatch_cls, fn): header = ".. container:: event_signatures\n\n"\ " Example argument forms::\n"\ "\n" sample_target = getattr(parent_dispatch_cls, "_target_class_doc", "obj") text = ( header + _indent( _standard_listen_example( dispatch_collection, sample_target, fn), " " * 8) ) if dispatch_collection.legacy_signatures: text += _indent( _legacy_listen_examples( dispatch_collection, sample_target, fn), " " * 8) text += _version_signature_changes(dispatch_collection) return util.inject_docstring_text(fn.__doc__, text, 1 ) SQLAlchemy-1.0.11/lib/sqlalchemy/event/__init__.py0000664000175000017500000000064312636375552023005 0ustar classicclassic00000000000000# event/__init__.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from .api import CANCEL, NO_RETVAL, listen, listens_for, remove, contains from .base import Events, dispatcher from .attr import RefCollection from .legacy import _legacy_signature SQLAlchemy-1.0.11/lib/sqlalchemy/testing/0000775000175000017500000000000012636376632021225 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/lib/sqlalchemy/testing/profiling.py0000664000175000017500000002006112636375552023567 0ustar classicclassic00000000000000# testing/profiling.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Profiling support for unit and performance tests. These are special purpose profiling methods which operate in a more fine-grained way than nose's profiling plugin. """ import os import sys from .util import gc_collect from . import config import pstats import collections import contextlib try: import cProfile except ImportError: cProfile = None from ..util import jython, pypy, win32, update_wrapper _current_test = None # ProfileStatsFile instance, set up in plugin_base _profile_stats = None class ProfileStatsFile(object): """"Store per-platform/fn profiling results in a file. We're still targeting Py2.5, 2.4 on 0.7 with no dependencies, so no json lib :( need to roll something silly """ def __init__(self, filename): self.force_write = ( config.options is not None and config.options.force_write_profiles ) self.write = self.force_write or ( config.options is not None and config.options.write_profiles ) self.fname = os.path.abspath(filename) self.short_fname = os.path.split(self.fname)[-1] self.data = collections.defaultdict( lambda: collections.defaultdict(dict)) self._read() if self.write: # rewrite for the case where features changed, # etc. self._write() @property def platform_key(self): dbapi_key = config.db.name + "_" + config.db.driver # keep it at 2.7, 3.1, 3.2, etc. for now. 
py_version = '.'.join([str(v) for v in sys.version_info[0:2]]) platform_tokens = [py_version] platform_tokens.append(dbapi_key) if jython: platform_tokens.append("jython") if pypy: platform_tokens.append("pypy") if win32: platform_tokens.append("win") _has_cext = config.requirements._has_cextensions() platform_tokens.append(_has_cext and "cextensions" or "nocextensions") return "_".join(platform_tokens) def has_stats(self): test_key = _current_test return ( test_key in self.data and self.platform_key in self.data[test_key] ) def result(self, callcount): test_key = _current_test per_fn = self.data[test_key] per_platform = per_fn[self.platform_key] if 'counts' not in per_platform: per_platform['counts'] = counts = [] else: counts = per_platform['counts'] if 'current_count' not in per_platform: per_platform['current_count'] = current_count = 0 else: current_count = per_platform['current_count'] has_count = len(counts) > current_count if not has_count: counts.append(callcount) if self.write: self._write() result = None else: result = per_platform['lineno'], counts[current_count] per_platform['current_count'] += 1 return result def replace(self, callcount): test_key = _current_test per_fn = self.data[test_key] per_platform = per_fn[self.platform_key] counts = per_platform['counts'] current_count = per_platform['current_count'] if current_count < len(counts): counts[current_count - 1] = callcount else: counts[-1] = callcount if self.write: self._write() def _header(self): return ( "# %s\n" "# This file is written out on a per-environment basis.\n" "# For each test in aaa_profiling, the corresponding " "function and \n" "# environment is located within this file. " "If it doesn't exist,\n" "# the test is skipped.\n" "# If a callcount does exist, it is compared " "to what we received. \n" "# assertions are raised if the counts do not match.\n" "# \n" "# To add a new callcount test, apply the function_call_count \n" "# decorator and re-run the tests using the --write-profiles \n" "# option - this file will be rewritten including the new count.\n" "# \n" ) % (self.fname) def _read(self): try: profile_f = open(self.fname) except IOError: return for lineno, line in enumerate(profile_f): line = line.strip() if not line or line.startswith("#"): continue test_key, platform_key, counts = line.split() per_fn = self.data[test_key] per_platform = per_fn[platform_key] c = [int(count) for count in counts.split(",")] per_platform['counts'] = c per_platform['lineno'] = lineno + 1 per_platform['current_count'] = 0 profile_f.close() def _write(self): print(("Writing profile file %s" % self.fname)) profile_f = open(self.fname, "w") profile_f.write(self._header()) for test_key in sorted(self.data): per_fn = self.data[test_key] profile_f.write("\n# TEST: %s\n\n" % test_key) for platform_key in sorted(per_fn): per_platform = per_fn[platform_key] c = ",".join(str(count) for count in per_platform['counts']) profile_f.write("%s %s %s\n" % (test_key, platform_key, c)) profile_f.close() def function_call_count(variance=0.05): """Assert a target for a test case's function call count. The main purpose of this assertion is to detect changes in callcounts for various functions - the actual number is not as important. Callcounts are stored in a file keyed to Python version and OS platform information. This file is generated automatically for new tests, and versioned so that unexpected changes in callcounts will be detected. 
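    A usage sketch (the test body and variance value are illustrative)::

        @function_call_count(variance=0.1)
        def test_some_operation(self):
            # every Python function call made in here is counted and
            # compared against the stored figure for this platform
            do_the_operation()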
""" def decorate(fn): def wrap(*args, **kw): with count_functions(variance=variance): return fn(*args, **kw) return update_wrapper(wrap, fn) return decorate @contextlib.contextmanager def count_functions(variance=0.05): if cProfile is None: raise SkipTest("cProfile is not installed") if not _profile_stats.has_stats() and not _profile_stats.write: config.skip_test( "No profiling stats available on this " "platform for this function. Run tests with " "--write-profiles to add statistics to %s for " "this platform." % _profile_stats.short_fname) gc_collect() pr = cProfile.Profile() pr.enable() #began = time.time() yield #ended = time.time() pr.disable() #s = compat.StringIO() stats = pstats.Stats(pr, stream=sys.stdout) #timespent = ended - began callcount = stats.total_calls expected = _profile_stats.result(callcount) if expected is None: expected_count = None else: line_no, expected_count = expected print(("Pstats calls: %d Expected %s" % ( callcount, expected_count ) )) stats.sort_stats("cumulative") stats.print_stats() if expected_count: deviance = int(callcount * variance) failed = abs(callcount - expected_count) > deviance if failed or _profile_stats.force_write: if _profile_stats.write: _profile_stats.replace(callcount) else: raise AssertionError( "Adjusted function call count %s not within %s%% " "of expected %s, platform %s. Rerun with " "--write-profiles to " "regenerate this callcount." % ( callcount, (variance * 100), expected_count, _profile_stats.platform_key)) SQLAlchemy-1.0.11/lib/sqlalchemy/testing/distutils_run.py0000664000175000017500000000036712636375552024515 0ustar classicclassic00000000000000"""Quick and easy way to get setup.py test to run py.test without any custom setuptools/distutils code. """ import unittest import pytest class TestSuite(unittest.TestCase): def test_sqlalchemy(self): pytest.main(["-n", "4", "-q"]) SQLAlchemy-1.0.11/lib/sqlalchemy/testing/requirements.py0000664000175000017500000004675512636375552024343 0ustar classicclassic00000000000000# testing/requirements.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Global database feature support policy. Provides decorators to mark tests requiring specific feature support from the target database. External dialect test suites should subclass SuiteRequirements to provide specific inclusion/exclusions. """ from . import exclusions from .. import util class Requirements(object): pass class SuiteRequirements(Requirements): @property def create_table(self): """target platform can emit basic CreateTable DDL.""" return exclusions.open() @property def drop_table(self): """target platform can emit basic DropTable DDL.""" return exclusions.open() @property def foreign_keys(self): """Target database must support foreign keys.""" return exclusions.open() @property def on_update_cascade(self): """"target database must support ON UPDATE..CASCADE behavior in foreign keys.""" return exclusions.open() @property def non_updating_cascade(self): """target database must *not* support ON UPDATE..CASCADE behavior in foreign keys.""" return exclusions.closed() @property def deferrable_fks(self): return exclusions.closed() @property def on_update_or_deferrable_fks(self): # TODO: exclusions should be composable, # somehow only_if([x, y]) isn't working here, negation/conjunctions # getting confused. 
return exclusions.only_if( lambda: self.on_update_cascade.enabled or self.deferrable_fks.enabled ) @property def self_referential_foreign_keys(self): """Target database must support self-referential foreign keys.""" return exclusions.open() @property def foreign_key_ddl(self): """Target database must support the DDL phrases for FOREIGN KEY.""" return exclusions.open() @property def named_constraints(self): """target database must support names for constraints.""" return exclusions.open() @property def subqueries(self): """Target database must support subqueries.""" return exclusions.open() @property def offset(self): """target database can render OFFSET, or an equivalent, in a SELECT. """ return exclusions.open() @property def bound_limit_offset(self): """target database can render LIMIT and/or OFFSET using a bound parameter """ return exclusions.open() @property def boolean_col_expressions(self): """Target database must support boolean expressions as columns""" return exclusions.closed() @property def nullsordering(self): """Target backends that support nulls ordering.""" return exclusions.closed() @property def standalone_binds(self): """target database/driver supports bound parameters as column expressions without being in the context of a typed column. """ return exclusions.closed() @property def intersect(self): """Target database must support INTERSECT or equivalent.""" return exclusions.closed() @property def except_(self): """Target database must support EXCEPT or equivalent (i.e. MINUS).""" return exclusions.closed() @property def window_functions(self): """Target database must support window functions.""" return exclusions.closed() @property def autoincrement_insert(self): """target platform generates new surrogate integer primary key values when insert() is executed, excluding the pk column.""" return exclusions.open() @property def fetch_rows_post_commit(self): """target platform will allow cursor.fetchone() to proceed after a COMMIT. Typically this refers to an INSERT statement with RETURNING which is invoked within "autocommit". If the row can be returned after the autocommit, then this rule can be open. """ return exclusions.open() @property def empty_inserts(self): """target platform supports INSERT with no values, i.e. INSERT DEFAULT VALUES or equivalent.""" return exclusions.only_if( lambda config: config.db.dialect.supports_empty_insert or config.db.dialect.supports_default_values, "empty inserts not supported" ) @property def insert_from_select(self): """target platform supports INSERT from a SELECT.""" return exclusions.open() @property def returning(self): """target platform supports RETURNING.""" return exclusions.only_if( lambda config: config.db.dialect.implicit_returning, "%(database)s %(does_support)s 'returning'" ) @property def duplicate_names_in_cursor_description(self): """target platform supports a SELECT statement that has the same name repeated more than once in the columns list.""" return exclusions.open() @property def denormalized_names(self): """Target database must have 'denormalized', i.e. UPPERCASE as case insensitive names.""" return exclusions.skip_if( lambda config: not config.db.dialect.requires_name_normalize, "Backend does not require denormalized names." ) @property def multivalues_inserts(self): """target database must support multiple VALUES clauses in an INSERT statement.""" return exclusions.skip_if( lambda config: not config.db.dialect.supports_multivalues_insert, "Backend does not support multirow inserts." 
) @property def implements_get_lastrowid(self): """target dialect implements the ExecutionContext.get_lastrowid() method without reliance on RETURNING. """ return exclusions.open() @property def emulated_lastrowid(self): """target dialect retrieves cursor.lastrowid, or fetches from a database-side function after an insert() construct executes, within the get_lastrowid() method. Only dialects that "pre-execute", or need RETURNING to get last inserted id, would return closed/fail/skip for this. """ return exclusions.closed() @property def dbapi_lastrowid(self): """target platform includes a 'lastrowid' accessor on the DBAPI cursor object. """ return exclusions.closed() @property def views(self): """Target database must support VIEWs.""" return exclusions.closed() @property def schemas(self): """Target database must support external schemas, and have one named 'test_schema'.""" return exclusions.closed() @property def sequences(self): """Target database must support SEQUENCEs.""" return exclusions.only_if([ lambda config: config.db.dialect.supports_sequences ], "no sequence support") @property def sequences_optional(self): """Target database supports sequences, but also optionally as a means of generating new PK values.""" return exclusions.only_if([ lambda config: config.db.dialect.supports_sequences and config.db.dialect.sequences_optional ], "no sequence support, or sequences not optional") @property def reflects_pk_names(self): return exclusions.closed() @property def table_reflection(self): return exclusions.open() @property def view_column_reflection(self): """target database must support retrieval of the columns in a view, similarly to how a table is inspected. This does not include the full CREATE VIEW definition. """ return self.views @property def view_reflection(self): """target database must support inspection of the full CREATE VIEW definition. """ return self.views @property def schema_reflection(self): return self.schemas @property def primary_key_constraint_reflection(self): return exclusions.open() @property def foreign_key_constraint_reflection(self): return exclusions.open() @property def temp_table_reflection(self): return exclusions.open() @property def temp_table_names(self): """target dialect supports listing of temporary table names""" return exclusions.closed() @property def temporary_tables(self): """target database supports temporary tables""" return exclusions.open() @property def temporary_views(self): """target database supports temporary views""" return exclusions.closed() @property def index_reflection(self): return exclusions.open() @property def unique_constraint_reflection(self): """target dialect supports reflection of unique constraints""" return exclusions.open() @property def duplicate_key_raises_integrity_error(self): """target dialect raises IntegrityError when reporting an INSERT with a primary key violation. (hint: it should) """ return exclusions.open() @property def unbounded_varchar(self): """Target database must support VARCHAR with no length""" return exclusions.open() @property def unicode_data(self): """Target database/dialect must support Python unicode objects with non-ASCII characters represented, delivered as bound parameters as well as in result rows. """ return exclusions.open() @property def unicode_ddl(self): """Target driver must support some degree of non-ASCII symbol names.
""" return exclusions.closed() @property def datetime_literals(self): """target dialect supports rendering of a date, time, or datetime as a literal string, e.g. via the TypeEngine.literal_processor() method. """ return exclusions.closed() @property def datetime(self): """target dialect supports representation of Python datetime.datetime() objects.""" return exclusions.open() @property def datetime_microseconds(self): """target dialect supports representation of Python datetime.datetime() with microsecond objects.""" return exclusions.open() @property def datetime_historic(self): """target dialect supports representation of Python datetime.datetime() objects with historic (pre 1970) values.""" return exclusions.closed() @property def date(self): """target dialect supports representation of Python datetime.date() objects.""" return exclusions.open() @property def date_coerces_from_datetime(self): """target dialect accepts a datetime object as the target of a date column.""" return exclusions.open() @property def date_historic(self): """target dialect supports representation of Python datetime.datetime() objects with historic (pre 1970) values.""" return exclusions.closed() @property def time(self): """target dialect supports representation of Python datetime.time() objects.""" return exclusions.open() @property def time_microseconds(self): """target dialect supports representation of Python datetime.time() with microsecond objects.""" return exclusions.open() @property def binary_comparisons(self): """target database/driver can allow BLOB/BINARY fields to be compared against a bound parameter value. """ return exclusions.open() @property def binary_literals(self): """target backend supports simple binary literals, e.g. an expression like:: SELECT CAST('foo' AS BINARY) Where ``BINARY`` is the type emitted from :class:`.LargeBinary`, e.g. it could be ``BLOB`` or similar. Basically fails on Oracle. """ return exclusions.open() @property def precision_numerics_general(self): """target backend has general support for moderately high-precision numerics.""" return exclusions.open() @property def precision_numerics_enotation_small(self): """target backend supports Decimal() objects using E notation to represent very small values.""" return exclusions.closed() @property def precision_numerics_enotation_large(self): """target backend supports Decimal() objects using E notation to represent very large values.""" return exclusions.closed() @property def precision_numerics_many_significant_digits(self): """target backend supports values with many digits on both sides, such as 319438950232418390.273596, 87673.594069654243 """ return exclusions.closed() @property def precision_numerics_retains_significant_digits(self): """A precision numeric type will return empty significant digits, i.e. a value such as 10.000 will come back in Decimal form with the .000 maintained.""" return exclusions.closed() @property def precision_generic_float_type(self): """target backend will return native floating point numbers with at least seven decimal places when using the generic Float type. """ return exclusions.open() @property def floats_to_four_decimals(self): """target backend can return a floating-point number with four significant digits (such as 15.7563) accurately (i.e. without FP inaccuracies, such as 15.75629997253418). """ return exclusions.open() @property def fetch_null_from_numeric(self): """target backend doesn't crash when you try to select a NUMERIC value that has a value of NULL. 
Added to support Pyodbc bug #351. """ return exclusions.open() @property def text_type(self): """Target database must support an unbounded Text() type such as TEXT or CLOB.""" return exclusions.open() @property def empty_strings_varchar(self): """target database can persist/return an empty string with a varchar. """ return exclusions.open() @property def empty_strings_text(self): """target database can persist/return an empty string with an unbounded text.""" return exclusions.open() @property def selectone(self): """target driver must support the literal statement 'select 1'""" return exclusions.open() @property def savepoints(self): """Target database must support savepoints.""" return exclusions.closed() @property def two_phase_transactions(self): """Target database must support two-phase transactions.""" return exclusions.closed() @property def update_from(self): """Target must support UPDATE..FROM syntax""" return exclusions.closed() @property def update_where_target_in_subquery(self): """Target must support UPDATE where the same table is present in a subquery in the WHERE clause. This is an ANSI-standard syntax that apparently MySQL can't handle, such as:: UPDATE documents SET flag=1 WHERE documents.title IN (SELECT max(documents.title) AS title FROM documents GROUP BY documents.user_id ) """ return exclusions.open() @property def mod_operator_as_percent_sign(self): """target database must use a plain percent '%' as the 'modulus' operator.""" return exclusions.closed() @property def percent_schema_names(self): """target backend supports unusual identifiers with percent signs in them, e.g. 'some % column'. This is a very unusual use case, but it often has problems because of DBAPIs that use Python string formatting. It's not a critical use case either. """ return exclusions.closed() @property def order_by_label_with_expression(self): """target backend supports ORDER BY a column label within an expression. Basically this:: select data as foo from test order by foo || 'bar' Lots of databases including Postgresql don't support this, so this is off by default. """ return exclusions.closed() @property def unicode_connections(self): """Target driver must support non-ASCII characters being passed at all. """ return exclusions.open() @property def graceful_disconnects(self): """Target driver must raise a DBAPI-level exception, such as InterfaceError, when the underlying connection has been closed and the execute() method is called. """ return exclusions.open() @property def skip_mysql_on_windows(self): """Catchall for a large variety of MySQL on Windows failures""" return exclusions.open() @property def ad_hoc_engines(self): """Test environment must allow ad-hoc engine/connection creation. DBs that scale poorly for many connections, even when closed, e.g. Oracle, may use the "--low-connections" option which flags this requirement as not present. """ return exclusions.skip_if( lambda config: config.options.low_connections) @property def timing_intensive(self): return exclusions.requires_tag("timing_intensive") @property def memory_intensive(self): return exclusions.requires_tag("memory_intensive") @property def threading_with_mock(self): """Mark tests that use threading and mock at the same time - stability issues have been observed with coverage + python 3.3 """ return exclusions.skip_if( lambda config: util.py3k and config.options.has_coverage, "Stability issues with coverage + py3k" ) @property def no_coverage(self): """Test should be skipped if coverage is enabled.
This is to block tests that exercise libraries that seem to be sensitive to coverage, such as Postgresql notice logging. """ return exclusions.skip_if( lambda config: config.options.has_coverage, "Issues observed when coverage is enabled" ) def _has_mysql_on_windows(self, config): return False def _has_mysql_fully_case_sensitive(self, config): return False @property def sqlite(self): return exclusions.skip_if(lambda: not self._has_sqlite()) @property def cextensions(self): return exclusions.skip_if( lambda: not self._has_cextensions(), "C extensions not installed" ) def _has_sqlite(self): from sqlalchemy import create_engine try: create_engine('sqlite://') return True except ImportError: return False def _has_cextensions(self): try: from sqlalchemy import cresultproxy, cprocessors return True except ImportError: return False SQLAlchemy-1.0.11/lib/sqlalchemy/testing/mock.py0000664000175000017500000000116612636375552022534 0ustar classicclassic00000000000000# testing/mock.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Import stub for mock library. """ from __future__ import absolute_import from ..util import py33 if py33: from unittest.mock import MagicMock, Mock, call, patch, ANY else: try: from mock import MagicMock, Mock, call, patch, ANY except ImportError: raise ImportError( "SQLAlchemy's test suite requires the " "'mock' library as of 0.8.2.") SQLAlchemy-1.0.11/lib/sqlalchemy/testing/exclusions.py0000664000175000017500000003043212636375552023775 0ustar classicclassic00000000000000# testing/exclusions.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import operator from ..util import decorator from . import config from .. 
import util import inspect import contextlib from sqlalchemy.util.compat import inspect_getargspec def skip_if(predicate, reason=None): rule = compound() pred = _as_predicate(predicate, reason) rule.skips.add(pred) return rule def fails_if(predicate, reason=None): rule = compound() pred = _as_predicate(predicate, reason) rule.fails.add(pred) return rule class compound(object): def __init__(self): self.fails = set() self.skips = set() self.tags = set() def __add__(self, other): return self.add(other) def add(self, *others): copy = compound() copy.fails.update(self.fails) copy.skips.update(self.skips) copy.tags.update(self.tags) for other in others: copy.fails.update(other.fails) copy.skips.update(other.skips) copy.tags.update(other.tags) return copy def not_(self): copy = compound() copy.fails.update(NotPredicate(fail) for fail in self.fails) copy.skips.update(NotPredicate(skip) for skip in self.skips) copy.tags.update(self.tags) return copy @property def enabled(self): return self.enabled_for_config(config._current) def enabled_for_config(self, config): for predicate in self.skips.union(self.fails): if predicate(config): return False else: return True def matching_config_reasons(self, config): return [ predicate._as_string(config) for predicate in self.skips.union(self.fails) if predicate(config) ] def include_test(self, include_tags, exclude_tags): return bool( not self.tags.intersection(exclude_tags) and (not include_tags or self.tags.intersection(include_tags)) ) def _extend(self, other): self.skips.update(other.skips) self.fails.update(other.fails) self.tags.update(other.tags) def __call__(self, fn): if hasattr(fn, '_sa_exclusion_extend'): fn._sa_exclusion_extend._extend(self) return fn @decorator def decorate(fn, *args, **kw): return self._do(config._current, fn, *args, **kw) decorated = decorate(fn) decorated._sa_exclusion_extend = self return decorated @contextlib.contextmanager def fail_if(self): all_fails = compound() all_fails.fails.update(self.skips.union(self.fails)) try: yield except Exception as ex: all_fails._expect_failure(config._current, ex) else: all_fails._expect_success(config._current) def _do(self, config, fn, *args, **kw): for skip in self.skips: if skip(config): msg = "'%s' : %s" % ( fn.__name__, skip._as_string(config) ) config.skip_test(msg) try: return_value = fn(*args, **kw) except Exception as ex: self._expect_failure(config, ex, name=fn.__name__) else: self._expect_success(config, name=fn.__name__) return return_value def _expect_failure(self, config, ex, name='block'): for fail in self.fails: if fail(config): print(("%s failed as expected (%s): %s " % ( name, fail._as_string(config), str(ex)))) break else: util.raise_from_cause(ex) def _expect_success(self, config, name='block'): if not self.fails: return for fail in self.fails: if not fail(config): break else: raise AssertionError( "Unexpected success for '%s' (%s)" % ( name, " and ".join( fail._as_string(config) for fail in self.fails ) ) ) def requires_tag(tagname): return tags([tagname]) def tags(tagnames): comp = compound() comp.tags.update(tagnames) return comp def only_if(predicate, reason=None): predicate = _as_predicate(predicate) return skip_if(NotPredicate(predicate), reason) def succeeds_if(predicate, reason=None): predicate = _as_predicate(predicate) return fails_if(NotPredicate(predicate), reason) class Predicate(object): @classmethod def as_predicate(cls, predicate, description=None): if isinstance(predicate, compound): return cls.as_predicate(predicate.enabled_for_config, description) elif 
isinstance(predicate, Predicate): if description and predicate.description is None: predicate.description = description return predicate elif isinstance(predicate, (list, set)): return OrPredicate( [cls.as_predicate(pred) for pred in predicate], description) elif isinstance(predicate, tuple): return SpecPredicate(*predicate) elif isinstance(predicate, util.string_types): tokens = predicate.split(" ", 2) op = spec = None db = tokens.pop(0) if tokens: op = tokens.pop(0) if tokens: spec = tuple(int(d) for d in tokens.pop(0).split(".")) return SpecPredicate(db, op, spec, description=description) elif util.callable(predicate): return LambdaPredicate(predicate, description) else: assert False, "unknown predicate type: %s" % predicate def _format_description(self, config, negate=False): bool_ = self(config) if negate: bool_ = not negate return self.description % { "driver": config.db.url.get_driver_name(), "database": config.db.url.get_backend_name(), "doesnt_support": "doesn't support" if bool_ else "does support", "does_support": "does support" if bool_ else "doesn't support" } def _as_string(self, config=None, negate=False): raise NotImplementedError() class BooleanPredicate(Predicate): def __init__(self, value, description=None): self.value = value self.description = description or "boolean %s" % value def __call__(self, config): return self.value def _as_string(self, config, negate=False): return self._format_description(config, negate=negate) class SpecPredicate(Predicate): def __init__(self, db, op=None, spec=None, description=None): self.db = db self.op = op self.spec = spec self.description = description _ops = { '<': operator.lt, '>': operator.gt, '==': operator.eq, '!=': operator.ne, '<=': operator.le, '>=': operator.ge, 'in': operator.contains, 'between': lambda val, pair: val >= pair[0] and val <= pair[1], } def __call__(self, config): engine = config.db if "+" in self.db: dialect, driver = self.db.split('+') else: dialect, driver = self.db, None if dialect and engine.name != dialect: return False if driver is not None and engine.driver != driver: return False if self.op is not None: assert driver is None, "DBAPI version specs not supported yet" version = _server_version(engine) oper = hasattr(self.op, '__call__') and self.op \ or self._ops[self.op] return oper(version, self.spec) else: return True def _as_string(self, config, negate=False): if self.description is not None: return self._format_description(config) elif self.op is None: if negate: return "not %s" % self.db else: return "%s" % self.db else: if negate: return "not %s %s %s" % ( self.db, self.op, self.spec ) else: return "%s %s %s" % ( self.db, self.op, self.spec ) class LambdaPredicate(Predicate): def __init__(self, lambda_, description=None, args=None, kw=None): spec = inspect_getargspec(lambda_) if not spec[0]: self.lambda_ = lambda db: lambda_() else: self.lambda_ = lambda_ self.args = args or () self.kw = kw or {} if description: self.description = description elif lambda_.__doc__: self.description = lambda_.__doc__ else: self.description = "custom function" def __call__(self, config): return self.lambda_(config) def _as_string(self, config, negate=False): return self._format_description(config) class NotPredicate(Predicate): def __init__(self, predicate, description=None): self.predicate = predicate self.description = description def __call__(self, config): return not self.predicate(config) def _as_string(self, config, negate=False): if self.description: return self._format_description(config, not negate) else: 
return self.predicate._as_string(config, not negate) class OrPredicate(Predicate): def __init__(self, predicates, description=None): self.predicates = predicates self.description = description def __call__(self, config): for pred in self.predicates: if pred(config): return True return False def _eval_str(self, config, negate=False): if negate: conjunction = " and " else: conjunction = " or " return conjunction.join(p._as_string(config, negate=negate) for p in self.predicates) def _negation_str(self, config): if self.description is not None: return "Not " + self._format_description(config) else: return self._eval_str(config, negate=True) def _as_string(self, config, negate=False): if negate: return self._negation_str(config) else: if self.description is not None: return self._format_description(config) else: return self._eval_str(config) _as_predicate = Predicate.as_predicate def _is_excluded(db, op, spec): return SpecPredicate(db, op, spec)(config._current) def _server_version(engine): """Return a server_version_info tuple.""" # force metadata to be retrieved conn = engine.connect() version = getattr(engine.dialect, 'server_version_info', ()) conn.close() return version def db_spec(*dbs): return OrPredicate( [Predicate.as_predicate(db) for db in dbs] ) def open(): return skip_if(BooleanPredicate(False, "mark as execute")) def closed(): return skip_if(BooleanPredicate(True, "marked as skip")) def fails(reason=None): return fails_if(BooleanPredicate(True, reason or "expected to fail")) @decorator def future(fn, *arg): return fails_if(LambdaPredicate(fn), "Future feature") def fails_on(db, reason=None): return fails_if(SpecPredicate(db), reason) def fails_on_everything_except(*dbs): return succeeds_if( OrPredicate([ SpecPredicate(db) for db in dbs ]) ) def skip(db, reason=None): return skip_if(SpecPredicate(db), reason) def only_on(dbs, reason=None): return only_if( OrPredicate([Predicate.as_predicate(db) for db in util.to_list(dbs)]) ) def exclude(db, op, spec, reason=None): return skip_if(SpecPredicate(db, op, spec), reason) def against(config, *queries): assert queries, "no queries sent!" return OrPredicate([ Predicate.as_predicate(query) for query in queries ])(config) SQLAlchemy-1.0.11/lib/sqlalchemy/testing/runner.py0000664000175000017500000000310712636375552023111 0ustar classicclassic00000000000000#!/usr/bin/env python # testing/runner.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ Nose test runner module. This script is a front-end to "nosetests" which installs SQLAlchemy's testing plugin into the local environment. The script is intended to be used by third-party dialects and extensions that run within SQLAlchemy's testing framework. The runner can be invoked via:: python -m sqlalchemy.testing.runner The script is then essentially the same as the "nosetests" script, including all of the usual Nose options. The test environment requires that a setup.cfg is locally present including various required options. Note that when using this runner, Nose's "coverage" plugin will not be able to provide coverage for SQLAlchemy itself, since SQLAlchemy is imported into sys.modules before coverage is started. The special script sqla_nose.py is provided as a top-level script which loads the plugin in a special (somewhat hacky) way so that coverage against SQLAlchemy itself is possible. 
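A third-party project would typically point its setup.py at
:func:`.setup_py_test` below, e.g. (an illustrative snippet; the
project name is hypothetical)::

    setup(
        name="sqlalchemy-somedialect",
        # ...
        test_suite="sqlalchemy.testing.runner.setup_py_test",
    )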
""" from .plugin.noseplugin import NoseSQLAlchemy import nose def main(): nose.main(addplugins=[NoseSQLAlchemy()]) def setup_py_test(): """Runner to use for the 'test_suite' entry of your setup.py. Prevents any name clash shenanigans from the command line argument "test" that the "setup.py test" command sends to nose. """ nose.main(addplugins=[NoseSQLAlchemy()], argv=['runner']) SQLAlchemy-1.0.11/lib/sqlalchemy/testing/config.py0000664000175000017500000000464512636375552023055 0ustar classicclassic00000000000000# testing/config.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import collections requirements = None db = None db_url = None db_opts = None file_config = None test_schema = None test_schema_2 = None _current = None _skip_test_exception = None class Config(object): def __init__(self, db, db_opts, options, file_config): self.db = db self.db_opts = db_opts self.options = options self.file_config = file_config self.test_schema = "test_schema" self.test_schema_2 = "test_schema_2" _stack = collections.deque() _configs = {} @classmethod def register(cls, db, db_opts, options, file_config): """add a config as one of the global configs. If there are no configs set up yet, this config also gets set as the "_current". """ cfg = Config(db, db_opts, options, file_config) cls._configs[cfg.db.name] = cfg cls._configs[(cfg.db.name, cfg.db.dialect)] = cfg cls._configs[cfg.db] = cfg return cfg @classmethod def set_as_current(cls, config, namespace): global db, _current, db_url, test_schema, test_schema_2, db_opts _current = config db_url = config.db.url db_opts = config.db_opts test_schema = config.test_schema test_schema_2 = config.test_schema_2 namespace.db = db = config.db @classmethod def push_engine(cls, db, namespace): assert _current, "Can't push without a default Config set up" cls.push( Config( db, _current.db_opts, _current.options, _current.file_config), namespace ) @classmethod def push(cls, config, namespace): cls._stack.append(_current) cls.set_as_current(config, namespace) @classmethod def reset(cls, namespace): if cls._stack: cls.set_as_current(cls._stack[0], namespace) cls._stack.clear() @classmethod def all_configs(cls): for cfg in set(cls._configs.values()): yield cfg @classmethod def all_dbs(cls): for cfg in cls.all_configs(): yield cfg.db def skip_test(self, msg): skip_test(msg) def skip_test(msg): raise _skip_test_exception(msg) SQLAlchemy-1.0.11/lib/sqlalchemy/testing/suite/0000775000175000017500000000000012636376632022356 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/lib/sqlalchemy/testing/suite/test_update_delete.py0000664000175000017500000000305612636375552026577 0ustar classicclassic00000000000000from .. 
import fixtures, config from ..assertions import eq_ from sqlalchemy import Integer, String from ..schema import Table, Column class SimpleUpdateDeleteTest(fixtures.TablesTest): run_deletes = 'each' __backend__ = True @classmethod def define_tables(cls, metadata): Table('plain_pk', metadata, Column('id', Integer, primary_key=True), Column('data', String(50)) ) @classmethod def insert_data(cls): config.db.execute( cls.tables.plain_pk.insert(), [ {"id": 1, "data": "d1"}, {"id": 2, "data": "d2"}, {"id": 3, "data": "d3"}, ] ) def test_update(self): t = self.tables.plain_pk r = config.db.execute( t.update().where(t.c.id == 2), data="d2_new" ) assert not r.is_insert assert not r.returns_rows eq_( config.db.execute(t.select().order_by(t.c.id)).fetchall(), [ (1, "d1"), (2, "d2_new"), (3, "d3") ] ) def test_delete(self): t = self.tables.plain_pk r = config.db.execute( t.delete().where(t.c.id == 2) ) assert not r.is_insert assert not r.returns_rows eq_( config.db.execute(t.select().order_by(t.c.id)).fetchall(), [ (1, "d1"), (3, "d3") ] ) __all__ = ('SimpleUpdateDeleteTest', ) SQLAlchemy-1.0.11/lib/sqlalchemy/testing/suite/test_dialect.py0000664000175000017500000000221512636375552025374 0ustar classicclassic00000000000000from .. import fixtures, config from ..config import requirements from sqlalchemy import exc from sqlalchemy import Integer, String from .. import assert_raises from ..schema import Table, Column class ExceptionTest(fixtures.TablesTest): """Test basic exception wrapping. DBAPIs vary a lot in exception behavior so to actually anticipate specific exceptions from real round trips, we need to be conservative. """ run_deletes = 'each' __backend__ = True @classmethod def define_tables(cls, metadata): Table('manual_pk', metadata, Column('id', Integer, primary_key=True, autoincrement=False), Column('data', String(50)) ) @requirements.duplicate_key_raises_integrity_error def test_integrity_error(self): with config.db.begin() as conn: conn.execute( self.tables.manual_pk.insert(), {'id': 1, 'data': 'd1'} ) assert_raises( exc.IntegrityError, conn.execute, self.tables.manual_pk.insert(), {'id': 1, 'data': 'd1'} ) SQLAlchemy-1.0.11/lib/sqlalchemy/testing/suite/test_insert.py0000664000175000017500000001770412636375552025304 0ustar classicclassic00000000000000from .. import fixtures, config from ..config import requirements from .. import exclusions from ..assertions import eq_ from .. 
import engines from sqlalchemy import Integer, String, select, literal_column, literal from ..schema import Table, Column class LastrowidTest(fixtures.TablesTest): run_deletes = 'each' __backend__ = True __requires__ = 'implements_get_lastrowid', 'autoincrement_insert' __engine_options__ = {"implicit_returning": False} @classmethod def define_tables(cls, metadata): Table('autoinc_pk', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50)) ) Table('manual_pk', metadata, Column('id', Integer, primary_key=True, autoincrement=False), Column('data', String(50)) ) def _assert_round_trip(self, table, conn): row = conn.execute(table.select()).first() eq_( row, (config.db.dialect.default_sequence_base, "some data") ) def test_autoincrement_on_insert(self): config.db.execute( self.tables.autoinc_pk.insert(), data="some data" ) self._assert_round_trip(self.tables.autoinc_pk, config.db) def test_last_inserted_id(self): r = config.db.execute( self.tables.autoinc_pk.insert(), data="some data" ) pk = config.db.scalar(select([self.tables.autoinc_pk.c.id])) eq_( r.inserted_primary_key, [pk] ) # failed on pypy1.9 but seems to be OK on pypy 2.1 # @exclusions.fails_if(lambda: util.pypy, # "lastrowid not maintained after " # "connection close") @requirements.dbapi_lastrowid def test_native_lastrowid_autoinc(self): r = config.db.execute( self.tables.autoinc_pk.insert(), data="some data" ) lastrowid = r.lastrowid pk = config.db.scalar(select([self.tables.autoinc_pk.c.id])) eq_( lastrowid, pk ) class InsertBehaviorTest(fixtures.TablesTest): run_deletes = 'each' __backend__ = True @classmethod def define_tables(cls, metadata): Table('autoinc_pk', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50)) ) Table('manual_pk', metadata, Column('id', Integer, primary_key=True, autoincrement=False), Column('data', String(50)) ) Table('includes_defaults', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50)), Column('x', Integer, default=5), Column('y', Integer, default=literal_column("2", type_=Integer) + literal(2))) def test_autoclose_on_insert(self): if requirements.returning.enabled: engine = engines.testing_engine( options={'implicit_returning': False}) else: engine = config.db r = engine.execute( self.tables.autoinc_pk.insert(), data="some data" ) assert r._soft_closed assert not r.closed assert r.is_insert assert not r.returns_rows @requirements.returning def test_autoclose_on_insert_implicit_returning(self): r = config.db.execute( self.tables.autoinc_pk.insert(), data="some data" ) assert r._soft_closed assert not r.closed assert r.is_insert assert not r.returns_rows @requirements.empty_inserts def test_empty_insert(self): r = config.db.execute( self.tables.autoinc_pk.insert(), ) assert r._soft_closed assert not r.closed r = config.db.execute( self.tables.autoinc_pk.select(). where(self.tables.autoinc_pk.c.id != None) ) assert len(r.fetchall()) @requirements.insert_from_select def test_insert_from_select(self): table = self.tables.manual_pk config.db.execute( table.insert(), [ dict(id=1, data="data1"), dict(id=2, data="data2"), dict(id=3, data="data3"), ] ) config.db.execute( table.insert(inline=True). from_select(("id", "data",), select([table.c.id + 5, table.c.data]). 
where(table.c.data.in_(["data2", "data3"]))
                        ),
        )

        eq_(
            config.db.execute(
                select([table.c.data]).order_by(table.c.data)
            ).fetchall(),
            [("data1", ), ("data2", ), ("data2", ),
             ("data3", ), ("data3", )]
        )

    @requirements.insert_from_select
    def test_insert_from_select_with_defaults(self):
        table = self.tables.includes_defaults
        config.db.execute(
            table.insert(),
            [
                dict(id=1, data="data1"),
                dict(id=2, data="data2"),
                dict(id=3, data="data3"),
            ]
        )

        config.db.execute(
            table.insert(inline=True).
            from_select(("id", "data",),
                        select([table.c.id + 5, table.c.data]).
                        where(table.c.data.in_(["data2", "data3"]))
                        ),
        )

        eq_(
            config.db.execute(
                select([table]).order_by(table.c.data, table.c.id)
            ).fetchall(),
            [(1, 'data1', 5, 4), (2, 'data2', 5, 4), (7, 'data2', 5, 4),
             (3, 'data3', 5, 4), (8, 'data3', 5, 4)]
        )


class ReturningTest(fixtures.TablesTest):
    run_create_tables = 'each'
    __requires__ = 'returning', 'autoincrement_insert'
    __backend__ = True
    __engine_options__ = {"implicit_returning": True}

    def _assert_round_trip(self, table, conn):
        row = conn.execute(table.select()).first()
        eq_(
            row,
            (config.db.dialect.default_sequence_base, "some data")
        )

    @classmethod
    def define_tables(cls, metadata):
        Table('autoinc_pk', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(50))
              )

    @requirements.fetch_rows_post_commit
    def test_explicit_returning_pk_autocommit(self):
        engine = config.db
        table = self.tables.autoinc_pk
        r = engine.execute(
            table.insert().returning(
                table.c.id),
            data="some data"
        )
        pk = r.first()[0]
        fetched_pk = config.db.scalar(select([table.c.id]))
        eq_(fetched_pk, pk)

    def test_explicit_returning_pk_no_autocommit(self):
        engine = config.db
        table = self.tables.autoinc_pk
        with engine.begin() as conn:
            r = conn.execute(
                table.insert().returning(
                    table.c.id),
                data="some data"
            )
            pk = r.first()[0]
        fetched_pk = config.db.scalar(select([table.c.id]))
        eq_(fetched_pk, pk)

    def test_autoincrement_on_insert_implicit_returning(self):
        config.db.execute(
            self.tables.autoinc_pk.insert(),
            data="some data"
        )
        self._assert_round_trip(self.tables.autoinc_pk, config.db)

    def test_last_inserted_id_implicit_returning(self):
        r = config.db.execute(
            self.tables.autoinc_pk.insert(),
            data="some data"
        )
        pk = config.db.scalar(select([self.tables.autoinc_pk.c.id]))
        eq_(
            r.inserted_primary_key,
            [pk]
        )


__all__ = ('LastrowidTest', 'InsertBehaviorTest', 'ReturningTest')
SQLAlchemy-1.0.11/lib/sqlalchemy/testing/suite/test_reflection.py0000664000175000017500000005745412636375552026130 0ustar classicclassic00000000000000
import sqlalchemy as sa
from sqlalchemy import exc as sa_exc
from sqlalchemy import types as sql_types
from sqlalchemy import inspect
from sqlalchemy import MetaData, Integer, String
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.testing import engines, fixtures
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.testing import eq_, assert_raises_message
from sqlalchemy import testing
from ..
import config import operator from sqlalchemy.schema import DDL, Index from sqlalchemy import event metadata, users = None, None class HasTableTest(fixtures.TablesTest): __backend__ = True @classmethod def define_tables(cls, metadata): Table('test_table', metadata, Column('id', Integer, primary_key=True), Column('data', String(50)) ) def test_has_table(self): with config.db.begin() as conn: assert config.db.dialect.has_table(conn, "test_table") assert not config.db.dialect.has_table(conn, "nonexistent_table") class ComponentReflectionTest(fixtures.TablesTest): run_inserts = run_deletes = None __backend__ = True @classmethod def define_tables(cls, metadata): cls.define_reflected_tables(metadata, None) if testing.requires.schemas.enabled: cls.define_reflected_tables(metadata, testing.config.test_schema) @classmethod def define_reflected_tables(cls, metadata, schema): if schema: schema_prefix = schema + "." else: schema_prefix = "" if testing.requires.self_referential_foreign_keys.enabled: users = Table('users', metadata, Column('user_id', sa.INT, primary_key=True), Column('test1', sa.CHAR(5), nullable=False), Column('test2', sa.Float(5), nullable=False), Column('parent_user_id', sa.Integer, sa.ForeignKey('%susers.user_id' % schema_prefix)), schema=schema, test_needs_fk=True, ) else: users = Table('users', metadata, Column('user_id', sa.INT, primary_key=True), Column('test1', sa.CHAR(5), nullable=False), Column('test2', sa.Float(5), nullable=False), schema=schema, test_needs_fk=True, ) Table("dingalings", metadata, Column('dingaling_id', sa.Integer, primary_key=True), Column('address_id', sa.Integer, sa.ForeignKey('%semail_addresses.address_id' % schema_prefix)), Column('data', sa.String(30)), schema=schema, test_needs_fk=True, ) Table('email_addresses', metadata, Column('address_id', sa.Integer), Column('remote_user_id', sa.Integer, sa.ForeignKey(users.c.user_id)), Column('email_address', sa.String(20)), sa.PrimaryKeyConstraint('address_id', name='email_ad_pk'), schema=schema, test_needs_fk=True, ) if testing.requires.index_reflection.enabled: cls.define_index(metadata, users) if testing.requires.view_column_reflection.enabled: cls.define_views(metadata, schema) if not schema and testing.requires.temp_table_reflection.enabled: cls.define_temp_tables(metadata) @classmethod def define_temp_tables(cls, metadata): # cheat a bit, we should fix this with some dialect-level # temp table fixture if testing.against("oracle"): kw = { 'prefixes': ["GLOBAL TEMPORARY"], 'oracle_on_commit': 'PRESERVE ROWS' } else: kw = { 'prefixes': ["TEMPORARY"], } user_tmp = Table( "user_tmp", metadata, Column("id", sa.INT, primary_key=True), Column('name', sa.VARCHAR(50)), Column('foo', sa.INT), sa.UniqueConstraint('name', name='user_tmp_uq'), sa.Index("user_tmp_ix", "foo"), **kw ) if testing.requires.view_reflection.enabled and \ testing.requires.temporary_views.enabled: event.listen( user_tmp, "after_create", DDL("create temporary view user_tmp_v as " "select * from user_tmp") ) event.listen( user_tmp, "before_drop", DDL("drop view user_tmp_v") ) @classmethod def define_index(cls, metadata, users): Index("users_t_idx", users.c.test1, users.c.test2) Index("users_all_idx", users.c.user_id, users.c.test2, users.c.test1) @classmethod def define_views(cls, metadata, schema): for table_name in ('users', 'email_addresses'): fullname = table_name if schema: fullname = "%s.%s" % (schema, table_name) view_name = fullname + '_v' query = "CREATE VIEW %s AS SELECT * FROM %s" % ( view_name, fullname) event.listen( metadata, 
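                # "after_create" on a MetaData fires once create_all()
                # has emitted its CREATE TABLE statements, so the view's
                # base table already exists when this DDL runs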
"after_create", DDL(query) ) event.listen( metadata, "before_drop", DDL("DROP VIEW %s" % view_name) ) @testing.requires.schema_reflection def test_get_schema_names(self): insp = inspect(testing.db) self.assert_(testing.config.test_schema in insp.get_schema_names()) @testing.requires.schema_reflection def test_dialect_initialize(self): engine = engines.testing_engine() assert not hasattr(engine.dialect, 'default_schema_name') inspect(engine) assert hasattr(engine.dialect, 'default_schema_name') @testing.requires.schema_reflection def test_get_default_schema_name(self): insp = inspect(testing.db) eq_(insp.default_schema_name, testing.db.dialect.default_schema_name) @testing.provide_metadata def _test_get_table_names(self, schema=None, table_type='table', order_by=None): meta = self.metadata users, addresses, dingalings = self.tables.users, \ self.tables.email_addresses, self.tables.dingalings insp = inspect(meta.bind) if table_type == 'view': table_names = insp.get_view_names(schema) table_names.sort() answer = ['email_addresses_v', 'users_v'] eq_(sorted(table_names), answer) else: table_names = insp.get_table_names(schema, order_by=order_by) if order_by == 'foreign_key': answer = ['users', 'email_addresses', 'dingalings'] eq_(table_names, answer) else: answer = ['dingalings', 'email_addresses', 'users'] eq_(sorted(table_names), answer) @testing.requires.temp_table_names def test_get_temp_table_names(self): insp = inspect(testing.db) temp_table_names = insp.get_temp_table_names() eq_(sorted(temp_table_names), ['user_tmp']) @testing.requires.view_reflection @testing.requires.temp_table_names @testing.requires.temporary_views def test_get_temp_view_names(self): insp = inspect(self.metadata.bind) temp_table_names = insp.get_temp_view_names() eq_(sorted(temp_table_names), ['user_tmp_v']) @testing.requires.table_reflection def test_get_table_names(self): self._test_get_table_names() @testing.requires.table_reflection @testing.requires.foreign_key_constraint_reflection def test_get_table_names_fks(self): self._test_get_table_names(order_by='foreign_key') @testing.requires.table_reflection @testing.requires.schemas def test_get_table_names_with_schema(self): self._test_get_table_names(testing.config.test_schema) @testing.requires.view_column_reflection def test_get_view_names(self): self._test_get_table_names(table_type='view') @testing.requires.view_column_reflection @testing.requires.schemas def test_get_view_names_with_schema(self): self._test_get_table_names( testing.config.test_schema, table_type='view') @testing.requires.table_reflection @testing.requires.view_column_reflection def test_get_tables_and_views(self): self._test_get_table_names() self._test_get_table_names(table_type='view') def _test_get_columns(self, schema=None, table_type='table'): meta = MetaData(testing.db) users, addresses, dingalings = self.tables.users, \ self.tables.email_addresses, self.tables.dingalings table_names = ['users', 'email_addresses'] if table_type == 'view': table_names = ['users_v', 'email_addresses_v'] insp = inspect(meta.bind) for table_name, table in zip(table_names, (users, addresses)): schema_name = schema cols = insp.get_columns(table_name, schema=schema_name) self.assert_(len(cols) > 0, len(cols)) # should be in order for i, col in enumerate(table.columns): eq_(col.name, cols[i]['name']) ctype = cols[i]['type'].__class__ ctype_def = col.type if isinstance(ctype_def, sa.types.TypeEngine): ctype_def = ctype_def.__class__ # Oracle returns Date for DateTime. 
if testing.against('oracle') and ctype_def \ in (sql_types.Date, sql_types.DateTime): ctype_def = sql_types.Date # assert that the desired type and return type share # a base within one of the generic types. self.assert_(len(set(ctype.__mro__). intersection(ctype_def.__mro__). intersection([ sql_types.Integer, sql_types.Numeric, sql_types.DateTime, sql_types.Date, sql_types.Time, sql_types.String, sql_types._Binary, ])) > 0, '%s(%s), %s(%s)' % (col.name, col.type, cols[i]['name'], ctype)) if not col.primary_key: assert cols[i]['default'] is None @testing.requires.table_reflection def test_get_columns(self): self._test_get_columns() @testing.provide_metadata def _type_round_trip(self, *types): t = Table('t', self.metadata, *[ Column('t%d' % i, type_) for i, type_ in enumerate(types) ] ) t.create() return [ c['type'] for c in inspect(self.metadata.bind).get_columns('t') ] @testing.requires.table_reflection def test_numeric_reflection(self): for typ in self._type_round_trip( sql_types.Numeric(18, 5), ): assert isinstance(typ, sql_types.Numeric) eq_(typ.precision, 18) eq_(typ.scale, 5) @testing.requires.table_reflection def test_varchar_reflection(self): typ = self._type_round_trip(sql_types.String(52))[0] assert isinstance(typ, sql_types.String) eq_(typ.length, 52) @testing.requires.table_reflection @testing.provide_metadata def test_nullable_reflection(self): t = Table('t', self.metadata, Column('a', Integer, nullable=True), Column('b', Integer, nullable=False)) t.create() eq_( dict( (col['name'], col['nullable']) for col in inspect(self.metadata.bind).get_columns('t') ), {"a": True, "b": False} ) @testing.requires.table_reflection @testing.requires.schemas def test_get_columns_with_schema(self): self._test_get_columns(schema=testing.config.test_schema) @testing.requires.temp_table_reflection def test_get_temp_table_columns(self): meta = MetaData(testing.db) user_tmp = self.tables.user_tmp insp = inspect(meta.bind) cols = insp.get_columns('user_tmp') self.assert_(len(cols) > 0, len(cols)) for i, col in enumerate(user_tmp.columns): eq_(col.name, cols[i]['name']) @testing.requires.temp_table_reflection @testing.requires.view_column_reflection @testing.requires.temporary_views def test_get_temp_view_columns(self): insp = inspect(self.metadata.bind) cols = insp.get_columns('user_tmp_v') eq_( [col['name'] for col in cols], ['id', 'name', 'foo'] ) @testing.requires.view_column_reflection def test_get_view_columns(self): self._test_get_columns(table_type='view') @testing.requires.view_column_reflection @testing.requires.schemas def test_get_view_columns_with_schema(self): self._test_get_columns( schema=testing.config.test_schema, table_type='view') @testing.provide_metadata def _test_get_pk_constraint(self, schema=None): meta = self.metadata users, addresses = self.tables.users, self.tables.email_addresses insp = inspect(meta.bind) users_cons = insp.get_pk_constraint(users.name, schema=schema) users_pkeys = users_cons['constrained_columns'] eq_(users_pkeys, ['user_id']) addr_cons = insp.get_pk_constraint(addresses.name, schema=schema) addr_pkeys = addr_cons['constrained_columns'] eq_(addr_pkeys, ['address_id']) with testing.requires.reflects_pk_names.fail_if(): eq_(addr_cons['name'], 'email_ad_pk') @testing.requires.primary_key_constraint_reflection def test_get_pk_constraint(self): self._test_get_pk_constraint() @testing.requires.table_reflection @testing.requires.primary_key_constraint_reflection @testing.requires.schemas def test_get_pk_constraint_with_schema(self): 
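        # same assertions as test_get_pk_constraint, run against the
        # secondary test schema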
self._test_get_pk_constraint(schema=testing.config.test_schema) @testing.requires.table_reflection @testing.provide_metadata def test_deprecated_get_primary_keys(self): meta = self.metadata users = self.tables.users insp = Inspector(meta.bind) assert_raises_message( sa_exc.SADeprecationWarning, "Call to deprecated method get_primary_keys." " Use get_pk_constraint instead.", insp.get_primary_keys, users.name ) @testing.provide_metadata def _test_get_foreign_keys(self, schema=None): meta = self.metadata users, addresses, dingalings = self.tables.users, \ self.tables.email_addresses, self.tables.dingalings insp = inspect(meta.bind) expected_schema = schema # users if testing.requires.self_referential_foreign_keys.enabled: users_fkeys = insp.get_foreign_keys(users.name, schema=schema) fkey1 = users_fkeys[0] with testing.requires.named_constraints.fail_if(): self.assert_(fkey1['name'] is not None) eq_(fkey1['referred_schema'], expected_schema) eq_(fkey1['referred_table'], users.name) eq_(fkey1['referred_columns'], ['user_id', ]) if testing.requires.self_referential_foreign_keys.enabled: eq_(fkey1['constrained_columns'], ['parent_user_id']) # addresses addr_fkeys = insp.get_foreign_keys(addresses.name, schema=schema) fkey1 = addr_fkeys[0] with testing.requires.named_constraints.fail_if(): self.assert_(fkey1['name'] is not None) eq_(fkey1['referred_schema'], expected_schema) eq_(fkey1['referred_table'], users.name) eq_(fkey1['referred_columns'], ['user_id', ]) eq_(fkey1['constrained_columns'], ['remote_user_id']) @testing.requires.foreign_key_constraint_reflection def test_get_foreign_keys(self): self._test_get_foreign_keys() @testing.requires.foreign_key_constraint_reflection @testing.requires.schemas def test_get_foreign_keys_with_schema(self): self._test_get_foreign_keys(schema=testing.config.test_schema) @testing.provide_metadata def _test_get_indexes(self, schema=None): meta = self.metadata users, addresses, dingalings = self.tables.users, \ self.tables.email_addresses, self.tables.dingalings # The database may decide to create indexes for foreign keys, etc. # so there may be more indexes than expected. 
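        # (InnoDB, for example, adds an index on a FOREIGN KEY column
        # if no usable one exists), so the assertions below look up each
        # expected index by name rather than comparing the reflected
        # list wholesale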
insp = inspect(meta.bind) indexes = insp.get_indexes('users', schema=schema) expected_indexes = [ {'unique': False, 'column_names': ['test1', 'test2'], 'name': 'users_t_idx'}, {'unique': False, 'column_names': ['user_id', 'test2', 'test1'], 'name': 'users_all_idx'} ] index_names = [d['name'] for d in indexes] for e_index in expected_indexes: assert e_index['name'] in index_names index = indexes[index_names.index(e_index['name'])] for key in e_index: eq_(e_index[key], index[key]) @testing.requires.index_reflection def test_get_indexes(self): self._test_get_indexes() @testing.requires.index_reflection @testing.requires.schemas def test_get_indexes_with_schema(self): self._test_get_indexes(schema=testing.config.test_schema) @testing.requires.unique_constraint_reflection def test_get_unique_constraints(self): self._test_get_unique_constraints() @testing.requires.temp_table_reflection @testing.requires.unique_constraint_reflection def test_get_temp_table_unique_constraints(self): insp = inspect(self.metadata.bind) reflected = insp.get_unique_constraints('user_tmp') for refl in reflected: # Different dialects handle duplicate index and constraints # differently, so ignore this flag refl.pop('duplicates_index', None) eq_(reflected, [{'column_names': ['name'], 'name': 'user_tmp_uq'}]) @testing.requires.temp_table_reflection def test_get_temp_table_indexes(self): insp = inspect(self.metadata.bind) indexes = insp.get_indexes('user_tmp') for ind in indexes: ind.pop('dialect_options', None) eq_( # TODO: we need to add better filtering for indexes/uq constraints # that are doubled up [idx for idx in indexes if idx['name'] == 'user_tmp_ix'], [{'unique': False, 'column_names': ['foo'], 'name': 'user_tmp_ix'}] ) @testing.requires.unique_constraint_reflection @testing.requires.schemas def test_get_unique_constraints_with_schema(self): self._test_get_unique_constraints(schema=testing.config.test_schema) @testing.provide_metadata def _test_get_unique_constraints(self, schema=None): # SQLite dialect needs to parse the names of the constraints # separately from what it gets from PRAGMA index_list(), and # then matches them up. so same set of column_names in two # constraints will confuse it. Perhaps we should no longer # bother with index_list() here since we have the whole # CREATE TABLE? 
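        # the fixture below intentionally stresses identifier handling:
        # reserved words ("asc", "key") as column names, plus constraint
        # names containing dots and embedded spaces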
uniques = sorted( [ {'name': 'unique_a', 'column_names': ['a']}, {'name': 'unique_a_b_c', 'column_names': ['a', 'b', 'c']}, {'name': 'unique_c_a_b', 'column_names': ['c', 'a', 'b']}, {'name': 'unique_asc_key', 'column_names': ['asc', 'key']}, {'name': 'i.have.dots', 'column_names': ['b']}, {'name': 'i have spaces', 'column_names': ['c']}, ], key=operator.itemgetter('name') ) orig_meta = self.metadata table = Table( 'testtbl', orig_meta, Column('a', sa.String(20)), Column('b', sa.String(30)), Column('c', sa.Integer), # reserved identifiers Column('asc', sa.String(30)), Column('key', sa.String(30)), schema=schema ) for uc in uniques: table.append_constraint( sa.UniqueConstraint(*uc['column_names'], name=uc['name']) ) orig_meta.create_all() inspector = inspect(orig_meta.bind) reflected = sorted( inspector.get_unique_constraints('testtbl', schema=schema), key=operator.itemgetter('name') ) for orig, refl in zip(uniques, reflected): # Different dialects handle duplicate index and constraints # differently, so ignore this flag refl.pop('duplicates_index', None) eq_(orig, refl) @testing.provide_metadata def _test_get_view_definition(self, schema=None): meta = self.metadata users, addresses, dingalings = self.tables.users, \ self.tables.email_addresses, self.tables.dingalings view_name1 = 'users_v' view_name2 = 'email_addresses_v' insp = inspect(meta.bind) v1 = insp.get_view_definition(view_name1, schema=schema) self.assert_(v1) v2 = insp.get_view_definition(view_name2, schema=schema) self.assert_(v2) @testing.requires.view_reflection def test_get_view_definition(self): self._test_get_view_definition() @testing.requires.view_reflection @testing.requires.schemas def test_get_view_definition_with_schema(self): self._test_get_view_definition(schema=testing.config.test_schema) @testing.only_on("postgresql", "PG specific feature") @testing.provide_metadata def _test_get_table_oid(self, table_name, schema=None): meta = self.metadata users, addresses, dingalings = self.tables.users, \ self.tables.email_addresses, self.tables.dingalings insp = inspect(meta.bind) oid = insp.get_table_oid(table_name, schema) self.assert_(isinstance(oid, int)) def test_get_table_oid(self): self._test_get_table_oid('users') @testing.requires.schemas def test_get_table_oid_with_schema(self): self._test_get_table_oid('users', schema=testing.config.test_schema) @testing.requires.table_reflection @testing.provide_metadata def test_autoincrement_col(self): """test that 'autoincrement' is reflected according to sqla's policy. Don't mark this test as unsupported for any backend ! (technically it fails with MySQL InnoDB since "id" comes before "id2") A backend is better off not returning "autoincrement" at all, instead of potentially returning "False" for an auto-incrementing primary key column. """ meta = self.metadata insp = inspect(meta.bind) for tname, cname in [ ('users', 'user_id'), ('email_addresses', 'address_id'), ('dingalings', 'dingaling_id'), ]: cols = insp.get_columns(tname) id_ = dict((c['name'], c) for c in cols)[cname] assert id_.get('autoincrement', True) __all__ = ('ComponentReflectionTest', 'HasTableTest') SQLAlchemy-1.0.11/lib/sqlalchemy/testing/suite/test_types.py0000664000175000017500000004130012636375552025131 0ustar classicclassic00000000000000# coding: utf-8 from .. 
import fixtures, config from ..assertions import eq_ from ..config import requirements from sqlalchemy import Integer, Unicode, UnicodeText, select from sqlalchemy import Date, DateTime, Time, MetaData, String, \ Text, Numeric, Float, literal, Boolean from ..schema import Table, Column from ... import testing import decimal import datetime from ...util import u from ... import util class _LiteralRoundTripFixture(object): @testing.provide_metadata def _literal_round_trip(self, type_, input_, output, filter_=None): """test literal rendering """ # for literal, we test the literal render in an INSERT # into a typed column. we can then SELECT it back as its # official type; ideally we'd be able to use CAST here # but MySQL in particular can't CAST fully t = Table('t', self.metadata, Column('x', type_)) t.create() for value in input_: ins = t.insert().values(x=literal(value)).compile( dialect=testing.db.dialect, compile_kwargs=dict(literal_binds=True) ) testing.db.execute(ins) for row in t.select().execute(): value = row[0] if filter_ is not None: value = filter_(value) assert value in output class _UnicodeFixture(_LiteralRoundTripFixture): __requires__ = 'unicode_data', data = u("Alors vous imaginez ma surprise, au lever du jour, " "quand une drôle de petite voix m’a réveillé. Elle " "disait: « S’il vous plaît… dessine-moi un mouton! »") @classmethod def define_tables(cls, metadata): Table('unicode_table', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('unicode_data', cls.datatype), ) def test_round_trip(self): unicode_table = self.tables.unicode_table config.db.execute( unicode_table.insert(), { 'unicode_data': self.data, } ) row = config.db.execute( select([ unicode_table.c.unicode_data, ]) ).first() eq_( row, (self.data, ) ) assert isinstance(row[0], util.text_type) def test_round_trip_executemany(self): unicode_table = self.tables.unicode_table config.db.execute( unicode_table.insert(), [ { 'unicode_data': self.data, } for i in range(3) ] ) rows = config.db.execute( select([ unicode_table.c.unicode_data, ]) ).fetchall() eq_( rows, [(self.data, ) for i in range(3)] ) for row in rows: assert isinstance(row[0], util.text_type) def _test_empty_strings(self): unicode_table = self.tables.unicode_table config.db.execute( unicode_table.insert(), {"unicode_data": u('')} ) row = config.db.execute( select([unicode_table.c.unicode_data]) ).first() eq_(row, (u(''),)) def test_literal(self): self._literal_round_trip(self.datatype, [self.data], [self.data]) class UnicodeVarcharTest(_UnicodeFixture, fixtures.TablesTest): __requires__ = 'unicode_data', __backend__ = True datatype = Unicode(255) @requirements.empty_strings_varchar def test_empty_strings_varchar(self): self._test_empty_strings() class UnicodeTextTest(_UnicodeFixture, fixtures.TablesTest): __requires__ = 'unicode_data', 'text_type' __backend__ = True datatype = UnicodeText() @requirements.empty_strings_text def test_empty_strings_text(self): self._test_empty_strings() class TextTest(_LiteralRoundTripFixture, fixtures.TablesTest): __requires__ = 'text_type', __backend__ = True @classmethod def define_tables(cls, metadata): Table('text_table', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('text_data', Text), ) def test_text_roundtrip(self): text_table = self.tables.text_table config.db.execute( text_table.insert(), {"text_data": 'some text'} ) row = config.db.execute( select([text_table.c.text_data]) ).first() eq_(row, ('some text',)) def 
test_text_empty_strings(self): text_table = self.tables.text_table config.db.execute( text_table.insert(), {"text_data": ''} ) row = config.db.execute( select([text_table.c.text_data]) ).first() eq_(row, ('',)) def test_literal(self): self._literal_round_trip(Text, ["some text"], ["some text"]) def test_literal_quoting(self): data = '''some 'text' hey "hi there" that's text''' self._literal_round_trip(Text, [data], [data]) def test_literal_backslashes(self): data = r'backslash one \ backslash two \\ end' self._literal_round_trip(Text, [data], [data]) class StringTest(_LiteralRoundTripFixture, fixtures.TestBase): __backend__ = True @requirements.unbounded_varchar def test_nolength_string(self): metadata = MetaData() foo = Table('foo', metadata, Column('one', String) ) foo.create(config.db) foo.drop(config.db) def test_literal(self): self._literal_round_trip(String(40), ["some text"], ["some text"]) def test_literal_quoting(self): data = '''some 'text' hey "hi there" that's text''' self._literal_round_trip(String(40), [data], [data]) def test_literal_backslashes(self): data = r'backslash one \ backslash two \\ end' self._literal_round_trip(String(40), [data], [data]) class _DateFixture(_LiteralRoundTripFixture): compare = None @classmethod def define_tables(cls, metadata): Table('date_table', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('date_data', cls.datatype), ) def test_round_trip(self): date_table = self.tables.date_table config.db.execute( date_table.insert(), {'date_data': self.data} ) row = config.db.execute( select([ date_table.c.date_data, ]) ).first() compare = self.compare or self.data eq_(row, (compare, )) assert isinstance(row[0], type(compare)) def test_null(self): date_table = self.tables.date_table config.db.execute( date_table.insert(), {'date_data': None} ) row = config.db.execute( select([ date_table.c.date_data, ]) ).first() eq_(row, (None,)) @testing.requires.datetime_literals def test_literal(self): compare = self.compare or self.data self._literal_round_trip(self.datatype, [self.data], [compare]) class DateTimeTest(_DateFixture, fixtures.TablesTest): __requires__ = 'datetime', __backend__ = True datatype = DateTime data = datetime.datetime(2012, 10, 15, 12, 57, 18) class DateTimeMicrosecondsTest(_DateFixture, fixtures.TablesTest): __requires__ = 'datetime_microseconds', __backend__ = True datatype = DateTime data = datetime.datetime(2012, 10, 15, 12, 57, 18, 396) class TimeTest(_DateFixture, fixtures.TablesTest): __requires__ = 'time', __backend__ = True datatype = Time data = datetime.time(12, 57, 18) class TimeMicrosecondsTest(_DateFixture, fixtures.TablesTest): __requires__ = 'time_microseconds', __backend__ = True datatype = Time data = datetime.time(12, 57, 18, 396) class DateTest(_DateFixture, fixtures.TablesTest): __requires__ = 'date', __backend__ = True datatype = Date data = datetime.date(2012, 10, 15) class DateTimeCoercedToDateTimeTest(_DateFixture, fixtures.TablesTest): __requires__ = 'date', 'date_coerces_from_datetime' __backend__ = True datatype = Date data = datetime.datetime(2012, 10, 15, 12, 57, 18) compare = datetime.date(2012, 10, 15) class DateTimeHistoricTest(_DateFixture, fixtures.TablesTest): __requires__ = 'datetime_historic', __backend__ = True datatype = DateTime data = datetime.datetime(1850, 11, 10, 11, 52, 35) class DateHistoricTest(_DateFixture, fixtures.TablesTest): __requires__ = 'date_historic', __backend__ = True datatype = Date data = datetime.date(1727, 4, 1) class 
IntegerTest(_LiteralRoundTripFixture, fixtures.TestBase): __backend__ = True def test_literal(self): self._literal_round_trip(Integer, [5], [5]) class NumericTest(_LiteralRoundTripFixture, fixtures.TestBase): __backend__ = True @testing.emits_warning(r".*does \*not\* support Decimal objects natively") @testing.provide_metadata def _do_test(self, type_, input_, output, filter_=None, check_scale=False): metadata = self.metadata t = Table('t', metadata, Column('x', type_)) t.create() t.insert().execute([{'x': x} for x in input_]) result = set([row[0] for row in t.select().execute()]) output = set(output) if filter_: result = set(filter_(x) for x in result) output = set(filter_(x) for x in output) eq_(result, output) if check_scale: eq_( [str(x) for x in result], [str(x) for x in output], ) @testing.emits_warning(r".*does \*not\* support Decimal objects natively") def test_render_literal_numeric(self): self._literal_round_trip( Numeric(precision=8, scale=4), [15.7563, decimal.Decimal("15.7563")], [decimal.Decimal("15.7563")], ) @testing.emits_warning(r".*does \*not\* support Decimal objects natively") def test_render_literal_numeric_asfloat(self): self._literal_round_trip( Numeric(precision=8, scale=4, asdecimal=False), [15.7563, decimal.Decimal("15.7563")], [15.7563], ) def test_render_literal_float(self): self._literal_round_trip( Float(4), [15.7563, decimal.Decimal("15.7563")], [15.7563, ], filter_=lambda n: n is not None and round(n, 5) or None ) @testing.requires.precision_generic_float_type def test_float_custom_scale(self): self._do_test( Float(None, decimal_return_scale=7, asdecimal=True), [15.7563827, decimal.Decimal("15.7563827")], [decimal.Decimal("15.7563827"), ], check_scale=True ) def test_numeric_as_decimal(self): self._do_test( Numeric(precision=8, scale=4), [15.7563, decimal.Decimal("15.7563")], [decimal.Decimal("15.7563")], ) def test_numeric_as_float(self): self._do_test( Numeric(precision=8, scale=4, asdecimal=False), [15.7563, decimal.Decimal("15.7563")], [15.7563], ) @testing.requires.fetch_null_from_numeric def test_numeric_null_as_decimal(self): self._do_test( Numeric(precision=8, scale=4), [None], [None], ) @testing.requires.fetch_null_from_numeric def test_numeric_null_as_float(self): self._do_test( Numeric(precision=8, scale=4, asdecimal=False), [None], [None], ) @testing.requires.floats_to_four_decimals def test_float_as_decimal(self): self._do_test( Float(precision=8, asdecimal=True), [15.7563, decimal.Decimal("15.7563"), None], [decimal.Decimal("15.7563"), None], ) def test_float_as_float(self): self._do_test( Float(precision=8), [15.7563, decimal.Decimal("15.7563")], [15.7563], filter_=lambda n: n is not None and round(n, 5) or None ) @testing.requires.precision_numerics_general def test_precision_decimal(self): numbers = set([ decimal.Decimal("54.234246451650"), decimal.Decimal("0.004354"), decimal.Decimal("900.0"), ]) self._do_test( Numeric(precision=18, scale=12), numbers, numbers, ) @testing.requires.precision_numerics_enotation_large def test_enotation_decimal(self): """test exceedingly small decimals. Decimal reports values with E notation when the exponent is greater than 6. 
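        For example::

            >>> import decimal
            >>> decimal.Decimal("0.00000005940696")
            Decimal('5.940696E-8')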
""" numbers = set([ decimal.Decimal('1E-2'), decimal.Decimal('1E-3'), decimal.Decimal('1E-4'), decimal.Decimal('1E-5'), decimal.Decimal('1E-6'), decimal.Decimal('1E-7'), decimal.Decimal('1E-8'), decimal.Decimal("0.01000005940696"), decimal.Decimal("0.00000005940696"), decimal.Decimal("0.00000000000696"), decimal.Decimal("0.70000000000696"), decimal.Decimal("696E-12"), ]) self._do_test( Numeric(precision=18, scale=14), numbers, numbers ) @testing.requires.precision_numerics_enotation_large def test_enotation_decimal_large(self): """test exceedingly large decimals. """ numbers = set([ decimal.Decimal('4E+8'), decimal.Decimal("5748E+15"), decimal.Decimal('1.521E+15'), decimal.Decimal('00000000000000.1E+12'), ]) self._do_test( Numeric(precision=25, scale=2), numbers, numbers ) @testing.requires.precision_numerics_many_significant_digits def test_many_significant_digits(self): numbers = set([ decimal.Decimal("31943874831932418390.01"), decimal.Decimal("319438950232418390.273596"), decimal.Decimal("87673.594069654243"), ]) self._do_test( Numeric(precision=38, scale=12), numbers, numbers ) @testing.requires.precision_numerics_retains_significant_digits def test_numeric_no_decimal(self): numbers = set([ decimal.Decimal("1.000") ]) self._do_test( Numeric(precision=5, scale=3), numbers, numbers, check_scale=True ) class BooleanTest(_LiteralRoundTripFixture, fixtures.TablesTest): __backend__ = True @classmethod def define_tables(cls, metadata): Table('boolean_table', metadata, Column('id', Integer, primary_key=True, autoincrement=False), Column('value', Boolean), Column('unconstrained_value', Boolean(create_constraint=False)), ) def test_render_literal_bool(self): self._literal_round_trip( Boolean(), [True, False], [True, False] ) def test_round_trip(self): boolean_table = self.tables.boolean_table config.db.execute( boolean_table.insert(), { 'id': 1, 'value': True, 'unconstrained_value': False } ) row = config.db.execute( select([ boolean_table.c.value, boolean_table.c.unconstrained_value ]) ).first() eq_( row, (True, False) ) assert isinstance(row[0], bool) def test_null(self): boolean_table = self.tables.boolean_table config.db.execute( boolean_table.insert(), { 'id': 1, 'value': None, 'unconstrained_value': None } ) row = config.db.execute( select([ boolean_table.c.value, boolean_table.c.unconstrained_value ]) ).first() eq_( row, (None, None) ) __all__ = ('UnicodeVarcharTest', 'UnicodeTextTest', 'DateTest', 'DateTimeTest', 'TextTest', 'NumericTest', 'IntegerTest', 'DateTimeHistoricTest', 'DateTimeCoercedToDateTimeTest', 'TimeMicrosecondsTest', 'TimeTest', 'DateTimeMicrosecondsTest', 'DateHistoricTest', 'StringTest', 'BooleanTest') SQLAlchemy-1.0.11/lib/sqlalchemy/testing/suite/test_ddl.py0000664000175000017500000000345612636375552024542 0ustar classicclassic00000000000000 from .. 
import fixtures, config, util from ..config import requirements from ..assertions import eq_ from sqlalchemy import Table, Column, Integer, String class TableDDLTest(fixtures.TestBase): __backend__ = True def _simple_fixture(self): return Table('test_table', self.metadata, Column('id', Integer, primary_key=True, autoincrement=False), Column('data', String(50)) ) def _underscore_fixture(self): return Table('_test_table', self.metadata, Column('id', Integer, primary_key=True, autoincrement=False), Column('_data', String(50)) ) def _simple_roundtrip(self, table): with config.db.begin() as conn: conn.execute(table.insert().values((1, 'some data'))) result = conn.execute(table.select()) eq_( result.first(), (1, 'some data') ) @requirements.create_table @util.provide_metadata def test_create_table(self): table = self._simple_fixture() table.create( config.db, checkfirst=False ) self._simple_roundtrip(table) @requirements.drop_table @util.provide_metadata def test_drop_table(self): table = self._simple_fixture() table.create( config.db, checkfirst=False ) table.drop( config.db, checkfirst=False ) @requirements.create_table @util.provide_metadata def test_underscore_names(self): table = self._underscore_fixture() table.create( config.db, checkfirst=False ) self._simple_roundtrip(table) __all__ = ('TableDDLTest', ) SQLAlchemy-1.0.11/lib/sqlalchemy/testing/suite/test_sequence.py0000664000175000017500000000733612636375552025610 0ustar classicclassic00000000000000from .. import fixtures, config from ..config import requirements from ..assertions import eq_ from ... import testing from ... import Integer, String, Sequence, schema from ..schema import Table, Column class SequenceTest(fixtures.TablesTest): __requires__ = ('sequences',) __backend__ = True run_create_tables = 'each' @classmethod def define_tables(cls, metadata): Table('seq_pk', metadata, Column('id', Integer, Sequence('tab_id_seq'), primary_key=True), Column('data', String(50)) ) Table('seq_opt_pk', metadata, Column('id', Integer, Sequence('tab_id_seq', optional=True), primary_key=True), Column('data', String(50)) ) def test_insert_roundtrip(self): config.db.execute( self.tables.seq_pk.insert(), data="some data" ) self._assert_round_trip(self.tables.seq_pk, config.db) def test_insert_lastrowid(self): r = config.db.execute( self.tables.seq_pk.insert(), data="some data" ) eq_( r.inserted_primary_key, [1] ) def test_nextval_direct(self): r = config.db.execute( self.tables.seq_pk.c.id.default ) eq_( r, 1 ) @requirements.sequences_optional def test_optional_seq(self): r = config.db.execute( self.tables.seq_opt_pk.insert(), data="some data" ) eq_( r.inserted_primary_key, [1] ) def _assert_round_trip(self, table, conn): row = conn.execute(table.select()).first() eq_( row, (1, "some data") ) class HasSequenceTest(fixtures.TestBase): __requires__ = 'sequences', __backend__ = True def test_has_sequence(self): s1 = Sequence('user_id_seq') testing.db.execute(schema.CreateSequence(s1)) try: eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq'), True) finally: testing.db.execute(schema.DropSequence(s1)) @testing.requires.schemas def test_has_sequence_schema(self): s1 = Sequence('user_id_seq', schema="test_schema") testing.db.execute(schema.CreateSequence(s1)) try: eq_(testing.db.dialect.has_sequence( testing.db, 'user_id_seq', schema="test_schema"), True) finally: testing.db.execute(schema.DropSequence(s1)) def test_has_sequence_neg(self): eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq'), False) @testing.requires.schemas def 
test_has_sequence_schemas_neg(self): eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq', schema="test_schema"), False) @testing.requires.schemas def test_has_sequence_default_not_in_remote(self): s1 = Sequence('user_id_seq') testing.db.execute(schema.CreateSequence(s1)) try: eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq', schema="test_schema"), False) finally: testing.db.execute(schema.DropSequence(s1)) @testing.requires.schemas def test_has_sequence_remote_not_in_default(self): s1 = Sequence('user_id_seq', schema="test_schema") testing.db.execute(schema.CreateSequence(s1)) try: eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq'), False) finally: testing.db.execute(schema.DropSequence(s1)) SQLAlchemy-1.0.11/lib/sqlalchemy/testing/suite/test_select.py0000664000175000017500000001314112636375552025246 0ustar classicclassic00000000000000from .. import fixtures, config from ..assertions import eq_ from sqlalchemy import util from sqlalchemy import Integer, String, select, func, bindparam from sqlalchemy import testing from ..schema import Table, Column class OrderByLabelTest(fixtures.TablesTest): """Test the dialect sends appropriate ORDER BY expressions when labels are used. This essentially exercises the "supports_simple_order_by_label" setting. """ __backend__ = True @classmethod def define_tables(cls, metadata): Table("some_table", metadata, Column('id', Integer, primary_key=True), Column('x', Integer), Column('y', Integer), Column('q', String(50)), Column('p', String(50)) ) @classmethod def insert_data(cls): config.db.execute( cls.tables.some_table.insert(), [ {"id": 1, "x": 1, "y": 2, "q": "q1", "p": "p3"}, {"id": 2, "x": 2, "y": 3, "q": "q2", "p": "p2"}, {"id": 3, "x": 3, "y": 4, "q": "q3", "p": "p1"}, ] ) def _assert_result(self, select, result): eq_( config.db.execute(select).fetchall(), result ) def test_plain(self): table = self.tables.some_table lx = table.c.x.label('lx') self._assert_result( select([lx]).order_by(lx), [(1, ), (2, ), (3, )] ) def test_composed_int(self): table = self.tables.some_table lx = (table.c.x + table.c.y).label('lx') self._assert_result( select([lx]).order_by(lx), [(3, ), (5, ), (7, )] ) def test_composed_multiple(self): table = self.tables.some_table lx = (table.c.x + table.c.y).label('lx') ly = (func.lower(table.c.q) + table.c.p).label('ly') self._assert_result( select([lx, ly]).order_by(lx, ly.desc()), [(3, util.u('q1p3')), (5, util.u('q2p2')), (7, util.u('q3p1'))] ) def test_plain_desc(self): table = self.tables.some_table lx = table.c.x.label('lx') self._assert_result( select([lx]).order_by(lx.desc()), [(3, ), (2, ), (1, )] ) def test_composed_int_desc(self): table = self.tables.some_table lx = (table.c.x + table.c.y).label('lx') self._assert_result( select([lx]).order_by(lx.desc()), [(7, ), (5, ), (3, )] ) def test_group_by_composed(self): table = self.tables.some_table expr = (table.c.x + table.c.y).label('lx') stmt = select([func.count(table.c.id), expr]).group_by(expr).order_by(expr) self._assert_result( stmt, [(1, 3), (1, 5), (1, 7)] ) class LimitOffsetTest(fixtures.TablesTest): __backend__ = True @classmethod def define_tables(cls, metadata): Table("some_table", metadata, Column('id', Integer, primary_key=True), Column('x', Integer), Column('y', Integer)) @classmethod def insert_data(cls): config.db.execute( cls.tables.some_table.insert(), [ {"id": 1, "x": 1, "y": 2}, {"id": 2, "x": 2, "y": 3}, {"id": 3, "x": 3, "y": 4}, {"id": 4, "x": 4, "y": 5}, ] ) def _assert_result(self, select, result, params=()): eq_( 
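            # execute with the given bound parameters and compare the
            # full fetched result against the expected rows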
config.db.execute(select, params).fetchall(), result ) def test_simple_limit(self): table = self.tables.some_table self._assert_result( select([table]).order_by(table.c.id).limit(2), [(1, 1, 2), (2, 2, 3)] ) @testing.requires.offset def test_simple_offset(self): table = self.tables.some_table self._assert_result( select([table]).order_by(table.c.id).offset(2), [(3, 3, 4), (4, 4, 5)] ) @testing.requires.offset def test_simple_limit_offset(self): table = self.tables.some_table self._assert_result( select([table]).order_by(table.c.id).limit(2).offset(1), [(2, 2, 3), (3, 3, 4)] ) @testing.requires.offset def test_limit_offset_nobinds(self): """test that 'literal binds' mode works - no bound params.""" table = self.tables.some_table stmt = select([table]).order_by(table.c.id).limit(2).offset(1) sql = stmt.compile( dialect=config.db.dialect, compile_kwargs={"literal_binds": True}) sql = str(sql) self._assert_result( sql, [(2, 2, 3), (3, 3, 4)] ) @testing.requires.bound_limit_offset def test_bound_limit(self): table = self.tables.some_table self._assert_result( select([table]).order_by(table.c.id).limit(bindparam('l')), [(1, 1, 2), (2, 2, 3)], params={"l": 2} ) @testing.requires.bound_limit_offset def test_bound_offset(self): table = self.tables.some_table self._assert_result( select([table]).order_by(table.c.id).offset(bindparam('o')), [(3, 3, 4), (4, 4, 5)], params={"o": 2} ) @testing.requires.bound_limit_offset def test_bound_limit_offset(self): table = self.tables.some_table self._assert_result( select([table]).order_by(table.c.id). limit(bindparam("l")).offset(bindparam("o")), [(2, 2, 3), (3, 3, 4)], params={"l": 2, "o": 1} ) SQLAlchemy-1.0.11/lib/sqlalchemy/testing/suite/__init__.py0000664000175000017500000000072712636375552024475 0ustar classicclassic00000000000000 from sqlalchemy.testing.suite.test_dialect import * from sqlalchemy.testing.suite.test_ddl import * from sqlalchemy.testing.suite.test_insert import * from sqlalchemy.testing.suite.test_sequence import * from sqlalchemy.testing.suite.test_select import * from sqlalchemy.testing.suite.test_results import * from sqlalchemy.testing.suite.test_update_delete import * from sqlalchemy.testing.suite.test_reflection import * from sqlalchemy.testing.suite.test_types import * SQLAlchemy-1.0.11/lib/sqlalchemy/testing/suite/test_results.py0000664000175000017500000001503512636375552025474 0ustar classicclassic00000000000000from .. import fixtures, config from ..config import requirements from .. import exclusions from ..assertions import eq_ from .. import engines from sqlalchemy import Integer, String, select, util, sql, DateTime import datetime from ..schema import Table, Column class RowFetchTest(fixtures.TablesTest): __backend__ = True @classmethod def define_tables(cls, metadata): Table('plain_pk', metadata, Column('id', Integer, primary_key=True), Column('data', String(50)) ) Table('has_dates', metadata, Column('id', Integer, primary_key=True), Column('today', DateTime) ) @classmethod def insert_data(cls): config.db.execute( cls.tables.plain_pk.insert(), [ {"id": 1, "data": "d1"}, {"id": 2, "data": "d2"}, {"id": 3, "data": "d3"}, ] ) config.db.execute( cls.tables.has_dates.insert(), [ {"id": 1, "today": datetime.datetime(2006, 5, 12, 12, 0, 0)} ] ) def test_via_string(self): row = config.db.execute( self.tables.plain_pk.select(). order_by(self.tables.plain_pk.c.id) ).first() eq_( row['id'], 1 ) eq_( row['data'], "d1" ) def test_via_int(self): row = config.db.execute( self.tables.plain_pk.select(). 
order_by(self.tables.plain_pk.c.id) ).first() eq_( row[0], 1 ) eq_( row[1], "d1" ) def test_via_col_object(self): row = config.db.execute( self.tables.plain_pk.select(). order_by(self.tables.plain_pk.c.id) ).first() eq_( row[self.tables.plain_pk.c.id], 1 ) eq_( row[self.tables.plain_pk.c.data], "d1" ) @requirements.duplicate_names_in_cursor_description def test_row_with_dupe_names(self): result = config.db.execute( select([self.tables.plain_pk.c.data, self.tables.plain_pk.c.data.label('data')]). order_by(self.tables.plain_pk.c.id) ) row = result.first() eq_(result.keys(), ['data', 'data']) eq_(row, ('d1', 'd1')) def test_row_w_scalar_select(self): """test that a scalar select as a column is returned as such and that type conversion works OK. (this is half a SQLAlchemy Core test and half to catch database backends that may have unusual behavior with scalar selects.) """ datetable = self.tables.has_dates s = select([datetable.alias('x').c.today]).as_scalar() s2 = select([datetable.c.id, s.label('somelabel')]) row = config.db.execute(s2).first() eq_(row['somelabel'], datetime.datetime(2006, 5, 12, 12, 0, 0)) class PercentSchemaNamesTest(fixtures.TablesTest): """tests using percent signs, spaces in table and column names. This is a very fringe use case, doesn't work for MySQL or Postgresql. the requirement, "percent_schema_names", is marked "skip" by default. """ __requires__ = ('percent_schema_names', ) __backend__ = True @classmethod def define_tables(cls, metadata): cls.tables.percent_table = Table('percent%table', metadata, Column("percent%", Integer), Column( "spaces % more spaces", Integer), ) cls.tables.lightweight_percent_table = sql.table( 'percent%table', sql.column("percent%"), sql.column("spaces % more spaces") ) def test_single_roundtrip(self): percent_table = self.tables.percent_table for params in [ {'percent%': 5, 'spaces % more spaces': 12}, {'percent%': 7, 'spaces % more spaces': 11}, {'percent%': 9, 'spaces % more spaces': 10}, {'percent%': 11, 'spaces % more spaces': 9} ]: config.db.execute(percent_table.insert(), params) self._assert_table() def test_executemany_roundtrip(self): percent_table = self.tables.percent_table config.db.execute( percent_table.insert(), {'percent%': 5, 'spaces % more spaces': 12} ) config.db.execute( percent_table.insert(), [{'percent%': 7, 'spaces % more spaces': 11}, {'percent%': 9, 'spaces % more spaces': 10}, {'percent%': 11, 'spaces % more spaces': 9}] ) self._assert_table() def _assert_table(self): percent_table = self.tables.percent_table lightweight_percent_table = self.tables.lightweight_percent_table for table in ( percent_table, percent_table.alias(), lightweight_percent_table, lightweight_percent_table.alias()): eq_( list( config.db.execute( table.select().order_by(table.c['percent%']) ) ), [ (5, 12), (7, 11), (9, 10), (11, 9) ] ) eq_( list( config.db.execute( table.select(). where(table.c['spaces % more spaces'].in_([9, 10])). order_by(table.c['percent%']), ) ), [ (9, 10), (11, 9) ] ) row = config.db.execute(table.select(). order_by(table.c['percent%'])).first() eq_(row['percent%'], 5) eq_(row['spaces % more spaces'], 12) eq_(row[table.c['percent%']], 5) eq_(row[table.c['spaces % more spaces']], 12) config.db.execute( percent_table.update().values( {percent_table.c['spaces % more spaces']: 15} ) ) eq_( list( config.db.execute( percent_table. select(). 
order_by(percent_table.c['percent%']) ) ), [(5, 15), (7, 15), (9, 15), (11, 15)] ) SQLAlchemy-1.0.11/lib/sqlalchemy/testing/fixtures.py0000664000175000017500000002474112636375552023460 0ustar classicclassic00000000000000# testing/fixtures.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from . import config from . import assertions, schema from .util import adict from .. import util from .engines import drop_all_tables from .entities import BasicEntity, ComparableEntity import sys import sqlalchemy as sa from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta # whether or not we use unittest changes things dramatically, # as far as how py.test collection works. class TestBase(object): # A sequence of database names to always run, regardless of the # constraints below. __whitelist__ = () # A sequence of requirement names matching testing.requires decorators __requires__ = () # A sequence of dialect names to exclude from the test class. __unsupported_on__ = () # If present, test class is only runnable for the *single* specified # dialect. If you need multiple, use __unsupported_on__ and invert. __only_on__ = None # A sequence of no-arg callables. If any are True, the entire testcase is # skipped. __skip_if__ = None def assert_(self, val, msg=None): assert val, msg # apparently a handful of tests are doing this....OK def setup(self): if hasattr(self, "setUp"): self.setUp() def teardown(self): if hasattr(self, "tearDown"): self.tearDown() class TablesTest(TestBase): # 'once', None run_setup_bind = 'once' # 'once', 'each', None run_define_tables = 'once' # 'once', 'each', None run_create_tables = 'once' # 'once', 'each', None run_inserts = 'each' # 'each', None run_deletes = 'each' # 'once', None run_dispose_bind = None bind = None metadata = None tables = None other = None @classmethod def setup_class(cls): cls._init_class() cls._setup_once_tables() cls._setup_once_inserts() @classmethod def _init_class(cls): if cls.run_define_tables == 'each': if cls.run_create_tables == 'once': cls.run_create_tables = 'each' assert cls.run_inserts in ('each', None) cls.other = adict() cls.tables = adict() cls.bind = cls.setup_bind() cls.metadata = sa.MetaData() cls.metadata.bind = cls.bind @classmethod def _setup_once_inserts(cls): if cls.run_inserts == 'once': cls._load_fixtures() cls.insert_data() @classmethod def _setup_once_tables(cls): if cls.run_define_tables == 'once': cls.define_tables(cls.metadata) if cls.run_create_tables == 'once': cls.metadata.create_all(cls.bind) cls.tables.update(cls.metadata.tables) def _setup_each_tables(self): if self.run_define_tables == 'each': self.tables.clear() if self.run_create_tables == 'each': drop_all_tables(self.metadata, self.bind) self.metadata.clear() self.define_tables(self.metadata) if self.run_create_tables == 'each': self.metadata.create_all(self.bind) self.tables.update(self.metadata.tables) elif self.run_create_tables == 'each': drop_all_tables(self.metadata, self.bind) self.metadata.create_all(self.bind) def _setup_each_inserts(self): if self.run_inserts == 'each': self._load_fixtures() self.insert_data() def _teardown_each_tables(self): # no need to run deletes if tables are recreated on setup if self.run_define_tables != 'each' and self.run_deletes == 'each': with self.bind.connect() as conn: for table in reversed(self.metadata.sorted_tables): try: conn.execute(table.delete()) except 
sa.exc.DBAPIError as ex: util.print_( ("Error emptying table %s: %r" % (table, ex)), file=sys.stderr) def setup(self): self._setup_each_tables() self._setup_each_inserts() def teardown(self): self._teardown_each_tables() @classmethod def _teardown_once_metadata_bind(cls): if cls.run_create_tables: drop_all_tables(cls.metadata, cls.bind) if cls.run_dispose_bind == 'once': cls.dispose_bind(cls.bind) cls.metadata.bind = None if cls.run_setup_bind is not None: cls.bind = None @classmethod def teardown_class(cls): cls._teardown_once_metadata_bind() @classmethod def setup_bind(cls): return config.db @classmethod def dispose_bind(cls, bind): if hasattr(bind, 'dispose'): bind.dispose() elif hasattr(bind, 'close'): bind.close() @classmethod def define_tables(cls, metadata): pass @classmethod def fixtures(cls): return {} @classmethod def insert_data(cls): pass def sql_count_(self, count, fn): self.assert_sql_count(self.bind, fn, count) def sql_eq_(self, callable_, statements): self.assert_sql(self.bind, callable_, statements) @classmethod def _load_fixtures(cls): """Insert rows as represented by the fixtures() method.""" headers, rows = {}, {} for table, data in cls.fixtures().items(): if len(data) < 2: continue if isinstance(table, util.string_types): table = cls.tables[table] headers[table] = data[0] rows[table] = data[1:] for table in cls.metadata.sorted_tables: if table not in headers: continue cls.bind.execute( table.insert(), [dict(zip(headers[table], column_values)) for column_values in rows[table]]) from sqlalchemy import event class RemovesEvents(object): @util.memoized_property def _event_fns(self): return set() def event_listen(self, target, name, fn): self._event_fns.add((target, name, fn)) event.listen(target, name, fn) def teardown(self): for key in self._event_fns: event.remove(*key) super_ = super(RemovesEvents, self) if hasattr(super_, "teardown"): super_.teardown() class _ORMTest(object): @classmethod def teardown_class(cls): sa.orm.session.Session.close_all() sa.orm.clear_mappers() class ORMTest(_ORMTest, TestBase): pass class MappedTest(_ORMTest, TablesTest, assertions.AssertsExecutionResults): # 'once', 'each', None run_setup_classes = 'once' # 'once', 'each', None run_setup_mappers = 'each' classes = None @classmethod def setup_class(cls): cls._init_class() if cls.classes is None: cls.classes = adict() cls._setup_once_tables() cls._setup_once_classes() cls._setup_once_mappers() cls._setup_once_inserts() @classmethod def teardown_class(cls): cls._teardown_once_class() cls._teardown_once_metadata_bind() def setup(self): self._setup_each_tables() self._setup_each_classes() self._setup_each_mappers() self._setup_each_inserts() def teardown(self): sa.orm.session.Session.close_all() self._teardown_each_mappers() self._teardown_each_classes() self._teardown_each_tables() @classmethod def _teardown_once_class(cls): cls.classes.clear() _ORMTest.teardown_class() @classmethod def _setup_once_classes(cls): if cls.run_setup_classes == 'once': cls._with_register_classes(cls.setup_classes) @classmethod def _setup_once_mappers(cls): if cls.run_setup_mappers == 'once': cls._with_register_classes(cls.setup_mappers) def _setup_each_mappers(self): if self.run_setup_mappers == 'each': self._with_register_classes(self.setup_mappers) def _setup_each_classes(self): if self.run_setup_classes == 'each': self._with_register_classes(self.setup_classes) @classmethod def _with_register_classes(cls, fn): """Run a setup method, framing the operation with a Base class that will catch new subclasses to be 
established within the "classes" registry. """ cls_registry = cls.classes class FindFixture(type): def __init__(cls, classname, bases, dict_): cls_registry[classname] = cls return type.__init__(cls, classname, bases, dict_) class _Base(util.with_metaclass(FindFixture, object)): pass class Basic(BasicEntity, _Base): pass class Comparable(ComparableEntity, _Base): pass cls.Basic = Basic cls.Comparable = Comparable fn() def _teardown_each_mappers(self): # some tests create mappers in the test bodies # and will define setup_mappers as None - # clear mappers in any case if self.run_setup_mappers != 'once': sa.orm.clear_mappers() def _teardown_each_classes(self): if self.run_setup_classes != 'once': self.classes.clear() @classmethod def setup_classes(cls): pass @classmethod def setup_mappers(cls): pass class DeclarativeMappedTest(MappedTest): run_setup_classes = 'once' run_setup_mappers = 'once' @classmethod def _setup_once_tables(cls): pass @classmethod def _with_register_classes(cls, fn): cls_registry = cls.classes class FindFixtureDeclarative(DeclarativeMeta): def __init__(cls, classname, bases, dict_): cls_registry[classname] = cls return DeclarativeMeta.__init__( cls, classname, bases, dict_) class DeclarativeBasic(object): __table_cls__ = schema.Table _DeclBase = declarative_base(metadata=cls.metadata, metaclass=FindFixtureDeclarative, cls=DeclarativeBasic) cls.DeclarativeBasic = _DeclBase fn() if cls.metadata.tables and cls.run_create_tables: cls.metadata.create_all(config.db) SQLAlchemy-1.0.11/lib/sqlalchemy/testing/entities.py0000664000175000017500000000566012636375552023432 0ustar classicclassic00000000000000# testing/entities.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import sqlalchemy as sa from sqlalchemy import exc as sa_exc _repr_stack = set() class BasicEntity(object): def __init__(self, **kw): for key, value in kw.items(): setattr(self, key, value) def __repr__(self): if id(self) in _repr_stack: return object.__repr__(self) _repr_stack.add(id(self)) try: return "%s(%s)" % ( (self.__class__.__name__), ', '.join(["%s=%r" % (key, getattr(self, key)) for key in sorted(self.__dict__.keys()) if not key.startswith('_')])) finally: _repr_stack.remove(id(self)) _recursion_stack = set() class ComparableEntity(BasicEntity): def __hash__(self): return hash(self.__class__) def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): """'Deep, sparse compare. Deeply compare two entities, following the non-None attributes of the non-persisted object, if possible. 
""" if other is self: return True elif not self.__class__ == other.__class__: return False if id(self) in _recursion_stack: return True _recursion_stack.add(id(self)) try: # pick the entity that's not SA persisted as the source try: self_key = sa.orm.attributes.instance_state(self).key except sa.orm.exc.NO_STATE: self_key = None if other is None: a = self b = other elif self_key is not None: a = other b = self else: a = self b = other for attr in list(a.__dict__): if attr.startswith('_'): continue value = getattr(a, attr) try: # handle lazy loader errors battr = getattr(b, attr) except (AttributeError, sa_exc.UnboundExecutionError): return False if hasattr(value, '__iter__'): if hasattr(value, '__getitem__') and not hasattr( value, 'keys'): if list(value) != list(battr): return False else: if set(value) != set(battr): return False else: if value is not None and value != battr: return False return True finally: _recursion_stack.remove(id(self)) SQLAlchemy-1.0.11/lib/sqlalchemy/testing/schema.py0000664000175000017500000000656612636375552023054 0ustar classicclassic00000000000000# testing/schema.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from . import exclusions from .. import schema, event from . import config __all__ = 'Table', 'Column', table_options = {} def Table(*args, **kw): """A schema.Table wrapper/hook for dialect-specific tweaks.""" test_opts = dict([(k, kw.pop(k)) for k in list(kw) if k.startswith('test_')]) kw.update(table_options) if exclusions.against(config._current, 'mysql'): if 'mysql_engine' not in kw and 'mysql_type' not in kw: if 'test_needs_fk' in test_opts or 'test_needs_acid' in test_opts: kw['mysql_engine'] = 'InnoDB' else: kw['mysql_engine'] = 'MyISAM' # Apply some default cascading rules for self-referential foreign keys. # MySQL InnoDB has some issues around seleting self-refs too. if exclusions.against(config._current, 'firebird'): table_name = args[0] unpack = (config.db.dialect. identifier_preparer.unformat_identifiers) # Only going after ForeignKeys in Columns. May need to # expand to ForeignKeyConstraint too. 
fks = [fk for col in args if isinstance(col, schema.Column) for fk in col.foreign_keys] for fk in fks: # root around in raw spec ref = fk._colspec if isinstance(ref, schema.Column): name = ref.table.name else: # take just the table name: on FB there cannot be # a schema, so the first element is always the # table name, possibly followed by the field name name = unpack(ref)[0] if name == table_name: if fk.ondelete is None: fk.ondelete = 'CASCADE' if fk.onupdate is None: fk.onupdate = 'CASCADE' return schema.Table(*args, **kw) def Column(*args, **kw): """A schema.Column wrapper/hook for dialect-specific tweaks.""" test_opts = dict([(k, kw.pop(k)) for k in list(kw) if k.startswith('test_')]) if not config.requirements.foreign_key_ddl.enabled_for_config(config): args = [arg for arg in args if not isinstance(arg, schema.ForeignKey)] col = schema.Column(*args, **kw) if 'test_needs_autoincrement' in test_opts and \ kw.get('primary_key', False): # allow any test suite to pick up on this col.info['test_needs_autoincrement'] = True # hardcoded rule for firebird, oracle; this should # be moved out if exclusions.against(config._current, 'firebird', 'oracle'): def add_seq(c, tbl): c._init_items( schema.Sequence(_truncate_name( config.db.dialect, tbl.name + '_' + c.name + '_seq'), optional=True) ) event.listen(col, 'after_parent_attach', add_seq, propagate=True) return col def _truncate_name(dialect, name): if len(name) > dialect.max_identifier_length: return name[0:max(dialect.max_identifier_length - 6, 0)] + \ "_" + hex(hash(name) % 64)[2:] else: return name SQLAlchemy-1.0.11/lib/sqlalchemy/testing/warnings.py0000664000175000017500000000173312636375552023433 0ustar classicclassic00000000000000# testing/warnings.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from __future__ import absolute_import import warnings from .. import exc as sa_exc from . import assertions def setup_filters(): """Set global warning behavior for the test suite.""" warnings.filterwarnings('ignore', category=sa_exc.SAPendingDeprecationWarning) warnings.filterwarnings('error', category=sa_exc.SADeprecationWarning) warnings.filterwarnings('error', category=sa_exc.SAWarning) def assert_warnings(fn, warning_msgs, regex=False): """Assert that each of the given warnings are emitted by fn. Deprecated. Please use assertions.expect_warnings(). """ with assertions._expect_warnings( sa_exc.SAWarning, warning_msgs, regex=regex): return fn() SQLAlchemy-1.0.11/lib/sqlalchemy/testing/assertions.py0000664000175000017500000003736012636375552024002 0ustar classicclassic00000000000000# testing/assertions.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from __future__ import absolute_import from . import util as testutil from sqlalchemy import pool, orm, util from sqlalchemy.engine import default, url from sqlalchemy.util import decorator from sqlalchemy import types as sqltypes, schema, exc as sa_exc import warnings import re from .exclusions import db_spec, _is_excluded from . import assertsql from . import config from .util import fail import contextlib from . import mock def expect_warnings(*messages, **kw): """Context manager which expects one or more warnings. 
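    For example, a minimal illustrative sketch (the warning pattern and the
    function under test here are hypothetical)::

        with expect_warnings("Some warning .* pattern"):
            something_that_warns()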
With no arguments, squelches all SAWarnings emitted via sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise pass string expressions that will match selected warnings via regex; all non-matching warnings are sent through. The expect version **asserts** that the warnings were in fact seen. Note that the test suite sets SAWarning warnings to raise exceptions. """ return _expect_warnings(sa_exc.SAWarning, messages, **kw) @contextlib.contextmanager def expect_warnings_on(db, *messages, **kw): """Context manager which expects one or more warnings on specific dialects. The expect version **asserts** that the warnings were in fact seen. """ spec = db_spec(db) if isinstance(db, util.string_types) and not spec(config._current): yield else: with expect_warnings(*messages, **kw): yield def emits_warning(*messages): """Decorator form of expect_warnings(). Note that emits_warning does **not** assert that the warnings were in fact seen. """ @decorator def decorate(fn, *args, **kw): with expect_warnings(assert_=False, *messages): return fn(*args, **kw) return decorate def expect_deprecated(*messages, **kw): return _expect_warnings(sa_exc.SADeprecationWarning, messages, **kw) def emits_warning_on(db, *messages): """Mark a test as emitting a warning on a specific dialect. With no arguments, squelches all SAWarning failures. Or pass one or more strings; these will be matched to the root of the warning description by warnings.filterwarnings(). Note that emits_warning_on does **not** assert that the warnings were in fact seen. """ @decorator def decorate(fn, *args, **kw): with expect_warnings_on(db, assert_=False, *messages): return fn(*args, **kw) return decorate def uses_deprecated(*messages): """Mark a test as immune from fatal deprecation warnings. With no arguments, squelches all SADeprecationWarning failures. Or pass one or more strings; these will be matched to the root of the warning description by warnings.filterwarnings(). As a special case, you may pass a function name prefixed with // and it will be re-written as needed to match the standard warning verbiage emitted by the sqlalchemy.util.deprecated decorator. Note that uses_deprecated does **not** assert that the warnings were in fact seen. """ @decorator def decorate(fn, *args, **kw): with expect_deprecated(*messages, assert_=False): return fn(*args, **kw) return decorate @contextlib.contextmanager def _expect_warnings(exc_cls, messages, regex=True, assert_=True): if regex: filters = [re.compile(msg, re.I | re.S) for msg in messages] else: filters = messages seen = set(filters) real_warn = warnings.warn def our_warn(msg, exception, *arg, **kw): if not issubclass(exception, exc_cls): return real_warn(msg, exception, *arg, **kw) if not filters: return for filter_ in filters: if (regex and filter_.match(msg)) or \ (not regex and filter_ == msg): seen.discard(filter_) break else: real_warn(msg, exception, *arg, **kw) with mock.patch("warnings.warn", our_warn): yield if assert_: assert not seen, "Warnings were not seen: %s" % \ ", ".join("%r" % (s.pattern if regex else s) for s in seen) def global_cleanup_assertions(): """Check things that have to be finalized at the end of a test suite. Hardcoded at the moment, a modular system can be built here to support things like PG prepared transactions, tables all dropped, etc. """ _assert_no_stray_pool_connections() _STRAY_CONNECTION_FAILURES = 0 def _assert_no_stray_pool_connections(): global _STRAY_CONNECTION_FAILURES # lazy gc on cPython means "do nothing." 
pool connections # shouldn't be in cycles, should go away. testutil.lazy_gc() # however, once in a while, on an EC2 machine usually, # there's a ref in there. usually just one. if pool._refs: # OK, let's be somewhat forgiving. _STRAY_CONNECTION_FAILURES += 1 print("Encountered a stray connection in test cleanup: %s" % str(pool._refs)) # then do a real GC sweep. We shouldn't even be here # so a single sweep should really be doing it, otherwise # there's probably a real unreachable cycle somewhere. testutil.gc_collect() # if we've already had two of these occurrences, or # after a hard gc sweep we still have pool._refs?! # now we have to raise. if pool._refs: err = str(pool._refs) # but clean out the pool refs collection directly, # reset the counter, # so the error at least doesn't keep happening. pool._refs.clear() _STRAY_CONNECTION_FAILURES = 0 assert False, "Stray connection refused to leave "\ "after gc.collect(): %s" % err elif _STRAY_CONNECTION_FAILURES > 10: assert False, "Encountered more than 10 stray connections" _STRAY_CONNECTION_FAILURES = 0 def eq_(a, b, msg=None): """Assert a == b, with repr messaging on failure.""" assert a == b, msg or "%r != %r" % (a, b) def ne_(a, b, msg=None): """Assert a != b, with repr messaging on failure.""" assert a != b, msg or "%r == %r" % (a, b) def le_(a, b, msg=None): """Assert a <= b, with repr messaging on failure.""" assert a <= b, msg or "%r > %r" % (a, b) def is_(a, b, msg=None): """Assert a is b, with repr messaging on failure.""" assert a is b, msg or "%r is not %r" % (a, b) def is_not_(a, b, msg=None): """Assert a is not b, with repr messaging on failure.""" assert a is not b, msg or "%r is %r" % (a, b) def in_(a, b, msg=None): """Assert a in b, with repr messaging on failure.""" assert a in b, msg or "%r not in %r" % (a, b) def not_in_(a, b, msg=None): """Assert a not in b, with repr messaging on failure.""" assert a not in b, msg or "%r is in %r" % (a, b) def startswith_(a, fragment, msg=None): """Assert a.startswith(fragment), with repr messaging on failure.""" assert a.startswith(fragment), msg or "%r does not start with %r" % ( a, fragment) def assert_raises(except_cls, callable_, *args, **kw): try: callable_(*args, **kw) success = False except except_cls: success = True # assert outside the block so it works for AssertionError too!
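    # (added note: placing the failure assert inside the "try" instead would
    # break when except_cls is AssertionError, as the except block would
    # swallow the failure itself)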
assert success, "Callable did not raise an exception" def assert_raises_message(except_cls, msg, callable_, *args, **kwargs): try: callable_(*args, **kwargs) assert False, "Callable did not raise an exception" except except_cls as e: assert re.search( msg, util.text_type(e), re.UNICODE), "%r !~ %s" % (msg, e) print(util.text_type(e).encode('utf-8')) class AssertsCompiledSQL(object): def assert_compile(self, clause, result, params=None, checkparams=None, dialect=None, checkpositional=None, check_prefetch=None, use_default_dialect=False, allow_dialect_select=False, literal_binds=False): if use_default_dialect: dialect = default.DefaultDialect() elif allow_dialect_select: dialect = None else: if dialect is None: dialect = getattr(self, '__dialect__', None) if dialect is None: dialect = config.db.dialect elif dialect == 'default': dialect = default.DefaultDialect() elif isinstance(dialect, util.string_types): dialect = url.URL(dialect).get_dialect()() kw = {} compile_kwargs = {} if params is not None: kw['column_keys'] = list(params) if literal_binds: compile_kwargs['literal_binds'] = True if isinstance(clause, orm.Query): context = clause._compile_context() context.statement.use_labels = True clause = context.statement if compile_kwargs: kw['compile_kwargs'] = compile_kwargs c = clause.compile(dialect=dialect, **kw) param_str = repr(getattr(c, 'params', {})) if util.py3k: param_str = param_str.encode('utf-8').decode('ascii', 'ignore') print( ("\nSQL String:\n" + util.text_type(c) + param_str).encode('utf-8')) else: print( "\nSQL String:\n" + util.text_type(c).encode('utf-8') + param_str) cc = re.sub(r'[\n\t]', '', util.text_type(c)) eq_(cc, result, "%r != %r on dialect %r" % (cc, result, dialect)) if checkparams is not None: eq_(c.construct_params(params), checkparams) if checkpositional is not None: p = c.construct_params(params) eq_(tuple([p[x] for x in c.positiontup]), checkpositional) if check_prefetch is not None: eq_(c.prefetch, check_prefetch) class ComparesTables(object): def assert_tables_equal(self, table, reflected_table, strict_types=False): assert len(table.c) == len(reflected_table.c) for c, reflected_c in zip(table.c, reflected_table.c): eq_(c.name, reflected_c.name) assert reflected_c is reflected_table.c[c.name] eq_(c.primary_key, reflected_c.primary_key) eq_(c.nullable, reflected_c.nullable) if strict_types: msg = "Type '%s' doesn't correspond to type '%s'" assert isinstance(reflected_c.type, type(c.type)), \ msg % (reflected_c.type, c.type) else: self.assert_types_base(reflected_c, c) if isinstance(c.type, sqltypes.String): eq_(c.type.length, reflected_c.type.length) eq_( set([f.column.name for f in c.foreign_keys]), set([f.column.name for f in reflected_c.foreign_keys]) ) if c.server_default: assert isinstance(reflected_c.server_default, schema.FetchedValue) assert len(table.primary_key) == len(reflected_table.primary_key) for c in table.primary_key: assert reflected_table.primary_key.columns[c.name] is not None def assert_types_base(self, c1, c2): assert c1.type._compare_type_affinity(c2.type),\ "On column %r, type '%s' doesn't correspond to type '%s'" % \ (c1.name, c1.type, c2.type) class AssertsExecutionResults(object): def assert_result(self, result, class_, *objects): result = list(result) print(repr(result)) self.assert_list(result, class_, objects) def assert_list(self, result, class_, list): self.assert_(len(result) == len(list), "result list is not the same size as test list, " + "for class " + class_.__name__) for i in range(0, len(list)): self.assert_row(class_, 
result[i], list[i]) def assert_row(self, class_, rowobj, desc): self.assert_(rowobj.__class__ is class_, "item class is not " + repr(class_)) for key, value in desc.items(): if isinstance(value, tuple): if isinstance(value[1], list): self.assert_list(getattr(rowobj, key), value[0], value[1]) else: self.assert_row(value[0], getattr(rowobj, key), value[1]) else: self.assert_(getattr(rowobj, key) == value, "attribute %s value %s does not match %s" % ( key, getattr(rowobj, key), value)) def assert_unordered_result(self, result, cls, *expected): """As assert_result, but the order of objects is not considered. The algorithm is very expensive but not a big deal for the small numbers of rows that the test suite manipulates. """ class immutabledict(dict): def __hash__(self): return id(self) found = util.IdentitySet(result) expected = set([immutabledict(e) for e in expected]) for wrong in util.itertools_filterfalse(lambda o: isinstance(o, cls), found): fail('Unexpected type "%s", expected "%s"' % ( type(wrong).__name__, cls.__name__)) if len(found) != len(expected): fail('Unexpected object count "%s", expected "%s"' % ( len(found), len(expected))) NOVALUE = object() def _compare_item(obj, spec): for key, value in spec.items(): if isinstance(value, tuple): try: self.assert_unordered_result( getattr(obj, key), value[0], *value[1]) except AssertionError: return False else: if getattr(obj, key, NOVALUE) != value: return False return True for expected_item in expected: for found_item in found: if _compare_item(found_item, expected_item): found.remove(found_item) break else: fail( "Expected %s instance with attributes %s not found." % ( cls.__name__, repr(expected_item))) return True def sql_execution_asserter(self, db=None): if db is None: from . import db as db return assertsql.assert_engine(db) def assert_sql_execution(self, db, callable_, *rules): with self.sql_execution_asserter(db) as asserter: callable_() asserter.assert_(*rules) def assert_sql(self, db, callable_, rules): newrules = [] for rule in rules: if isinstance(rule, dict): newrule = assertsql.AllOf(*[ assertsql.CompiledSQL(k, v) for k, v in rule.items() ]) else: newrule = assertsql.CompiledSQL(*rule) newrules.append(newrule) self.assert_sql_execution(db, callable_, *newrules) def assert_sql_count(self, db, callable_, count): self.assert_sql_execution( db, callable_, assertsql.CountStatements(count)) @contextlib.contextmanager def assert_execution(self, *rules): assertsql.asserter.add_rules(rules) try: yield assertsql.asserter.statement_complete() finally: assertsql.asserter.clear_rules() def assert_statement_count(self, count): return self.assert_execution(assertsql.CountStatements(count)) SQLAlchemy-1.0.11/lib/sqlalchemy/testing/util.py0000664000175000017500000001655112636375552022564 0ustar classicclassic00000000000000# testing/util.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from ..util import jython, pypy, defaultdict, decorator, py2k import decimal import gc import time import random import sys import types if jython: def jython_gc_collect(*args): """aggressive gc.collect for tests.""" gc.collect() time.sleep(0.1) gc.collect() gc.collect() return 0 # "lazy" gc, for VM's that don't GC on refcount == 0 gc_collect = lazy_gc = jython_gc_collect elif pypy: def pypy_gc_collect(*args): gc.collect() gc.collect() gc_collect = lazy_gc = pypy_gc_collect else: # assume CPython - straight 
gc.collect, lazy_gc() is a pass gc_collect = gc.collect def lazy_gc(): pass def picklers(): picklers = set() if py2k: try: import cPickle picklers.add(cPickle) except ImportError: pass import pickle picklers.add(pickle) # yes, this thing needs this much testing for pickle_ in picklers: for protocol in -1, 0, 1, 2: yield pickle_.loads, lambda d: pickle_.dumps(d, protocol) def round_decimal(value, prec): if isinstance(value, float): return round(value, prec) # can also use shift() here but that is 2.6 only return (value * decimal.Decimal("1" + "0" * prec) ).to_integral(decimal.ROUND_FLOOR) / \ pow(10, prec) class RandomSet(set): def __iter__(self): l = list(set.__iter__(self)) random.shuffle(l) return iter(l) def pop(self): index = random.randint(0, len(self) - 1) item = list(set.__iter__(self))[index] self.remove(item) return item def union(self, other): return RandomSet(set.union(self, other)) def difference(self, other): return RandomSet(set.difference(self, other)) def intersection(self, other): return RandomSet(set.intersection(self, other)) def copy(self): return RandomSet(self) def conforms_partial_ordering(tuples, sorted_elements): """True if the given sorting conforms to the given partial ordering.""" deps = defaultdict(set) for parent, child in tuples: deps[parent].add(child) for i, node in enumerate(sorted_elements): for n in sorted_elements[i:]: if node in deps[n]: return False else: return True def all_partial_orderings(tuples, elements): edges = defaultdict(set) for parent, child in tuples: edges[child].add(parent) def _all_orderings(elements): if len(elements) == 1: yield list(elements) else: for elem in elements: subset = set(elements).difference([elem]) if not subset.intersection(edges[elem]): for sub_ordering in _all_orderings(subset): yield [elem] + sub_ordering return iter(_all_orderings(elements)) def function_named(fn, name): """Return a function with a given __name__. Will assign to __name__ and return the original function if possible on the Python implementation, otherwise a new function will be constructed. This function should be phased out as much as possible in favor of @decorator. Tests that "generate" many named tests should be modernized. """ try: fn.__name__ = name except TypeError: fn = types.FunctionType(fn.__code__, fn.__globals__, name, fn.__defaults__, fn.__closure__) return fn def run_as_contextmanager(ctx, fn, *arg, **kw): """Run the given function under the given contextmanager, simulating the behavior of 'with' to support older Python versions. This is not necessary anymore as we have placed 2.6 as minimum Python version, however some tests are still using this structure. """ obj = ctx.__enter__() try: result = fn(obj, *arg, **kw) ctx.__exit__(None, None, None) return result except: exc_info = sys.exc_info() raise_ = ctx.__exit__(*exc_info) if raise_ is None: raise else: return raise_ def rowset(results): """Converts the results of sql execution into a plain set of column tuples. Useful for asserting the results of an unordered query. """ return set([tuple(row) for row in results]) def fail(msg): assert False, msg @decorator def provide_metadata(fn, *args, **kw): """Provide bound MetaData for a single test, dropping afterwards.""" from . import config from . 
import engines from sqlalchemy import schema metadata = schema.MetaData(config.db) self = args[0] prev_meta = getattr(self, 'metadata', None) self.metadata = metadata try: return fn(*args, **kw) finally: engines.drop_all_tables(metadata, config.db) self.metadata = prev_meta def force_drop_names(*names): """Force the given table names to be dropped after test complete, isolating for foreign key cycles """ from . import config from sqlalchemy import inspect @decorator def go(fn, *args, **kw): try: return fn(*args, **kw) finally: drop_all_tables( config.db, inspect(config.db), include_names=names) return go class adict(dict): """Dict keys available as attributes. Shadows.""" def __getattribute__(self, key): try: return self[key] except KeyError: return dict.__getattribute__(self, key) def __call__(self, *keys): return tuple([self[key] for key in keys]) get_all = __call__ def drop_all_tables(engine, inspector, schema=None, include_names=None): from sqlalchemy import Column, Table, Integer, MetaData, \ ForeignKeyConstraint from sqlalchemy.schema import DropTable, DropConstraint if include_names is not None: include_names = set(include_names) with engine.connect() as conn: for tname, fkcs in reversed( inspector.get_sorted_table_and_fkc_names(schema=schema)): if tname: if include_names is not None and tname not in include_names: continue conn.execute(DropTable( Table(tname, MetaData()) )) elif fkcs: if not engine.dialect.supports_alter: continue for tname, fkc in fkcs: if include_names is not None and \ tname not in include_names: continue tb = Table( tname, MetaData(), Column('x', Integer), Column('y', Integer), schema=schema ) conn.execute(DropConstraint( ForeignKeyConstraint( [tb.c.x], [tb.c.y], name=fkc) )) def teardown_events(event_cls): @decorator def decorate(fn, *arg, **kw): try: return fn(*arg, **kw) finally: event_cls._clear() return decorate SQLAlchemy-1.0.11/lib/sqlalchemy/testing/provision.py0000664000175000017500000001205212636375552023627 0ustar classicclassic00000000000000from sqlalchemy.engine import url as sa_url from sqlalchemy import text from sqlalchemy.util import compat from . 
import config, engines FOLLOWER_IDENT = None class register(object): def __init__(self): self.fns = {} @classmethod def init(cls, fn): return register().for_db("*")(fn) def for_db(self, dbname): def decorate(fn): self.fns[dbname] = fn return self return decorate def __call__(self, cfg, *arg): if isinstance(cfg, compat.string_types): url = sa_url.make_url(cfg) elif isinstance(cfg, sa_url.URL): url = cfg else: url = cfg.db.url backend = url.get_backend_name() if backend in self.fns: return self.fns[backend](cfg, *arg) else: return self.fns['*'](cfg, *arg) def create_follower_db(follower_ident): for cfg in _configs_for_db_operation(): _create_db(cfg, cfg.db, follower_ident) def configure_follower(follower_ident): for cfg in config.Config.all_configs(): _configure_follower(cfg, follower_ident) def setup_config(db_url, options, file_config, follower_ident): if follower_ident: db_url = _follower_url_from_main(db_url, follower_ident) db_opts = {} _update_db_opts(db_url, db_opts) eng = engines.testing_engine(db_url, db_opts) eng.connect().close() cfg = config.Config.register(eng, db_opts, options, file_config) if follower_ident: _configure_follower(cfg, follower_ident) return cfg def drop_follower_db(follower_ident): for cfg in _configs_for_db_operation(): _drop_db(cfg, cfg.db, follower_ident) def _configs_for_db_operation(): hosts = set() for cfg in config.Config.all_configs(): cfg.db.dispose() for cfg in config.Config.all_configs(): url = cfg.db.url backend = url.get_backend_name() host_conf = ( backend, url.username, url.host, url.database) if host_conf not in hosts: yield cfg hosts.add(host_conf) for cfg in config.Config.all_configs(): cfg.db.dispose() @register.init def _create_db(cfg, eng, ident): raise NotImplementedError("no DB creation routine for cfg: %s" % eng.url) @register.init def _drop_db(cfg, eng, ident): raise NotImplementedError("no DB drop routine for cfg: %s" % eng.url) @register.init def _update_db_opts(db_url, db_opts): pass @register.init def _configure_follower(cfg, ident): pass @register.init def _follower_url_from_main(url, ident): url = sa_url.make_url(url) url.database = ident return url @_update_db_opts.for_db("mssql") def _mssql_update_db_opts(db_url, db_opts): db_opts['legacy_schema_aliasing'] = False @_follower_url_from_main.for_db("sqlite") def _sqlite_follower_url_from_main(url, ident): url = sa_url.make_url(url) if not url.database or url.database == ':memory:': return url else: return sa_url.make_url("sqlite:///%s.db" % ident) @_create_db.for_db("postgresql") def _pg_create_db(cfg, eng, ident): with eng.connect().execution_options( isolation_level="AUTOCOMMIT") as conn: try: _pg_drop_db(cfg, conn, ident) except Exception: pass currentdb = conn.scalar("select current_database()") conn.execute("CREATE DATABASE %s TEMPLATE %s" % (ident, currentdb)) @_create_db.for_db("mysql") def _mysql_create_db(cfg, eng, ident): with eng.connect() as conn: try: _mysql_drop_db(cfg, conn, ident) except Exception: pass conn.execute("CREATE DATABASE %s" % ident) conn.execute("CREATE DATABASE %s_test_schema" % ident) conn.execute("CREATE DATABASE %s_test_schema_2" % ident) @_configure_follower.for_db("mysql") def _mysql_configure_follower(config, ident): config.test_schema = "%s_test_schema" % ident config.test_schema_2 = "%s_test_schema_2" % ident @_create_db.for_db("sqlite") def _sqlite_create_db(cfg, eng, ident): pass @_drop_db.for_db("postgresql") def _pg_drop_db(cfg, eng, ident): with eng.connect().execution_options( isolation_level="AUTOCOMMIT") as conn: conn.execute( text( 
"select pg_terminate_backend(pid) from pg_stat_activity " "where usename=current_user and pid != pg_backend_pid() " "and datname=:dname" ), dname=ident) conn.execute("DROP DATABASE %s" % ident) @_drop_db.for_db("sqlite") def _sqlite_drop_db(cfg, eng, ident): pass #os.remove("%s.db" % ident) @_drop_db.for_db("mysql") def _mysql_drop_db(cfg, eng, ident): with eng.connect() as conn: try: conn.execute("DROP DATABASE %s_test_schema" % ident) except Exception: pass try: conn.execute("DROP DATABASE %s_test_schema_2" % ident) except Exception: pass try: conn.execute("DROP DATABASE %s" % ident) except Exception: pass SQLAlchemy-1.0.11/lib/sqlalchemy/testing/__init__.py0000664000175000017500000000210712636375552023336 0ustar classicclassic00000000000000# testing/__init__.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from .warnings import assert_warnings from . import config from .exclusions import db_spec, _is_excluded, fails_if, skip_if, future,\ fails_on, fails_on_everything_except, skip, only_on, exclude, \ against as _against, _server_version, only_if, fails def against(*queries): return _against(config._current, *queries) from .assertions import emits_warning, emits_warning_on, uses_deprecated, \ eq_, ne_, le_, is_, is_not_, startswith_, assert_raises, \ assert_raises_message, AssertsCompiledSQL, ComparesTables, \ AssertsExecutionResults, expect_deprecated, expect_warnings, \ in_, not_in_ from .util import run_as_contextmanager, rowset, fail, \ provide_metadata, adict, force_drop_names, \ teardown_events crashes = skip from .config import db from .config import requirements as requires from . import mock SQLAlchemy-1.0.11/lib/sqlalchemy/testing/plugin/0000775000175000017500000000000012636376632022523 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/lib/sqlalchemy/testing/plugin/plugin_base.py0000664000175000017500000004134012636375552025367 0ustar classicclassic00000000000000# plugin/plugin_base.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Testing extensions. this module is designed to work as a testing-framework-agnostic library, so that we can continue to support nose and also begin adding new functionality via py.test. """ from __future__ import absolute_import import sys import re py3k = sys.version_info >= (3, 0) if py3k: import configparser else: import ConfigParser as configparser # late imports fixtures = None engines = None exclusions = None warnings = None profiling = None assertions = None requirements = None config = None testing = None util = None file_config = None logging = None include_tags = set() exclude_tags = set() options = None def setup_options(make_option): make_option("--log-info", action="callback", type="string", callback=_log, help="turn on info logging for (multiple OK)") make_option("--log-debug", action="callback", type="string", callback=_log, help="turn on debug logging for (multiple OK)") make_option("--db", action="append", type="string", dest="db", help="Use prefab database uri. Multiple OK, " "first one is run by default.") make_option('--dbs', action='callback', callback=_list_dbs, help="List available prefab dbs") make_option("--dburi", action="append", type="string", dest="dburi", help="Database uri. 
Multiple OK, " "first one is run by default.") make_option("--dropfirst", action="store_true", dest="dropfirst", help="Drop all tables in the target database first") make_option("--backend-only", action="store_true", dest="backend_only", help="Run only tests marked with __backend__") make_option("--low-connections", action="store_true", dest="low_connections", help="Use a low number of distinct connections - " "i.e. for Oracle TNS") make_option("--reversetop", action="store_true", dest="reversetop", default=False, help="Use a random-ordering set implementation in the ORM " "(helps reveal dependency issues)") make_option("--requirements", action="callback", type="string", callback=_requirements_opt, help="requirements class for testing, overrides setup.cfg") make_option("--with-cdecimal", action="store_true", dest="cdecimal", default=False, help="Monkeypatch the cdecimal library into Python 'decimal' " "for all tests") make_option("--include-tag", action="callback", callback=_include_tag, type="string", help="Include tests with tag ") make_option("--exclude-tag", action="callback", callback=_exclude_tag, type="string", help="Exclude tests with tag ") make_option("--write-profiles", action="store_true", dest="write_profiles", default=False, help="Write/update failing profiling data.") make_option("--force-write-profiles", action="store_true", dest="force_write_profiles", default=False, help="Unconditionally write/update profiling data.") def configure_follower(follower_ident): """Configure required state for a follower. This invokes in the parent process and typically includes database creation. """ from sqlalchemy.testing import provision provision.FOLLOWER_IDENT = follower_ident def memoize_important_follower_config(dict_): """Store important configuration we will need to send to a follower. This invokes in the parent process after normal config is set up. This is necessary as py.test seems to not be using forking, so we start with nothing in memory, *but* it isn't running our argparse callables, so we have to just copy all of that over. """ dict_['memoized_config'] = { 'include_tags': include_tags, 'exclude_tags': exclude_tags } def restore_important_follower_config(dict_): """Restore important configuration needed by a follower. This invokes in the follower process. 
""" global include_tags, exclude_tags include_tags.update(dict_['memoized_config']['include_tags']) exclude_tags.update(dict_['memoized_config']['exclude_tags']) def read_config(): global file_config file_config = configparser.ConfigParser() file_config.read(['setup.cfg', 'test.cfg']) def pre_begin(opt): """things to set up early, before coverage might be setup.""" global options options = opt for fn in pre_configure: fn(options, file_config) def set_coverage_flag(value): options.has_coverage = value _skip_test_exception = None def set_skip_test(exc): global _skip_test_exception _skip_test_exception = exc def post_begin(): """things to set up later, once we know coverage is running.""" # Lazy setup of other options (post coverage) for fn in post_configure: fn(options, file_config) # late imports, has to happen after config as well # as nose plugins like coverage global util, fixtures, engines, exclusions, \ assertions, warnings, profiling,\ config, testing from sqlalchemy import testing # noqa from sqlalchemy.testing import fixtures, engines, exclusions # noqa from sqlalchemy.testing import assertions, warnings, profiling # noqa from sqlalchemy.testing import config # noqa from sqlalchemy import util # noqa warnings.setup_filters() def _log(opt_str, value, parser): global logging if not logging: import logging logging.basicConfig() if opt_str.endswith('-info'): logging.getLogger(value).setLevel(logging.INFO) elif opt_str.endswith('-debug'): logging.getLogger(value).setLevel(logging.DEBUG) def _list_dbs(*args): print("Available --db options (use --dburi to override)") for macro in sorted(file_config.options('db')): print("%20s\t%s" % (macro, file_config.get('db', macro))) sys.exit(0) def _requirements_opt(opt_str, value, parser): _setup_requirements(value) def _exclude_tag(opt_str, value, parser): exclude_tags.add(value.replace('-', '_')) def _include_tag(opt_str, value, parser): include_tags.add(value.replace('-', '_')) pre_configure = [] post_configure = [] def pre(fn): pre_configure.append(fn) return fn def post(fn): post_configure.append(fn) return fn @pre def _setup_options(opt, file_config): global options options = opt @pre def _monkeypatch_cdecimal(options, file_config): if options.cdecimal: import cdecimal sys.modules['decimal'] = cdecimal @post def _init_skiptest(options, file_config): from sqlalchemy.testing import config config._skip_test_exception = _skip_test_exception @post def _engine_uri(options, file_config): from sqlalchemy.testing import config from sqlalchemy import testing from sqlalchemy.testing import provision if options.dburi: db_urls = list(options.dburi) else: db_urls = [] if options.db: for db_token in options.db: for db in re.split(r'[,\s]+', db_token): if db not in file_config.options('db'): raise RuntimeError( "Unknown URI specifier '%s'. " "Specify --dbs for known uris." 
% db) else: db_urls.append(file_config.get('db', db)) if not db_urls: db_urls.append(file_config.get('db', 'default')) for db_url in db_urls: cfg = provision.setup_config( db_url, options, file_config, provision.FOLLOWER_IDENT) if not config._current: cfg.set_as_current(cfg, testing) @post def _requirements(options, file_config): requirement_cls = file_config.get('sqla_testing', "requirement_cls") _setup_requirements(requirement_cls) def _setup_requirements(argument): from sqlalchemy.testing import config from sqlalchemy import testing if config.requirements is not None: return modname, clsname = argument.split(":") # importlib.import_module() only introduced in 2.7, a little # late mod = __import__(modname) for component in modname.split(".")[1:]: mod = getattr(mod, component) req_cls = getattr(mod, clsname) config.requirements = testing.requires = req_cls() @post def _prep_testing_database(options, file_config): from sqlalchemy.testing import config, util from sqlalchemy.testing.exclusions import against from sqlalchemy import schema, inspect if options.dropfirst: for cfg in config.Config.all_configs(): e = cfg.db inspector = inspect(e) try: view_names = inspector.get_view_names() except NotImplementedError: pass else: for vname in view_names: e.execute(schema._DropView( schema.Table(vname, schema.MetaData()) )) if config.requirements.schemas.enabled_for_config(cfg): try: view_names = inspector.get_view_names( schema="test_schema") except NotImplementedError: pass else: for vname in view_names: e.execute(schema._DropView( schema.Table(vname, schema.MetaData(), schema="test_schema") )) util.drop_all_tables(e, inspector) if config.requirements.schemas.enabled_for_config(cfg): util.drop_all_tables(e, inspector, schema=cfg.test_schema) if against(cfg, "postgresql"): from sqlalchemy.dialects import postgresql for enum in inspector.get_enums("*"): e.execute(postgresql.DropEnumType( postgresql.ENUM( name=enum['name'], schema=enum['schema']))) @post def _reverse_topological(options, file_config): if options.reversetop: from sqlalchemy.orm.util import randomize_unitofwork randomize_unitofwork() @post def _post_setup_options(opt, file_config): from sqlalchemy.testing import config config.options = options config.file_config = file_config @post def _setup_profiling(options, file_config): from sqlalchemy.testing import profiling profiling._profile_stats = profiling.ProfileStatsFile( file_config.get('sqla_testing', 'profile_file')) def want_class(cls): if not issubclass(cls, fixtures.TestBase): return False elif cls.__name__.startswith('_'): return False elif config.options.backend_only and not getattr(cls, '__backend__', False): return False else: return True def want_method(cls, fn): if not fn.__name__.startswith("test_"): return False elif fn.__module__ is None: return False elif include_tags: return ( hasattr(cls, '__tags__') and exclusions.tags(cls.__tags__).include_test( include_tags, exclude_tags) ) or ( hasattr(fn, '_sa_exclusion_extend') and fn._sa_exclusion_extend.include_test( include_tags, exclude_tags) ) elif exclude_tags and hasattr(cls, '__tags__'): return exclusions.tags(cls.__tags__).include_test( include_tags, exclude_tags) elif exclude_tags and hasattr(fn, '_sa_exclusion_extend'): return fn._sa_exclusion_extend.include_test(include_tags, exclude_tags) else: return True def generate_sub_tests(cls, module): if getattr(cls, '__backend__', False): for cfg in _possible_configs_for_cls(cls): name = "%s_%s_%s" % (cls.__name__, cfg.db.name, cfg.db.driver) subcls = type( name, (cls, ), { 
"__only_on__": ("%s+%s" % (cfg.db.name, cfg.db.driver)), } ) setattr(module, name, subcls) yield subcls else: yield cls def start_test_class(cls): _do_skips(cls) _setup_engine(cls) def stop_test_class(cls): #from sqlalchemy import inspect #assert not inspect(testing.db).get_table_names() engines.testing_reaper._stop_test_ctx() if not options.low_connections: assertions.global_cleanup_assertions() _restore_engine() def _restore_engine(): config._current.reset(testing) def _setup_engine(cls): if getattr(cls, '__engine_options__', None): eng = engines.testing_engine(options=cls.__engine_options__) config._current.push_engine(eng, testing) def before_test(test, test_module_name, test_class, test_name): # like a nose id, e.g.: # "test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause" name = test_class.__name__ suffix = "_%s_%s" % (config.db.name, config.db.driver) if name.endswith(suffix): name = name[0:-(len(suffix))] id_ = "%s.%s.%s" % (test_module_name, name, test_name) profiling._current_test = id_ def after_test(test): engines.testing_reaper._after_test_ctx() def _possible_configs_for_cls(cls, reasons=None): all_configs = set(config.Config.all_configs()) if cls.__unsupported_on__: spec = exclusions.db_spec(*cls.__unsupported_on__) for config_obj in list(all_configs): if spec(config_obj): all_configs.remove(config_obj) if getattr(cls, '__only_on__', None): spec = exclusions.db_spec(*util.to_list(cls.__only_on__)) for config_obj in list(all_configs): if not spec(config_obj): all_configs.remove(config_obj) if hasattr(cls, '__requires__'): requirements = config.requirements for config_obj in list(all_configs): for requirement in cls.__requires__: check = getattr(requirements, requirement) skip_reasons = check.matching_config_reasons(config_obj) if skip_reasons: all_configs.remove(config_obj) if reasons is not None: reasons.extend(skip_reasons) break if hasattr(cls, '__prefer_requires__'): non_preferred = set() requirements = config.requirements for config_obj in list(all_configs): for requirement in cls.__prefer_requires__: check = getattr(requirements, requirement) if not check.enabled_for_config(config_obj): non_preferred.add(config_obj) if all_configs.difference(non_preferred): all_configs.difference_update(non_preferred) return all_configs def _do_skips(cls): reasons = [] all_configs = _possible_configs_for_cls(cls, reasons) if getattr(cls, '__skip_if__', False): for c in getattr(cls, '__skip_if__'): if c(): config.skip_test("'%s' skipped by %s" % ( cls.__name__, c.__name__) ) if not all_configs: if getattr(cls, '__backend__', False): msg = "'%s' unsupported for implementation '%s'" % ( cls.__name__, cls.__only_on__) else: msg = "'%s' unsupported on any DB implementation %s%s" % ( cls.__name__, ", ".join( "'%s(%s)+%s'" % ( config_obj.db.name, ".".join( str(dig) for dig in config_obj.db.dialect.server_version_info), config_obj.db.driver ) for config_obj in config.Config.all_configs() ), ", ".join(reasons) ) config.skip_test(msg) elif hasattr(cls, '__prefer_backends__'): non_preferred = set() spec = exclusions.db_spec(*util.to_list(cls.__prefer_backends__)) for config_obj in all_configs: if not spec(config_obj): non_preferred.add(config_obj) if all_configs.difference(non_preferred): all_configs.difference_update(non_preferred) if config._current not in all_configs: _setup_config(all_configs.pop(), cls) def _setup_config(config_obj, ctx): config._current.push(config_obj, testing) 
SQLAlchemy-1.0.11/lib/sqlalchemy/testing/plugin/bootstrap.py0000664000175000017500000000314212636375552025112 0ustar classicclassic00000000000000""" Bootstrapper for nose/pytest plugins. The entire rationale for this system is to get the modules in plugin/ imported without importing all of the supporting library, so that we can set up things for testing before coverage starts. The rationale for all of plugin/ being *in* the supporting library in the first place is so that the testing and plugin suite is available to other libraries, mainly external SQLAlchemy and Alembic dialects, to make use of the same test environment and standard suites available to SQLAlchemy/Alembic themselves without the need to ship/install a separate package outside of SQLAlchemy. NOTE: copied/adapted from SQLAlchemy master for backwards compatibility; this should be removable when Alembic targets SQLAlchemy 1.0.0. """ import os import sys bootstrap_file = locals()['bootstrap_file'] to_bootstrap = locals()['to_bootstrap'] def load_file_as_module(name): path = os.path.join(os.path.dirname(bootstrap_file), "%s.py" % name) if sys.version_info >= (3, 3): from importlib import machinery mod = machinery.SourceFileLoader(name, path).load_module() else: import imp mod = imp.load_source(name, path) return mod if to_bootstrap == "pytest": sys.modules["sqla_plugin_base"] = load_file_as_module("plugin_base") sys.modules["sqla_pytestplugin"] = load_file_as_module("pytestplugin") elif to_bootstrap == "nose": sys.modules["sqla_plugin_base"] = load_file_as_module("plugin_base") sys.modules["sqla_noseplugin"] = load_file_as_module("noseplugin") else: raise Exception("unknown bootstrap: %s" % to_bootstrap) # noqa SQLAlchemy-1.0.11/lib/sqlalchemy/testing/plugin/__init__.py0000664000175000017500000000000012636375552024622 0ustar classicclassic00000000000000SQLAlchemy-1.0.11/lib/sqlalchemy/testing/plugin/noseplugin.py0000664000175000017500000000543712636375552025271 0ustar classicclassic00000000000000# plugin/noseplugin.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Enhance nose with extra options and behaviors for running SQLAlchemy tests. Must be run via ./sqla_nose.py so that it is imported in the expected way (e.g. as a package-less import). """ try: # installed by bootstrap.py import sqla_plugin_base as plugin_base except ImportError: # assume we're a package, use traditional import from . 
import plugin_base import os import sys from nose.plugins import Plugin import nose fixtures = None py3k = sys.version_info >= (3, 0) class NoseSQLAlchemy(Plugin): enabled = True name = 'sqla_testing' score = 100 def options(self, parser, env=os.environ): Plugin.options(self, parser, env) opt = parser.add_option def make_option(name, **kw): callback_ = kw.pop("callback", None) if callback_: def wrap_(option, opt_str, value, parser): callback_(opt_str, value, parser) kw["callback"] = wrap_ opt(name, **kw) plugin_base.setup_options(make_option) plugin_base.read_config() def configure(self, options, conf): super(NoseSQLAlchemy, self).configure(options, conf) plugin_base.pre_begin(options) plugin_base.set_coverage_flag(options.enable_plugin_coverage) plugin_base.set_skip_test(nose.SkipTest) def begin(self): global fixtures from sqlalchemy.testing import fixtures # noqa plugin_base.post_begin() def describeTest(self, test): return "" def wantFunction(self, fn): return False def wantMethod(self, fn): if py3k: if not hasattr(fn.__self__, 'cls'): return False cls = fn.__self__.cls else: cls = fn.im_class return plugin_base.want_method(cls, fn) def wantClass(self, cls): return plugin_base.want_class(cls) def beforeTest(self, test): if not hasattr(test.test, 'cls'): return plugin_base.before_test( test, test.test.cls.__module__, test.test.cls, test.test.method.__name__) def afterTest(self, test): plugin_base.after_test(test) def startContext(self, ctx): if not isinstance(ctx, type) \ or not issubclass(ctx, fixtures.TestBase): return plugin_base.start_test_class(ctx) def stopContext(self, ctx): if not isinstance(ctx, type) \ or not issubclass(ctx, fixtures.TestBase): return plugin_base.stop_test_class(ctx) SQLAlchemy-1.0.11/lib/sqlalchemy/testing/plugin/pytestplugin.py0000664000175000017500000001331412636375552025646 0ustar classicclassic00000000000000try: # installed by bootstrap.py import sqla_plugin_base as plugin_base except ImportError: # assume we're a package, use traditional import from . 
import plugin_base import pytest import argparse import inspect import collections import itertools try: import xdist # noqa has_xdist = True except ImportError: has_xdist = False def pytest_addoption(parser): group = parser.getgroup("sqlalchemy") def make_option(name, **kw): callback_ = kw.pop("callback", None) if callback_: class CallableAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): callback_(option_string, values, parser) kw["action"] = CallableAction group.addoption(name, **kw) plugin_base.setup_options(make_option) plugin_base.read_config() def pytest_configure(config): if hasattr(config, "slaveinput"): plugin_base.restore_important_follower_config(config.slaveinput) plugin_base.configure_follower( config.slaveinput["follower_ident"] ) plugin_base.pre_begin(config.option) plugin_base.set_coverage_flag(bool(getattr(config.option, "cov_source", False))) plugin_base.set_skip_test(pytest.skip.Exception) def pytest_sessionstart(session): plugin_base.post_begin() if has_xdist: _follower_count = itertools.count(1) def pytest_configure_node(node): # the master for each node fills slaveinput dictionary # which pytest-xdist will transfer to the subprocess plugin_base.memoize_important_follower_config(node.slaveinput) node.slaveinput["follower_ident"] = "test_%s" % next(_follower_count) from sqlalchemy.testing import provision provision.create_follower_db(node.slaveinput["follower_ident"]) def pytest_testnodedown(node, error): from sqlalchemy.testing import provision provision.drop_follower_db(node.slaveinput["follower_ident"]) def pytest_collection_modifyitems(session, config, items): # look for all those classes that specify __backend__ and # expand them out into per-database test cases. # this is much easier to do within pytest_pycollect_makeitem, however # pytest is iterating through cls.__dict__ as makeitem is # called which causes a "dictionary changed size" error on py3k. # I'd submit a pullreq for them to turn it into a list first, but # it's to suit the rather odd use case here which is that we are adding # new classes to a module on the fly. rebuilt_items = collections.defaultdict(list) items[:] = [ item for item in items if isinstance(item.parent, pytest.Instance) and not item.parent.parent.name.startswith("_")] test_classes = set(item.parent for item in items) for test_class in test_classes: for sub_cls in plugin_base.generate_sub_tests( test_class.cls, test_class.parent.module): if sub_cls is not test_class.cls: list_ = rebuilt_items[test_class.cls] for inst in pytest.Class( sub_cls.__name__, parent=test_class.parent.parent).collect(): list_.extend(inst.collect()) newitems = [] for item in items: if item.parent.cls in rebuilt_items: newitems.extend(rebuilt_items[item.parent.cls]) rebuilt_items[item.parent.cls][:] = [] else: newitems.append(item) # seems like the functions attached to a test class aren't sorted already? # is that true and why's that? 
(when using unittest, they're sorted) items[:] = sorted(newitems, key=lambda item: ( item.parent.parent.parent.name, item.parent.parent.name, item.name )) def pytest_pycollect_makeitem(collector, name, obj): if inspect.isclass(obj) and plugin_base.want_class(obj): return pytest.Class(name, parent=collector) elif inspect.isfunction(obj) and \ isinstance(collector, pytest.Instance) and \ plugin_base.want_method(collector.cls, obj): return pytest.Function(name, parent=collector) else: return [] _current_class = None def pytest_runtest_setup(item): # here we seem to get called only based on what we collected # in pytest_collection_modifyitems. So to do class-based stuff # we have to tear that out. global _current_class if not isinstance(item, pytest.Function): return # ... so we're doing a little dance here to figure it out... if _current_class is None: class_setup(item.parent.parent) _current_class = item.parent.parent # this is needed for the class-level, to ensure that the # teardown runs after the class is completed with its own # class-level teardown... def finalize(): global _current_class class_teardown(item.parent.parent) _current_class = None item.parent.parent.addfinalizer(finalize) test_setup(item) def pytest_runtest_teardown(item): # ...but this works better as the hook here rather than # using a finalizer, as the finalizer seems to get in the way # of the test reporting failures correctly (you get a bunch of # py.test assertion stuff instead) test_teardown(item) def test_setup(item): plugin_base.before_test(item, item.parent.module.__name__, item.parent.cls, item.name) def test_teardown(item): plugin_base.after_test(item) def class_setup(item): plugin_base.start_test_class(item.cls) def class_teardown(item): plugin_base.stop_test_class(item.cls) SQLAlchemy-1.0.11/lib/sqlalchemy/testing/assertsql.py0000664000175000017500000003005712636375552023625 0ustar classicclassic00000000000000# testing/assertsql.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from ..engine.default import DefaultDialect from .. import util import re import collections import contextlib from .. 
import event from sqlalchemy.schema import _DDLCompiles from sqlalchemy.engine.util import _distill_params from sqlalchemy.engine import url class AssertRule(object): is_consumed = False errormessage = None consume_statement = True def process_statement(self, execute_observed): pass def no_more_statements(self): assert False, 'All statements are complete, but pending '\ 'assertion rules remain' class SQLMatchRule(AssertRule): pass class CursorSQL(SQLMatchRule): consume_statement = False def __init__(self, statement, params=None): self.statement = statement self.params = params def process_statement(self, execute_observed): stmt = execute_observed.statements[0] if self.statement != stmt.statement or ( self.params is not None and self.params != stmt.parameters): self.errormessage = \ "Testing for exact SQL %s parameters %s received %s %s" % ( self.statement, self.params, stmt.statement, stmt.parameters ) else: execute_observed.statements.pop(0) self.is_consumed = True if not execute_observed.statements: self.consume_statement = True class CompiledSQL(SQLMatchRule): def __init__(self, statement, params=None, dialect='default'): self.statement = statement self.params = params self.dialect = dialect def _compare_sql(self, execute_observed, received_statement): stmt = re.sub(r'[\n\t]', '', self.statement) return received_statement == stmt def _compile_dialect(self, execute_observed): if self.dialect == 'default': return DefaultDialect() else: # ugh if self.dialect == 'postgresql': params = {'implicit_returning': True} else: params = {} return url.URL(self.dialect).get_dialect()(**params) def _received_statement(self, execute_observed): """reconstruct the statement and params in terms of a target dialect, which for CompiledSQL is just DefaultDialect.""" context = execute_observed.context compare_dialect = self._compile_dialect(execute_observed) if isinstance(context.compiled.statement, _DDLCompiles): compiled = \ context.compiled.statement.compile(dialect=compare_dialect) else: compiled = ( context.compiled.statement.compile( dialect=compare_dialect, column_keys=context.compiled.column_keys, inline=context.compiled.inline) ) _received_statement = re.sub(r'[\n\t]', '', util.text_type(compiled)) parameters = execute_observed.parameters if not parameters: _received_parameters = [compiled.construct_params()] else: _received_parameters = [ compiled.construct_params(m) for m in parameters] return _received_statement, _received_parameters def process_statement(self, execute_observed): context = execute_observed.context _received_statement, _received_parameters = \ self._received_statement(execute_observed) params = self._all_params(context) equivalent = self._compare_sql(execute_observed, _received_statement) if equivalent: if params is not None: all_params = list(params) all_received = list(_received_parameters) while all_params and all_received: param = dict(all_params.pop(0)) for idx, received in enumerate(list(all_received)): # do a positive compare only for param_key in param: # a key in param did not match current # 'received' if param_key not in received or \ received[param_key] != param[param_key]: break else: # all keys in param matched 'received'; # onto next param del all_received[idx] break else: # param did not match any entry # in all_received equivalent = False break if all_params or all_received: equivalent = False if equivalent: self.is_consumed = True self.errormessage = None else: self.errormessage = self._failure_message(params) % { 'received_statement': _received_statement, 
'received_parameters': _received_parameters } def _all_params(self, context): if self.params: if util.callable(self.params): params = self.params(context) else: params = self.params if not isinstance(params, list): params = [params] return params else: return None def _failure_message(self, expected_params): return ( 'Testing for compiled statement %r partial params %r, ' 'received %%(received_statement)r with params ' '%%(received_parameters)r' % ( self.statement.replace('%', '%%'), expected_params ) ) class RegexSQL(CompiledSQL): def __init__(self, regex, params=None): SQLMatchRule.__init__(self) self.regex = re.compile(regex) self.orig_regex = regex self.params = params self.dialect = 'default' def _failure_message(self, expected_params): return ( 'Testing for compiled statement ~%r partial params %r, ' 'received %%(received_statement)r with params ' '%%(received_parameters)r' % ( self.orig_regex, expected_params ) ) def _compare_sql(self, execute_observed, received_statement): return bool(self.regex.match(received_statement)) class DialectSQL(CompiledSQL): def _compile_dialect(self, execute_observed): return execute_observed.context.dialect def _compare_no_space(self, real_stmt, received_stmt): stmt = re.sub(r'[\n\t]', '', real_stmt) return received_stmt == stmt def _received_statement(self, execute_observed): received_stmt, received_params = super(DialectSQL, self).\ _received_statement(execute_observed) # TODO: why do we need this part? for real_stmt in execute_observed.statements: if self._compare_no_space(real_stmt.statement, received_stmt): break else: raise AssertionError( "Can't locate compiled statement %r in list of " "statements actually invoked" % received_stmt) return received_stmt, execute_observed.context.compiled_parameters def _compare_sql(self, execute_observed, received_statement): stmt = re.sub(r'[\n\t]', '', self.statement) # convert our comparison statement to have the # paramstyle of the received paramstyle = execute_observed.context.dialect.paramstyle if paramstyle == 'pyformat': stmt = re.sub( r':([\w_]+)', r"%(\1)s", stmt) else: # positional params repl = None if paramstyle == 'qmark': repl = "?" 
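            # descriptive note: the comparison SQL in this suite is
            # written in the 'named' style (":param"); per PEP 249 the
            # target DBAPI may instead declare the positional styles
            # 'qmark' ("?"), 'format' ("%s") or 'numeric' (":1"), so the
            # named placeholders are rewritten to match in the branches
            # here ('numeric' has no simple regex replacement, hence
            # ``repl`` remains None for that branch).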
elif paramstyle == 'format': repl = r"%s" elif paramstyle == 'numeric': repl = None stmt = re.sub(r':([\w_]+)', repl, stmt) return received_statement == stmt class CountStatements(AssertRule): def __init__(self, count): self.count = count self._statement_count = 0 def process_statement(self, execute_observed): self._statement_count += 1 def no_more_statements(self): if self.count != self._statement_count: assert False, 'desired statement count %d does not match %d' \ % (self.count, self._statement_count) class AllOf(AssertRule): def __init__(self, *rules): self.rules = set(rules) def process_statement(self, execute_observed): for rule in list(self.rules): rule.errormessage = None rule.process_statement(execute_observed) if rule.is_consumed: self.rules.discard(rule) if not self.rules: self.is_consumed = True break elif not rule.errormessage: # rule is not done yet self.errormessage = None break else: self.errormessage = list(self.rules)[0].errormessage class Or(AllOf): def process_statement(self, execute_observed): for rule in self.rules: rule.process_statement(execute_observed) if rule.is_consumed: self.is_consumed = True break else: self.errormessage = list(self.rules)[0].errormessage class SQLExecuteObserved(object): def __init__(self, context, clauseelement, multiparams, params): self.context = context self.clauseelement = clauseelement self.parameters = _distill_params(multiparams, params) self.statements = [] class SQLCursorExecuteObserved( collections.namedtuple( "SQLCursorExecuteObserved", ["statement", "parameters", "context", "executemany"]) ): pass class SQLAsserter(object): def __init__(self): self.accumulated = [] def _close(self): self._final = self.accumulated del self.accumulated def assert_(self, *rules): rules = list(rules) observed = list(self._final) while observed and rules: rule = rules[0] rule.process_statement(observed[0]) if rule.is_consumed: rules.pop(0) elif rule.errormessage: assert False, rule.errormessage if rule.consume_statement: observed.pop(0) if not observed and rules: rules[0].no_more_statements() elif not rules and observed: assert False, "Additional SQL statements remain" @contextlib.contextmanager def assert_engine(engine): asserter = SQLAsserter() orig = [] @event.listens_for(engine, "before_execute") def connection_execute(conn, clauseelement, multiparams, params): # grab the original statement + params before any cursor # execution orig[:] = clauseelement, multiparams, params @event.listens_for(engine, "after_cursor_execute") def cursor_execute(conn, cursor, statement, parameters, context, executemany): if not context: return # then grab real cursor statements and associate them all # around a single context if asserter.accumulated and \ asserter.accumulated[-1].context is context: obs = asserter.accumulated[-1] else: obs = SQLExecuteObserved(context, orig[0], orig[1], orig[2]) asserter.accumulated.append(obs) obs.statements.append( SQLCursorExecuteObserved( statement, parameters, context, executemany) ) try: yield asserter finally: event.remove(engine, "after_cursor_execute", cursor_execute) event.remove(engine, "before_execute", connection_execute) asserter._close() SQLAlchemy-1.0.11/lib/sqlalchemy/testing/pickleable.py0000664000175000017500000000512112636375552023671 0ustar classicclassic00000000000000# testing/pickleable.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Classes used in pickling 
tests, need to be at the module level for unpickling. """ from . import fixtures class User(fixtures.ComparableEntity): pass class Order(fixtures.ComparableEntity): pass class Dingaling(fixtures.ComparableEntity): pass class EmailUser(User): pass class Address(fixtures.ComparableEntity): pass # TODO: these are kind of arbitrary.... class Child1(fixtures.ComparableEntity): pass class Child2(fixtures.ComparableEntity): pass class Parent(fixtures.ComparableEntity): pass class Screen(object): def __init__(self, obj, parent=None): self.obj = obj self.parent = parent class Foo(object): def __init__(self, moredata): self.data = 'im data' self.stuff = 'im stuff' self.moredata = moredata __hash__ = object.__hash__ def __eq__(self, other): return other.data == self.data and \ other.stuff == self.stuff and \ other.moredata == self.moredata class Bar(object): def __init__(self, x, y): self.x = x self.y = y __hash__ = object.__hash__ def __eq__(self, other): return other.__class__ is self.__class__ and \ other.x == self.x and \ other.y == self.y def __str__(self): return "Bar(%d, %d)" % (self.x, self.y) class OldSchool: def __init__(self, x, y): self.x = x self.y = y def __eq__(self, other): return other.__class__ is self.__class__ and \ other.x == self.x and \ other.y == self.y class OldSchoolWithoutCompare: def __init__(self, x, y): self.x = x self.y = y class BarWithoutCompare(object): def __init__(self, x, y): self.x = x self.y = y def __str__(self): return "Bar(%d, %d)" % (self.x, self.y) class NotComparable(object): def __init__(self, data): self.data = data def __hash__(self): return id(self) def __eq__(self, other): return NotImplemented def __ne__(self, other): return NotImplemented class BrokenComparable(object): def __init__(self, data): self.data = data def __hash__(self): return id(self) def __eq__(self, other): raise NotImplementedError def __ne__(self, other): raise NotImplementedError SQLAlchemy-1.0.11/lib/sqlalchemy/testing/replay_fixture.py0000664000175000017500000001246512636375552024651 0ustar classicclassic00000000000000from . import fixtures from . import profiling from .. import util import types from collections import deque import contextlib from . import config from sqlalchemy import MetaData from sqlalchemy import create_engine from sqlalchemy.orm import Session class ReplayFixtureTest(fixtures.TestBase): @contextlib.contextmanager def _dummy_ctx(self, *arg, **kw): yield def test_invocation(self): dbapi_session = ReplayableSession() creator = config.db.pool._creator recorder = lambda: dbapi_session.recorder(creator()) engine = create_engine( config.db.url, creator=recorder, use_native_hstore=False) self.metadata = MetaData(engine) self.engine = engine self.session = Session(engine) self.setup_engine() try: self._run_steps(ctx=self._dummy_ctx) finally: self.teardown_engine() engine.dispose() player = lambda: dbapi_session.player() engine = create_engine( config.db.url, creator=player, use_native_hstore=False) self.metadata = MetaData(engine) self.engine = engine self.session = Session(engine) self.setup_engine() try: self._run_steps(ctx=profiling.count_functions) finally: self.session.close() engine.dispose() def setup_engine(self): pass def teardown_engine(self): pass def _run_steps(self, ctx): raise NotImplementedError() class ReplayableSession(object): """A simple record/playback tool. This is *not* a mock testing class. It only records a session for later playback and makes no assertions on call consistency whatsoever. 
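    A rough sketch of the intended round trip, assuming ``dbapi`` is any
    DB-API module::

        session = ReplayableSession()

        # record: wrap a live DB-API connection and use it normally
        conn = session.recorder(dbapi.connect())
        conn.cursor().execute("SELECT 1")

        # playback: the same calls are now served from the buffer,
        # with no database present
        conn = session.player()
        conn.cursor().execute("SELECT 1")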
It's unlikely to be suitable for anything other than DB-API recording. """ Callable = object() NoAttribute = object() if util.py2k: Natives = set([getattr(types, t) for t in dir(types) if not t.startswith('_')]).\ difference([getattr(types, t) for t in ('FunctionType', 'BuiltinFunctionType', 'MethodType', 'BuiltinMethodType', 'LambdaType', 'UnboundMethodType',)]) else: Natives = set([getattr(types, t) for t in dir(types) if not t.startswith('_')]).\ union([type(t) if not isinstance(t, type) else t for t in __builtins__.values()]).\ difference([getattr(types, t) for t in ('FunctionType', 'BuiltinFunctionType', 'MethodType', 'BuiltinMethodType', 'LambdaType', )]) def __init__(self): self.buffer = deque() def recorder(self, base): return self.Recorder(self.buffer, base) def player(self): return self.Player(self.buffer) class Recorder(object): def __init__(self, buffer, subject): self._buffer = buffer self._subject = subject def __call__(self, *args, **kw): subject, buffer = [object.__getattribute__(self, x) for x in ('_subject', '_buffer')] result = subject(*args, **kw) if type(result) not in ReplayableSession.Natives: buffer.append(ReplayableSession.Callable) return type(self)(buffer, result) else: buffer.append(result) return result @property def _sqla_unwrap(self): return self._subject def __getattribute__(self, key): try: return object.__getattribute__(self, key) except AttributeError: pass subject, buffer = [object.__getattribute__(self, x) for x in ('_subject', '_buffer')] try: result = type(subject).__getattribute__(subject, key) except AttributeError: buffer.append(ReplayableSession.NoAttribute) raise else: if type(result) not in ReplayableSession.Natives: buffer.append(ReplayableSession.Callable) return type(self)(buffer, result) else: buffer.append(result) return result class Player(object): def __init__(self, buffer): self._buffer = buffer def __call__(self, *args, **kw): buffer = object.__getattribute__(self, '_buffer') result = buffer.popleft() if result is ReplayableSession.Callable: return self else: return result @property def _sqla_unwrap(self): return None def __getattribute__(self, key): try: return object.__getattribute__(self, key) except AttributeError: pass buffer = object.__getattribute__(self, '_buffer') result = buffer.popleft() if result is ReplayableSession.Callable: return self elif result is ReplayableSession.NoAttribute: raise AttributeError(key) else: return result SQLAlchemy-1.0.11/lib/sqlalchemy/testing/engines.py0000664000175000017500000002221712636375552023233 0ustar classicclassic00000000000000# testing/engines.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from __future__ import absolute_import import weakref from . import config from .util import decorator from .. 
import event, pool import re import warnings class ConnectionKiller(object): def __init__(self): self.proxy_refs = weakref.WeakKeyDictionary() self.testing_engines = weakref.WeakKeyDictionary() self.conns = set() def add_engine(self, engine): self.testing_engines[engine] = True def connect(self, dbapi_conn, con_record): self.conns.add((dbapi_conn, con_record)) def checkout(self, dbapi_con, con_record, con_proxy): self.proxy_refs[con_proxy] = True def invalidate(self, dbapi_con, con_record, exception): self.conns.discard((dbapi_con, con_record)) def _safe(self, fn): try: fn() except Exception as e: warnings.warn( "testing_reaper couldn't " "rollback/close connection: %s" % e) def rollback_all(self): for rec in list(self.proxy_refs): if rec is not None and rec.is_valid: self._safe(rec.rollback) def close_all(self): for rec in list(self.proxy_refs): if rec is not None and rec.is_valid: self._safe(rec._close) def _after_test_ctx(self): # this can cause a deadlock with pg8000 - pg8000 acquires # prepared statement lock inside of rollback() - if async gc # is collecting in finalize_fairy, deadlock. # not sure if this should be if pypy/jython only. # note that firebird/fdb definitely needs this though for conn, rec in list(self.conns): self._safe(conn.rollback) def _stop_test_ctx(self): if config.options.low_connections: self._stop_test_ctx_minimal() else: self._stop_test_ctx_aggressive() def _stop_test_ctx_minimal(self): self.close_all() self.conns = set() for rec in list(self.testing_engines): if rec is not config.db: rec.dispose() def _stop_test_ctx_aggressive(self): self.close_all() for conn, rec in list(self.conns): self._safe(conn.close) rec.connection = None self.conns = set() for rec in list(self.testing_engines): rec.dispose() def assert_all_closed(self): for rec in self.proxy_refs: if rec.is_valid: assert False testing_reaper = ConnectionKiller() def drop_all_tables(metadata, bind): testing_reaper.close_all() if hasattr(bind, 'close'): bind.close() if not config.db.dialect.supports_alter: from . 
import assertions with assertions.expect_warnings( "Can't sort tables", assert_=False): metadata.drop_all(bind) else: metadata.drop_all(bind) @decorator def assert_conns_closed(fn, *args, **kw): try: fn(*args, **kw) finally: testing_reaper.assert_all_closed() @decorator def rollback_open_connections(fn, *args, **kw): """Decorator that rolls back all open connections after fn execution.""" try: fn(*args, **kw) finally: testing_reaper.rollback_all() @decorator def close_first(fn, *args, **kw): """Decorator that closes all connections before fn execution.""" testing_reaper.close_all() fn(*args, **kw) @decorator def close_open_connections(fn, *args, **kw): """Decorator that closes all connections after fn execution.""" try: fn(*args, **kw) finally: testing_reaper.close_all() def all_dialects(exclude=None): import sqlalchemy.databases as d for name in d.__all__: # TEMPORARY if exclude and name in exclude: continue mod = getattr(d, name, None) if not mod: mod = getattr(__import__( 'sqlalchemy.databases.%s' % name).databases, name) yield mod.dialect() class ReconnectFixture(object): def __init__(self, dbapi): self.dbapi = dbapi self.connections = [] def __getattr__(self, key): return getattr(self.dbapi, key) def connect(self, *args, **kwargs): conn = self.dbapi.connect(*args, **kwargs) self.connections.append(conn) return conn def _safe(self, fn): try: fn() except Exception as e: warnings.warn( "ReconnectFixture couldn't " "close connection: %s" % e) def shutdown(self): # TODO: this doesn't cover all cases # as nicely as we'd like, namely MySQLdb. # would need to implement R. Brewer's # proxy server idea to get better # coverage. for c in list(self.connections): self._safe(c.close) self.connections = [] def reconnecting_engine(url=None, options=None): url = url or config.db.url dbapi = config.db.dialect.dbapi if not options: options = {} options['module'] = ReconnectFixture(dbapi) engine = testing_engine(url, options) _dispose = engine.dispose def dispose(): engine.dialect.dbapi.shutdown() _dispose() engine.test_shutdown = engine.dialect.dbapi.shutdown engine.dispose = dispose return engine def testing_engine(url=None, options=None): """Produce an engine configured by --options with optional overrides.""" from sqlalchemy import create_engine from sqlalchemy.engine.url import make_url if not options: use_reaper = True else: use_reaper = options.pop('use_reaper', True) url = url or config.db.url url = make_url(url) if options is None: if config.db is None or url.drivername == config.db.url.drivername: options = config.db_opts else: options = {} engine = create_engine(url, **options) engine._has_events = True # enable event blocks, helps with profiling if isinstance(engine.pool, pool.QueuePool): engine.pool._timeout = 0 engine.pool._max_overflow = 0 if use_reaper: event.listen(engine.pool, 'connect', testing_reaper.connect) event.listen(engine.pool, 'checkout', testing_reaper.checkout) event.listen(engine.pool, 'invalidate', testing_reaper.invalidate) testing_reaper.add_engine(engine) return engine def mock_engine(dialect_name=None): """Provides a mocking engine based on the current testing.db. This is normally used to test DDL generation flow as emitted by an Engine. It should not be used in other cases, as assert_compile() and assert_sql_execution() are much better choices with fewer moving parts. 
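    For illustration only, a minimal sketch (the ``users`` table here is
    an assumed :class:`.Table`)::

        engine = mock_engine('sqlite')
        users.create(engine)

        # nothing was executed; the emitted DDL strings were buffered
        print(engine.mock)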
""" from sqlalchemy import create_engine if not dialect_name: dialect_name = config.db.name buffer = [] def executor(sql, *a, **kw): buffer.append(sql) def assert_sql(stmts): recv = [re.sub(r'[\n\t]', '', str(s)) for s in buffer] assert recv == stmts, recv def print_sql(): d = engine.dialect return "\n".join( str(s.compile(dialect=d)) for s in engine.mock ) engine = create_engine(dialect_name + '://', strategy='mock', executor=executor) assert not hasattr(engine, 'mock') engine.mock = buffer engine.assert_sql = assert_sql engine.print_sql = print_sql return engine class DBAPIProxyCursor(object): """Proxy a DBAPI cursor. Tests can provide subclasses of this to intercept DBAPI-level cursor operations. """ def __init__(self, engine, conn, *args, **kwargs): self.engine = engine self.connection = conn self.cursor = conn.cursor(*args, **kwargs) def execute(self, stmt, parameters=None, **kw): if parameters: return self.cursor.execute(stmt, parameters, **kw) else: return self.cursor.execute(stmt, **kw) def executemany(self, stmt, params, **kw): return self.cursor.executemany(stmt, params, **kw) def __getattr__(self, key): return getattr(self.cursor, key) class DBAPIProxyConnection(object): """Proxy a DBAPI connection. Tests can provide subclasses of this to intercept DBAPI-level connection operations. """ def __init__(self, engine, cursor_cls): self.conn = self._sqla_unwrap = engine.pool._creator() self.engine = engine self.cursor_cls = cursor_cls def cursor(self, *args, **kwargs): return self.cursor_cls(self.engine, self.conn, *args, **kwargs) def close(self): self.conn.close() def __getattr__(self, key): return getattr(self.conn, key) def proxying_engine(conn_cls=DBAPIProxyConnection, cursor_cls=DBAPIProxyCursor): """Produce an engine that provides proxy hooks for common methods. """ def mock_conn(): return conn_cls(config.db, cursor_cls) return testing_engine(options={'creator': mock_conn}) SQLAlchemy-1.0.11/lib/sqlalchemy/sql/0000775000175000017500000000000012636376632020347 5ustar classicclassic00000000000000SQLAlchemy-1.0.11/lib/sqlalchemy/sql/dml.py0000664000175000017500000010106212636375552021475 0ustar classicclassic00000000000000# sql/dml.py # Copyright (C) 2009-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ Provide :class:`.Insert`, :class:`.Update` and :class:`.Delete`. """ from .base import Executable, _generative, _from_objects, DialectKWArgs from .elements import ClauseElement, _literal_as_text, Null, and_, _clone, \ _column_as_key from .selectable import _interpret_as_from, _interpret_as_select, HasPrefixes from .. import util from .. import exc class UpdateBase(DialectKWArgs, HasPrefixes, Executable, ClauseElement): """Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements. 
""" __visit_name__ = 'update_base' _execution_options = \ Executable._execution_options.union({'autocommit': True}) _hints = util.immutabledict() _parameter_ordering = None _prefixes = () def _process_colparams(self, parameters): def process_single(p): if isinstance(p, (list, tuple)): return dict( (c.key, pval) for c, pval in zip(self.table.c, p) ) else: return p if self._preserve_parameter_order and parameters is not None: if not isinstance(parameters, list) or \ (parameters and not isinstance(parameters[0], tuple)): raise ValueError( "When preserve_parameter_order is True, " "values() only accepts a list of 2-tuples") self._parameter_ordering = [key for key, value in parameters] return dict(parameters), False if (isinstance(parameters, (list, tuple)) and parameters and isinstance(parameters[0], (list, tuple, dict))): if not self._supports_multi_parameters: raise exc.InvalidRequestError( "This construct does not support " "multiple parameter sets.") return [process_single(p) for p in parameters], True else: return process_single(parameters), False def params(self, *arg, **kw): """Set the parameters for the statement. This method raises ``NotImplementedError`` on the base class, and is overridden by :class:`.ValuesBase` to provide the SET/VALUES clause of UPDATE and INSERT. """ raise NotImplementedError( "params() is not supported for INSERT/UPDATE/DELETE statements." " To set the values for an INSERT or UPDATE statement, use" " stmt.values(**parameters).") def bind(self): """Return a 'bind' linked to this :class:`.UpdateBase` or a :class:`.Table` associated with it. """ return self._bind or self.table.bind def _set_bind(self, bind): self._bind = bind bind = property(bind, _set_bind) @_generative def returning(self, *cols): """Add a :term:`RETURNING` or equivalent clause to this statement. e.g.:: stmt = table.update().\\ where(table.c.data == 'value').\\ values(status='X').\\ returning(table.c.server_flag, table.c.updated_timestamp) for server_flag, updated_timestamp in connection.execute(stmt): print(server_flag, updated_timestamp) The given collection of column expressions should be derived from the table that is the target of the INSERT, UPDATE, or DELETE. While :class:`.Column` objects are typical, the elements can also be expressions:: stmt = table.insert().returning( (table.c.first_name + " " + table.c.last_name). label('fullname')) Upon compilation, a RETURNING clause, or database equivalent, will be rendered within the statement. For INSERT and UPDATE, the values are the newly inserted/updated values. For DELETE, the values are those of the rows which were deleted. Upon execution, the values of the columns to be returned are made available via the result set and can be iterated using :meth:`.ResultProxy.fetchone` and similar. For DBAPIs which do not natively support returning values (i.e. cx_oracle), SQLAlchemy will approximate this behavior at the result level so that a reasonable amount of behavioral neutrality is provided. Note that not all databases/DBAPIs support RETURNING. For those backends with no support, an exception is raised upon compilation and/or execution. For those who do support it, the functionality across backends varies greatly, including restrictions on executemany() and other statements which return multiple rows. Please read the documentation notes for the database in use in order to determine the availability of RETURNING. .. 
seealso:: :meth:`.ValuesBase.return_defaults` - an alternative method tailored towards efficient fetching of server-side defaults and triggers for single-row INSERTs or UPDATEs. """ self._returning = cols @_generative def with_hint(self, text, selectable=None, dialect_name="*"): """Add a table hint for a single table to this INSERT/UPDATE/DELETE statement. .. note:: :meth:`.UpdateBase.with_hint` currently applies only to Microsoft SQL Server. For MySQL INSERT/UPDATE/DELETE hints, use :meth:`.UpdateBase.prefix_with`. The text of the hint is rendered in the appropriate location for the database backend in use, relative to the :class:`.Table` that is the subject of this statement, or optionally to that of the given :class:`.Table` passed as the ``selectable`` argument. The ``dialect_name`` option will limit the rendering of a particular hint to a particular backend. Such as, to add a hint that only takes effect for SQL Server:: mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql") .. versionadded:: 0.7.6 :param text: Text of the hint. :param selectable: optional :class:`.Table` that specifies an element of the FROM clause within an UPDATE or DELETE to be the subject of the hint - applies only to certain backends. :param dialect_name: defaults to ``*``, if specified as the name of a particular dialect, will apply these hints only when that dialect is in use. """ if selectable is None: selectable = self.table self._hints = self._hints.union( {(selectable, dialect_name): text}) class ValuesBase(UpdateBase): """Supplies support for :meth:`.ValuesBase.values` to INSERT and UPDATE constructs.""" __visit_name__ = 'values_base' _supports_multi_parameters = False _has_multi_parameters = False _preserve_parameter_order = False select = None def __init__(self, table, values, prefixes): self.table = _interpret_as_from(table) self.parameters, self._has_multi_parameters = \ self._process_colparams(values) if prefixes: self._setup_prefixes(prefixes) @_generative def values(self, *args, **kwargs): """specify a fixed VALUES clause for an INSERT statement, or the SET clause for an UPDATE. Note that the :class:`.Insert` and :class:`.Update` constructs support per-execution time formatting of the VALUES and/or SET clauses, based on the arguments passed to :meth:`.Connection.execute`. However, the :meth:`.ValuesBase.values` method can be used to "fix" a particular set of parameters into the statement. Multiple calls to :meth:`.ValuesBase.values` will produce a new construct, each one with the parameter list modified to include the new parameters sent. In the typical case of a single dictionary of parameters, the newly passed keys will replace the same keys in the previous construct. In the case of a list-based "multiple values" construct, each new list of values is extended onto the existing list of values. :param \**kwargs: key value pairs representing the string key of a :class:`.Column` mapped to the value to be rendered into the VALUES or SET clause:: users.insert().values(name="some name") users.update().where(users.c.id==5).values(name="some name") :param \*args: As an alternative to passing key/value parameters, a dictionary, tuple, or list of dictionaries or tuples can be passed as a single positional argument in order to form the VALUES or SET clause of the statement. The forms that are accepted vary based on whether this is an :class:`.Insert` or an :class:`.Update` construct. 
For either an :class:`.Insert` or :class:`.Update` construct, a single dictionary can be passed, which works the same as that of the kwargs form:: users.insert().values({"name": "some name"}) users.update().values({"name": "some new name"}) Also for either form but more typically for the :class:`.Insert` construct, a tuple that contains an entry for every column in the table is also accepted:: users.insert().values((5, "some name")) The :class:`.Insert` construct also supports being passed a list of dictionaries or full-table-tuples, which on the server will render the less common SQL syntax of "multiple values" - this syntax is supported on backends such as SQLite, Postgresql, MySQL, but not necessarily others:: users.insert().values([ {"name": "some name"}, {"name": "some other name"}, {"name": "yet another name"}, ]) The above form would render a multiple VALUES statement similar to:: INSERT INTO users (name) VALUES (:name_1), (:name_2), (:name_3) It is essential to note that **passing multiple values is NOT the same as using traditional executemany() form**. The above syntax is a **special** syntax not typically used. To emit an INSERT statement against multiple rows, the normal method is to pass a multiple values list to the :meth:`.Connection.execute` method, which is supported by all database backends and is generally more efficient for a very large number of parameters. .. seealso:: :ref:`execute_multiple` - an introduction to the traditional Core method of multiple parameter set invocation for INSERTs and other statements. .. versionchanged:: 1.0.0 an INSERT that uses a multiple-VALUES clause, even a list of length one, implies that the :paramref:`.Insert.inline` flag is set to True, indicating that the statement will not attempt to fetch the "last inserted primary key" or other defaults. The statement deals with an arbitrary number of rows, so the :attr:`.ResultProxy.inserted_primary_key` accessor does not apply. .. versionchanged:: 1.0.0 A multiple-VALUES INSERT now supports columns with Python side default values and callables in the same way as that of an "executemany" style of invocation; the callable is invoked for each row. See :ref:`bug_3288` for other details. The :class:`.Update` construct supports a special form which is a list of 2-tuples, which when provided must be passed in conjunction with the :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` parameter. This form causes the UPDATE statement to render the SET clauses using the order of parameters given to :meth:`.Update.values`, rather than the ordering of columns given in the :class:`.Table`. .. versionadded:: 1.0.10 - added support for parameter-ordered UPDATE statements via the :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` flag. .. seealso:: :ref:`updates_order_parameters` - full example of the :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` flag .. 
seealso:: :ref:`inserts_and_updates` - SQL Expression Language Tutorial :func:`~.expression.insert` - produce an ``INSERT`` statement :func:`~.expression.update` - produce an ``UPDATE`` statement """ if self.select is not None: raise exc.InvalidRequestError( "This construct already inserts from a SELECT") if self._has_multi_parameters and kwargs: raise exc.InvalidRequestError( "This construct already has multiple parameter sets.") if args: if len(args) > 1: raise exc.ArgumentError( "Only a single dictionary/tuple or list of " "dictionaries/tuples is accepted positionally.") v = args[0] else: v = {} if self.parameters is None: self.parameters, self._has_multi_parameters = \ self._process_colparams(v) else: if self._has_multi_parameters: self.parameters = list(self.parameters) p, self._has_multi_parameters = self._process_colparams(v) if not self._has_multi_parameters: raise exc.ArgumentError( "Can't mix single-values and multiple values " "formats in one statement") self.parameters.extend(p) else: self.parameters = self.parameters.copy() p, self._has_multi_parameters = self._process_colparams(v) if self._has_multi_parameters: raise exc.ArgumentError( "Can't mix single-values and multiple values " "formats in one statement") self.parameters.update(p) if kwargs: if self._has_multi_parameters: raise exc.ArgumentError( "Can't pass kwargs and multiple parameter sets " "simultaneously") else: self.parameters.update(kwargs) @_generative def return_defaults(self, *cols): """Make use of a :term:`RETURNING` clause for the purpose of fetching server-side expressions and defaults. E.g.:: stmt = table.insert().values(data='newdata').return_defaults() result = connection.execute(stmt) server_created_at = result.returned_defaults['created_at'] When used against a backend that supports RETURNING, all column values generated by SQL expression or server-side-default will be added to any existing RETURNING clause, provided that :meth:`.UpdateBase.returning` is not used simultaneously. The column values will then be available on the result using the :attr:`.ResultProxy.returned_defaults` accessor as a dictionary, referring to values keyed to the :class:`.Column` object as well as its ``.key``. This method differs from :meth:`.UpdateBase.returning` in these ways: 1. :meth:`.ValuesBase.return_defaults` is only intended for use with an INSERT or an UPDATE statement that matches exactly one row. While the RETURNING construct in the general sense supports multiple rows for a multi-row UPDATE or DELETE statement, or for special cases of INSERT that return multiple rows (e.g. INSERT from SELECT, multi-valued VALUES clause), :meth:`.ValuesBase.return_defaults` is intended only for an "ORM-style" single-row INSERT/UPDATE statement. The row returned by the statement is also consumed implicitly when :meth:`.ValuesBase.return_defaults` is used. By contrast, :meth:`.UpdateBase.returning` leaves the RETURNING result-set intact with a collection of any number of rows. 2. It is compatible with the existing logic to fetch auto-generated primary key values, also known as "implicit returning". Backends that support RETURNING will automatically make use of RETURNING in order to fetch the value of newly generated primary keys; while the :meth:`.UpdateBase.returning` method circumvents this behavior, :meth:`.ValuesBase.return_defaults` leaves it intact. 3. It can be called against any backend. Backends that don't support RETURNING will skip the usage of the feature, rather than raising an exception. 
The return value of :attr:`.ResultProxy.returned_defaults` will be ``None`` :meth:`.ValuesBase.return_defaults` is used by the ORM to provide an efficient implementation for the ``eager_defaults`` feature of :func:`.mapper`. :param cols: optional list of column key names or :class:`.Column` objects. If omitted, all column expressions evaluated on the server are added to the returning list. .. versionadded:: 0.9.0 .. seealso:: :meth:`.UpdateBase.returning` :attr:`.ResultProxy.returned_defaults` """ self._return_defaults = cols or True class Insert(ValuesBase): """Represent an INSERT construct. The :class:`.Insert` object is created using the :func:`~.expression.insert()` function. .. seealso:: :ref:`coretutorial_insert_expressions` """ __visit_name__ = 'insert' _supports_multi_parameters = True def __init__(self, table, values=None, inline=False, bind=None, prefixes=None, returning=None, return_defaults=False, **dialect_kw): """Construct an :class:`.Insert` object. Similar functionality is available via the :meth:`~.TableClause.insert` method on :class:`~.schema.Table`. :param table: :class:`.TableClause` which is the subject of the insert. :param values: collection of values to be inserted; see :meth:`.Insert.values` for a description of allowed formats here. Can be omitted entirely; a :class:`.Insert` construct will also dynamically render the VALUES clause at execution time based on the parameters passed to :meth:`.Connection.execute`. :param inline: if True, no attempt will be made to retrieve the SQL-generated default values to be provided within the statement; in particular, this allows SQL expressions to be rendered 'inline' within the statement without the need to pre-execute them beforehand; for backends that support "returning", this turns off the "implicit returning" feature for the statement. If both `values` and compile-time bind parameters are present, the compile-time bind parameters override the information specified within `values` on a per-key basis. The keys within `values` can be either :class:`~sqlalchemy.schema.Column` objects or their string identifiers. Each key may reference one of: * a literal data value (i.e. string, number, etc.); * a Column object; * a SELECT statement. If a ``SELECT`` statement is specified which references this ``INSERT`` statement's table, the statement will be correlated against the ``INSERT`` statement. .. seealso:: :ref:`coretutorial_insert_expressions` - SQL Expression Tutorial :ref:`inserts_and_updates` - SQL Expression Tutorial """ ValuesBase.__init__(self, table, values, prefixes) self._bind = bind self.select = self.select_names = None self.include_insert_from_select_defaults = False self.inline = inline self._returning = returning self._validate_dialect_kwargs(dialect_kw) self._return_defaults = return_defaults def get_children(self, **kwargs): if self.select is not None: return self.select, else: return () @_generative def from_select(self, names, select, include_defaults=True): """Return a new :class:`.Insert` construct which represents an ``INSERT...FROM SELECT`` statement. e.g.:: sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5) ins = table2.insert().from_select(['a', 'b'], sel) :param names: a sequence of string column names or :class:`.Column` objects representing the target columns. :param select: a :func:`.select` construct, :class:`.FromClause` or other construct which resolves into a :class:`.FromClause`, such as an ORM :class:`.Query` object, etc. 
The order of columns returned from this FROM clause should correspond to the order of columns sent as the ``names`` parameter; while this is not checked before passing along to the database, the database would normally raise an exception if these column lists don't correspond. :param include_defaults: if True, non-server default values and SQL expressions as specified on :class:`.Column` objects (as documented in :ref:`metadata_defaults_toplevel`) not otherwise specified in the list of names will be rendered into the INSERT and SELECT statements, so that these values are also included in the data to be inserted. .. note:: A Python-side default that uses a Python callable function will only be invoked **once** for the whole statement, and **not per row**. .. versionadded:: 1.0.0 - :meth:`.Insert.from_select` now renders Python-side and SQL expression column defaults into the SELECT statement for columns otherwise not included in the list of column names. .. versionchanged:: 1.0.0 an INSERT that uses FROM SELECT implies that the :paramref:`.insert.inline` flag is set to True, indicating that the statement will not attempt to fetch the "last inserted primary key" or other defaults. The statement deals with an arbitrary number of rows, so the :attr:`.ResultProxy.inserted_primary_key` accessor does not apply. .. versionadded:: 0.8.3 """ if self.parameters: raise exc.InvalidRequestError( "This construct already inserts value expressions") self.parameters, self._has_multi_parameters = \ self._process_colparams( dict((_column_as_key(n), Null()) for n in names)) self.select_names = names self.inline = True self.include_insert_from_select_defaults = include_defaults self.select = _interpret_as_select(select) def _copy_internals(self, clone=_clone, **kw): # TODO: coverage self.parameters = self.parameters.copy() if self.select is not None: self.select = _clone(self.select) class Update(ValuesBase): """Represent an Update construct. The :class:`.Update` object is created using the :func:`update()` function. """ __visit_name__ = 'update' def __init__(self, table, whereclause=None, values=None, inline=False, bind=None, prefixes=None, returning=None, return_defaults=False, preserve_parameter_order=False, **dialect_kw): """Construct an :class:`.Update` object. E.g.:: from sqlalchemy import update stmt = update(users).where(users.c.id==5).\\ values(name='user #5') Similar functionality is available via the :meth:`~.TableClause.update` method on :class:`.Table`:: stmt = users.update().\\ where(users.c.id==5).\\ values(name='user #5') :param table: A :class:`.Table` object representing the database table to be updated. :param whereclause: Optional SQL expression describing the ``WHERE`` condition of the ``UPDATE`` statement. Modern applications may prefer to use the generative :meth:`~Update.where()` method to specify the ``WHERE`` clause. The WHERE clause can refer to multiple tables. For databases which support this, an ``UPDATE FROM`` clause will be generated, or on MySQL, a multi-table update. The statement will fail on databases that don't have support for multi-table update statements. A SQL-standard method of referring to additional tables in the WHERE clause is to use a correlated subquery:: users.update().values(name='ed').where( users.c.name==select([addresses.c.email_address]).\\ where(addresses.c.user_id==users.c.id).\\ as_scalar() ) .. versionchanged:: 0.7.4 The WHERE clause can refer to multiple tables. :param values: Optional dictionary which specifies the ``SET`` conditions of the ``UPDATE``. 
If left as ``None``, the ``SET`` conditions are determined from those parameters passed to the statement during the execution and/or compilation of the statement. When compiled standalone without any parameters, the ``SET`` clause generates for all columns. Modern applications may prefer to use the generative :meth:`.Update.values` method to set the values of the UPDATE statement. :param inline: if True, SQL defaults present on :class:`.Column` objects via the ``default`` keyword will be compiled 'inline' into the statement and not pre-executed. This means that their values will not be available in the dictionary returned from :meth:`.ResultProxy.last_updated_params`. :param preserve_parameter_order: if True, the update statement is expected to receive parameters **only** via the :meth:`.Update.values` method, and they must be passed as a Python ``list`` of 2-tuples. The rendered UPDATE statement will emit the SET clause for each referenced column maintaining this order. .. versionadded:: 1.0.10 .. seealso:: :ref:`updates_order_parameters` - full example of the :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` flag If both ``values`` and compile-time bind parameters are present, the compile-time bind parameters override the information specified within ``values`` on a per-key basis. The keys within ``values`` can be either :class:`.Column` objects or their string identifiers (specifically the "key" of the :class:`.Column`, normally but not necessarily equivalent to its "name"). Normally, the :class:`.Column` objects used here are expected to be part of the target :class:`.Table` that is the table to be updated. However when using MySQL, a multiple-table UPDATE statement can refer to columns from any of the tables referred to in the WHERE clause. The values referred to in ``values`` are typically: * a literal data value (i.e. string, number, etc.) * a SQL expression, such as a related :class:`.Column`, a scalar-returning :func:`.select` construct, etc. When combining :func:`.select` constructs within the values clause of an :func:`.update` construct, the subquery represented by the :func:`.select` should be *correlated* to the parent table, that is, providing criterion which links the table inside the subquery to the outer table being updated:: users.update().values( name=select([addresses.c.email_address]).\\ where(addresses.c.user_id==users.c.id).\\ as_scalar() ) .. seealso:: :ref:`inserts_and_updates` - SQL Expression Language Tutorial """ self._preserve_parameter_order = preserve_parameter_order ValuesBase.__init__(self, table, values, prefixes) self._bind = bind self._returning = returning if whereclause is not None: self._whereclause = _literal_as_text(whereclause) else: self._whereclause = None self.inline = inline self._validate_dialect_kwargs(dialect_kw) self._return_defaults = return_defaults def get_children(self, **kwargs): if self._whereclause is not None: return self._whereclause, else: return () def _copy_internals(self, clone=_clone, **kw): # TODO: coverage self._whereclause = clone(self._whereclause, **kw) self.parameters = self.parameters.copy() @_generative def where(self, whereclause): """return a new update() construct with the given expression added to its WHERE clause, joined to the existing clause via AND, if any. 
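        Successive calls conjoin their criteria; e.g., with an assumed
        ``users`` table::

            stmt = users.update().values(name='ed').\\
                where(users.c.id > 5).\\
                where(users.c.id < 10)

        renders a WHERE clause along the lines of
        ``WHERE users.id > :id_1 AND users.id < :id_2``.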
""" if self._whereclause is not None: self._whereclause = and_(self._whereclause, _literal_as_text(whereclause)) else: self._whereclause = _literal_as_text(whereclause) @property def _extra_froms(self): # TODO: this could be made memoized # if the memoization is reset on each generative call. froms = [] seen = set([self.table]) if self._whereclause is not None: for item in _from_objects(self._whereclause): if not seen.intersection(item._cloned_set): froms.append(item) seen.update(item._cloned_set) return froms class Delete(UpdateBase): """Represent a DELETE construct. The :class:`.Delete` object is created using the :func:`delete()` function. """ __visit_name__ = 'delete' def __init__(self, table, whereclause=None, bind=None, returning=None, prefixes=None, **dialect_kw): """Construct :class:`.Delete` object. Similar functionality is available via the :meth:`~.TableClause.delete` method on :class:`~.schema.Table`. :param table: The table to delete rows from. :param whereclause: A :class:`.ClauseElement` describing the ``WHERE`` condition of the ``DELETE`` statement. Note that the :meth:`~Delete.where()` generative method may be used instead. .. seealso:: :ref:`deletes` - SQL Expression Tutorial """ self._bind = bind self.table = _interpret_as_from(table) self._returning = returning if prefixes: self._setup_prefixes(prefixes) if whereclause is not None: self._whereclause = _literal_as_text(whereclause) else: self._whereclause = None self._validate_dialect_kwargs(dialect_kw) def get_children(self, **kwargs): if self._whereclause is not None: return self._whereclause, else: return () @_generative def where(self, whereclause): """Add the given WHERE clause to a newly returned delete construct.""" if self._whereclause is not None: self._whereclause = and_(self._whereclause, _literal_as_text(whereclause)) else: self._whereclause = _literal_as_text(whereclause) def _copy_internals(self, clone=_clone, **kw): # TODO: coverage self._whereclause = clone(self._whereclause, **kw) SQLAlchemy-1.0.11/lib/sqlalchemy/sql/visitors.py0000664000175000017500000002403712636375552022611 0ustar classicclassic00000000000000# sql/visitors.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Visitor/traversal interface and library functions. SQLAlchemy schema and expression constructs rely on a Python-centric version of the classic "visitor" pattern as the primary way in which they apply functionality. The most common use of this pattern is statement compilation, where individual expression classes match up to rendering methods that produce a string result. Beyond this, the visitor system is also used to inspect expressions for various information and patterns, as well as for usage in some kinds of expression transformation. Other kinds of transformation use a non-visitor traversal system. For many examples of how the visit system is used, see the sqlalchemy.sql.util and the sqlalchemy.sql.compiler modules. For an introduction to clause adaption, see http://techspot.zzzeek.org/2008/01/23/expression-transformations/ """ from collections import deque from .. import util import operator from .. 
import exc __all__ = ['VisitableType', 'Visitable', 'ClauseVisitor', 'CloningVisitor', 'ReplacingCloningVisitor', 'iterate', 'iterate_depthfirst', 'traverse_using', 'traverse', 'traverse_depthfirst', 'cloned_traverse', 'replacement_traverse'] class VisitableType(type): """Metaclass which assigns a `_compiler_dispatch` method to classes having a `__visit_name__` attribute. The _compiler_dispatch attribute becomes an instance method which looks approximately like the following:: def _compiler_dispatch (self, visitor, **kw): '''Look for an attribute named "visit_" + self.__visit_name__ on the visitor, and call it with the same kw params.''' visit_attr = 'visit_%s' % self.__visit_name__ return getattr(visitor, visit_attr)(self, **kw) Classes having no __visit_name__ attribute will remain unaffected. """ def __init__(cls, clsname, bases, clsdict): if clsname != 'Visitable' and \ hasattr(cls, '__visit_name__'): _generate_dispatch(cls) super(VisitableType, cls).__init__(clsname, bases, clsdict) def _generate_dispatch(cls): """Return an optimized visit dispatch function for the cls for use by the compiler. """ if '__visit_name__' in cls.__dict__: visit_name = cls.__visit_name__ if isinstance(visit_name, str): # There is an optimization opportunity here because the # the string name of the class's __visit_name__ is known at # this early stage (import time) so it can be pre-constructed. getter = operator.attrgetter("visit_%s" % visit_name) def _compiler_dispatch(self, visitor, **kw): try: meth = getter(visitor) except AttributeError: raise exc.UnsupportedCompilationError(visitor, cls) else: return meth(self, **kw) else: # The optimization opportunity is lost for this case because the # __visit_name__ is not yet a string. As a result, the visit # string has to be recalculated with each compilation. def _compiler_dispatch(self, visitor, **kw): visit_attr = 'visit_%s' % self.__visit_name__ try: meth = getattr(visitor, visit_attr) except AttributeError: raise exc.UnsupportedCompilationError(visitor, cls) else: return meth(self, **kw) _compiler_dispatch.__doc__ = \ """Look for an attribute named "visit_" + self.__visit_name__ on the visitor, and call it with the same kw params. """ cls._compiler_dispatch = _compiler_dispatch class Visitable(util.with_metaclass(VisitableType, object)): """Base class for visitable objects, applies the ``VisitableType`` metaclass. """ class ClauseVisitor(object): """Base class for visitor objects which can traverse using the traverse() function. """ __traverse_options__ = {} def traverse_single(self, obj, **kw): for v in self._visitor_iterator: meth = getattr(v, "visit_%s" % obj.__visit_name__, None) if meth: return meth(obj, **kw) def iterate(self, obj): """traverse the given expression structure, returning an iterator of all elements. """ return iterate(obj, self.__traverse_options__) def traverse(self, obj): """traverse and visit the given expression structure.""" return traverse(obj, self.__traverse_options__, self._visitor_dict) @util.memoized_property def _visitor_dict(self): visitors = {} for name in dir(self): if name.startswith('visit_'): visitors[name[6:]] = getattr(self, name) return visitors @property def _visitor_iterator(self): """iterate through this visitor and each 'chained' visitor.""" v = self while v: yield v v = getattr(v, '_next', None) def chain(self, visitor): """'chain' an additional ClauseVisitor onto this ClauseVisitor. the chained visitor will receive all visit events after this one. 
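        A sketch, assuming two hypothetical :class:`.ClauseVisitor`
        subclasses that each define ``visit_column``::

            chained = FindColumns().chain(CountColumns())
            chained.traverse(some_expression)

        For each :class:`.Column` encountered,
        ``FindColumns.visit_column`` is invoked first, then
        ``CountColumns.visit_column``.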
""" tail = list(self._visitor_iterator)[-1] tail._next = visitor return self class CloningVisitor(ClauseVisitor): """Base class for visitor objects which can traverse using the cloned_traverse() function. """ def copy_and_process(self, list_): """Apply cloned traversal to the given list of elements, and return the new list. """ return [self.traverse(x) for x in list_] def traverse(self, obj): """traverse and visit the given expression structure.""" return cloned_traverse( obj, self.__traverse_options__, self._visitor_dict) class ReplacingCloningVisitor(CloningVisitor): """Base class for visitor objects which can traverse using the replacement_traverse() function. """ def replace(self, elem): """receive pre-copied elements during a cloning traversal. If the method returns a new element, the element is used instead of creating a simple copy of the element. Traversal will halt on the newly returned element if it is re-encountered. """ return None def traverse(self, obj): """traverse and visit the given expression structure.""" def replace(elem): for v in self._visitor_iterator: e = v.replace(elem) if e is not None: return e return replacement_traverse(obj, self.__traverse_options__, replace) def iterate(obj, opts): """traverse the given expression structure, returning an iterator. traversal is configured to be breadth-first. """ # fasttrack for atomic elements like columns children = obj.get_children(**opts) if not children: return [obj] traversal = deque() stack = deque([obj]) while stack: t = stack.popleft() traversal.append(t) for c in t.get_children(**opts): stack.append(c) return iter(traversal) def iterate_depthfirst(obj, opts): """traverse the given expression structure, returning an iterator. traversal is configured to be depth-first. """ # fasttrack for atomic elements like columns children = obj.get_children(**opts) if not children: return [obj] stack = deque([obj]) traversal = deque() while stack: t = stack.pop() traversal.appendleft(t) for c in t.get_children(**opts): stack.append(c) return iter(traversal) def traverse_using(iterator, obj, visitors): """visit the given expression structure using the given iterator of objects. """ for target in iterator: meth = visitors.get(target.__visit_name__, None) if meth: meth(target) return obj def traverse(obj, opts, visitors): """traverse and visit the given expression structure using the default iterator. """ return traverse_using(iterate(obj, opts), obj, visitors) def traverse_depthfirst(obj, opts, visitors): """traverse and visit the given expression structure using the depth-first iterator. 
""" return traverse_using(iterate_depthfirst(obj, opts), obj, visitors) def cloned_traverse(obj, opts, visitors): """clone the given expression structure, allowing modifications by visitors.""" cloned = {} stop_on = set(opts.get('stop_on', [])) def clone(elem): if elem in stop_on: return elem else: if id(elem) not in cloned: cloned[id(elem)] = newelem = elem._clone() newelem._copy_internals(clone=clone) meth = visitors.get(newelem.__visit_name__, None) if meth: meth(newelem) return cloned[id(elem)] if obj is not None: obj = clone(obj) return obj def replacement_traverse(obj, opts, replace): """clone the given expression structure, allowing element replacement by a given replacement function.""" cloned = {} stop_on = set([id(x) for x in opts.get('stop_on', [])]) def clone(elem, **kw): if id(elem) in stop_on or \ 'no_replacement_traverse' in elem._annotations: return elem else: newelem = replace(elem) if newelem is not None: stop_on.add(id(newelem)) return newelem else: if elem not in cloned: cloned[elem] = newelem = elem._clone() newelem._copy_internals(clone=clone, **kw) return cloned[elem] if obj is not None: obj = clone(obj, **opts) return obj SQLAlchemy-1.0.11/lib/sqlalchemy/sql/base.py0000664000175000017500000005211712636375552021641 0ustar classicclassic00000000000000# sql/base.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Foundational utilities common to many sql modules. """ from .. import util, exc import itertools from .visitors import ClauseVisitor import re import collections PARSE_AUTOCOMMIT = util.symbol('PARSE_AUTOCOMMIT') NO_ARG = util.symbol('NO_ARG') class Immutable(object): """mark a ClauseElement as 'immutable' when expressions are cloned.""" def unique_params(self, *optionaldict, **kwargs): raise NotImplementedError("Immutable objects do not support copying") def params(self, *optionaldict, **kwargs): raise NotImplementedError("Immutable objects do not support copying") def _clone(self): return self def _from_objects(*elements): return itertools.chain(*[element._from_objects for element in elements]) @util.decorator def _generative(fn, *args, **kw): """Mark a method as generative.""" self = args[0]._generate() fn(self, *args[1:], **kw) return self class _DialectArgView(collections.MutableMapping): """A dictionary view of dialect-level arguments in the form _. 
""" def __init__(self, obj): self.obj = obj def _key(self, key): try: dialect, value_key = key.split("_", 1) except ValueError: raise KeyError(key) else: return dialect, value_key def __getitem__(self, key): dialect, value_key = self._key(key) try: opt = self.obj.dialect_options[dialect] except exc.NoSuchModuleError: raise KeyError(key) else: return opt[value_key] def __setitem__(self, key, value): try: dialect, value_key = self._key(key) except KeyError: raise exc.ArgumentError( "Keys must be of the form _") else: self.obj.dialect_options[dialect][value_key] = value def __delitem__(self, key): dialect, value_key = self._key(key) del self.obj.dialect_options[dialect][value_key] def __len__(self): return sum(len(args._non_defaults) for args in self.obj.dialect_options.values()) def __iter__(self): return ( util.safe_kwarg("%s_%s" % (dialect_name, value_name)) for dialect_name in self.obj.dialect_options for value_name in self.obj.dialect_options[dialect_name]._non_defaults ) class _DialectArgDict(collections.MutableMapping): """A dictionary view of dialect-level arguments for a specific dialect. Maintains a separate collection of user-specified arguments and dialect-specified default arguments. """ def __init__(self): self._non_defaults = {} self._defaults = {} def __len__(self): return len(set(self._non_defaults).union(self._defaults)) def __iter__(self): return iter(set(self._non_defaults).union(self._defaults)) def __getitem__(self, key): if key in self._non_defaults: return self._non_defaults[key] else: return self._defaults[key] def __setitem__(self, key, value): self._non_defaults[key] = value def __delitem__(self, key): del self._non_defaults[key] class DialectKWArgs(object): """Establish the ability for a class to have dialect-specific arguments with defaults and constructor validation. The :class:`.DialectKWArgs` interacts with the :attr:`.DefaultDialect.construct_arguments` present on a dialect. .. seealso:: :attr:`.DefaultDialect.construct_arguments` """ @classmethod def argument_for(cls, dialect_name, argument_name, default): """Add a new kind of dialect-specific keyword argument for this class. E.g.:: Index.argument_for("mydialect", "length", None) some_index = Index('a', 'b', mydialect_length=5) The :meth:`.DialectKWArgs.argument_for` method is a per-argument way adding extra arguments to the :attr:`.DefaultDialect.construct_arguments` dictionary. This dictionary provides a list of argument names accepted by various schema-level constructs on behalf of a dialect. New dialects should typically specify this dictionary all at once as a data member of the dialect class. The use case for ad-hoc addition of argument names is typically for end-user code that is also using a custom compilation scheme which consumes the additional arguments. :param dialect_name: name of a dialect. The dialect must be locatable, else a :class:`.NoSuchModuleError` is raised. The dialect must also include an existing :attr:`.DefaultDialect.construct_arguments` collection, indicating that it participates in the keyword-argument validation and default system, else :class:`.ArgumentError` is raised. If the dialect does not include this collection, then any keyword argument can be specified on behalf of this dialect already. All dialects packaged within SQLAlchemy include this collection, however for third party dialects, support may vary. :param argument_name: name of the parameter. :param default: default value of the parameter. .. 

        .. versionadded:: 0.9.4

        """

        construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name]
        if construct_arg_dictionary is None:
            raise exc.ArgumentError(
                "Dialect '%s' does not have keyword-argument "
                "validation and defaults enabled" % dialect_name)
        if cls not in construct_arg_dictionary:
            construct_arg_dictionary[cls] = {}
        construct_arg_dictionary[cls][argument_name] = default

    @util.memoized_property
    def dialect_kwargs(self):
        """A collection of keyword arguments specified as dialect-specific
        options to this construct.

        The arguments are present here in their original
        ``<dialect>_<kwarg>`` format.  Only arguments that were actually
        passed are included; unlike the
        :attr:`.DialectKWArgs.dialect_options` collection, which contains
        all options known by this dialect including defaults.

        The collection is also writable; keys are accepted of the
        form ``<dialect>_<kwarg>`` where the value will be assembled
        into the list of options.

        .. versionadded:: 0.9.2

        .. versionchanged:: 0.9.4 The :attr:`.DialectKWArgs.dialect_kwargs`
           collection is now writable.

        .. seealso::

            :attr:`.DialectKWArgs.dialect_options` - nested dictionary form

        """
        return _DialectArgView(self)

    @property
    def kwargs(self):
        """A synonym for :attr:`.DialectKWArgs.dialect_kwargs`."""
        return self.dialect_kwargs

    @util.dependencies("sqlalchemy.dialects")
    def _kw_reg_for_dialect(dialects, dialect_name):
        dialect_cls = dialects.registry.load(dialect_name)
        if dialect_cls.construct_arguments is None:
            return None
        return dict(dialect_cls.construct_arguments)
    _kw_registry = util.PopulateDict(_kw_reg_for_dialect)

    def _kw_reg_for_dialect_cls(self, dialect_name):
        construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name]
        d = _DialectArgDict()

        if construct_arg_dictionary is None:
            d._defaults.update({"*": None})
        else:
            for cls in reversed(self.__class__.__mro__):
                if cls in construct_arg_dictionary:
                    d._defaults.update(construct_arg_dictionary[cls])
        return d

    @util.memoized_property
    def dialect_options(self):
        """A collection of keyword arguments specified as dialect-specific
        options to this construct.

        This is a two-level nested registry, keyed to ``<dialect_name>``
        and ``<argument_name>``.  For example, the ``postgresql_where``
        argument would be locatable as::

            arg = my_object.dialect_options['postgresql']['where']

        .. versionadded:: 0.9.2

        .. seealso::

            :attr:`.DialectKWArgs.dialect_kwargs` - flat dictionary form

        """

        return util.PopulateDict(
            util.portable_instancemethod(self._kw_reg_for_dialect_cls)
        )

    def _validate_dialect_kwargs(self, kwargs):
        # validate remaining kwargs that they all specify DB prefixes

        if not kwargs:
            return

        for k in kwargs:
            m = re.match('^(.+?)_(.+)$', k)
            if not m:
                raise TypeError(
                    "Additional arguments should be "
                    "named <dialectname>_<argument>, got '%s'" % k)
            dialect_name, arg_name = m.group(1, 2)

            try:
                construct_arg_dictionary = self.dialect_options[dialect_name]
            except exc.NoSuchModuleError:
                util.warn(
                    "Can't validate argument %r; can't "
                    "locate any SQLAlchemy dialect named %r" %
                    (k, dialect_name))
                self.dialect_options[dialect_name] = d = _DialectArgDict()
                d._defaults.update({"*": None})
                d._non_defaults[arg_name] = kwargs[k]
            else:
                if "*" not in construct_arg_dictionary and \
                        arg_name not in construct_arg_dictionary:
                    raise exc.ArgumentError(
                        "Argument %r is not accepted by "
                        "dialect %r on behalf of %r" % (
                            k,
                            dialect_name, self.__class__
                        ))
                else:
                    construct_arg_dictionary[arg_name] = kwargs[k]


class Generative(object):
    """Allow a ClauseElement to generate itself via the
    @_generative decorator.
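
    E.g. a sketch of the pattern (the ``prefix`` method and
    ``MyStatement`` class are illustrative only)::

        class MyStatement(Generative):
            _prefix = None

            @_generative
            def prefix(self, value):
                # @_generative has already copied self via _generate();
                # mutations here apply to the copy, which is returned
                self._prefix = value

        stmt2 = stmt.prefix('x')   # stmt itself is left unchanged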
""" def _generate(self): s = self.__class__.__new__(self.__class__) s.__dict__ = self.__dict__.copy() return s class Executable(Generative): """Mark a ClauseElement as supporting execution. :class:`.Executable` is a superclass for all "statement" types of objects, including :func:`select`, :func:`delete`, :func:`update`, :func:`insert`, :func:`text`. """ supports_execution = True _execution_options = util.immutabledict() _bind = None @_generative def execution_options(self, **kw): """ Set non-SQL options for the statement which take effect during execution. Execution options can be set on a per-statement or per :class:`.Connection` basis. Additionally, the :class:`.Engine` and ORM :class:`~.orm.query.Query` objects provide access to execution options which they in turn configure upon connections. The :meth:`execution_options` method is generative. A new instance of this statement is returned that contains the options:: statement = select([table.c.x, table.c.y]) statement = statement.execution_options(autocommit=True) Note that only a subset of possible execution options can be applied to a statement - these include "autocommit" and "stream_results", but not "isolation_level" or "compiled_cache". See :meth:`.Connection.execution_options` for a full list of possible options. .. seealso:: :meth:`.Connection.execution_options()` :meth:`.Query.execution_options()` """ if 'isolation_level' in kw: raise exc.ArgumentError( "'isolation_level' execution option may only be specified " "on Connection.execution_options(), or " "per-engine using the isolation_level " "argument to create_engine()." ) if 'compiled_cache' in kw: raise exc.ArgumentError( "'compiled_cache' execution option may only be specified " "on Connection.execution_options(), not per statement." ) self._execution_options = self._execution_options.union(kw) def execute(self, *multiparams, **params): """Compile and execute this :class:`.Executable`.""" e = self.bind if e is None: label = getattr(self, 'description', self.__class__.__name__) msg = ('This %s is not directly bound to a Connection or Engine.' 'Use the .execute() method of a Connection or Engine ' 'to execute this construct.' % label) raise exc.UnboundExecutionError(msg) return e._execute_clauseelement(self, multiparams, params) def scalar(self, *multiparams, **params): """Compile and execute this :class:`.Executable`, returning the result's scalar representation. """ return self.execute(*multiparams, **params).scalar() @property def bind(self): """Returns the :class:`.Engine` or :class:`.Connection` to which this :class:`.Executable` is bound, or None if none found. This is a traversal which checks locally, then checks among the "from" clauses of associated objects until a bound engine or connection is found. """ if self._bind is not None: return self._bind for f in _from_objects(self): if f is self: continue engine = f.bind if engine is not None: return engine else: return None class SchemaEventTarget(object): """Base class for elements that are the targets of :class:`.DDLEvents` events. This includes :class:`.SchemaItem` as well as :class:`.SchemaType`. 
""" def _set_parent(self, parent): """Associate with this SchemaEvent's parent object.""" raise NotImplementedError() def _set_parent_with_dispatch(self, parent): self.dispatch.before_parent_attach(self, parent) self._set_parent(parent) self.dispatch.after_parent_attach(self, parent) class SchemaVisitor(ClauseVisitor): """Define the visiting for ``SchemaItem`` objects.""" __traverse_options__ = {'schema_visitor': True} class ColumnCollection(util.OrderedProperties): """An ordered dictionary that stores a list of ColumnElement instances. Overrides the ``__eq__()`` method to produce SQL clauses between sets of correlated columns. """ __slots__ = '_all_col_set', '_all_columns' def __init__(self, *columns): super(ColumnCollection, self).__init__() object.__setattr__(self, '_all_col_set', util.column_set()) object.__setattr__(self, '_all_columns', []) for c in columns: self.add(c) def __str__(self): return repr([str(c) for c in self]) def replace(self, column): """add the given column to this collection, removing unaliased versions of this column as well as existing columns with the same key. e.g.:: t = Table('sometable', metadata, Column('col1', Integer)) t.columns.replace(Column('col1', Integer, key='columnone')) will remove the original 'col1' from the collection, and add the new column under the name 'columnname'. Used by schema.Column to override columns during table reflection. """ remove_col = None if column.name in self and column.key != column.name: other = self[column.name] if other.name == other.key: remove_col = other self._all_col_set.remove(other) del self._data[other.key] if column.key in self._data: remove_col = self._data[column.key] self._all_col_set.remove(remove_col) self._all_col_set.add(column) self._data[column.key] = column if remove_col is not None: self._all_columns[:] = [column if c is remove_col else c for c in self._all_columns] else: self._all_columns.append(column) def add(self, column): """Add a column to this collection. The key attribute of the column will be used as the hash key for this dictionary. """ if not column.key: raise exc.ArgumentError( "Can't add unnamed column to column collection") self[column.key] = column def __delitem__(self, key): raise NotImplementedError() def __setattr__(self, key, object): raise NotImplementedError() def __setitem__(self, key, value): if key in self: # this warning is primarily to catch select() statements # which have conflicting column names in their exported # columns collection existing = self[key] if not existing.shares_lineage(value): util.warn('Column %r on table %r being replaced by ' '%r, which has the same key. Consider ' 'use_labels for select() statements.' 
% (key, getattr(existing, 'table', None), value)) # pop out memoized proxy_set as this # operation may very well be occurring # in a _make_proxy operation util.memoized_property.reset(value, "proxy_set") self._all_col_set.add(value) self._all_columns.append(value) self._data[key] = value def clear(self): raise NotImplementedError() def remove(self, column): del self._data[column.key] self._all_col_set.remove(column) self._all_columns[:] = [ c for c in self._all_columns if c is not column] def update(self, iter): cols = list(iter) self._all_columns.extend( c for label, c in cols if c not in self._all_col_set) self._all_col_set.update(c for label, c in cols) self._data.update((label, c) for label, c in cols) def extend(self, iter): cols = list(iter) self._all_columns.extend(c for c in cols if c not in self._all_col_set) self._all_col_set.update(cols) self._data.update((c.key, c) for c in cols) __hash__ = None @util.dependencies("sqlalchemy.sql.elements") def __eq__(self, elements, other): l = [] for c in getattr(other, "_all_columns", other): for local in self._all_columns: if c.shares_lineage(local): l.append(c == local) return elements.and_(*l) def __contains__(self, other): if not isinstance(other, util.string_types): raise exc.ArgumentError("__contains__ requires a string argument") return util.OrderedProperties.__contains__(self, other) def __getstate__(self): return {'_data': self._data, '_all_columns': self._all_columns} def __setstate__(self, state): object.__setattr__(self, '_data', state['_data']) object.__setattr__(self, '_all_columns', state['_all_columns']) object.__setattr__( self, '_all_col_set', util.column_set(state['_all_columns'])) def contains_column(self, col): # this has to be done via set() membership return col in self._all_col_set def as_immutable(self): return ImmutableColumnCollection( self._data, self._all_col_set, self._all_columns) class ImmutableColumnCollection(util.ImmutableProperties, ColumnCollection): def __init__(self, data, colset, all_columns): util.ImmutableProperties.__init__(self, data) object.__setattr__(self, '_all_col_set', colset) object.__setattr__(self, '_all_columns', all_columns) extend = remove = util.ImmutableProperties._immutable class ColumnSet(util.ordered_column_set): def contains_column(self, col): return col in self def extend(self, cols): for col in cols: self.add(col) def __add__(self, other): return list(self) + list(other) @util.dependencies("sqlalchemy.sql.elements") def __eq__(self, elements, other): l = [] for c in other: for local in self: if c.shares_lineage(local): l.append(c == local) return elements.and_(*l) def __hash__(self): return hash(tuple(x for x in self)) def _bind_or_error(schemaitem, msg=None): bind = schemaitem.bind if not bind: name = schemaitem.__class__.__name__ label = getattr(schemaitem, 'fullname', getattr(schemaitem, 'name', None)) if label: item = '%s object %r' % (name, label) else: item = '%s object' % name if msg is None: msg = "%s is not bound to an Engine or Connection. "\ "Execution can not proceed without a database to execute "\ "against." 
% item raise exc.UnboundExecutionError(msg) return bind SQLAlchemy-1.0.11/lib/sqlalchemy/sql/default_comparator.py0000664000175000017500000002425112636375552024600 0ustar classicclassic00000000000000# sql/default_comparator.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Default implementation of SQL comparison operations. """ from .. import exc, util from . import type_api from . import operators from .elements import BindParameter, True_, False_, BinaryExpression, \ Null, _const_expr, _clause_element_as_expr, \ ClauseList, ColumnElement, TextClause, UnaryExpression, \ collate, _is_literal, _literal_as_text, ClauseElement, and_, or_ from .selectable import SelectBase, Alias, Selectable, ScalarSelect def _boolean_compare(expr, op, obj, negate=None, reverse=False, _python_is_types=(util.NoneType, bool), result_type = None, **kwargs): if result_type is None: result_type = type_api.BOOLEANTYPE if isinstance(obj, _python_is_types + (Null, True_, False_)): # allow x ==/!= True/False to be treated as a literal. # this comes out to "== / != true/false" or "1/0" if those # constants aren't supported and works on all platforms if op in (operators.eq, operators.ne) and \ isinstance(obj, (bool, True_, False_)): return BinaryExpression(expr, _literal_as_text(obj), op, type_=result_type, negate=negate, modifiers=kwargs) else: # all other None/True/False uses IS, IS NOT if op in (operators.eq, operators.is_): return BinaryExpression(expr, _const_expr(obj), operators.is_, negate=operators.isnot) elif op in (operators.ne, operators.isnot): return BinaryExpression(expr, _const_expr(obj), operators.isnot, negate=operators.is_) else: raise exc.ArgumentError( "Only '=', '!=', 'is_()', 'isnot()' operators can " "be used with None/True/False") else: obj = _check_literal(expr, op, obj) if reverse: return BinaryExpression(obj, expr, op, type_=result_type, negate=negate, modifiers=kwargs) else: return BinaryExpression(expr, obj, op, type_=result_type, negate=negate, modifiers=kwargs) def _binary_operate(expr, op, obj, reverse=False, result_type=None, **kw): obj = _check_literal(expr, op, obj) if reverse: left, right = obj, expr else: left, right = expr, obj if result_type is None: op, result_type = left.comparator._adapt_expression( op, right.comparator) return BinaryExpression( left, right, op, type_=result_type, modifiers=kw) def _conjunction_operate(expr, op, other, **kw): if op is operators.and_: return and_(expr, other) elif op is operators.or_: return or_(expr, other) else: raise NotImplementedError() def _scalar(expr, op, fn, **kw): return fn(expr) def _in_impl(expr, op, seq_or_selectable, negate_op, **kw): seq_or_selectable = _clause_element_as_expr(seq_or_selectable) if isinstance(seq_or_selectable, ScalarSelect): return _boolean_compare(expr, op, seq_or_selectable, negate=negate_op) elif isinstance(seq_or_selectable, SelectBase): # TODO: if we ever want to support (x, y, z) IN (select x, # y, z from table), we would need a multi-column version of # as_scalar() to produce a multi- column selectable that # does not export itself as a FROM clause return _boolean_compare( expr, op, seq_or_selectable.as_scalar(), negate=negate_op, **kw) elif isinstance(seq_or_selectable, (Selectable, TextClause)): return _boolean_compare(expr, op, seq_or_selectable, negate=negate_op, **kw) elif isinstance(seq_or_selectable, ClauseElement): raise exc.InvalidRequestError( 
'in_() accepts' ' either a list of expressions ' 'or a selectable: %r' % seq_or_selectable) # Handle non selectable arguments as sequences args = [] for o in seq_or_selectable: if not _is_literal(o): if not isinstance(o, operators.ColumnOperators): raise exc.InvalidRequestError( 'in_() accepts' ' either a list of expressions ' 'or a selectable: %r' % o) elif o is None: o = Null() else: o = expr._bind_param(op, o) args.append(o) if len(args) == 0: # Special case handling for empty IN's, behave like # comparison against zero row selectable. We use != to # build the contradiction as it handles NULL values # appropriately, i.e. "not (x IN ())" should not return NULL # values for x. util.warn('The IN-predicate on "%s" was invoked with an ' 'empty sequence. This results in a ' 'contradiction, which nonetheless can be ' 'expensive to evaluate. Consider alternative ' 'strategies for improved performance.' % expr) if op is operators.in_op: return expr != expr else: return expr == expr return _boolean_compare(expr, op, ClauseList(*args).self_group(against=op), negate=negate_op) def _unsupported_impl(expr, op, *arg, **kw): raise NotImplementedError("Operator '%s' is not supported on " "this expression" % op.__name__) def _inv_impl(expr, op, **kw): """See :meth:`.ColumnOperators.__inv__`.""" if hasattr(expr, 'negation_clause'): return expr.negation_clause else: return expr._negate() def _neg_impl(expr, op, **kw): """See :meth:`.ColumnOperators.__neg__`.""" return UnaryExpression(expr, operator=operators.neg) def _match_impl(expr, op, other, **kw): """See :meth:`.ColumnOperators.match`.""" return _boolean_compare( expr, operators.match_op, _check_literal( expr, operators.match_op, other), result_type=type_api.MATCHTYPE, negate=operators.notmatch_op if op is operators.match_op else operators.match_op, **kw ) def _distinct_impl(expr, op, **kw): """See :meth:`.ColumnOperators.distinct`.""" return UnaryExpression(expr, operator=operators.distinct_op, type_=expr.type) def _between_impl(expr, op, cleft, cright, **kw): """See :meth:`.ColumnOperators.between`.""" return BinaryExpression( expr, ClauseList( _check_literal(expr, operators.and_, cleft), _check_literal(expr, operators.and_, cright), operator=operators.and_, group=False, group_contents=False), op, negate=operators.notbetween_op if op is operators.between_op else operators.between_op, modifiers=kw) def _collate_impl(expr, op, other, **kw): return collate(expr, other) # a mapping of operators with the method they use, along with # their negated operator for comparison operators operator_lookup = { "and_": (_conjunction_operate,), "or_": (_conjunction_operate,), "inv": (_inv_impl,), "add": (_binary_operate,), "mul": (_binary_operate,), "sub": (_binary_operate,), "div": (_binary_operate,), "mod": (_binary_operate,), "truediv": (_binary_operate,), "custom_op": (_binary_operate,), "concat_op": (_binary_operate,), "lt": (_boolean_compare, operators.ge), "le": (_boolean_compare, operators.gt), "ne": (_boolean_compare, operators.eq), "gt": (_boolean_compare, operators.le), "ge": (_boolean_compare, operators.lt), "eq": (_boolean_compare, operators.ne), "like_op": (_boolean_compare, operators.notlike_op), "ilike_op": (_boolean_compare, operators.notilike_op), "notlike_op": (_boolean_compare, operators.like_op), "notilike_op": (_boolean_compare, operators.ilike_op), "contains_op": (_boolean_compare, operators.notcontains_op), "startswith_op": (_boolean_compare, operators.notstartswith_op), "endswith_op": (_boolean_compare, operators.notendswith_op), "desc_op": 
        (_scalar, UnaryExpression._create_desc),
    "asc_op": (_scalar, UnaryExpression._create_asc),
    "nullsfirst_op": (_scalar, UnaryExpression._create_nullsfirst),
    "nullslast_op": (_scalar, UnaryExpression._create_nullslast),
    "in_op": (_in_impl, operators.notin_op),
    "notin_op": (_in_impl, operators.in_op),
    "is_": (_boolean_compare, operators.is_),
    "isnot": (_boolean_compare, operators.isnot),
    "collate": (_collate_impl,),
    "match_op": (_match_impl,),
    "notmatch_op": (_match_impl,),
    "distinct_op": (_distinct_impl,),
    "between_op": (_between_impl, ),
    "notbetween_op": (_between_impl, ),
    "neg": (_neg_impl,),
    "getitem": (_unsupported_impl,),
    "lshift": (_unsupported_impl,),
    "rshift": (_unsupported_impl,),
}


def _check_literal(expr, operator, other):
    if isinstance(other, (ColumnElement, TextClause)):
        if isinstance(other, BindParameter) and \
                other.type._isnull:
            other = other._clone()
            other.type = expr.type
        return other
    elif hasattr(other, '__clause_element__'):
        other = other.__clause_element__()
    elif isinstance(other, type_api.TypeEngine.Comparator):
        other = other.expr

    if isinstance(other, (SelectBase, Alias)):
        return other.as_scalar()
    elif not isinstance(other, (ColumnElement, TextClause)):
        return expr._bind_param(operator, other)
    else:
        return other
SQLAlchemy-1.0.11/lib/sqlalchemy/sql/compiler.py0000664000175000017500000030440012636375552022534 0ustar classicclassic00000000000000# sql/compiler.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Base SQL and DDL compiler implementations.

Classes provided include:

:class:`.compiler.SQLCompiler` - renders SQL
strings

:class:`.compiler.DDLCompiler` - renders DDL
(data definition language) strings

:class:`.compiler.GenericTypeCompiler` - renders
type specification strings.

To generate user-defined SQL strings, see
:doc:`/ext/compiler`.

"""

import contextlib
import re
from . import schema, sqltypes, operators, functions, visitors, \
    elements, selectable, crud
from .. import util, exc
import itertools

RESERVED_WORDS = set([
    'all', 'analyse', 'analyze', 'and', 'any', 'array',
    'as', 'asc', 'asymmetric', 'authorization', 'between',
    'binary', 'both', 'case', 'cast', 'check', 'collate',
    'column', 'constraint', 'create', 'cross', 'current_date',
    'current_role', 'current_time', 'current_timestamp',
    'current_user', 'default', 'deferrable', 'desc',
    'distinct', 'do', 'else', 'end', 'except', 'false',
    'for', 'foreign', 'freeze', 'from', 'full', 'grant',
    'group', 'having', 'ilike', 'in', 'initially', 'inner',
    'intersect', 'into', 'is', 'isnull', 'join', 'leading',
    'left', 'like', 'limit', 'localtime', 'localtimestamp',
    'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset',
    'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps',
    'placing', 'primary', 'references', 'right', 'select',
    'session_user', 'set', 'similar', 'some', 'symmetric', 'table',
    'then', 'to', 'trailing', 'true', 'union', 'unique', 'user',
    'using', 'verbose', 'when', 'where'])

LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I)
ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in range(0, 10)]).union(['$'])

BIND_PARAMS = re.compile(r'(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE)
BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]*)(?![:\w\$])', re.UNICODE)

BIND_TEMPLATES = {
    'pyformat': "%%(%(name)s)s",
    'qmark': "?",
    'format': "%%s",
    'numeric': ":[_POSITION]",
    'named': ":%(name)s"
}

OPERATORS = {
    # binary
    operators.and_: ' AND ',
    operators.or_: ' OR ',
    operators.add: ' + ',
    operators.mul: ' * ',
    operators.sub: ' - ',
    operators.div: ' / ',
    operators.mod: ' % ',
    operators.truediv: ' / ',
    operators.neg: '-',
    operators.lt: ' < ',
    operators.le: ' <= ',
    operators.ne: ' != ',
    operators.gt: ' > 
', operators.ge: ' >= ', operators.eq: ' = ', operators.concat_op: ' || ', operators.match_op: ' MATCH ', operators.notmatch_op: ' NOT MATCH ', operators.in_op: ' IN ', operators.notin_op: ' NOT IN ', operators.comma_op: ', ', operators.from_: ' FROM ', operators.as_: ' AS ', operators.is_: ' IS ', operators.isnot: ' IS NOT ', operators.collate: ' COLLATE ', # unary operators.exists: 'EXISTS ', operators.distinct_op: 'DISTINCT ', operators.inv: 'NOT ', # modifiers operators.desc_op: ' DESC', operators.asc_op: ' ASC', operators.nullsfirst_op: ' NULLS FIRST', operators.nullslast_op: ' NULLS LAST', } FUNCTIONS = { functions.coalesce: 'coalesce%(expr)s', functions.current_date: 'CURRENT_DATE', functions.current_time: 'CURRENT_TIME', functions.current_timestamp: 'CURRENT_TIMESTAMP', functions.current_user: 'CURRENT_USER', functions.localtime: 'LOCALTIME', functions.localtimestamp: 'LOCALTIMESTAMP', functions.random: 'random%(expr)s', functions.sysdate: 'sysdate', functions.session_user: 'SESSION_USER', functions.user: 'USER' } EXTRACT_MAP = { 'month': 'month', 'day': 'day', 'year': 'year', 'second': 'second', 'hour': 'hour', 'doy': 'doy', 'minute': 'minute', 'quarter': 'quarter', 'dow': 'dow', 'week': 'week', 'epoch': 'epoch', 'milliseconds': 'milliseconds', 'microseconds': 'microseconds', 'timezone_hour': 'timezone_hour', 'timezone_minute': 'timezone_minute' } COMPOUND_KEYWORDS = { selectable.CompoundSelect.UNION: 'UNION', selectable.CompoundSelect.UNION_ALL: 'UNION ALL', selectable.CompoundSelect.EXCEPT: 'EXCEPT', selectable.CompoundSelect.EXCEPT_ALL: 'EXCEPT ALL', selectable.CompoundSelect.INTERSECT: 'INTERSECT', selectable.CompoundSelect.INTERSECT_ALL: 'INTERSECT ALL' } class Compiled(object): """Represent a compiled SQL or DDL expression. The ``__str__`` method of the ``Compiled`` object should produce the actual text of the statement. ``Compiled`` objects are specific to their underlying database dialect, and also may or may not be specific to the columns referenced within a particular set of bind parameters. In no case should the ``Compiled`` object be dependent on the actual values of those bind parameters, even though it may reference those values as defaults. """ _cached_metadata = None def __init__(self, dialect, statement, bind=None, compile_kwargs=util.immutabledict()): """Construct a new ``Compiled`` object. :param dialect: ``Dialect`` to compile against. :param statement: ``ClauseElement`` to be compiled. :param bind: Optional Engine or Connection to compile this statement against. :param compile_kwargs: additional kwargs that will be passed to the initial call to :meth:`.Compiled.process`. .. versionadded:: 0.8 """ self.dialect = dialect self.bind = bind if statement is not None: self.statement = statement self.can_execute = statement.supports_execution self.string = self.process(self.statement, **compile_kwargs) @util.deprecated("0.7", ":class:`.Compiled` objects now compile " "within the constructor.") def compile(self): """Produce the internal string representation of this element. """ pass def _execute_on_connection(self, connection, multiparams, params): return connection._execute_compiled(self, multiparams, params) @property def sql_compiler(self): """Return a Compiled that is capable of processing SQL expressions. If this compiler is one, it would likely just return 'self'. 
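
        E.g. a sketch of how a DDL compiler might implement this by
        returning the dialect's statement compiler (illustrative only;
        a SQL compiler would simply return ``self``)::

            @property
            def sql_compiler(self):
                return self.dialect.statement_compiler(self.dialect, None)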
""" raise NotImplementedError() def process(self, obj, **kwargs): return obj._compiler_dispatch(self, **kwargs) def __str__(self): """Return the string text of the generated SQL or DDL.""" return self.string or '' def construct_params(self, params=None): """Return the bind params for this compiled object. :param params: a dict of string/object pairs whose values will override bind values compiled in to the statement. """ raise NotImplementedError() @property def params(self): """Return the bind params for this compiled object.""" return self.construct_params() def execute(self, *multiparams, **params): """Execute this compiled object.""" e = self.bind if e is None: raise exc.UnboundExecutionError( "This Compiled object is not bound to any Engine " "or Connection.") return e._execute_compiled(self, multiparams, params) def scalar(self, *multiparams, **params): """Execute this compiled object and return the result's scalar value.""" return self.execute(*multiparams, **params).scalar() class TypeCompiler(util.with_metaclass(util.EnsureKWArgType, object)): """Produces DDL specification for TypeEngine objects.""" ensure_kwarg = 'visit_\w+' def __init__(self, dialect): self.dialect = dialect def process(self, type_, **kw): return type_._compiler_dispatch(self, **kw) class _CompileLabel(visitors.Visitable): """lightweight label object which acts as an expression.Label.""" __visit_name__ = 'label' __slots__ = 'element', 'name' def __init__(self, col, name, alt_names=()): self.element = col self.name = name self._alt_names = (col,) + alt_names @property def proxy_set(self): return self.element.proxy_set @property def type(self): return self.element.type class SQLCompiler(Compiled): """Default implementation of Compiled. Compiles ClauseElements into SQL strings. Uses a similar visit paradigm as visitors.ClauseVisitor but implements its own traversal. """ extract_map = EXTRACT_MAP compound_keywords = COMPOUND_KEYWORDS isdelete = isinsert = isupdate = False """class-level defaults which can be set at the instance level to define if this Compiled instance represents INSERT/UPDATE/DELETE """ returning = None """holds the "returning" collection of columns if the statement is CRUD and defines returning columns either implicitly or explicitly """ returning_precedes_values = False """set to True classwide to generate RETURNING clauses before the VALUES or WHERE clause (i.e. MSSQL) """ render_table_with_column_in_update_from = False """set to True classwide to indicate the SET clause in a multi-table UPDATE statement should qualify columns with the table name (i.e. MySQL only) """ ansi_bind_rules = False """SQL 92 doesn't allow bind parameters to be used in the columns clause of a SELECT, nor does it allow ambiguous expressions like "? = ?". A compiler subclass can set this flag to False if the target driver/DB enforces this """ def __init__(self, dialect, statement, column_keys=None, inline=False, **kwargs): """Construct a new ``DefaultCompiler`` object. dialect Dialect to be used statement ClauseElement to be compiled column_keys a list of column names to be compiled into an INSERT or UPDATE statement. """ self.column_keys = column_keys # compile INSERT/UPDATE defaults/sequences inlined (no pre- # execute) self.inline = inline or getattr(statement, 'inline', False) # a dictionary of bind parameter keys to BindParameter # instances. 
self.binds = {} # a dictionary of BindParameter instances to "compiled" names # that are actually present in the generated SQL self.bind_names = util.column_dict() # stack which keeps track of nested SELECT statements self.stack = [] # relates label names in the final SQL to a tuple of local # column/label name, ColumnElement object (if any) and # TypeEngine. ResultProxy uses this for type processing and # column targeting self._result_columns = [] # if False, means we can't be sure the list of entries # in _result_columns is actually the rendered order. This # gets flipped when we use TextAsFrom, for example. self._ordered_columns = True # true if the paramstyle is positional self.positional = dialect.positional if self.positional: self.positiontup = [] self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle] self.ctes = None # an IdentifierPreparer that formats the quoting of identifiers self.preparer = dialect.identifier_preparer self.label_length = dialect.label_length \ or dialect.max_identifier_length # a map which tracks "anonymous" identifiers that are created on # the fly here self.anon_map = util.PopulateDict(self._process_anon) # a map which tracks "truncated" names based on # dialect.label_length or dialect.max_identifier_length self.truncated_names = {} Compiled.__init__(self, dialect, statement, **kwargs) if self.positional and dialect.paramstyle == 'numeric': self._apply_numbered_params() @util.memoized_instancemethod def _init_cte_state(self): """Initialize collections related to CTEs only if a CTE is located, to save on the overhead of these collections otherwise. """ # collect CTEs to tack on top of a SELECT self.ctes = util.OrderedDict() self.ctes_by_name = {} self.ctes_recursive = False if self.positional: self.cte_positional = {} @contextlib.contextmanager def _nested_result(self): """special API to support the use case of 'nested result sets'""" result_columns, ordered_columns = ( self._result_columns, self._ordered_columns) self._result_columns, self._ordered_columns = [], False try: if self.stack: entry = self.stack[-1] entry['need_result_map_for_nested'] = True else: entry = None yield self._result_columns, self._ordered_columns finally: if entry: entry.pop('need_result_map_for_nested') self._result_columns, self._ordered_columns = ( result_columns, ordered_columns) def _apply_numbered_params(self): poscount = itertools.count(1) self.string = re.sub( r'\[_POSITION\]', lambda m: str(util.next(poscount)), self.string) @util.memoized_property def _bind_processors(self): return dict( (key, value) for key, value in ((self.bind_names[bindparam], bindparam.type._cached_bind_processor(self.dialect)) for bindparam in self.bind_names) if value is not None ) def is_subquery(self): return len(self.stack) > 1 @property def sql_compiler(self): return self def construct_params(self, params=None, _group_number=None, _check=True): """return a dictionary of bind parameter keys and values""" if params: pd = {} for bindparam in self.bind_names: name = self.bind_names[bindparam] if bindparam.key in params: pd[name] = params[bindparam.key] elif name in params: pd[name] = params[name] elif _check and bindparam.required: if _group_number: raise exc.InvalidRequestError( "A value is required for bind parameter %r, " "in parameter group %d" % (bindparam.key, _group_number)) else: raise exc.InvalidRequestError( "A value is required for bind parameter %r" % bindparam.key) elif bindparam.callable: pd[name] = bindparam.effective_value else: pd[name] = bindparam.value return pd else: pd = {} for 
bindparam in self.bind_names: if _check and bindparam.required: if _group_number: raise exc.InvalidRequestError( "A value is required for bind parameter %r, " "in parameter group %d" % (bindparam.key, _group_number)) else: raise exc.InvalidRequestError( "A value is required for bind parameter %r" % bindparam.key) if bindparam.callable: pd[self.bind_names[bindparam]] = bindparam.effective_value else: pd[self.bind_names[bindparam]] = bindparam.value return pd @property def params(self): """Return the bind param dictionary embedded into this compiled object, for those values that are present.""" return self.construct_params(_check=False) @util.dependencies("sqlalchemy.engine.result") def _create_result_map(self, result): """utility method used for unit tests only.""" return result.ResultMetaData._create_result_map(self._result_columns) def default_from(self): """Called when a SELECT statement has no froms, and no FROM clause is to be appended. Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output. """ return "" def visit_grouping(self, grouping, asfrom=False, **kwargs): return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")" def visit_label_reference( self, element, within_columns_clause=False, **kwargs): if self.stack and self.dialect.supports_simple_order_by_label: selectable = self.stack[-1]['selectable'] with_cols, only_froms = selectable._label_resolve_dict if within_columns_clause: resolve_dict = only_froms else: resolve_dict = with_cols # this can be None in the case that a _label_reference() # were subject to a replacement operation, in which case # the replacement of the Label element may have changed # to something else like a ColumnClause expression. order_by_elem = element.element._order_by_label_element if order_by_elem is not None and order_by_elem.name in \ resolve_dict: kwargs['render_label_as_label'] = \ element.element._order_by_label_element return self.process( element.element, within_columns_clause=within_columns_clause, **kwargs) def visit_textual_label_reference( self, element, within_columns_clause=False, **kwargs): if not self.stack: # compiling the element outside of the context of a SELECT return self.process( element._text_clause ) selectable = self.stack[-1]['selectable'] with_cols, only_froms = selectable._label_resolve_dict try: if within_columns_clause: col = only_froms[element.element] else: col = with_cols[element.element] except KeyError: # treat it like text() util.warn_limited( "Can't resolve label reference %r; converting to text()", util.ellipses_string(element.element)) return self.process( element._text_clause ) else: kwargs['render_label_as_label'] = col return self.process( col, within_columns_clause=within_columns_clause, **kwargs) def visit_label(self, label, add_to_result_map=None, within_label_clause=False, within_columns_clause=False, render_label_as_label=None, **kw): # only render labels within the columns clause # or ORDER BY clause of a select. dialect-specific compilers # can modify this behavior. 
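        # (e.g. "SELECT col AS mylabel ... ORDER BY mylabel")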
render_label_with_as = (within_columns_clause and not within_label_clause) render_label_only = render_label_as_label is label if render_label_only or render_label_with_as: if isinstance(label.name, elements._truncated_label): labelname = self._truncated_identifier("colident", label.name) else: labelname = label.name if render_label_with_as: if add_to_result_map is not None: add_to_result_map( labelname, label.name, (label, labelname, ) + label._alt_names, label.type ) return label.element._compiler_dispatch( self, within_columns_clause=True, within_label_clause=True, **kw) + \ OPERATORS[operators.as_] + \ self.preparer.format_label(label, labelname) elif render_label_only: return self.preparer.format_label(label, labelname) else: return label.element._compiler_dispatch( self, within_columns_clause=False, **kw) def visit_column(self, column, add_to_result_map=None, include_table=True, **kwargs): name = orig_name = column.name if name is None: raise exc.CompileError("Cannot compile Column object until " "its 'name' is assigned.") is_literal = column.is_literal if not is_literal and isinstance(name, elements._truncated_label): name = self._truncated_identifier("colident", name) if add_to_result_map is not None: add_to_result_map( name, orig_name, (column, name, column.key), column.type ) if is_literal: name = self.escape_literal_column(name) else: name = self.preparer.quote(name) table = column.table if table is None or not include_table or not table.named_with_column: return name else: if table.schema: schema_prefix = self.preparer.quote_schema(table.schema) + '.' else: schema_prefix = '' tablename = table.name if isinstance(tablename, elements._truncated_label): tablename = self._truncated_identifier("alias", tablename) return schema_prefix + \ self.preparer.quote(tablename) + \ "." 
+ name def escape_literal_column(self, text): """provide escaping for the literal_column() construct.""" # TODO: some dialects might need different behavior here return text.replace('%', '%%') def visit_fromclause(self, fromclause, **kwargs): return fromclause.name def visit_index(self, index, **kwargs): return index.name def visit_typeclause(self, typeclause, **kw): kw['type_expression'] = typeclause return self.dialect.type_compiler.process(typeclause.type, **kw) def post_process_text(self, text): return text def visit_textclause(self, textclause, **kw): def do_bindparam(m): name = m.group(1) if name in textclause._bindparams: return self.process(textclause._bindparams[name], **kw) else: return self.bindparam_string(name, **kw) # un-escape any \:params return BIND_PARAMS_ESC.sub( lambda m: m.group(1), BIND_PARAMS.sub( do_bindparam, self.post_process_text(textclause.text)) ) def visit_text_as_from(self, taf, compound_index=None, asfrom=False, parens=True, **kw): toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] populate_result_map = toplevel or \ ( compound_index == 0 and entry.get( 'need_result_map_for_compound', False) ) or entry.get('need_result_map_for_nested', False) if populate_result_map: self._ordered_columns = False for c in taf.column_args: self.process(c, within_columns_clause=True, add_to_result_map=self._add_to_result_map) text = self.process(taf.element, **kw) if asfrom and parens: text = "(%s)" % text return text def visit_null(self, expr, **kw): return 'NULL' def visit_true(self, expr, **kw): if self.dialect.supports_native_boolean: return 'true' else: return "1" def visit_false(self, expr, **kw): if self.dialect.supports_native_boolean: return 'false' else: return "0" def visit_clauselist(self, clauselist, **kw): sep = clauselist.operator if sep is None: sep = " " else: sep = OPERATORS[clauselist.operator] return sep.join( s for s in ( c._compiler_dispatch(self, **kw) for c in clauselist.clauses) if s) def visit_case(self, clause, **kwargs): x = "CASE " if clause.value is not None: x += clause.value._compiler_dispatch(self, **kwargs) + " " for cond, result in clause.whens: x += "WHEN " + cond._compiler_dispatch( self, **kwargs ) + " THEN " + result._compiler_dispatch( self, **kwargs) + " " if clause.else_ is not None: x += "ELSE " + clause.else_._compiler_dispatch( self, **kwargs ) + " " x += "END" return x def visit_cast(self, cast, **kwargs): return "CAST(%s AS %s)" % \ (cast.clause._compiler_dispatch(self, **kwargs), cast.typeclause._compiler_dispatch(self, **kwargs)) def visit_over(self, over, **kwargs): return "%s OVER (%s)" % ( over.func._compiler_dispatch(self, **kwargs), ' '.join( '%s BY %s' % (word, clause._compiler_dispatch(self, **kwargs)) for word, clause in ( ('PARTITION', over.partition_by), ('ORDER', over.order_by) ) if clause is not None and len(clause) ) ) def visit_funcfilter(self, funcfilter, **kwargs): return "%s FILTER (WHERE %s)" % ( funcfilter.func._compiler_dispatch(self, **kwargs), funcfilter.criterion._compiler_dispatch(self, **kwargs) ) def visit_extract(self, extract, **kwargs): field = self.extract_map.get(extract.field, extract.field) return "EXTRACT(%s FROM %s)" % ( field, extract.expr._compiler_dispatch(self, **kwargs)) def visit_function(self, func, add_to_result_map=None, **kwargs): if add_to_result_map is not None: add_to_result_map( func.name, func.name, (), func.type ) disp = getattr(self, "visit_%s_func" % func.name.lower(), None) if disp: return disp(func, **kwargs) else: name = 
FUNCTIONS.get(func.__class__, func.name + "%(expr)s") return ".".join(list(func.packagenames) + [name]) % \ {'expr': self.function_argspec(func, **kwargs)} def visit_next_value_func(self, next_value, **kw): return self.visit_sequence(next_value.sequence) def visit_sequence(self, sequence): raise NotImplementedError( "Dialect '%s' does not support sequence increments." % self.dialect.name ) def function_argspec(self, func, **kwargs): return func.clause_expr._compiler_dispatch(self, **kwargs) def visit_compound_select(self, cs, asfrom=False, parens=True, compound_index=0, **kwargs): toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] need_result_map = toplevel or \ (compound_index == 0 and entry.get('need_result_map_for_compound', False)) self.stack.append( { 'correlate_froms': entry['correlate_froms'], 'asfrom_froms': entry['asfrom_froms'], 'selectable': cs, 'need_result_map_for_compound': need_result_map }) keyword = self.compound_keywords.get(cs.keyword) text = (" " + keyword + " ").join( (c._compiler_dispatch(self, asfrom=asfrom, parens=False, compound_index=i, **kwargs) for i, c in enumerate(cs.selects)) ) group_by = cs._group_by_clause._compiler_dispatch( self, asfrom=asfrom, **kwargs) if group_by: text += " GROUP BY " + group_by text += self.order_by_clause(cs, **kwargs) text += (cs._limit_clause is not None or cs._offset_clause is not None) and \ self.limit_clause(cs, **kwargs) or "" if self.ctes and toplevel: text = self._render_cte_clause() + text self.stack.pop(-1) if asfrom and parens: return "(" + text + ")" else: return text def visit_unary(self, unary, **kw): if unary.operator: if unary.modifier: raise exc.CompileError( "Unary expression does not support operator " "and modifier simultaneously") disp = getattr(self, "visit_%s_unary_operator" % unary.operator.__name__, None) if disp: return disp(unary, unary.operator, **kw) else: return self._generate_generic_unary_operator( unary, OPERATORS[unary.operator], **kw) elif unary.modifier: disp = getattr(self, "visit_%s_unary_modifier" % unary.modifier.__name__, None) if disp: return disp(unary, unary.modifier, **kw) else: return self._generate_generic_unary_modifier( unary, OPERATORS[unary.modifier], **kw) else: raise exc.CompileError( "Unary expression has no operator or modifier") def visit_istrue_unary_operator(self, element, operator, **kw): if self.dialect.supports_native_boolean: return self.process(element.element, **kw) else: return "%s = 1" % self.process(element.element, **kw) def visit_isfalse_unary_operator(self, element, operator, **kw): if self.dialect.supports_native_boolean: return "NOT %s" % self.process(element.element, **kw) else: return "%s = 0" % self.process(element.element, **kw) def visit_notmatch_op_binary(self, binary, operator, **kw): return "NOT %s" % self.visit_binary( binary, override_operator=operators.match_op) def visit_binary(self, binary, override_operator=None, **kw): # don't allow "? = ?" 
to render if self.ansi_bind_rules and \ isinstance(binary.left, elements.BindParameter) and \ isinstance(binary.right, elements.BindParameter): kw['literal_binds'] = True operator_ = override_operator or binary.operator disp = getattr(self, "visit_%s_binary" % operator_.__name__, None) if disp: return disp(binary, operator_, **kw) else: try: opstring = OPERATORS[operator_] except KeyError: raise exc.UnsupportedCompilationError(self, operator_) else: return self._generate_generic_binary(binary, opstring, **kw) def visit_custom_op_binary(self, element, operator, **kw): return self._generate_generic_binary( element, " " + operator.opstring + " ", **kw) def visit_custom_op_unary_operator(self, element, operator, **kw): return self._generate_generic_unary_operator( element, operator.opstring + " ", **kw) def visit_custom_op_unary_modifier(self, element, operator, **kw): return self._generate_generic_unary_modifier( element, " " + operator.opstring, **kw) def _generate_generic_binary(self, binary, opstring, **kw): return binary.left._compiler_dispatch(self, **kw) + \ opstring + \ binary.right._compiler_dispatch(self, **kw) def _generate_generic_unary_operator(self, unary, opstring, **kw): return opstring + unary.element._compiler_dispatch(self, **kw) def _generate_generic_unary_modifier(self, unary, opstring, **kw): return unary.element._compiler_dispatch(self, **kw) + opstring @util.memoized_property def _like_percent_literal(self): return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE) def visit_contains_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__add__(binary.right).__add__(percent) return self.visit_like_op_binary(binary, operator, **kw) def visit_notcontains_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__add__(binary.right).__add__(percent) return self.visit_notlike_op_binary(binary, operator, **kw) def visit_startswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__radd__( binary.right ) return self.visit_like_op_binary(binary, operator, **kw) def visit_notstartswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__radd__( binary.right ) return self.visit_notlike_op_binary(binary, operator, **kw) def visit_endswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__add__(binary.right) return self.visit_like_op_binary(binary, operator, **kw) def visit_notendswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__add__(binary.right) return self.visit_notlike_op_binary(binary, operator, **kw) def visit_like_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) # TODO: use ternary here, not "and"/ "or" return '%s LIKE %s' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) \ + ( ' ESCAPE ' + self.render_literal_value(escape, sqltypes.STRINGTYPE) if escape else '' ) def visit_notlike_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return '%s NOT LIKE %s' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) \ + ( ' ESCAPE ' + self.render_literal_value(escape, 
sqltypes.STRINGTYPE) if escape else '' ) def visit_ilike_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return 'lower(%s) LIKE lower(%s)' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) \ + ( ' ESCAPE ' + self.render_literal_value(escape, sqltypes.STRINGTYPE) if escape else '' ) def visit_notilike_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return 'lower(%s) NOT LIKE lower(%s)' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) \ + ( ' ESCAPE ' + self.render_literal_value(escape, sqltypes.STRINGTYPE) if escape else '' ) def visit_between_op_binary(self, binary, operator, **kw): symmetric = binary.modifiers.get("symmetric", False) return self._generate_generic_binary( binary, " BETWEEN SYMMETRIC " if symmetric else " BETWEEN ", **kw) def visit_notbetween_op_binary(self, binary, operator, **kw): symmetric = binary.modifiers.get("symmetric", False) return self._generate_generic_binary( binary, " NOT BETWEEN SYMMETRIC " if symmetric else " NOT BETWEEN ", **kw) def visit_bindparam(self, bindparam, within_columns_clause=False, literal_binds=False, skip_bind_expression=False, **kwargs): if not skip_bind_expression and bindparam.type._has_bind_expression: bind_expression = bindparam.type.bind_expression(bindparam) return self.process(bind_expression, skip_bind_expression=True) if literal_binds or \ (within_columns_clause and self.ansi_bind_rules): if bindparam.value is None and bindparam.callable is None: raise exc.CompileError("Bind parameter '%s' without a " "renderable value not allowed here." % bindparam.key) return self.render_literal_bindparam( bindparam, within_columns_clause=True, **kwargs) name = self._truncate_bindparam(bindparam) if name in self.binds: existing = self.binds[name] if existing is not bindparam: if (existing.unique or bindparam.unique) and \ not existing.proxy_set.intersection( bindparam.proxy_set): raise exc.CompileError( "Bind parameter '%s' conflicts with " "unique bind parameter of the same name" % bindparam.key ) elif existing._is_crud or bindparam._is_crud: raise exc.CompileError( "bindparam() name '%s' is reserved " "for automatic usage in the VALUES or SET " "clause of this " "insert/update statement. Please use a " "name other than column name when using bindparam() " "with insert() or update() (for example, 'b_%s')." % (bindparam.key, bindparam.key) ) self.binds[bindparam.key] = self.binds[name] = bindparam return self.bindparam_string(name, **kwargs) def render_literal_bindparam(self, bindparam, **kw): value = bindparam.effective_value return self.render_literal_value(value, bindparam.type) def render_literal_value(self, value, type_): """Render the value of a bind parameter as a quoted literal. This is used for statement sections that do not accept bind parameters on the target driver/database. This should be implemented by subclasses using the quoting services of the DBAPI. 
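
        E.g. a sketch of a subclass override (``MyCompiler`` and the
        naive single-quote doubling are illustrative only; a real
        implementation should use the DBAPI's own quoting services)::

            class MyCompiler(SQLCompiler):
                def render_literal_value(self, value, type_):
                    if isinstance(value, util.string_types):
                        return "'%s'" % value.replace("'", "''")
                    return super(MyCompiler, self).render_literal_value(
                        value, type_)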
""" processor = type_._cached_literal_processor(self.dialect) if processor: return processor(value) else: raise NotImplementedError( "Don't know how to literal-quote value %r" % value) def _truncate_bindparam(self, bindparam): if bindparam in self.bind_names: return self.bind_names[bindparam] bind_name = bindparam.key if isinstance(bind_name, elements._truncated_label): bind_name = self._truncated_identifier("bindparam", bind_name) # add to bind_names for translation self.bind_names[bindparam] = bind_name return bind_name def _truncated_identifier(self, ident_class, name): if (ident_class, name) in self.truncated_names: return self.truncated_names[(ident_class, name)] anonname = name.apply_map(self.anon_map) if len(anonname) > self.label_length - 6: counter = self.truncated_names.get(ident_class, 1) truncname = anonname[0:max(self.label_length - 6, 0)] + \ "_" + hex(counter)[2:] self.truncated_names[ident_class] = counter + 1 else: truncname = anonname self.truncated_names[(ident_class, name)] = truncname return truncname def _anonymize(self, name): return name % self.anon_map def _process_anon(self, key): (ident, derived) = key.split(' ', 1) anonymous_counter = self.anon_map.get(derived, 1) self.anon_map[derived] = anonymous_counter + 1 return derived + "_" + str(anonymous_counter) def bindparam_string(self, name, positional_names=None, **kw): if self.positional: if positional_names is not None: positional_names.append(name) else: self.positiontup.append(name) return self.bindtemplate % {'name': name} def visit_cte(self, cte, asfrom=False, ashint=False, fromhints=None, **kwargs): self._init_cte_state() if isinstance(cte.name, elements._truncated_label): cte_name = self._truncated_identifier("alias", cte.name) else: cte_name = cte.name if cte_name in self.ctes_by_name: existing_cte = self.ctes_by_name[cte_name] # we've generated a same-named CTE that we are enclosed in, # or this is the same CTE. just return the name. if cte in existing_cte._restates or cte is existing_cte: return self.preparer.format_alias(cte, cte_name) elif existing_cte in cte._restates: # we've generated a same-named CTE that is # enclosed in us - we take precedence, so # discard the text for the "inner". 
del self.ctes[existing_cte] else: raise exc.CompileError( "Multiple, unrelated CTEs found with " "the same name: %r" % cte_name) self.ctes_by_name[cte_name] = cte if cte._cte_alias is not None: orig_cte = cte._cte_alias if orig_cte not in self.ctes: self.visit_cte(orig_cte, **kwargs) cte_alias_name = cte._cte_alias.name if isinstance(cte_alias_name, elements._truncated_label): cte_alias_name = self._truncated_identifier( "alias", cte_alias_name) else: orig_cte = cte cte_alias_name = None if not cte_alias_name and cte not in self.ctes: if cte.recursive: self.ctes_recursive = True text = self.preparer.format_alias(cte, cte_name) if cte.recursive: if isinstance(cte.original, selectable.Select): col_source = cte.original elif isinstance(cte.original, selectable.CompoundSelect): col_source = cte.original.selects[0] else: assert False recur_cols = [c for c in util.unique_list(col_source.inner_columns) if c is not None] text += "(%s)" % (", ".join( self.preparer.format_column(ident) for ident in recur_cols)) if self.positional: kwargs['positional_names'] = self.cte_positional[cte] = [] text += " AS \n" + \ cte.original._compiler_dispatch( self, asfrom=True, **kwargs ) if cte._suffixes: text += " " + self._generate_prefixes( cte, cte._suffixes, **kwargs) self.ctes[cte] = text if asfrom: if cte_alias_name: text = self.preparer.format_alias(cte, cte_alias_name) text += self.get_render_as_alias_suffix(cte_name) else: return self.preparer.format_alias(cte, cte_name) return text def visit_alias(self, alias, asfrom=False, ashint=False, iscrud=False, fromhints=None, **kwargs): if asfrom or ashint: if isinstance(alias.name, elements._truncated_label): alias_name = self._truncated_identifier("alias", alias.name) else: alias_name = alias.name if ashint: return self.preparer.format_alias(alias, alias_name) elif asfrom: ret = alias.original._compiler_dispatch(self, asfrom=True, **kwargs) + \ self.get_render_as_alias_suffix( self.preparer.format_alias(alias, alias_name)) if fromhints and alias in fromhints: ret = self.format_from_hint_text(ret, alias, fromhints[alias], iscrud) return ret else: return alias.original._compiler_dispatch(self, **kwargs) def get_render_as_alias_suffix(self, alias_name_text): return " AS " + alias_name_text def _add_to_result_map(self, keyname, name, objects, type_): self._result_columns.append((keyname, name, objects, type_)) def _label_select_column(self, select, column, populate_result_map, asfrom, column_clause_args, name=None, within_columns_clause=True): """produce labeled columns present in a select().""" if column.type._has_column_expression and \ populate_result_map: col_expr = column.type.column_expression(column) add_to_result_map = lambda keyname, name, objects, type_: \ self._add_to_result_map( keyname, name, objects + (column,), type_) else: col_expr = column if populate_result_map: add_to_result_map = self._add_to_result_map else: add_to_result_map = None if not within_columns_clause: result_expr = col_expr elif isinstance(column, elements.Label): if col_expr is not column: result_expr = _CompileLabel( col_expr, column.name, alt_names=(column.element,) ) else: result_expr = col_expr elif select is not None and name: result_expr = _CompileLabel( col_expr, name, alt_names=(column._key_label,) ) elif \ asfrom and \ isinstance(column, elements.ColumnClause) and \ not column.is_literal and \ column.table is not None and \ not isinstance(column.table, selectable.Select): result_expr = _CompileLabel(col_expr, elements._as_truncated(column.name), alt_names=(column.key,)) elif 
( not isinstance(column, elements.TextClause) and ( not isinstance(column, elements.UnaryExpression) or column.wraps_column_expression ) and ( not hasattr(column, 'name') or isinstance(column, functions.Function) ) ): result_expr = _CompileLabel(col_expr, column.anon_label) elif col_expr is not column: # TODO: are we sure "column" has a .name and .key here ? # assert isinstance(column, elements.ColumnClause) result_expr = _CompileLabel(col_expr, elements._as_truncated(column.name), alt_names=(column.key,)) else: result_expr = col_expr column_clause_args.update( within_columns_clause=within_columns_clause, add_to_result_map=add_to_result_map ) return result_expr._compiler_dispatch( self, **column_clause_args ) def format_from_hint_text(self, sqltext, table, hint, iscrud): hinttext = self.get_from_hint_text(table, hint) if hinttext: sqltext += " " + hinttext return sqltext def get_select_hint_text(self, byfroms): return None def get_from_hint_text(self, table, text): return None def get_crud_hint_text(self, table, text): return None def get_statement_hint_text(self, hint_texts): return " ".join(hint_texts) def _transform_select_for_nested_joins(self, select): """Rewrite any "a JOIN (b JOIN c)" expression as "a JOIN (select * from b JOIN c) AS anon", to support databases that can't parse a parenthesized join correctly (i.e. sqlite the main one). """ cloned = {} column_translate = [{}] def visit(element, **kw): if element in column_translate[-1]: return column_translate[-1][element] elif element in cloned: return cloned[element] newelem = cloned[element] = element._clone() if newelem.is_selectable and newelem._is_join and \ isinstance(newelem.right, selectable.FromGrouping): newelem._reset_exported() newelem.left = visit(newelem.left, **kw) right = visit(newelem.right, **kw) selectable_ = selectable.Select( [right.element], use_labels=True).alias() for c in selectable_.c: c._key_label = c.key c._label = c.name translate_dict = dict( zip(newelem.right.element.c, selectable_.c) ) # translating from both the old and the new # because different select() structures will lead us # to traverse differently translate_dict[right.element.left] = selectable_ translate_dict[right.element.right] = selectable_ translate_dict[newelem.right.element.left] = selectable_ translate_dict[newelem.right.element.right] = selectable_ # propagate translations that we've gained # from nested visit(newelem.right) outwards # to the enclosing select here. this happens # only when we have more than one level of right # join nesting, i.e. "a JOIN (b JOIN (c JOIN d))" for k, v in list(column_translate[-1].items()): if v in translate_dict: # remarkably, no current ORM tests (May 2013) # hit this condition, only test_join_rewriting # does. column_translate[-1][k] = translate_dict[v] column_translate[-1].update(translate_dict) newelem.right = selectable_ newelem.onclause = visit(newelem.onclause, **kw) elif newelem._is_from_container: # if we hit an Alias, CompoundSelect or ScalarSelect, put a # marker in the stack. 
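                # For illustration, a sketch of the rewrite this traversal
                # performs (table/column names are hypothetical).  Compiled
                # against a dialect with supports_right_nested_joins=False,
                # such as SQLite, the right-nested grouping becomes an
                # anonymous subquery:
                #
                #     from sqlalchemy import table, column, select
                #     from sqlalchemy.dialects import sqlite
                #
                #     a = table('a', column('id'))
                #     b = table('b', column('id'), column('a_id'))
                #     c = table('c', column('id'), column('b_id'))
                #
                #     j = a.join(
                #         b.join(c, b.c.id == c.c.b_id),
                #         a.c.id == b.c.a_id)
                #     stmt = select([a]).select_from(j).apply_labels()
                #     print(stmt.compile(dialect=sqlite.dialect()))
                #     # ... a JOIN (SELECT ... FROM b JOIN c ...) AS anon_1
                #
                # the marker set just below records that we've entered such
                # a from-container: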
kw['transform_clue'] = 'select_container' newelem._copy_internals(clone=visit, **kw) elif newelem.is_selectable and newelem._is_select: barrier_select = kw.get('transform_clue', None) == \ 'select_container' # if we're still descended from an # Alias/CompoundSelect/ScalarSelect, we're # in a FROM clause, so start with a new translate collection if barrier_select: column_translate.append({}) kw['transform_clue'] = 'inside_select' newelem._copy_internals(clone=visit, **kw) if barrier_select: del column_translate[-1] else: newelem._copy_internals(clone=visit, **kw) return newelem return visit(select) def _transform_result_map_for_nested_joins( self, select, transformed_select): inner_col = dict((c._key_label, c) for c in transformed_select.inner_columns) d = dict( (inner_col[c._key_label], c) for c in select.inner_columns ) self._result_columns = [ (key, name, tuple([d.get(col, col) for col in objs]), typ) for key, name, objs, typ in self._result_columns ] _default_stack_entry = util.immutabledict([ ('correlate_froms', frozenset()), ('asfrom_froms', frozenset()) ]) def _display_froms_for_select(self, select, asfrom): # utility method to help external dialects # get the correct from list for a select. # specifically the oracle dialect needs this feature # right now. toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] correlate_froms = entry['correlate_froms'] asfrom_froms = entry['asfrom_froms'] if asfrom: froms = select._get_display_froms( explicit_correlate_froms=correlate_froms.difference( asfrom_froms), implicit_correlate_froms=()) else: froms = select._get_display_froms( explicit_correlate_froms=correlate_froms, implicit_correlate_froms=asfrom_froms) return froms def visit_select(self, select, asfrom=False, parens=True, fromhints=None, compound_index=0, nested_join_translation=False, select_wraps_for=None, **kwargs): needs_nested_translation = \ select.use_labels and \ not nested_join_translation and \ not self.stack and \ not self.dialect.supports_right_nested_joins if needs_nested_translation: transformed_select = self._transform_select_for_nested_joins( select) text = self.visit_select( transformed_select, asfrom=asfrom, parens=parens, fromhints=fromhints, compound_index=compound_index, nested_join_translation=True, **kwargs ) toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] populate_result_map = toplevel or \ ( compound_index == 0 and entry.get( 'need_result_map_for_compound', False) ) or entry.get('need_result_map_for_nested', False) # this was first proposed as part of #3372; however, it is not # reached in current tests and could possibly be an assertion # instead. if not populate_result_map and 'add_to_result_map' in kwargs: del kwargs['add_to_result_map'] if needs_nested_translation: if populate_result_map: self._transform_result_map_for_nested_joins( select, transformed_select) return text froms = self._setup_select_stack(select, entry, asfrom) column_clause_args = kwargs.copy() column_clause_args.update({ 'within_label_clause': False, 'within_columns_clause': False }) text = "SELECT " # we're off to a good start ! if select._hints: hint_text, byfrom = self._setup_select_hints(select) if hint_text: text += hint_text + " " else: byfrom = None if select._prefixes: text += self._generate_prefixes( select, select._prefixes, **kwargs) text += self.get_select_precolumns(select, **kwargs) # the actual list of columns to print in the SELECT column list. 
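        # each column is run through _label_select_column(); with
        # use_labels in effect (e.g. select().apply_labels()), plain
        # table-bound columns come out as "<table>_<column>" labels --
        # a sketch:
        #
        #     from sqlalchemy import table, column, select
        #
        #     t = table('t', column('x'))
        #     print(select([t]).apply_labels())
        #     # SELECT t.x AS t_x FROM t
        #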
inner_columns = [ c for c in [ self._label_select_column( select, column, populate_result_map, asfrom, column_clause_args, name=name) for name, column in select._columns_plus_names ] if c is not None ] if populate_result_map and select_wraps_for is not None: # if this select is a compiler-generated wrapper, # rewrite the targeted columns in the result map wrapped_inner_columns = set(select_wraps_for.inner_columns) translate = dict( (outer, inner.pop()) for outer, inner in [ ( outer, outer.proxy_set.intersection(wrapped_inner_columns)) for outer in select.inner_columns ] if inner ) self._result_columns = [ (key, name, tuple(translate.get(o, o) for o in obj), type_) for key, name, obj, type_ in self._result_columns ] text = self._compose_select_body( text, select, inner_columns, froms, byfrom, kwargs) if select._statement_hints: per_dialect = [ ht for (dialect_name, ht) in select._statement_hints if dialect_name in ('*', self.dialect.name) ] if per_dialect: text += " " + self.get_statement_hint_text(per_dialect) if self.ctes and self._is_toplevel_select(select): text = self._render_cte_clause() + text if select._suffixes: text += " " + self._generate_prefixes( select, select._suffixes, **kwargs) self.stack.pop(-1) if asfrom and parens: return "(" + text + ")" else: return text def _is_toplevel_select(self, select): """Return True if the stack is placed at the given select, and is also the outermost SELECT, meaning there is either no stack before this one, or the enclosing stack is a topmost INSERT. """ return ( self.stack[-1]['selectable'] is select and ( len(self.stack) == 1 or self.isinsert and len(self.stack) == 2 and self.statement is self.stack[0]['selectable'] ) ) def _setup_select_hints(self, select): byfrom = dict([ (from_, hinttext % { 'name': from_._compiler_dispatch( self, ashint=True) }) for (from_, dialect), hinttext in select._hints.items() if dialect in ('*', self.dialect.name) ]) hint_text = self.get_select_hint_text(byfrom) return hint_text, byfrom def _setup_select_stack(self, select, entry, asfrom): correlate_froms = entry['correlate_froms'] asfrom_froms = entry['asfrom_froms'] if asfrom: froms = select._get_display_froms( explicit_correlate_froms=correlate_froms.difference( asfrom_froms), implicit_correlate_froms=()) else: froms = select._get_display_froms( explicit_correlate_froms=correlate_froms, implicit_correlate_froms=asfrom_froms) new_correlate_froms = set(selectable._from_objects(*froms)) all_correlate_froms = new_correlate_froms.union(correlate_froms) new_entry = { 'asfrom_froms': new_correlate_froms, 'correlate_froms': all_correlate_froms, 'selectable': select, } self.stack.append(new_entry) return froms def _compose_select_body( self, text, select, inner_columns, froms, byfrom, kwargs): text += ', '.join(inner_columns) if froms: text += " \nFROM " if select._hints: text += ', '.join( [f._compiler_dispatch(self, asfrom=True, fromhints=byfrom, **kwargs) for f in froms]) else: text += ', '.join( [f._compiler_dispatch(self, asfrom=True, **kwargs) for f in froms]) else: text += self.default_from() if select._whereclause is not None: t = select._whereclause._compiler_dispatch(self, **kwargs) if t: text += " \nWHERE " + t if select._group_by_clause.clauses: group_by = select._group_by_clause._compiler_dispatch( self, **kwargs) if group_by: text += " GROUP BY " + group_by if select._having is not None: t = select._having._compiler_dispatch(self, **kwargs) if t: text += " \nHAVING " + t if select._order_by_clause.clauses: text += self.order_by_clause(select, **kwargs) if 
(select._limit_clause is not None or select._offset_clause is not None): text += self.limit_clause(select, **kwargs) if select._for_update_arg is not None: text += self.for_update_clause(select, **kwargs) return text def _generate_prefixes(self, stmt, prefixes, **kw): clause = " ".join( prefix._compiler_dispatch(self, **kw) for prefix, dialect_name in prefixes if dialect_name is None or dialect_name == self.dialect.name ) if clause: clause += " " return clause def _render_cte_clause(self): if self.positional: self.positiontup = sum([ self.cte_positional[cte] for cte in self.ctes], []) + \ self.positiontup cte_text = self.get_cte_preamble(self.ctes_recursive) + " " cte_text += ", \n".join( [txt for txt in self.ctes.values()] ) cte_text += "\n " return cte_text def get_cte_preamble(self, recursive): if recursive: return "WITH RECURSIVE" else: return "WITH" def get_select_precolumns(self, select, **kw): """Called when building a ``SELECT`` statement, position is just before column list. """ return select._distinct and "DISTINCT " or "" def order_by_clause(self, select, **kw): order_by = select._order_by_clause._compiler_dispatch(self, **kw) if order_by: return " ORDER BY " + order_by else: return "" def for_update_clause(self, select, **kw): return " FOR UPDATE" def returning_clause(self, stmt, returning_cols): raise exc.CompileError( "RETURNING is not supported by this " "dialect's statement compiler.") def limit_clause(self, select, **kw): text = "" if select._limit_clause is not None: text += "\n LIMIT " + self.process(select._limit_clause, **kw) if select._offset_clause is not None: if select._limit_clause is None: text += "\n LIMIT -1" text += " OFFSET " + self.process(select._offset_clause, **kw) return text def visit_table(self, table, asfrom=False, iscrud=False, ashint=False, fromhints=None, use_schema=True, **kwargs): if asfrom or ashint: if use_schema and getattr(table, "schema", None): ret = self.preparer.quote_schema(table.schema) + \ "." + self.preparer.quote(table.name) else: ret = self.preparer.quote(table.name) if fromhints and table in fromhints: ret = self.format_from_hint_text(ret, table, fromhints[table], iscrud) return ret else: return "" def visit_join(self, join, asfrom=False, **kwargs): return ( join.left._compiler_dispatch(self, asfrom=True, **kwargs) + (join.isouter and " LEFT OUTER JOIN " or " JOIN ") + join.right._compiler_dispatch(self, asfrom=True, **kwargs) + " ON " + join.onclause._compiler_dispatch(self, **kwargs) ) def visit_insert(self, insert_stmt, **kw): self.stack.append( {'correlate_froms': set(), "asfrom_froms": set(), "selectable": insert_stmt}) self.isinsert = True crud_params = crud._get_crud_params(self, insert_stmt, **kw) if not crud_params and \ not self.dialect.supports_default_values and \ not self.dialect.supports_empty_insert: raise exc.CompileError("The '%s' dialect with current database " "version settings does not support empty " "inserts." % self.dialect.name) if insert_stmt._has_multi_parameters: if not self.dialect.supports_multivalues_insert: raise exc.CompileError( "The '%s' dialect with current database " "version settings does not support " "in-place multirow inserts." 
% self.dialect.name) crud_params_single = crud_params[0] else: crud_params_single = crud_params preparer = self.preparer supports_default_values = self.dialect.supports_default_values text = "INSERT " if insert_stmt._prefixes: text += self._generate_prefixes(insert_stmt, insert_stmt._prefixes, **kw) text += "INTO " table_text = preparer.format_table(insert_stmt.table) if insert_stmt._hints: dialect_hints = dict([ (table, hint_text) for (table, dialect), hint_text in insert_stmt._hints.items() if dialect in ('*', self.dialect.name) ]) if insert_stmt.table in dialect_hints: table_text = self.format_from_hint_text( table_text, insert_stmt.table, dialect_hints[insert_stmt.table], True ) text += table_text if crud_params_single or not supports_default_values: text += " (%s)" % ', '.join([preparer.format_column(c[0]) for c in crud_params_single]) if self.returning or insert_stmt._returning: self.returning = self.returning or insert_stmt._returning returning_clause = self.returning_clause( insert_stmt, self.returning) if self.returning_precedes_values: text += " " + returning_clause if insert_stmt.select is not None: text += " %s" % self.process(self._insert_from_select, **kw) elif not crud_params and supports_default_values: text += " DEFAULT VALUES" elif insert_stmt._has_multi_parameters: text += " VALUES %s" % ( ", ".join( "(%s)" % ( ', '.join(c[1] for c in crud_param_set) ) for crud_param_set in crud_params ) ) else: text += " VALUES (%s)" % \ ', '.join([c[1] for c in crud_params]) if self.returning and not self.returning_precedes_values: text += " " + returning_clause self.stack.pop(-1) return text def update_limit_clause(self, update_stmt): """Provide a hook for MySQL to add LIMIT to the UPDATE""" return None def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw): """Provide a hook to override the initial table clause in an UPDATE statement. MySQL overrides this. """ return from_table._compiler_dispatch(self, asfrom=True, iscrud=True, **kw) def update_from_clause(self, update_stmt, from_table, extra_froms, from_hints, **kw): """Provide a hook to override the generation of an UPDATE..FROM clause. MySQL and MSSQL override this. 
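        For example (a sketch; table names are illustrative), an UPDATE
        whose WHERE criterion references a second table produces the
        extra FROM elements consumed here::

            stmt = users.update().values(name='ed').where(
                users.c.id == addresses.c.user_id)
            # UPDATE users SET name=:name FROM addresses
            # WHERE users.id = addresses.user_id
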
""" return "FROM " + ', '.join( t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw) for t in extra_froms) def visit_update(self, update_stmt, **kw): self.stack.append( {'correlate_froms': set([update_stmt.table]), "asfrom_froms": set([update_stmt.table]), "selectable": update_stmt}) self.isupdate = True extra_froms = update_stmt._extra_froms text = "UPDATE " if update_stmt._prefixes: text += self._generate_prefixes(update_stmt, update_stmt._prefixes, **kw) table_text = self.update_tables_clause(update_stmt, update_stmt.table, extra_froms, **kw) crud_params = crud._get_crud_params(self, update_stmt, **kw) if update_stmt._hints: dialect_hints = dict([ (table, hint_text) for (table, dialect), hint_text in update_stmt._hints.items() if dialect in ('*', self.dialect.name) ]) if update_stmt.table in dialect_hints: table_text = self.format_from_hint_text( table_text, update_stmt.table, dialect_hints[update_stmt.table], True ) else: dialect_hints = None text += table_text text += ' SET ' include_table = extra_froms and \ self.render_table_with_column_in_update_from text += ', '.join( c[0]._compiler_dispatch(self, include_table=include_table) + '=' + c[1] for c in crud_params ) if self.returning or update_stmt._returning: if not self.returning: self.returning = update_stmt._returning if self.returning_precedes_values: text += " " + self.returning_clause( update_stmt, self.returning) if extra_froms: extra_from_text = self.update_from_clause( update_stmt, update_stmt.table, extra_froms, dialect_hints, **kw) if extra_from_text: text += " " + extra_from_text if update_stmt._whereclause is not None: t = self.process(update_stmt._whereclause) if t: text += " WHERE " + t limit_clause = self.update_limit_clause(update_stmt) if limit_clause: text += " " + limit_clause if self.returning and not self.returning_precedes_values: text += " " + self.returning_clause( update_stmt, self.returning) self.stack.pop(-1) return text @util.memoized_property def _key_getters_for_crud_column(self): return crud._key_getters_for_crud_column(self) def visit_delete(self, delete_stmt, **kw): self.stack.append({'correlate_froms': set([delete_stmt.table]), "asfrom_froms": set([delete_stmt.table]), "selectable": delete_stmt}) self.isdelete = True text = "DELETE " if delete_stmt._prefixes: text += self._generate_prefixes(delete_stmt, delete_stmt._prefixes, **kw) text += "FROM " table_text = delete_stmt.table._compiler_dispatch( self, asfrom=True, iscrud=True) if delete_stmt._hints: dialect_hints = dict([ (table, hint_text) for (table, dialect), hint_text in delete_stmt._hints.items() if dialect in ('*', self.dialect.name) ]) if delete_stmt.table in dialect_hints: table_text = self.format_from_hint_text( table_text, delete_stmt.table, dialect_hints[delete_stmt.table], True ) else: dialect_hints = None text += table_text if delete_stmt._returning: self.returning = delete_stmt._returning if self.returning_precedes_values: text += " " + self.returning_clause( delete_stmt, delete_stmt._returning) if delete_stmt._whereclause is not None: t = delete_stmt._whereclause._compiler_dispatch(self) if t: text += " WHERE " + t if self.returning and not self.returning_precedes_values: text += " " + self.returning_clause( delete_stmt, delete_stmt._returning) self.stack.pop(-1) return text def visit_savepoint(self, savepoint_stmt): return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt) def visit_rollback_to_savepoint(self, savepoint_stmt): return "ROLLBACK TO SAVEPOINT %s" % \ 
self.preparer.format_savepoint(savepoint_stmt) def visit_release_savepoint(self, savepoint_stmt): return "RELEASE SAVEPOINT %s" % \ self.preparer.format_savepoint(savepoint_stmt) class DDLCompiler(Compiled): @util.memoized_property def sql_compiler(self): return self.dialect.statement_compiler(self.dialect, None) @util.memoized_property def type_compiler(self): return self.dialect.type_compiler @property def preparer(self): return self.dialect.identifier_preparer def construct_params(self, params=None): return None def visit_ddl(self, ddl, **kwargs): # table events can substitute table and schema name context = ddl.context if isinstance(ddl.target, schema.Table): context = context.copy() preparer = self.dialect.identifier_preparer path = preparer.format_table_seq(ddl.target) if len(path) == 1: table, sch = path[0], '' else: table, sch = path[-1], path[0] context.setdefault('table', table) context.setdefault('schema', sch) context.setdefault('fullname', preparer.format_table(ddl.target)) return self.sql_compiler.post_process_text(ddl.statement % context) def visit_create_schema(self, create): schema = self.preparer.format_schema(create.element) return "CREATE SCHEMA " + schema def visit_drop_schema(self, drop): schema = self.preparer.format_schema(drop.element) text = "DROP SCHEMA " + schema if drop.cascade: text += " CASCADE" return text def visit_create_table(self, create): table = create.element preparer = self.dialect.identifier_preparer text = "\n" + " ".join(['CREATE'] + table._prefixes + ['TABLE', preparer.format_table(table), "("]) separator = "\n" # if only one primary key, specify it along with the column first_pk = False for create_column in create.columns: column = create_column.element try: processed = self.process(create_column, first_pk=column.primary_key and not first_pk) if processed is not None: text += separator separator = ", \n" text += "\t" + processed if column.primary_key: first_pk = True except exc.CompileError as ce: util.raise_from_cause( exc.CompileError( util.u("(in table '%s', column '%s'): %s") % (table.description, column.name, ce.args[0]) )) const = self.create_table_constraints( table, _include_foreign_key_constraints= create.include_foreign_key_constraints) if const: text += separator + "\t" + const text += "\n)%s\n\n" % self.post_create_table(table) return text def visit_create_column(self, create, first_pk=False): column = create.element if column.system: return None text = self.get_column_specification( column, first_pk=first_pk ) const = " ".join(self.process(constraint) for constraint in column.constraints) if const: text += " " + const return text def create_table_constraints( self, table, _include_foreign_key_constraints=None): # On some DB order is significant: visit PK first, then the # other constraints (engine.ReflectionTest.testbasic failed on FB2) constraints = [] if table.primary_key: constraints.append(table.primary_key) all_fkcs = table.foreign_key_constraints if _include_foreign_key_constraints is not None: omit_fkcs = all_fkcs.difference(_include_foreign_key_constraints) else: omit_fkcs = set() constraints.extend([c for c in table._sorted_constraints if c is not table.primary_key and c not in omit_fkcs]) return ", \n\t".join( p for p in (self.process(constraint) for constraint in constraints if ( constraint._create_rule is None or constraint._create_rule(self)) and ( not self.dialect.supports_alter or not getattr(constraint, 'use_alter', False) )) if p is not None ) def visit_drop_table(self, drop): return "\nDROP TABLE " + 
self.preparer.format_table(drop.element) def visit_drop_view(self, drop): return "\nDROP VIEW " + self.preparer.format_table(drop.element) def _verify_index_table(self, index): if index.table is None: raise exc.CompileError("Index '%s' is not associated " "with any table." % index.name) def visit_create_index(self, create, include_schema=False, include_table_schema=True): index = create.element self._verify_index_table(index) preparer = self.preparer text = "CREATE " if index.unique: text += "UNIQUE " text += "INDEX %s ON %s (%s)" \ % ( self._prepared_index_name(index, include_schema=include_schema), preparer.format_table(index.table, use_schema=include_table_schema), ', '.join( self.sql_compiler.process( expr, include_table=False, literal_binds=True) for expr in index.expressions) ) return text def visit_drop_index(self, drop): index = drop.element return "\nDROP INDEX " + self._prepared_index_name( index, include_schema=True) def _prepared_index_name(self, index, include_schema=False): if include_schema and index.table is not None and index.table.schema: schema = index.table.schema schema_name = self.preparer.quote_schema(schema) else: schema_name = None ident = index.name if isinstance(ident, elements._truncated_label): max_ = self.dialect.max_index_name_length or \ self.dialect.max_identifier_length if len(ident) > max_: ident = ident[0:max_ - 8] + \ "_" + util.md5_hex(ident)[-4:] else: self.dialect.validate_identifier(ident) index_name = self.preparer.quote(ident) if schema_name: index_name = schema_name + "." + index_name return index_name def visit_add_constraint(self, create): return "ALTER TABLE %s ADD %s" % ( self.preparer.format_table(create.element.table), self.process(create.element) ) def visit_create_sequence(self, create): text = "CREATE SEQUENCE %s" % \ self.preparer.format_sequence(create.element) if create.element.increment is not None: text += " INCREMENT BY %d" % create.element.increment if create.element.start is not None: text += " START WITH %d" % create.element.start if create.element.minvalue is not None: text += " MINVALUE %d" % create.element.minvalue if create.element.maxvalue is not None: text += " MAXVALUE %d" % create.element.maxvalue if create.element.nominvalue is not None: text += " NO MINVALUE" if create.element.nomaxvalue is not None: text += " NO MAXVALUE" if create.element.cycle is not None: text += " CYCLE" return text def visit_drop_sequence(self, drop): return "DROP SEQUENCE %s" % \ self.preparer.format_sequence(drop.element) def visit_drop_constraint(self, drop): constraint = drop.element if constraint.name is not None: formatted_name = self.preparer.format_constraint(constraint) else: formatted_name = None if formatted_name is None: raise exc.CompileError( "Can't emit DROP CONSTRAINT for constraint %r; " "it has no name" % drop.element) return "ALTER TABLE %s DROP CONSTRAINT %s%s" % ( self.preparer.format_table(drop.element.table), formatted_name, drop.cascade and " CASCADE" or "" ) def get_column_specification(self, column, **kwargs): colspec = self.preparer.format_column(column) + " " + \ self.dialect.type_compiler.process( column.type, type_expression=column) default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default if not column.nullable: colspec += " NOT NULL" return colspec def post_create_table(self, table): return '' def get_column_default_string(self, column): if isinstance(column.server_default, schema.DefaultClause): if isinstance(column.server_default.arg, util.string_types): return 
"'%s'" % column.server_default.arg else: return self.sql_compiler.process( column.server_default.arg, literal_binds=True) else: return None def visit_check_constraint(self, constraint): text = "" if constraint.name is not None: formatted_name = self.preparer.format_constraint(constraint) if formatted_name is not None: text += "CONSTRAINT %s " % formatted_name text += "CHECK (%s)" % self.sql_compiler.process(constraint.sqltext, include_table=False, literal_binds=True) text += self.define_constraint_deferrability(constraint) return text def visit_column_check_constraint(self, constraint): text = "" if constraint.name is not None: formatted_name = self.preparer.format_constraint(constraint) if formatted_name is not None: text += "CONSTRAINT %s " % formatted_name text += "CHECK (%s)" % constraint.sqltext text += self.define_constraint_deferrability(constraint) return text def visit_primary_key_constraint(self, constraint): if len(constraint) == 0: return '' text = "" if constraint.name is not None: formatted_name = self.preparer.format_constraint(constraint) if formatted_name is not None: text += "CONSTRAINT %s " % formatted_name text += "PRIMARY KEY " text += "(%s)" % ', '.join(self.preparer.quote(c.name) for c in constraint) text += self.define_constraint_deferrability(constraint) return text def visit_foreign_key_constraint(self, constraint): preparer = self.dialect.identifier_preparer text = "" if constraint.name is not None: formatted_name = self.preparer.format_constraint(constraint) if formatted_name is not None: text += "CONSTRAINT %s " % formatted_name remote_table = list(constraint.elements)[0].column.table text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % ( ', '.join(preparer.quote(f.parent.name) for f in constraint.elements), self.define_constraint_remote_table( constraint, remote_table, preparer), ', '.join(preparer.quote(f.column.name) for f in constraint.elements) ) text += self.define_constraint_match(constraint) text += self.define_constraint_cascades(constraint) text += self.define_constraint_deferrability(constraint) return text def define_constraint_remote_table(self, constraint, table, preparer): """Format the remote table clause of a CREATE CONSTRAINT clause.""" return preparer.format_table(table) def visit_unique_constraint(self, constraint): if len(constraint) == 0: return '' text = "" if constraint.name is not None: formatted_name = self.preparer.format_constraint(constraint) text += "CONSTRAINT %s " % formatted_name text += "UNIQUE (%s)" % ( ', '.join(self.preparer.quote(c.name) for c in constraint)) text += self.define_constraint_deferrability(constraint) return text def define_constraint_cascades(self, constraint): text = "" if constraint.ondelete is not None: text += " ON DELETE %s" % constraint.ondelete if constraint.onupdate is not None: text += " ON UPDATE %s" % constraint.onupdate return text def define_constraint_deferrability(self, constraint): text = "" if constraint.deferrable is not None: if constraint.deferrable: text += " DEFERRABLE" else: text += " NOT DEFERRABLE" if constraint.initially is not None: text += " INITIALLY %s" % constraint.initially return text def define_constraint_match(self, constraint): text = "" if constraint.match is not None: text += " MATCH %s" % constraint.match return text class GenericTypeCompiler(TypeCompiler): def visit_FLOAT(self, type_, **kw): return "FLOAT" def visit_REAL(self, type_, **kw): return "REAL" def visit_NUMERIC(self, type_, **kw): if type_.precision is None: return "NUMERIC" elif type_.scale is None: return 
"NUMERIC(%(precision)s)" % \ {'precision': type_.precision} else: return "NUMERIC(%(precision)s, %(scale)s)" % \ {'precision': type_.precision, 'scale': type_.scale} def visit_DECIMAL(self, type_, **kw): if type_.precision is None: return "DECIMAL" elif type_.scale is None: return "DECIMAL(%(precision)s)" % \ {'precision': type_.precision} else: return "DECIMAL(%(precision)s, %(scale)s)" % \ {'precision': type_.precision, 'scale': type_.scale} def visit_INTEGER(self, type_, **kw): return "INTEGER" def visit_SMALLINT(self, type_, **kw): return "SMALLINT" def visit_BIGINT(self, type_, **kw): return "BIGINT" def visit_TIMESTAMP(self, type_, **kw): return 'TIMESTAMP' def visit_DATETIME(self, type_, **kw): return "DATETIME" def visit_DATE(self, type_, **kw): return "DATE" def visit_TIME(self, type_, **kw): return "TIME" def visit_CLOB(self, type_, **kw): return "CLOB" def visit_NCLOB(self, type_, **kw): return "NCLOB" def _render_string_type(self, type_, name): text = name if type_.length: text += "(%d)" % type_.length if type_.collation: text += ' COLLATE "%s"' % type_.collation return text def visit_CHAR(self, type_, **kw): return self._render_string_type(type_, "CHAR") def visit_NCHAR(self, type_, **kw): return self._render_string_type(type_, "NCHAR") def visit_VARCHAR(self, type_, **kw): return self._render_string_type(type_, "VARCHAR") def visit_NVARCHAR(self, type_, **kw): return self._render_string_type(type_, "NVARCHAR") def visit_TEXT(self, type_, **kw): return self._render_string_type(type_, "TEXT") def visit_BLOB(self, type_, **kw): return "BLOB" def visit_BINARY(self, type_, **kw): return "BINARY" + (type_.length and "(%d)" % type_.length or "") def visit_VARBINARY(self, type_, **kw): return "VARBINARY" + (type_.length and "(%d)" % type_.length or "") def visit_BOOLEAN(self, type_, **kw): return "BOOLEAN" def visit_large_binary(self, type_, **kw): return self.visit_BLOB(type_, **kw) def visit_boolean(self, type_, **kw): return self.visit_BOOLEAN(type_, **kw) def visit_time(self, type_, **kw): return self.visit_TIME(type_, **kw) def visit_datetime(self, type_, **kw): return self.visit_DATETIME(type_, **kw) def visit_date(self, type_, **kw): return self.visit_DATE(type_, **kw) def visit_big_integer(self, type_, **kw): return self.visit_BIGINT(type_, **kw) def visit_small_integer(self, type_, **kw): return self.visit_SMALLINT(type_, **kw) def visit_integer(self, type_, **kw): return self.visit_INTEGER(type_, **kw) def visit_real(self, type_, **kw): return self.visit_REAL(type_, **kw) def visit_float(self, type_, **kw): return self.visit_FLOAT(type_, **kw) def visit_numeric(self, type_, **kw): return self.visit_NUMERIC(type_, **kw) def visit_string(self, type_, **kw): return self.visit_VARCHAR(type_, **kw) def visit_unicode(self, type_, **kw): return self.visit_VARCHAR(type_, **kw) def visit_text(self, type_, **kw): return self.visit_TEXT(type_, **kw) def visit_unicode_text(self, type_, **kw): return self.visit_TEXT(type_, **kw) def visit_enum(self, type_, **kw): return self.visit_VARCHAR(type_, **kw) def visit_null(self, type_, **kw): raise exc.CompileError("Can't generate DDL for %r; " "did you forget to specify a " "type on this Column?" 
% type_) def visit_type_decorator(self, type_, **kw): return self.process(type_.type_engine(self.dialect), **kw) def visit_user_defined(self, type_, **kw): return type_.get_col_spec(**kw) class IdentifierPreparer(object): """Handle quoting and case-folding of identifiers based on options.""" reserved_words = RESERVED_WORDS legal_characters = LEGAL_CHARACTERS illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS def __init__(self, dialect, initial_quote='"', final_quote=None, escape_quote='"', omit_schema=False): """Construct a new ``IdentifierPreparer`` object. initial_quote Character that begins a delimited identifier. final_quote Character that ends a delimited identifier. Defaults to `initial_quote`. omit_schema Prevent prepending schema name. Useful for databases that do not support schemae. """ self.dialect = dialect self.initial_quote = initial_quote self.final_quote = final_quote or self.initial_quote self.escape_quote = escape_quote self.escape_to_quote = self.escape_quote * 2 self.omit_schema = omit_schema self._strings = {} def _escape_identifier(self, value): """Escape an identifier. Subclasses should override this to provide database-dependent escaping behavior. """ return value.replace(self.escape_quote, self.escape_to_quote) def _unescape_identifier(self, value): """Canonicalize an escaped identifier. Subclasses should override this to provide database-dependent unescaping behavior that reverses _escape_identifier. """ return value.replace(self.escape_to_quote, self.escape_quote) def quote_identifier(self, value): """Quote an identifier. Subclasses should override this to provide database-dependent quoting behavior. """ return self.initial_quote + \ self._escape_identifier(value) + \ self.final_quote def _requires_quotes(self, value): """Return True if the given identifier requires quoting.""" lc_value = value.lower() return (lc_value in self.reserved_words or value[0] in self.illegal_initial_characters or not self.legal_characters.match(util.text_type(value)) or (lc_value != value)) def quote_schema(self, schema, force=None): """Conditionally quote a schema. Subclasses can override this to provide database-dependent quoting behavior for schema names. the 'force' flag should be considered deprecated. """ return self.quote(schema, force) def quote(self, ident, force=None): """Conditionally quote an identifier. the 'force' flag should be considered deprecated. """ force = getattr(ident, "quote", None) if force is None: if ident in self._strings: return self._strings[ident] else: if self._requires_quotes(ident): self._strings[ident] = self.quote_identifier(ident) else: self._strings[ident] = ident return self._strings[ident] elif force: return self.quote_identifier(ident) else: return ident def format_sequence(self, sequence, use_schema=True): name = self.quote(sequence.name) if (not self.omit_schema and use_schema and sequence.schema is not None): name = self.quote_schema(sequence.schema) + "." 
+ name return name def format_label(self, label, name=None): return self.quote(name or label.name) def format_alias(self, alias, name=None): return self.quote(name or alias.name) def format_savepoint(self, savepoint, name=None): return self.quote(name or savepoint.ident) @util.dependencies("sqlalchemy.sql.naming") def format_constraint(self, naming, constraint): if isinstance(constraint.name, elements._defer_name): name = naming._constraint_name_for_table( constraint, constraint.table) if name: return self.quote(name) elif isinstance(constraint.name, elements._defer_none_name): return None return self.quote(constraint.name) def format_table(self, table, use_schema=True, name=None): """Prepare a quoted table and schema name.""" if name is None: name = table.name result = self.quote(name) if not self.omit_schema and use_schema \ and getattr(table, "schema", None): result = self.quote_schema(table.schema) + "." + result return result def format_schema(self, name, quote=None): """Prepare a quoted schema name.""" return self.quote(name, quote) def format_column(self, column, use_table=False, name=None, table_name=None): """Prepare a quoted column name.""" if name is None: name = column.name if not getattr(column, 'is_literal', False): if use_table: return self.format_table( column.table, use_schema=False, name=table_name) + "." + self.quote(name) else: return self.quote(name) else: # literal textual elements get stuck into ColumnClause a lot, # which shouldn't get quoted if use_table: return self.format_table( column.table, use_schema=False, name=table_name) + '.' + name else: return name def format_table_seq(self, table, use_schema=True): """Format table name and schema as a tuple.""" # Dialects with more levels in their fully qualified references # ('database', 'owner', etc.) could override this and return # a longer sequence. if not self.omit_schema and use_schema and \ getattr(table, 'schema', None): return (self.quote_schema(table.schema), self.format_table(table, use_schema=False)) else: return (self.format_table(table, use_schema=False), ) @util.memoized_property def _r_identifiers(self): initial, final, escaped_final = \ [re.escape(s) for s in (self.initial_quote, self.final_quote, self._escape_identifier(self.final_quote))] r = re.compile( r'(?:' r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s' r'|([^\.]+))(?=\.|$))+' % {'initial': initial, 'final': final, 'escaped': escaped_final}) return r def unformat_identifiers(self, identifiers): """Unpack 'schema.table.column'-like strings into components.""" r = self._r_identifiers return [self._unescape_identifier(i) for i in [a or b for a, b in r.findall(identifiers)]] SQLAlchemy-1.0.11/lib/sqlalchemy/sql/elements.py0000664000175000017500000040361012636375552022541 0ustar classicclassic00000000000000# sql/elements.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Core SQL expression elements, including :class:`.ClauseElement`, :class:`.ColumnElement`, and derived classes. """ from __future__ import unicode_literals from .. import util, exc, inspection from . import type_api from . 
import operators from .visitors import Visitable, cloned_traverse, traverse from .annotation import Annotated import itertools from .base import Executable, PARSE_AUTOCOMMIT, Immutable, NO_ARG from .base import _generative import numbers import re import operator def _clone(element, **kw): return element._clone() def collate(expression, collation): """Return the clause ``expression COLLATE collation``. e.g.:: collate(mycolumn, 'utf8_bin') produces:: mycolumn COLLATE utf8_bin """ expr = _literal_as_binds(expression) return BinaryExpression( expr, _literal_as_text(collation), operators.collate, type_=expr.type) def between(expr, lower_bound, upper_bound, symmetric=False): """Produce a ``BETWEEN`` predicate clause. E.g.:: from sqlalchemy import between stmt = select([users_table]).where(between(users_table.c.id, 5, 7)) Would produce SQL resembling:: SELECT id, name FROM user WHERE id BETWEEN :id_1 AND :id_2 The :func:`.between` function is a standalone version of the :meth:`.ColumnElement.between` method available on all SQL expressions, as in:: stmt = select([users_table]).where(users_table.c.id.between(5, 7)) All arguments passed to :func:`.between`, including the left side column expression, are coerced from Python scalar values if a the value is not a :class:`.ColumnElement` subclass. For example, three fixed values can be compared as in:: print(between(5, 3, 7)) Which would produce:: :param_1 BETWEEN :param_2 AND :param_3 :param expr: a column expression, typically a :class:`.ColumnElement` instance or alternatively a Python scalar expression to be coerced into a column expression, serving as the left side of the ``BETWEEN`` expression. :param lower_bound: a column or Python scalar expression serving as the lower bound of the right side of the ``BETWEEN`` expression. :param upper_bound: a column or Python scalar expression serving as the upper bound of the right side of the ``BETWEEN`` expression. :param symmetric: if True, will render " BETWEEN SYMMETRIC ". Note that not all databases support this syntax. .. versionadded:: 0.9.5 .. seealso:: :meth:`.ColumnElement.between` """ expr = _literal_as_binds(expr) return expr.between(lower_bound, upper_bound, symmetric=symmetric) def literal(value, type_=None): """Return a literal clause, bound to a bind parameter. Literal clauses are created automatically when non- :class:`.ClauseElement` objects (such as strings, ints, dates, etc.) are used in a comparison operation with a :class:`.ColumnElement` subclass, such as a :class:`~sqlalchemy.schema.Column` object. Use this function to force the generation of a literal clause, which will be created as a :class:`BindParameter` with a bound value. :param value: the value to be bound. Can be any Python object supported by the underlying DB-API, or is translatable via the given type argument. :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which will provide bind-parameter translation for this literal. """ return BindParameter(None, value, type_=type_, unique=True) def type_coerce(expression, type_): """Associate a SQL expression with a particular type, without rendering ``CAST``. E.g.:: from sqlalchemy import type_coerce stmt = select([type_coerce(log_table.date_string, StringDateTime())]) The above construct will produce SQL that is usually otherwise unaffected by the :func:`.type_coerce` call:: SELECT date_string FROM log However, when result rows are fetched, the ``StringDateTime`` type will be applied to result rows on behalf of the ``date_string`` column. 
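    As a sketch of that result-row handling (assuming ``conn`` is a
    :class:`.Connection`), fetching from the above statement returns
    the processed value::

        value = conn.execute(stmt).scalar()
        # the raw date_string has been run through StringDateTime's
        # result processing before being returned
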
A type that features bound-value handling will also have that behavior take effect when literal values or :func:`.bindparam` constructs are passed to :func:`.type_coerce` as targets. For example, if a type implements the :meth:`.TypeEngine.bind_expression` method or :meth:`.TypeEngine.bind_processor` method or equivalent, these functions will take effect at statement compilation/execution time when a literal value is passed, as in:: # bound-value handling of MyStringType will be applied to the # literal value "some string" stmt = select([type_coerce("some string", MyStringType)]) :func:`.type_coerce` is similar to the :func:`.cast` function, except that it does not render the ``CAST`` expression in the resulting statement. :param expression: A SQL expression, such as a :class:`.ColumnElement` expression or a Python string which will be coerced into a bound literal value. :param type_: A :class:`.TypeEngine` class or instance indicating the type to which the expression is coerced. .. seealso:: :func:`.cast` """ type_ = type_api.to_instance(type_) if hasattr(expression, '__clause_element__'): return type_coerce(expression.__clause_element__(), type_) elif isinstance(expression, BindParameter): bp = expression._clone() bp.type = type_ return bp elif not isinstance(expression, Visitable): if expression is None: return Null() else: return literal(expression, type_=type_) else: return Label(None, expression, type_=type_) def outparam(key, type_=None): """Create an 'OUT' parameter for usage in functions (stored procedures), for databases which support them. The ``outparam`` can be used like a regular function parameter. The "output" value will be available from the :class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters`` attribute, which returns a dictionary containing the values. """ return BindParameter( key, None, type_=type_, unique=False, isoutparam=True) def not_(clause): """Return a negation of the given clause, i.e. ``NOT(clause)``. The ``~`` operator is also overloaded on all :class:`.ColumnElement` subclasses to produce the same result. """ return operators.inv(_literal_as_binds(clause)) @inspection._self_inspects class ClauseElement(Visitable): """Base class for elements of a programmatically constructed SQL expression. """ __visit_name__ = 'clause' _annotations = {} supports_execution = False _from_objects = [] bind = None _is_clone_of = None is_selectable = False is_clause_element = True description = None _order_by_label_element = None _is_from_container = False def _clone(self): """Create a shallow copy of this ClauseElement. This method may be used by a generative API. Its also used as part of the "deep" copy afforded by a traversal that combines the _copy_internals() method. """ c = self.__class__.__new__(self.__class__) c.__dict__ = self.__dict__.copy() ClauseElement._cloned_set._reset(c) ColumnElement.comparator._reset(c) # this is a marker that helps to "equate" clauses to each other # when a Select returns its list of FROM clauses. the cloning # process leaves around a lot of remnants of the previous clause # typically in the form of column expressions still attached to the # old table. c._is_clone_of = self return c @property def _constructor(self): """return the 'constructor' for this ClauseElement. This is for the purposes for creating a new object of this type. Usually, its just the element's __class__. However, the "Annotated" version of the object overrides to return the class of its proxied element. 
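        E.g., a sketch using an annotated element::

            col = column('x')
            assert col._annotate({'key': 'value'})._constructor \
                is type(col)
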
""" return self.__class__ @util.memoized_property def _cloned_set(self): """Return the set consisting all cloned ancestors of this ClauseElement. Includes this ClauseElement. This accessor tends to be used for FromClause objects to identify 'equivalent' FROM clauses, regardless of transformative operations. """ s = util.column_set() f = self while f is not None: s.add(f) f = f._is_clone_of return s def __getstate__(self): d = self.__dict__.copy() d.pop('_is_clone_of', None) return d def _annotate(self, values): """return a copy of this ClauseElement with annotations updated by the given dictionary. """ return Annotated(self, values) def _with_annotations(self, values): """return a copy of this ClauseElement with annotations replaced by the given dictionary. """ return Annotated(self, values) def _deannotate(self, values=None, clone=False): """return a copy of this :class:`.ClauseElement` with annotations removed. :param values: optional tuple of individual values to remove. """ if clone: # clone is used when we are also copying # the expression for a deep deannotation return self._clone() else: # if no clone, since we have no annotations we return # self return self def _execute_on_connection(self, connection, multiparams, params): return connection._execute_clauseelement(self, multiparams, params) def unique_params(self, *optionaldict, **kwargs): """Return a copy with :func:`bindparam()` elements replaced. Same functionality as ``params()``, except adds `unique=True` to affected bind parameters so that multiple statements can be used. """ return self._params(True, optionaldict, kwargs) def params(self, *optionaldict, **kwargs): """Return a copy with :func:`bindparam()` elements replaced. Returns a copy of this ClauseElement with :func:`bindparam()` elements replaced with values taken from the given dictionary:: >>> clause = column('x') + bindparam('foo') >>> print clause.compile().params {'foo':None} >>> print clause.params({'foo':7}).compile().params {'foo':7} """ return self._params(False, optionaldict, kwargs) def _params(self, unique, optionaldict, kwargs): if len(optionaldict) == 1: kwargs.update(optionaldict[0]) elif len(optionaldict) > 1: raise exc.ArgumentError( "params() takes zero or one positional dictionary argument") def visit_bindparam(bind): if bind.key in kwargs: bind.value = kwargs[bind.key] bind.required = False if unique: bind._convert_to_unique() return cloned_traverse(self, {}, {'bindparam': visit_bindparam}) def compare(self, other, **kw): """Compare this ClauseElement to the given ClauseElement. Subclasses should override the default behavior, which is a straight identity comparison. \**kw are arguments consumed by subclass compare() methods and may be used to modify the criteria for comparison. (see :class:`.ColumnElement`) """ return self is other def _copy_internals(self, clone=_clone, **kw): """Reassign internal elements to be clones of themselves. Called during a copy-and-traverse operation on newly shallow-copied elements to create a deep copy. The given clone function should be used, which may be applying additional transformations to the element (i.e. replacement traversal, cloned traversal, annotations). """ pass def get_children(self, **kwargs): """Return immediate child elements of this :class:`.ClauseElement`. This is used for visit traversal. 
\**kwargs may contain flags that change the collection that is returned, for example to return a subset of items in order to cut down on larger traversals, or to return child items from a different context (such as schema-level collections instead of clause-level). """ return [] def self_group(self, against=None): """Apply a 'grouping' to this :class:`.ClauseElement`. This method is overridden by subclasses to return a "grouping" construct, i.e. parenthesis. In particular it's used by "binary" expressions to provide a grouping around themselves when placed into a larger expression, as well as by :func:`.select` constructs when placed into the FROM clause of another :func:`.select`. (Note that subqueries should be normally created using the :meth:`.Select.alias` method, as many platforms require nested SELECT statements to be named). As expressions are composed together, the application of :meth:`self_group` is automatic - end-user code should never need to use this method directly. Note that SQLAlchemy's clause constructs take operator precedence into account - so parenthesis might not be needed, for example, in an expression like ``x OR (y AND z)`` - AND takes precedence over OR. The base :meth:`self_group` method of :class:`.ClauseElement` just returns self. """ return self @util.dependencies("sqlalchemy.engine.default") def compile(self, default, bind=None, dialect=None, **kw): """Compile this SQL expression. The return value is a :class:`~.Compiled` object. Calling ``str()`` or ``unicode()`` on the returned value will yield a string representation of the result. The :class:`~.Compiled` object also can return a dictionary of bind parameter names and values using the ``params`` accessor. :param bind: An ``Engine`` or ``Connection`` from which a ``Compiled`` will be acquired. This argument takes precedence over this :class:`.ClauseElement`'s bound engine, if any. :param column_keys: Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If ``None``, all columns from the target table object are rendered. :param dialect: A ``Dialect`` instance from which a ``Compiled`` will be acquired. This argument takes precedence over the `bind` argument as well as this :class:`.ClauseElement`'s bound engine, if any. :param inline: Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement's VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key `Column`. :param compile_kwargs: optional dictionary of additional parameters that will be passed through to the compiler within all "visit" methods. This allows any custom flag to be passed through to a custom compilation construct, for example. It is also used for the case of passing the ``literal_binds`` flag through:: from sqlalchemy.sql import table, column, select t = table('t', column('x')) s = select([t]).where(t.c.x == 5) print s.compile(compile_kwargs={"literal_binds": True}) .. versionadded:: 0.9.0 .. 
    def _compiler(self, dialect, **kw):
        """Return a compiler appropriate for this ClauseElement, given a
        Dialect."""

        return dialect.statement_compiler(dialect, self, **kw)

    def __str__(self):
        if util.py3k:
            return str(self.compile())
        else:
            return unicode(self.compile()).\
                encode('ascii', 'backslashreplace')

    def __and__(self, other):
        """'and' at the ClauseElement level.

        .. deprecated:: 0.9.5 - conjunctions are intended to be
           at the :class:`.ColumnElement` level

        """
        return and_(self, other)

    def __or__(self, other):
        """'or' at the ClauseElement level.

        .. deprecated:: 0.9.5 - conjunctions are intended to be
           at the :class:`.ColumnElement` level

        """
        return or_(self, other)

    def __invert__(self):
        if hasattr(self, 'negation_clause'):
            return self.negation_clause
        else:
            return self._negate()

    def _negate(self):
        return UnaryExpression(
            self.self_group(against=operators.inv),
            operator=operators.inv,
            negate=None)

    def __bool__(self):
        raise TypeError("Boolean value of this clause is not defined")

    __nonzero__ = __bool__

    def __repr__(self):
        friendly = self.description
        if friendly is None:
            return object.__repr__(self)
        else:
            return '<%s.%s at 0x%x; %s>' % (
                self.__module__, self.__class__.__name__,
                id(self), friendly)
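
# Editor's note -- a minimal illustrative sketch, not part of the original
# source, showing the coercion rules described in the :class:`.ColumnElement`
# docstring that follows: a plain Python value on one side of an overloaded
# operator is coerced into a bound parameter::
#
#     from sqlalchemy.sql import column
#
#     expr = column('a') == 5        # a BinaryExpression
#     print(expr)                    # a = :a_1
#     print(type(expr.right))        # a BindParameter carrying the value 5
#     print(expr.compile().params)   # {'a_1': 5}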
class ColumnElement(operators.ColumnOperators, ClauseElement):
    """Represent a column-oriented SQL expression suitable for usage in the
    "columns" clause, WHERE clause etc. of a statement.

    While the most familiar kind of :class:`.ColumnElement` is the
    :class:`.Column` object, :class:`.ColumnElement` serves as the basis
    for any unit that may be present in a SQL expression, including
    the expressions themselves, SQL functions, bound parameters,
    literal expressions, keywords such as ``NULL``, etc.
    :class:`.ColumnElement` is the ultimate base class for all such
    elements.

    A wide variety of SQLAlchemy Core functions work at the SQL expression
    level, and are intended to accept instances of :class:`.ColumnElement`
    as arguments.  These functions will typically document that they accept
    a "SQL expression" as an argument.  What this means in terms of
    SQLAlchemy usually refers to an input which is either already in the
    form of a :class:`.ColumnElement` object, or a value which can be
    **coerced** into one.  The coercion rules followed by most, but not
    all, SQLAlchemy Core functions with regards to SQL expressions are
    as follows:

        * a literal Python value, such as a string, integer or floating
          point value, boolean, datetime, ``Decimal`` object, or virtually
          any other Python object, will be coerced into a "literal bound
          value".  This generally means that a :func:`.bindparam` will be
          produced featuring the given value embedded into the construct;
          the resulting :class:`.BindParameter` object is an instance of
          :class:`.ColumnElement`.  The Python value will ultimately be
          sent to the DBAPI at execution time as a parameterized argument
          to the ``execute()`` or ``executemany()`` methods, after
          SQLAlchemy type-specific converters (e.g. those provided by any
          associated :class:`.TypeEngine` objects) are applied to the
          value.

        * any special object value, typically ORM-level constructs, which
          feature a method called ``__clause_element__()``.  The Core
          expression system looks for this method when an object of
          otherwise unknown type is passed to a function that is looking
          to coerce the argument into a :class:`.ColumnElement`
          expression.  The ``__clause_element__()`` method, if present,
          should return a :class:`.ColumnElement` instance.  The primary
          use of ``__clause_element__()`` within SQLAlchemy is that of
          class-bound attributes on ORM-mapped classes; a ``User`` class
          which contains a mapped attribute named ``.name`` will have a
          method ``User.name.__clause_element__()`` which when invoked
          returns the :class:`.Column` called ``name`` associated with
          the mapped table.

        * The Python ``None`` value is typically interpreted as ``NULL``,
          which in SQLAlchemy Core produces an instance of :func:`.null`.

    A :class:`.ColumnElement` provides the ability to generate new
    :class:`.ColumnElement`
    objects using Python expressions.  This means that Python operators
    such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL
    operations, and allow the instantiation of further
    :class:`.ColumnElement` instances which are composed from other, more
    fundamental :class:`.ColumnElement` objects.  For example, two
    :class:`.ColumnClause` objects can be added together with the addition
    operator ``+`` to produce a :class:`.BinaryExpression`.  Both
    :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
    of :class:`.ColumnElement`::

        >>> from sqlalchemy.sql import column
        >>> column('a') + column('b')
        <sqlalchemy.sql.expression.BinaryExpression object at 0x...>
        >>> print column('a') + column('b')
        a + b

    .. seealso::

        :class:`.Column`

        :func:`.expression.column`

    """

    __visit_name__ = 'column'
    primary_key = False
    foreign_keys = []

    _label = None
    """The named label that can be used to target
    this column in a result set.

    This label is almost always the label used when
    rendering <expr> AS