aiopg-0.7.0/0000775000175000017500000000000012515777172013417 5ustar andrewandrew00000000000000
aiopg-0.7.0/aiopg/0000775000175000017500000000000012515777172014516 5ustar andrewandrew00000000000000
aiopg-0.7.0/aiopg/__init__.py0000664000175000017500000000254012474312016016612 0ustar andrewandrew00000000000000
import re
import sys
from collections import namedtuple

from .connection import connect, Connection, TIMEOUT as DEFAULT_TIMEOUT
from .cursor import Cursor
from .pool import create_pool, Pool

__all__ = ('connect', 'create_pool', 'Connection', 'Cursor', 'Pool',
           'version', 'version_info', 'DEFAULT_TIMEOUT')

__version__ = '0.7.0'

version = __version__ + ' , Python ' + sys.version

VersionInfo = namedtuple('VersionInfo',
                         'major minor micro releaselevel serial')


def _parse_version(ver):
    RE = (r'^(?P<major>\d+)\.(?P<minor>\d+)\.'
          r'(?P<micro>\d+)((?P<releaselevel>[a-z]+)(?P<serial>\d+)?)?$')
    match = re.match(RE, ver)
    try:
        major = int(match.group('major'))
        minor = int(match.group('minor'))
        micro = int(match.group('micro'))
        levels = {'c': 'candidate', 'a': 'alpha', 'b': 'beta',
                  None: 'final'}
        releaselevel = levels[match.group('releaselevel')]
        serial = int(match.group('serial')) if match.group('serial') else 0
        return VersionInfo(major, minor, micro, releaselevel, serial)
    except Exception:
        raise ImportError("Invalid package version {}".format(ver))


version_info = _parse_version(__version__)

# make pyflakes happy
(connect, create_pool, Connection, Cursor, Pool, DEFAULT_TIMEOUT)
aiopg-0.7.0/aiopg/sa/0000775000175000017500000000000012515777172015121 5ustar andrewandrew00000000000000
aiopg-0.7.0/aiopg/sa/result.py0000664000175000017500000003267512515716033017013 0ustar andrewandrew00000000000000
import asyncio
import warnings
import weakref
from collections.abc import Mapping, Sequence

from sqlalchemy.sql import expression, sqltypes

from . import exc


class RowProxy(Mapping):
    __slots__ = ('_result_proxy', '_row', '_processors', '_keymap')

    def __init__(self, result_proxy, row, processors, keymap):
        """RowProxy objects are constructed by ResultProxy objects."""
        self._result_proxy = result_proxy
        self._row = row
        self._processors = processors
        self._keymap = keymap

    def __iter__(self):
        return iter(self._result_proxy.keys)

    def __len__(self):
        return len(self._row)

    def __getitem__(self, key):
        try:
            processor, obj, index = self._keymap[key]
        except KeyError:
            processor, obj, index = self._result_proxy._key_fallback(key)
        # Do we need slicing at all? RowProxy now is Mapping not Sequence
        # except TypeError:
        #     if isinstance(key, slice):
        #         l = []
        #         for processor, value in zip(self._processors[key],
        #                                     self._row[key]):
        #             if processor is None:
        #                 l.append(value)
        #             else:
        #                 l.append(processor(value))
        #         return tuple(l)
        #     else:
        #         raise
        if index is None:
            raise exc.InvalidRequestError(
                "Ambiguous column name '%s' in result set! "
                "try 'use_labels' option on select statement."
% key) if processor is not None: return processor(self._row[index]) else: return self._row[index] def __getattr__(self, name): try: return self[name] except KeyError as e: raise AttributeError(e.args[0]) def __contains__(self, key): return self._result_proxy._has_key(self._row, key) __hash__ = None def __eq__(self, other): if isinstance(other, RowProxy): return self.as_tuple() == other.as_tuple() elif isinstance(other, Sequence): return self.as_tuple() == other else: return NotImplemented def __ne__(self, other): return not self == other def as_tuple(self): return tuple(self[k] for k in self) def __repr__(self): return repr(self.as_tuple()) class ResultMetaData(object): """Handle cursor.description, applying additional info from an execution context.""" def __init__(self, result_proxy, metadata): self._processors = processors = [] # We do not strictly need to store the processor in the key mapping, # though it is faster in the Python version (probably because of the # saved attribute lookup self._processors) self._keymap = keymap = {} self.keys = [] dialect = result_proxy.dialect typemap = dialect.dbapi_type_map assert dialect.case_sensitive, \ "Doesn't support case insensitive database connection" # high precedence key values. primary_keymap = {} assert not dialect.description_encoding, \ "psycopg in py3k should not use this" for i, rec in enumerate(metadata): colname = rec[0] coltype = rec[1] # PostgreSQL doesn't require this. # if dialect.requires_name_normalize: # colname = dialect.normalize_name(colname) name, obj, type_ = \ colname, None, typemap.get(coltype, sqltypes.NULLTYPE) processor = type_._cached_result_processor(dialect, coltype) processors.append(processor) rec = (processor, obj, i) # indexes as keys. This is only needed for the Python version of # RowProxy (the C version uses a faster path for integer indexes). primary_keymap[i] = rec # populate primary keymap, looking for conflicts. if primary_keymap.setdefault(name, rec) is not rec: # place a record that doesn't have the "index" - this # is interpreted later as an AmbiguousColumnError, # but only when actually accessed. Columns # colliding by name is not a problem if those names # aren't used; integer access is always # unambiguous. primary_keymap[name] = rec = (None, obj, None) self.keys.append(colname) if obj: for o in obj: keymap[o] = rec # technically we should be doing this but we # are saving on callcounts by not doing so. # if keymap.setdefault(o, rec) is not rec: # keymap[o] = (None, obj, None) # overwrite keymap values with those of the # high precedence keymap. keymap.update(primary_keymap) def _key_fallback(self, key, raiseerr=True): map = self._keymap result = None if isinstance(key, str): result = map.get(key) # fallback for targeting a ColumnElement to a textual expression # this is a rare use case which only occurs when matching text() # or colummn('name') constructs to ColumnElements, or after a # pickle/unpickle roundtrip elif isinstance(key, expression.ColumnElement): if (key._label and key._label in map): result = map[key._label] elif (hasattr(key, 'name') and key.name in map): # match is only on name. result = map[key.name] # search extra hard to make sure this # isn't a column/label name overlap. # this check isn't currently available if the row # was unpickled. 
if (result is not None and result[1] is not None): for obj in result[1]: if key._compare_name_for_result(obj): break else: result = None if result is None: if raiseerr: raise exc.NoSuchColumnError( "Could not locate column in row for column '%s'" % expression._string_or_unprintable(key)) else: return None else: map[key] = result return result def _has_key(self, row, key): if key in self._keymap: return True else: return self._key_fallback(key, False) is not None class ResultProxy: """Wraps a DB-API cursor object to provide easier access to row columns. Individual columns may be accessed by their integer position, case-insensitive column name, or by sqlalchemy schema.Column object. e.g.: row = fetchone() col1 = row[0] # access via integer position col2 = row['col2'] # access via name col3 = row[mytable.c.mycol] # access via Column object. ResultProxy also handles post-processing of result column data using sqlalchemy TypeEngine objects, which are referenced from the originating SQL statement that produced this result set. """ def __init__(self, connection, cursor, dialect, result_map=None): self._dialect = dialect self._closed = False self._cursor = cursor self._connection = connection self._rowcount = cursor.rowcount if cursor.description is not None: self._metadata = ResultMetaData(self, cursor.description) self._weak = weakref.ref(self, lambda wr: cursor.close()) else: self._metadata = None self.close() self._weak = None @property def dialect(self): """SQLAlchemy dialect.""" return self._dialect @property def cursor(self): return self._cursor def keys(self): """Return the current set of string keys for rows.""" if self._metadata: return tuple(self._metadata.keys) else: return () @property def rowcount(self): """Return the 'rowcount' for this result. The 'rowcount' reports the number of rows *matched* by the WHERE criterion of an UPDATE or DELETE statement. .. note:: Notes regarding .rowcount: * This attribute returns the number of rows *matched*, which is not necessarily the same as the number of rows that were actually *modified* - an UPDATE statement, for example, may have no net change on a given row if the SET values given are the same as those present in the row already. Such a row would be matched but not modified. * .rowcount is *only* useful in conjunction with an UPDATE or DELETE statement. Contrary to what the Python DBAPI says, it does *not* return the number of rows available from the results of a SELECT statement as DBAPIs cannot support this functionality when rows are unbuffered. * Statements that use RETURNING may not return a correct rowcount. """ return self._rowcount @property def returns_rows(self): """True if this ResultProxy returns rows. I.e. if it is legal to call the methods .fetchone(), .fetchmany() and .fetchall()`. """ return self._metadata is not None @property def closed(self): return self._closed def close(self): """Close this ResultProxy. Closes the underlying DBAPI cursor corresponding to the execution. Note that any data cached within this ResultProxy is still available. For some types of results, this may include buffered rows. If this ResultProxy was generated from an implicit execution, the underlying Connection will also be closed (returns the underlying DBAPI connection to the connection pool.) This method is called automatically when: * all result rows are exhausted using the fetchXXX() methods. * cursor.description is None. 
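        A minimal sketch of both paths (the connection and the query
        here are illustrative only, not part of this module):

            res = yield from conn.execute('SELECT 1')
            rows = yield from res.fetchall()  # rows exhausted: auto-close
            assert res.closed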
""" if not self._closed: self._closed = True self._cursor.close() # allow consistent errors self._cursor = None self._weak = None def __iter__(self): warnings.warn("Iteration over ResultProxy is deprecated", DeprecationWarning, stacklevel=2) while True: row = yield from self.fetchone() if row is None: raise StopIteration else: yield row def _non_result(self): if self._metadata is None: raise exc.ResourceClosedError( "This result object does not return rows. " "It has been closed automatically.") else: raise exc.ResourceClosedError("This result object is closed.") def _process_rows(self, rows): process_row = RowProxy metadata = self._metadata keymap = metadata._keymap processors = metadata._processors return [process_row(metadata, row, processors, keymap) for row in rows] @asyncio.coroutine def fetchall(self): """Fetch all rows, just like DB-API cursor.fetchall().""" try: rows = yield from self._cursor.fetchall() except AttributeError: self._non_result() else: l = self._process_rows(rows) self.close() return l @asyncio.coroutine def fetchone(self): """Fetch one row, just like DB-API cursor.fetchone(). If a row is present, the cursor remains open after this is called. Else the cursor is automatically closed and None is returned. """ try: row = yield from self._cursor.fetchone() except AttributeError: self._non_result() else: if row is not None: return self._process_rows([row])[0] else: self.close() return None @asyncio.coroutine def fetchmany(self, size=None): """Fetch many rows, just like DB-API cursor.fetchmany(size=cursor.arraysize). If rows are present, the cursor remains open after this is called. Else the cursor is automatically closed and an empty list is returned. """ try: if size is None: rows = yield from self._cursor.fetchmany() else: rows = yield from self._cursor.fetchmany(size) except AttributeError: self._non_result() else: l = self._process_rows(rows) if len(l) == 0: self.close() return l @asyncio.coroutine def first(self): """Fetch the first row and then close the result set unconditionally. Returns None if no row is present. """ if self._metadata is None: self._non_result() try: return (yield from self.fetchone()) finally: self.close() @asyncio.coroutine def scalar(self): """Fetch the first column of the first row, and close the result set. Returns None if no row is present. """ row = yield from self.first() if row is not None: return row[0] else: return None aiopg-0.7.0/aiopg/sa/__init__.py0000664000175000017500000000106112412473420017211 0ustar andrewandrew00000000000000"""Optional support for sqlalchemy.sql dynamic query generation.""" from .connection import SAConnection from .engine import create_engine, Engine from .exc import (Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError) __all__ = ('create_engine', 'SAConnection', 'Error', 'ArgumentError', 'InvalidRequestError', 'NoSuchColumnError', 'ResourceClosedError', 'Engine') (SAConnection, Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError, create_engine, Engine) aiopg-0.7.0/aiopg/sa/transaction.py0000664000175000017500000001143612354551123020010 0ustar andrewandrew00000000000000import asyncio from . import exc class Transaction(object): """Represent a database transaction in progress. 
The Transaction object is procured by calling the SAConnection.begin() method of SAConnection: with (yield from engine) as conn: trans = yield from conn.begin() try: yield from conn.execute("insert into x (a, b) values (1, 2)") except Exception: yield from trans.rollback() else: yield from trans.commit() The object provides .rollback() and .commit() methods in order to control transaction boundaries. See also: SAConnection.begin(), SAConnection.begin_twophase(), SAConnection.begin_nested(). """ def __init__(self, connection, parent): self._connection = connection self._parent = parent or self self._is_active = True @property def is_active(self): """Return ``True`` if a transaction is active.""" return self._is_active @property def connection(self): """Return transaction's connection (SAConnection instance).""" return self._connection @asyncio.coroutine def close(self): """Close this transaction. If this transaction is the base transaction in a begin/commit nesting, the transaction will rollback(). Otherwise, the method returns. This is used to cancel a Transaction without affecting the scope of an enclosing transaction. """ if not self._parent._is_active: return if self._parent is self: yield from self.rollback() else: self._is_active = False @asyncio.coroutine def rollback(self): """Roll back this transaction.""" if not self._parent._is_active: return yield from self._do_rollback() self._is_active = False @asyncio.coroutine def _do_rollback(self): yield from self._parent.rollback() @asyncio.coroutine def commit(self): """Commit this transaction.""" if not self._parent._is_active: raise exc.InvalidRequestError("This transaction is inactive") yield from self._do_commit() self._is_active = False @asyncio.coroutine def _do_commit(self): pass class RootTransaction(Transaction): def __init__(self, connection): super().__init__(connection, None) @asyncio.coroutine def _do_rollback(self): yield from self._connection._rollback_impl() @asyncio.coroutine def _do_commit(self): yield from self._connection._commit_impl() class NestedTransaction(Transaction): """Represent a 'nested', or SAVEPOINT transaction. A new NestedTransaction object may be procured using the SAConnection.begin_nested() method. The interface is the same as that of Transaction class. """ _savepoint = None def __init__(self, connection, parent): super(NestedTransaction, self).__init__(connection, parent) @asyncio.coroutine def _do_rollback(self): assert self._savepoint is not None, "Broken transaction logic" if self._is_active: yield from self._connection._rollback_to_savepoint_impl( self._savepoint, self._parent) @asyncio.coroutine def _do_commit(self): assert self._savepoint is not None, "Broken transaction logic" if self._is_active: yield from self._connection._release_savepoint_impl( self._savepoint, self._parent) class TwoPhaseTransaction(Transaction): """Represent a two-phase transaction. A new TwoPhaseTransaction object may be procured using the SAConnection.begin_twophase() method. The interface is the same as that of Transaction class with the addition of the .prepare() method. """ def __init__(self, connection, xid): super().__init__(connection, None) self._is_prepared = False self._xid = xid @property def xid(self): """Returns twophase transaction id.""" return self._xid @asyncio.coroutine def prepare(self): """Prepare this TwoPhaseTransaction. After a PREPARE, the transaction can be committed. 
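        A sketch of the full two-phase flow (the connection setup and
        the statement are illustrative):

            trans = yield from conn.begin_twophase()
            yield from conn.execute("INSERT INTO tbl VALUES (1)")
            yield from trans.prepare()
            yield from trans.commit()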
""" if not self._parent.is_active: raise exc.InvalidRequestError("This transaction is inactive") yield from self._connection._prepare_twophase_impl(self._xid) self._is_prepared = True @asyncio.coroutine def _do_rollback(self): yield from self._connection._rollback_twophase_impl( self._xid, is_prepared=self._is_prepared) @asyncio.coroutine def _do_commit(self): yield from self._connection._commit_twophase_impl( self._xid, is_prepared=self._is_prepared) aiopg-0.7.0/aiopg/sa/engine.py0000664000175000017500000001201712515516714016732 0ustar andrewandrew00000000000000import asyncio import json import aiopg from .connection import SAConnection from .exc import InvalidRequestError from aiopg.connection import TIMEOUT try: from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2 except ImportError: # pragma: no cover raise ImportError('aiopg.sa requires sqlalchemy') _dialect = PGDialect_psycopg2(json_serializer=json.dumps, json_deserializer=lambda x: x) _dialect.implicit_returning = True _dialect.supports_native_enum = True _dialect.supports_smallserial = True # 9.2+ _dialect._backslash_escapes = False _dialect.supports_sane_multi_rowcount = True # psycopg 2.0.9+ _dialect._has_native_hstore = True @asyncio.coroutine def create_engine(dsn=None, *, minsize=10, maxsize=10, loop=None, dialect=_dialect, timeout=TIMEOUT, **kwargs): """A coroutine for Engine creation. Returns Engine instance with embedded connection pool. The pool has *minsize* opened connections to PostgreSQL server. """ if loop is None: loop = asyncio.get_event_loop() pool = yield from aiopg.create_pool(dsn, minsize=minsize, maxsize=maxsize, loop=loop, timeout=timeout, **kwargs) conn = yield from pool.acquire() try: real_dsn = conn.dsn return Engine(dialect, pool, real_dsn) finally: pool.release(conn) class Engine: """Connects a aiopg.Pool and sqlalchemy.engine.interfaces.Dialect together to provide a source of database connectivity and behavior. An Engine object is instantiated publicly using the create_engine coroutine. """ def __init__(self, dialect, pool, dsn): self._dialect = dialect self._pool = pool self._dsn = dsn @property def dialect(self): """An dialect for engine.""" return self._dialect @property def name(self): """A name of the dialect.""" return self._dialect.name @property def driver(self): """A driver of the dialect.""" return self._dialect.driver @property def dsn(self): """DSN connection info""" return self._dsn @property def timeout(self): return self._pool.timeout @property def minsize(self): return self._pool.minsize @property def maxsize(self): return self._pool.maxsize @property def size(self): return self._pool.size @property def freesize(self): return self._pool.freesize def close(self): """Close engine. Mark all engine connections to be closed on getting back to pool. Closed engine doesn't allow to acquire new connections. """ self._pool.close() def terminate(self): """Terminate engine. Terminate engine pool with instantly closing all acquired connections also. 
""" self._pool.terminate() @asyncio.coroutine def wait_closed(self): """Wait for closing all engine's connections.""" yield from self._pool.wait_closed() @asyncio.coroutine def acquire(self): """Get a connection from pool.""" raw = yield from self._pool.acquire() conn = SAConnection(raw, self) return conn def release(self, conn): """Revert back connection to pool.""" if conn.in_transaction: raise InvalidRequestError("Cannot release a connection with " "not finished transaction") raw = conn.connection self._pool.release(raw) def __enter__(self): raise RuntimeError( '"yield from" should be used as context manager expression') def __exit__(self, *args): # This must exist because __enter__ exists, even though that # always raises; that's how the with-statement works. pass # pragma: nocover def __iter__(self): # This is not a coroutine. It is meant to enable the idiom: # # with (yield from engine) as conn: # # # as an alternative to: # # conn = yield from engine.acquire() # try: # # finally: # engine.release(conn) conn = yield from self.acquire() return _ConnectionContextManager(self, conn) class _ConnectionContextManager: """Context manager. This enables the following idiom for acquiring and releasing a connection around a block: with (yield from engine) as conn: cur = yield from conn.cursor() while failing loudly when accidentally using: with engine: """ __slots__ = ('_engine', '_conn') def __init__(self, engine, conn): self._engine = engine self._conn = conn def __enter__(self): return self._conn def __exit__(self, *args): try: self._engine.release(self._conn) finally: self._engine = None self._conn = None aiopg-0.7.0/aiopg/sa/exc.py0000664000175000017500000000127112354551123016236 0ustar andrewandrew00000000000000class Error(Exception): """Generic error class.""" class ArgumentError(Error): """Raised when an invalid or conflicting function argument is supplied. This error generally corresponds to construction time state errors. """ class InvalidRequestError(ArgumentError): """aiopg.sa was asked to do something it can't do. This error generally corresponds to runtime state errors. """ class NoSuchColumnError(KeyError, InvalidRequestError): """A nonexistent column is requested from a ``RowProxy``.""" class ResourceClosedError(InvalidRequestError): """An operation was requested from a connection, cursor, or other object that's in a closed state.""" aiopg-0.7.0/aiopg/sa/connection.py0000664000175000017500000003076412515514757017642 0ustar andrewandrew00000000000000import asyncio import weakref from sqlalchemy.sql import ClauseElement from sqlalchemy.sql.dml import UpdateBase from sqlalchemy.sql.ddl import DDLElement from . import exc from .result import ResultProxy from .transaction import (RootTransaction, Transaction, NestedTransaction, TwoPhaseTransaction) class SAConnection: def __init__(self, connection, engine): self._connection = connection self._transaction = None self._savepoint_seq = 0 self._weak_results = weakref.WeakSet() self._engine = engine self._dialect = engine.dialect @asyncio.coroutine def execute(self, query, *multiparams, **params): """Executes a SQL query with optional parameters. query - a SQL query string or any sqlalchemy expression. *multiparams/**params - represent bound parameter values to be used in the execution. 
Typically, the format is a dictionary passed to *multiparams: yield from conn.execute( table.insert(), {"id":1, "value":"v1"}, ) ...or individual key/values interpreted by **params:: yield from conn.execute( table.insert(), id=1, value="v1" ) In the case that a plain SQL string is passed, a tuple or individual values in \*multiparams may be passed:: yield from conn.execute( "INSERT INTO table (id, value) VALUES (%d, %s)", (1, "v1") ) yield from conn.execute( "INSERT INTO table (id, value) VALUES (%s, %s)", 1, "v1" ) Returns ResultProxy instance with results of SQL query execution. """ cursor = yield from self._connection.cursor() dp = _distill_params(multiparams, params) if len(dp) > 1: raise exc.ArgumentError("aiopg doesn't support executemany") elif dp: dp = dp[0] if isinstance(query, str): yield from cursor.execute(query, dp) elif isinstance(query, ClauseElement): compiled = query.compile(dialect=self._dialect) # parameters = compiled.params if not isinstance(query, DDLElement): if dp and isinstance(dp, (list, tuple)): if isinstance(query, UpdateBase): dp = {c.key: pval for c, pval in zip(query.table.c, dp)} else: raise exc.ArgumentError("Don't mix sqlalchemy SELECT " "clause with positional " "parameters") compiled_parameters = [compiled.construct_params( dp)] processed_parameters = [] processors = compiled._bind_processors for compiled_params in compiled_parameters: params = {key: (processors[key](compiled_params[key]) if key in processors else compiled_params[key]) for key in compiled_params} processed_parameters.append(params) post_processed_params = self._dialect.execute_sequence_format( processed_parameters) else: if dp: raise exc.ArgumentError("Don't mix sqlalchemy DDL clause " "and execution with parameters") post_processed_params = [compiled.construct_params()] yield from cursor.execute(str(compiled), post_processed_params[0]) else: raise exc.ArgumentError("sql statement should be str or " "SQLAlchemy data " "selection/modification clause") ret = ResultProxy(self, cursor, self._dialect) self._weak_results.add(ret) return ret @asyncio.coroutine def scalar(self, query, *multiparams, **params): """Executes a SQL query and returns a scalar value.""" res = yield from self.execute(query, *multiparams, **params) return (yield from res.scalar()) @property def closed(self): """The readonly property that returns True if connections is closed.""" return self._connection is None or self._connection.closed @property def info(self): return self._connection.info @property def connection(self): return self._connection @asyncio.coroutine def begin(self): """Begin a transaction and return a transaction handle. The returned object is an instance of Transaction. This object represents the "scope" of the transaction, which completes when either the .rollback or .commit method is called. Nested calls to .begin on the same SAConnection instance will return new Transaction objects that represent an emulated transaction within the scope of the enclosing transaction, that is:: trans = yield from conn.begin() # outermost transaction trans2 = yield from conn.begin() # "nested" yield from trans2.commit() # does nothing yield from trans.commit() # actually commits Calls to .commit only have an effect when invoked via the outermost Transaction object, though the .rollback method of any of the Transaction objects will roll back the transaction. 
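        The rollback counterpart, sketched with the same names:

            trans = yield from conn.begin()    # outermost transaction
            trans2 = yield from conn.begin()   # "nested"
            yield from trans2.rollback()  # marks the whole transaction
                                          # as rolled back
            yield from trans.commit()     # raises InvalidRequestError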
See also: .begin_nested - use a SAVEPOINT .begin_twophase - use a two phase/XA transaction """ if self._transaction is None: self._transaction = RootTransaction(self) yield from self._begin_impl() return self._transaction else: return Transaction(self, self._transaction) @asyncio.coroutine def _begin_impl(self): cur = yield from self._connection.cursor() try: yield from cur.execute('BEGIN') finally: cur.close() @asyncio.coroutine def _commit_impl(self): cur = yield from self._connection.cursor() try: yield from cur.execute('COMMIT') finally: cur.close() self._transaction = None @asyncio.coroutine def _rollback_impl(self): cur = yield from self._connection.cursor() try: yield from cur.execute('ROLLBACK') finally: cur.close() @asyncio.coroutine def begin_nested(self): """Begin a nested transaction and return a transaction handle. The returned object is an instance of :class:`.NestedTransaction`. Nested transactions require SAVEPOINT support in the underlying database. Any transaction in the hierarchy may .commit() and .rollback(), however the outermost transaction still controls the overall .commit() or .rollback() of the transaction of a whole. """ if self._transaction is None: self._transaction = RootTransaction(self) yield from self._begin_impl() else: self._transaction = NestedTransaction(self, self._transaction) self._transaction._savepoint = yield from self._savepoint_impl() return self._transaction @asyncio.coroutine def _savepoint_impl(self, name=None): self._savepoint_seq += 1 name = 'aiopg_sa_savepoint_%s' % self._savepoint_seq cur = yield from self._connection.cursor() try: yield from cur.execute('SAVEPOINT ' + name) return name finally: cur.close() @asyncio.coroutine def _rollback_to_savepoint_impl(self, name, parent): cur = yield from self._connection.cursor() try: yield from cur.execute('ROLLBACK TO SAVEPOINT ' + name) finally: cur.close() self._transaction = parent @asyncio.coroutine def _release_savepoint_impl(self, name, parent): cur = yield from self._connection.cursor() try: yield from cur.execute('RELEASE SAVEPOINT ' + name) finally: cur.close() self._transaction = parent @asyncio.coroutine def begin_twophase(self, xid=None): """Begin a two-phase or XA transaction and return a transaction handle. The returned object is an instance of TwoPhaseTransaction, which in addition to the methods provided by Transaction, also provides a TwoPhaseTransaction.prepare() method. xid - the two phase transaction id. If not supplied, a random id will be generated. 
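        Previously prepared transactions can be listed and resolved
        with the companion helpers, sketched here:

            xids = yield from conn.recover_twophase()
            for xid in xids:
                yield from conn.rollback_prepared(xid)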
""" if self._transaction is not None: raise exc.InvalidRequestError( "Cannot start a two phase transaction when a transaction " "is already in progress.") if xid is None: xid = self._dialect.create_xid() self._transaction = TwoPhaseTransaction(self, xid) yield from self._begin_impl() return self._transaction @asyncio.coroutine def _prepare_twophase_impl(self, xid): yield from self.execute("PREPARE TRANSACTION '%s'" % xid) @asyncio.coroutine def recover_twophase(self): """Return a list of prepared twophase transaction ids.""" result = yield from self.execute("SELECT gid FROM pg_prepared_xacts") return [row[0] for row in result] @asyncio.coroutine def rollback_prepared(self, xid, *, is_prepared=True): """Rollback prepared twophase transaction.""" if is_prepared: yield from self.execute("ROLLBACK PREPARED '%s'" % xid) else: yield from self._rollback_impl() @asyncio.coroutine def commit_prepared(self, xid, *, is_prepared=True): """Commit prepared twophase transaction.""" if is_prepared: yield from self.execute("COMMIT PREPARED '%s'" % xid) else: yield from self._commit_impl() @property def in_transaction(self): """Return True if a transaction is in progress.""" return self._transaction is not None and self._transaction.is_active @asyncio.coroutine def close(self): """Close this SAConnection. This results in a release of the underlying database resources, that is, the underlying connection referenced internally. The underlying connection is typically restored back to the connection-holding Pool referenced by the Engine that produced this SAConnection. Any transactional state present on the underlying connection is also unconditionally released via calling Transaction.rollback() method. After .close() is called, the SAConnection is permanently in a closed state, and will allow no further operations. """ if self._connection is None: return if self._transaction is not None: yield from self._transaction.rollback() self._transaction = None # don't close underlying connection, it can be reused by pool # conn.close() self._engine.release(self) self._connection = None self._engine = None def _distill_params(multiparams, params): """Given arguments from the calling form *multiparams, **params, return a list of bind parameter structures, usually a list of dictionaries. In the case of 'raw' execution which accepts positional parameters, it may be a list of tuples or lists. """ if not multiparams: if params: return [params] else: return [] elif len(multiparams) == 1: zero = multiparams[0] if isinstance(zero, (list, tuple)): if not zero or hasattr(zero[0], '__iter__') and \ not hasattr(zero[0], 'strip'): # execute(stmt, [{}, {}, {}, ...]) # execute(stmt, [(), (), (), ...]) return zero else: # execute(stmt, ("value", "value")) return [zero] elif hasattr(zero, 'keys'): # execute(stmt, {"key":"value"}) return [zero] else: # execute(stmt, "value") return [[zero]] else: if (hasattr(multiparams[0], '__iter__') and not hasattr(multiparams[0], 'strip')): return multiparams else: return [multiparams] aiopg-0.7.0/aiopg/cursor.py0000664000175000017500000002674312515715775016422 0ustar andrewandrew00000000000000import asyncio import warnings import psycopg2 from .log import logger class Cursor: def __init__(self, conn, impl, timeout, echo): self._conn = conn self._impl = impl self._timeout = timeout self._echo = echo @property def echo(self): """Return echo mode status.""" return self._echo @property def description(self): """This read-only attribute is a sequence of 7-item sequences. 
Each of these sequences is a collections.namedtuple containing information describing one result column: 0. name: the name of the column returned. 1. type_code: the PostgreSQL OID of the column. 2. display_size: the actual length of the column in bytes. 3. internal_size: the size in bytes of the column associated to this column on the server. 4. precision: total number of significant digits in columns of type NUMERIC. None for other types. 5. scale: count of decimal digits in the fractional part in columns of type NUMERIC. None for other types. 6. null_ok: always None as not easy to retrieve from the libpq. This attribute will be None for operations that do not return rows or if the cursor has not had an operation invoked via the execute() method yet. """ return self._impl.description def close(self): """Close the cursor now.""" self._impl.close() @property def closed(self): """Read-only boolean attribute: specifies if the cursor is closed.""" return self._impl.closed @property def connection(self): """Read-only attribute returning a reference to the `Connection`.""" return self._conn @property def raw(self): """Underlying psycopg cursor object, readonly""" return self._impl @property def name(self): # Not supported return self._impl.name @property def scrollable(self): # Not supported return self._impl.scrollable @scrollable.setter def scrollable(self, val): # Not supported self._impl.scrollable = val @property def withhold(self): # Not supported return self._impl.withhold @withhold.setter def withhold(self, val): # Not supported self._impl.withhold = val @asyncio.coroutine def execute(self, operation, parameters=None, *, timeout=None): """Prepare and execute a database operation (query or command). Parameters may be provided as sequence or mapping and will be bound to variables in the operation. Variables are specified either with positional %s or named %({name})s placeholders. """ if timeout is None: timeout = self._timeout waiter = self._conn._create_waiter('cursor.execute') if self._echo: logger.info(operation) logger.info("%r", parameters) try: self._impl.execute(operation, parameters) except: self._conn._waiter = None raise else: yield from self._conn._poll(waiter, timeout) @asyncio.coroutine def executemany(self, operation, seq_of_parameters): # Not supported raise psycopg2.ProgrammingError( "executemany cannot be used in asynchronous mode") @asyncio.coroutine def callproc(self, procname, parameters=None, *, timeout=None): """Call a stored database procedure with the given name. The sequence of parameters must contain one entry for each argument that the procedure expects. The result of the call is returned as modified copy of the input sequence. Input parameters are left untouched, output and input/output parameters replaced with possibly new values. """ if timeout is None: timeout = self._timeout waiter = self._conn._create_waiter('cursor.callproc') if self._echo: logger.info("CALL %s", procname) logger.info("%r", parameters) try: self._impl.callproc(procname, parameters) except: self._conn._waiter = None raise else: yield from self._conn._poll(waiter, timeout) @asyncio.coroutine def mogrify(self, operation, parameters=None): """Return a query string after arguments binding. The string returned is exactly the one that would be sent to the database running the .execute() method or similar. 
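        For example (the query and values are illustrative):

            q = yield from cur.mogrify("SELECT %s, %s", (42, 'x'))
            # q == b"SELECT 42, 'x'"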
""" ret = self._impl.mogrify(operation, parameters) assert not self._conn._isexecuting(), ("Don't support server side " "mogrify") return ret @asyncio.coroutine def setinputsizes(self, sizes): """This method is exposed in compliance with the DBAPI. It currently does nothing but it is safe to call it. """ self._impl.setinputsizes(sizes) @asyncio.coroutine def fetchone(self): """Fetch the next row of a query result set. Returns a single tuple, or None when no more data is available. """ ret = self._impl.fetchone() assert not self._conn._isexecuting(), ("Don't support server side " "cursors yet") return ret @asyncio.coroutine def fetchmany(self, size=None): """Fetch the next set of rows of a query result. Returns a list of tuples. An empty list is returned when no more rows are available. The number of rows to fetch per call is specified by the parameter. If it is not given, the cursor's .arraysize determines the number of rows to be fetched. The method should try to fetch as many rows as indicated by the size parameter. If this is not possible due to the specified number of rows not being available, fewer rows may be returned. """ if size is None: size = self._impl.arraysize ret = self._impl.fetchmany(size) assert not self._conn._isexecuting(), ("Don't support server side " "cursors yet") return ret @asyncio.coroutine def fetchall(self): """Fetch all (remaining) rows of a query result. Returns them as a list of tuples. An empty list is returned if there is no more record to fetch. """ ret = self._impl.fetchall() assert not self._conn._isexecuting(), ("Don't support server side " "cursors yet") return ret @asyncio.coroutine def scroll(self, value, mode="relative"): """Scroll to a new position according to mode. If mode is relative (default), value is taken as offset to the current position in the result set, if set to absolute, value states an absolute target position. """ ret = self._impl.scroll(value, mode) assert not self._conn._isexecuting(), ("Don't support server side " "cursors yet") return ret @property def arraysize(self): """How many rows will be returned by fetchmany() call. This read/write attribute specifies the number of rows to fetch at a time with fetchmany(). It defaults to 1 meaning to fetch a single row at a time. """ return self._impl.arraysize @arraysize.setter def arraysize(self, val): """How many rows will be returned by fetchmany() call. This read/write attribute specifies the number of rows to fetch at a time with fetchmany(). It defaults to 1 meaning to fetch a single row at a time. """ self._impl.arraysize = val @property def itersize(self): # Not supported return self._impl.itersize @itersize.setter def itersize(self, val): # Not supported self._impl.itersize = val @property def rowcount(self): """Returns the number of rows that has been produced of affected. This read-only attribute specifies the number of rows that the last :meth:`execute` produced (for Data Query Language statements like SELECT) or affected (for Data Manipulation Language statements like UPDATE or INSERT). The attribute is -1 in case no .execute() has been performed on the cursor or the row count of the last operation if it can't be determined by the interface. """ return self._impl.rowcount @property def rownumber(self): """Row index. This read-only attribute provides the current 0-based index of the cursor in the result set or ``None`` if the index cannot be determined.""" return self._impl.rownumber @property def lastrowid(self): """OID of the last inserted row. 
This read-only attribute provides the OID of the last row inserted by the cursor. If the table wasn't created with OID support or the last operation is not a single record insert, the attribute is set to None. """ return self._impl.lastrowid @property def query(self): """The last executed query string. Read-only attribute containing the body of the last query sent to the backend (including bound arguments) as bytes string. None if no query has been executed yet. """ return self._impl.query @property def statusmessage(self): """the message returned by the last command.""" return self._impl.statusmessage # @asyncio.coroutine # def cast(self, old, s): # ... @property def tzinfo_factory(self): """The time zone factory used to handle data types such as `TIMESTAMP WITH TIME ZONE`. """ return self._impl.tzinfo_factory @tzinfo_factory.setter def tzinfo_factory(self, val): """The time zone factory used to handle data types such as `TIMESTAMP WITH TIME ZONE`. """ self._impl.tzinfo_factory = val @asyncio.coroutine def nextset(self): # Not supported self._impl.nextset() # raises psycopg2.NotSupportedError @asyncio.coroutine def setoutputsize(self, size, column=None): # Does nothing self._impl.setoutputsize(size, column) @asyncio.coroutine def copy_from(self, file, table, sep='\t', null='\\N', size=8192, columns=None): raise psycopg2.ProgrammingError( "copy_from cannot be used in asynchronous mode") @asyncio.coroutine def copy_to(self, file, table, sep='\t', null='\\N', columns=None): raise psycopg2.ProgrammingError( "copy_to cannot be used in asynchronous mode") @asyncio.coroutine def copy_expert(self, sql, file, size=8192): raise psycopg2.ProgrammingError( "copy_expert cannot be used in asynchronous mode") @property def timeout(self): """Return default timeout for cursor operations.""" return self._timeout def __iter__(self): warnings.warn("Iteration over cursor is deprecated", DeprecationWarning, stacklevel=2) while True: row = yield from self.fetchone() if row is None: raise StopIteration else: yield row aiopg-0.7.0/aiopg/log.py0000664000175000017500000000017412354551123015636 0ustar andrewandrew00000000000000"""Logging configuration.""" import logging # Name the logger after the package. 
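#
# An application can surface aiopg diagnostics (the ``echo`` mode output,
# pool warnings) through the standard logging machinery; the setup below
# is an illustrative sketch, not part of aiopg itself:
#
#     import logging
#     logging.basicConfig(level=logging.INFO)
#     logging.getLogger('aiopg').setLevel(logging.DEBUG)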
logger = logging.getLogger(__package__)
aiopg-0.7.0/aiopg/pool.py0000664000175000017500000002301012515773647016040 0ustar andrewandrew00000000000000
import asyncio
import collections
import sys
import warnings

from psycopg2.extensions import TRANSACTION_STATUS_IDLE

from .connection import connect, TIMEOUT
from .log import logger


PY_34 = sys.version_info >= (3, 4)


@asyncio.coroutine
def create_pool(dsn=None, *, minsize=10, maxsize=10,
                loop=None, timeout=TIMEOUT,
                enable_json=True, enable_hstore=True,
                echo=False,
                **kwargs):
    if loop is None:
        loop = asyncio.get_event_loop()

    pool = Pool(dsn, minsize, maxsize, loop, timeout,
                enable_json=enable_json, enable_hstore=enable_hstore,
                echo=echo, **kwargs)
    if minsize > 0:
        with (yield from pool._cond):
            yield from pool._fill_free_pool(False)
    return pool


class Pool(asyncio.AbstractServer):
    """Connection pool"""

    def __init__(self, dsn, minsize, maxsize, loop, timeout, *,
                 enable_json, enable_hstore, echo, **kwargs):
        if minsize < 0:
            raise ValueError("minsize should be zero or greater")
        if maxsize < minsize:
            raise ValueError("maxsize should be not less than minsize")
        self._dsn = dsn
        self._minsize = minsize
        self._loop = loop
        self._timeout = timeout
        self._enable_json = enable_json
        self._enable_hstore = enable_hstore
        self._echo = echo
        self._conn_kwargs = kwargs
        self._acquiring = 0
        self._free = collections.deque(maxlen=maxsize)
        self._cond = asyncio.Condition(loop=loop)
        self._used = set()
        self._terminated = set()
        self._closing = False
        self._closed = False

    @property
    def echo(self):
        return self._echo

    @property
    def minsize(self):
        return self._minsize

    @property
    def maxsize(self):
        return self._free.maxlen

    @property
    def size(self):
        return self.freesize + len(self._used) + self._acquiring

    @property
    def freesize(self):
        return len(self._free)

    @property
    def timeout(self):
        return self._timeout

    @asyncio.coroutine
    def clear(self):
        """Close all free connections in pool."""
        with (yield from self._cond):
            while self._free:
                conn = self._free.popleft()
                yield from conn.close()
            self._cond.notify()

    def close(self):
        """Close pool.

        Mark all pool connections to be closed on getting back to pool.
        A closed pool doesn't allow acquiring new connections.
        """
        if self._closed:
            return
        self._closing = True

    def terminate(self):
        """Terminate pool.

        Close the pool, instantly closing all acquired connections as
        well.
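
        A typical shutdown sketch (pool creation omitted):

            pool.terminate()
            yield from pool.wait_closed()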
""" self.close() for conn in list(self._used): conn.close() self._terminated.add(conn) self._used.clear() @asyncio.coroutine def wait_closed(self): """Wait for closing all pool's connections.""" if self._closed: return if not self._closing: raise RuntimeError(".wait_closed() should be called " "after .close()") while self._free: conn = self._free.popleft() conn.close() with (yield from self._cond): while self.size > self.freesize: yield from self._cond.wait() self._closed = True @asyncio.coroutine def acquire(self): """Acquire free connection from the pool.""" if self._closing: raise RuntimeError("Cannot acquire connection after closing pool") with (yield from self._cond): while True: yield from self._fill_free_pool(True) if self._free: conn = self._free.popleft() assert not conn.closed, conn assert conn not in self._used, (conn, self._used) self._used.add(conn) return conn else: yield from self._cond.wait() @asyncio.coroutine def _fill_free_pool(self, override_min): while self.size < self.minsize: self._acquiring += 1 try: conn = yield from connect( self._dsn, loop=self._loop, timeout=self._timeout, enable_json=self._enable_json, enable_hstore=self._enable_hstore, echo=self._echo, **self._conn_kwargs) # raise exception if pool is closing self._free.append(conn) self._cond.notify() finally: self._acquiring -= 1 if self._free: return if override_min and self.size < self.maxsize: self._acquiring += 1 try: conn = yield from connect( self._dsn, loop=self._loop, timeout=self._timeout, enable_json=self._enable_json, enable_hstore=self._enable_hstore, echo=self._echo, **self._conn_kwargs) # raise exception if pool is closing self._free.append(conn) self._cond.notify() finally: self._acquiring -= 1 @asyncio.coroutine def _wakeup(self): with (yield from self._cond): self._cond.notify() def release(self, conn): """Release free connection back to the connection pool. This is NOT a coroutine. """ if conn in self._terminated: assert conn.closed, conn self._terminated.remove(conn) return assert conn in self._used, (conn, self._used) self._used.remove(conn) if not conn.closed: tran_status = conn._conn.get_transaction_status() if tran_status != TRANSACTION_STATUS_IDLE: logger.warning( "Invalid transaction status on released connection: %d", tran_status) conn.close() return if self._closing: conn.close() else: self._free.append(conn) asyncio.Task(self._wakeup(), loop=self._loop) @asyncio.coroutine def cursor(self, name=None, cursor_factory=None, scrollable=None, withhold=False, *, timeout=None): """XXX""" conn = yield from self.acquire() cur = yield from conn.cursor(name=name, cursor_factory=cursor_factory, scrollable=scrollable, withhold=withhold, timeout=timeout) return _CursorContextManager(self, conn, cur) def __enter__(self): raise RuntimeError( '"yield from" should be used as context manager expression') def __exit__(self, *args): # This must exist because __enter__ exists, even though that # always raises; that's how the with-statement works. pass # pragma: nocover def __iter__(self): # This is not a coroutine. 
        # It is meant to enable the idiom:
        #
        #     with (yield from pool) as conn:
        #         <block>
        #
        # as an alternative to:
        #
        #     conn = yield from pool.acquire()
        #     try:
        #         <block>
        #     finally:
        #         conn.release()
        conn = yield from self.acquire()
        return _ConnectionContextManager(self, conn)

    if PY_34:  # pragma: no branch
        def __del__(self):
            try:
                self._free
            except AttributeError:
                return  # frame has been cleared, __dict__ is empty
            if self._free:
                left = 0
                while self._free:
                    conn = self._free.popleft()
                    conn.close()
                    left += 1
                warnings.warn(
                    "Unclosed {} connections in {!r}".format(left, self),
                    ResourceWarning)


class _ConnectionContextManager:
    """Context manager.

    This enables the following idiom for acquiring and releasing a
    connection around a block:

        with (yield from pool) as conn:
            cur = yield from conn.cursor()

    while failing loudly when accidentally using:

        with pool:
            <block>
    """

    __slots__ = ('_pool', '_conn')

    def __init__(self, pool, conn):
        self._pool = pool
        self._conn = conn

    def __enter__(self):
        return self._conn

    def __exit__(self, *args):
        try:
            self._pool.release(self._conn)
        finally:
            self._pool = None
            self._conn = None


class _CursorContextManager:
    """Context manager.

    This enables the following idiom for acquiring and releasing a
    cursor around a block:

        with (yield from pool.cursor()) as cur:
            yield from cur.execute("SELECT 1")

    while failing loudly when accidentally using:

        with pool:
            <block>
    """

    __slots__ = ('_pool', '_conn', '_cur')

    def __init__(self, pool, conn, cur):
        self._pool = pool
        self._conn = conn
        self._cur = cur

    def __enter__(self):
        return self._cur

    def __exit__(self, *args):
        try:
            self._cur._impl.close()
            self._pool.release(self._conn)
        finally:
            self._pool = None
            self._conn = None
            self._cur = None
aiopg-0.7.0/aiopg/connection.py0000664000175000017500000003123312515772663017231 0ustar andrewandrew00000000000000
import asyncio
import sys
import warnings

import psycopg2
from psycopg2.extensions import (
    POLL_OK, POLL_READ, POLL_WRITE, POLL_ERROR)
from psycopg2 import extras

from .cursor import Cursor


__all__ = ('connect',)

TIMEOUT = 60.0
PY_34 = sys.version_info >= (3, 4)


@asyncio.coroutine
def _enable_hstore(conn):
    cur = yield from conn.cursor()
    yield from cur.execute("""\
        SELECT t.oid, typarray
        FROM pg_type t JOIN pg_namespace ns
            ON typnamespace = ns.oid
        WHERE typname = 'hstore';
        """)
    rv0, rv1 = [], []
    for oids in (yield from cur.fetchall()):
        rv0.append(oids[0])
        rv1.append(oids[1])

    cur.close()
    return tuple(rv0), tuple(rv1)


@asyncio.coroutine
def connect(dsn=None, *, timeout=TIMEOUT, loop=None,
            enable_json=True, enable_hstore=True,
            echo=False,
            **kwargs):
    """A factory for connecting to PostgreSQL.

    The coroutine accepts all parameters that psycopg2.connect() does
    plus optional keyword-only `loop` and `timeout` parameters.

    Returns instantiated Connection object.
    """
    if loop is None:
        loop = asyncio.get_event_loop()

    waiter = asyncio.Future(loop=loop)
    conn = Connection(dsn, loop, timeout, waiter, bool(echo), **kwargs)
    try:
        yield from conn._poll(waiter, timeout)
    except Exception:
        conn.close()
        raise
    if enable_json:
        extras.register_default_json(conn._conn)
    if enable_hstore:
        oids = yield from _enable_hstore(conn)
        if oids is not None:
            oid, array_oid = oids
            extras.register_hstore(conn._conn, oid=oid, array_oid=array_oid)
    return conn


class Connection:
    """Low-level asynchronous interface for wrapped psycopg2 connection.

    The Connection instance encapsulates a database session.

    Provides support for creating asynchronous cursors.
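
    A minimal usage sketch (the DSN is illustrative); instances are
    created by the connect() coroutine, not directly:

        conn = yield from aiopg.connect('dbname=test user=test')
        cur = yield from conn.cursor()
        yield from cur.execute('SELECT 1')
        conn.close()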
""" def __init__(self, dsn, loop, timeout, waiter, echo, **kwargs): self._loop = loop self._conn = psycopg2.connect(dsn, async=True, **kwargs) self._dsn = self._conn.dsn assert self._conn.isexecuting(), "Is conn async at all???" self._fileno = self._conn.fileno() self._timeout = timeout self._waiter = waiter self._reading = False self._writing = False self._echo = echo self._ready() def _ready(self): if self._waiter is None: self._fatal_error("Fatal error on aiopg connection: " "bad state in _ready callback") return try: state = self._conn.poll() except (psycopg2.Warning, psycopg2.Error) as exc: if self._reading: self._loop.remove_reader(self._fileno) self._reading = False if self._writing: self._loop.remove_writer(self._fileno) self._writing = False if not self._waiter.cancelled(): self._waiter.set_exception(exc) else: if state == POLL_OK: if self._reading: self._loop.remove_reader(self._fileno) self._reading = False if self._writing: self._loop.remove_writer(self._fileno) self._writing = False if not self._waiter.cancelled(): self._waiter.set_result(None) elif state == POLL_READ: if not self._reading: self._loop.add_reader(self._fileno, self._ready) self._reading = True if self._writing: self._loop.remove_writer(self._fileno) self._writing = False elif state == POLL_WRITE: if self._reading: self._loop.remove_reader(self._fileno) self._reading = False if not self._writing: self._loop.add_writer(self._fileno, self._ready) self._writing = True elif state == POLL_ERROR: self._fatal_error("Fatal error on aiopg connection: " "POLL_ERROR from underlying .poll() call") else: self._fatal_error("Fatal error on aiopg connection: " "unknown answer {} from underlying " ".poll() call" .format(state)) def _fatal_error(self, message): # Should be called from exception handler only. self._loop.call_exception_handler({ 'message': message, 'connection': self, }) self.close() if self._waiter and not self._waiter.done(): self._waiter.set_exception(psycopg2.OperationalError(message)) def _create_waiter(self, func_name): if self._waiter is not None: raise RuntimeError('%s() called while another coroutine is ' 'already waiting for incoming data' % func_name) self._waiter = asyncio.Future(loop=self._loop) return self._waiter @asyncio.coroutine def _poll(self, waiter, timeout): assert waiter is self._waiter, (waiter, self._waiter) self._ready() try: yield from asyncio.wait_for(self._waiter, timeout, loop=self._loop) finally: self._waiter = None def _isexecuting(self): return self._conn.isexecuting() @asyncio.coroutine def cursor(self, name=None, cursor_factory=None, scrollable=None, withhold=False, timeout=None): """A coroutine that returns a new cursor object using the connection. *cursor_factory* argument can be used to create non-standard cursors. The argument must be suclass of `psycopg2.extensions.cursor`. *name*, *scrollable* and *withhold* parameters are not supported by psycopg in asynchronous mode. 
""" if timeout is None: timeout = self._timeout impl = yield from self._cursor(name=name, cursor_factory=cursor_factory, scrollable=scrollable, withhold=withhold) return Cursor(self, impl, timeout, self._echo) @asyncio.coroutine def _cursor(self, name=None, cursor_factory=None, scrollable=None, withhold=False): if cursor_factory is None: impl = self._conn.cursor(name=name, scrollable=scrollable, withhold=withhold) else: impl = self._conn.cursor(name=name, cursor_factory=cursor_factory, scrollable=scrollable, withhold=withhold) return impl def close(self): """Remove the connection from the event_loop and close it.""" # N.B. If connection contains uncommitted transaction the # transaction will be discarded if self._reading: self._loop.remove_reader(self._fileno) self._reading = False if self._writing: self._loop.remove_writer(self._fileno) self._writing = False self._conn.close() if self._waiter is not None and not self._waiter.done(): self._waiter.set_exception( psycopg2.OperationalError("Connection closed")) ret = asyncio.Future(loop=self._loop) ret.set_result(None) return ret @property def closed(self): """Connection status. Read-only attribute reporting whether the database connection is open (False) or closed (True). """ return self._conn.closed @property def raw(self): """Underlying psycopg connection object, readonly""" return self._conn @asyncio.coroutine def commit(self): raise psycopg2.ProgrammingError( "commit cannot be used in asynchronous mode") @asyncio.coroutine def rollback(self): raise psycopg2.ProgrammingError( "rollback cannot be used in asynchronous mode") # TPC @asyncio.coroutine def xid(self, format_id, gtrid, bqual): return self._conn.xid(format_id, gtrid, bqual) @asyncio.coroutine def tpc_begin(self, xid=None): raise psycopg2.ProgrammingError( "tpc_begin cannot be used in asynchronous mode") @asyncio.coroutine def tpc_prepare(self): raise psycopg2.ProgrammingError( "tpc_prepare cannot be used in asynchronous mode") @asyncio.coroutine def tpc_commit(self, xid=None): raise psycopg2.ProgrammingError( "tpc_commit cannot be used in asynchronous mode") @asyncio.coroutine def tpc_rollback(self, xid=None): raise psycopg2.ProgrammingError( "tpc_rollback cannot be used in asynchronous mode") @asyncio.coroutine def tpc_recover(self): raise psycopg2.ProgrammingError( "tpc_recover cannot be used in asynchronous mode") @asyncio.coroutine def cancel(self, timeout=None): """Cancel the current database operation.""" waiter = self._create_waiter('cancel') self._conn.cancel() if timeout is None: timeout = self._timeout yield from self._poll(waiter, timeout) @asyncio.coroutine def reset(self): raise psycopg2.ProgrammingError( "reset cannot be used in asynchronous mode") @property def dsn(self): """DSN connection string. Read-only attribute representing dsn connection string used for connectint to PostgreSQL server. """ return self._dsn @asyncio.coroutine def set_session(self, *, isolation_level=None, readonly=None, deferrable=None, autocommit=None): raise psycopg2.ProgrammingError( "set_session cannot be used in asynchronous mode") @property def autocommit(self): """Autocommit status""" return self._conn.autocommit @autocommit.setter def autocommit(self, val): """Autocommit status""" self._conn.autocommit = val @property def isolation_level(self): """Transaction isolation level. The only allowed value is ISOLATION_LEVEL_READ_COMMITTED. """ return self._conn.isolation_level @asyncio.coroutine def set_isolation_level(self, val): """Transaction isolation level. 
The only allowed value is ISOLATION_LEVEL_READ_COMMITTED. """ self._conn.set_isolation_level(val) @property def encoding(self): """Client encoding for SQL operations.""" return self._conn.encoding @asyncio.coroutine def set_client_encoding(self, val): self._conn.set_client_encoding(val) @property def notices(self): """A list of all db messages sent to the client during the session.""" return self._conn.notices @property def cursor_factory(self): """The default cursor factory used by .cursor().""" return self._conn.cursor_factory @asyncio.coroutine def get_backend_pid(self): """Returns the PID of the backend server process.""" return self._conn.get_backend_pid() @asyncio.coroutine def get_parameter_status(self, parameter): """Look up a current parameter setting of the server.""" return self._conn.get_parameter_status(parameter) @asyncio.coroutine def get_transaction_status(self): """Return the current session transaction status as an integer.""" return self._conn.get_transaction_status() @property def protocol_version(self): """A read-only integer representing protocol being used.""" return self._conn.protocol_version @property def server_version(self): """A read-only integer representing the backend version.""" return self._conn.server_version @property def status(self): """A read-only integer representing the status of the connection.""" return self._conn.status @asyncio.coroutine def lobject(self, *args, **kwargs): raise psycopg2.ProgrammingError( "lobject cannot be used in asynchronous mode") @property def timeout(self): """Return default timeout for connection operations.""" return self._timeout @property def echo(self): """Return echo mode status.""" return self._echo if PY_34: # pragma: no branch def __del__(self): if not self._conn.closed: warnings.warn("Unclosed connection {!r}".format(self), ResourceWarning) self.close() aiopg-0.7.0/README.rst0000664000175000017500000000371512474311622015100 0ustar andrewandrew00000000000000aiopg ======= .. image:: https://travis-ci.org/aio-libs/aiopg.svg?branch=master :target: https://travis-ci.org/aio-libs/aiopg .. image:: https://coveralls.io/repos/aio-libs/aiopg/badge.svg :target: https://coveralls.io/r/aio-libs/aiopg **aiopg** is a library for accessing a PostgreSQL_ database from the asyncio_ (PEP-3156/tulip) framework. It wraps asynchronous features of the Psycopg database driver. Example ------- :: import asyncio from aiopg.pool import create_pool dsn = 'dbname=jetty user=nick password=1234 host=localhost port=5432' @asyncio.coroutine def test_select(): pool = yield from create_pool(dsn) with (yield from pool) as conn: cur = yield from conn.cursor() yield from cur.execute('SELECT 1') ret = yield from cur.fetchone() assert ret == (1,), ret asyncio.get_event_loop().run_until_complete(test_select()) Example of SQLAlchemy optional integration ------------------------------------------- :: import asyncio from aiopg.sa import create_engine import sqlalchemy as sa metadata = sa.MetaData() tbl = sa.Table('tbl', metadata, sa.Column('id', sa.Integer, primary_key=True), sa.Column('val', sa.String(255))) @asyncio.coroutine def go(): engine = yield from create_engine(user='aiopg', database='aiopg', host='127.0.0.1', password='passwd') with (yield from engine) as conn: yield from conn.execute(tbl.insert().values(val='abc')) res = yield from conn.execute(tbl.select()) for row in res: print(row.id, row.val) asyncio.get_event_loop().run_until_complete(go()) .. _PostgreSQL: http://www.postgresql.org/ .. 
.. _PostgreSQL: http://www.postgresql.org/
.. _asyncio: http://docs.python.org/3.4/library/asyncio.html

Please use::

   $ python3 runtests.py

for executing project's unittests
aiopg-0.7.0/LICENSE.txt0000664000175000017500000000242312464130063015224 0ustar andrewandrew00000000000000Copyright (c) 2014, 2015, Andrew Svetlov
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
aiopg-0.7.0/setup.cfg0000664000175000017500000000007312515777172015240 0ustar andrewandrew00000000000000[egg_info]
tag_build = 
tag_svn_revision = 0
tag_date = 0
aiopg-0.7.0/aiopg.egg-info/0000775000175000017500000000000012515777172016210 5ustar andrewandrew00000000000000aiopg-0.7.0/aiopg.egg-info/requires.txt0000664000175000017500000000004512515777172020607 0ustar andrewandrew00000000000000psycopg2>=2.5.2

[sa]
sqlalchemy>=0.9
aiopg-0.7.0/aiopg.egg-info/SOURCES.txt0000664000175000017500000000062212515777172020074 0ustar andrewandrew00000000000000CHANGES.txt
LICENSE.txt
MANIFEST.in
README.rst
setup.py
aiopg/__init__.py
aiopg/connection.py
aiopg/cursor.py
aiopg/log.py
aiopg/pool.py
aiopg.egg-info/PKG-INFO
aiopg.egg-info/SOURCES.txt
aiopg.egg-info/dependency_links.txt
aiopg.egg-info/requires.txt
aiopg.egg-info/top_level.txt
aiopg/sa/__init__.py
aiopg/sa/connection.py
aiopg/sa/engine.py
aiopg/sa/exc.py
aiopg/sa/result.py
aiopg/sa/transaction.py
aiopg-0.7.0/aiopg.egg-info/top_level.txt0000664000175000017500000000001412515777172020735 0ustar andrewandrew00000000000000aiopg
tests
aiopg-0.7.0/aiopg.egg-info/PKG-INFO0000664000175000017500000001514112515777172017307 0ustar andrewandrew00000000000000Metadata-Version: 1.1
Name: aiopg
Version: 0.7.0
Summary: Postgres integration with asyncio.
Home-page: http://aiopg.readthedocs.org
Author: Andrew Svetlov
Author-email: andrew.svetlov@gmail.com
License: BSD
Download-URL: https://pypi.python.org/pypi/aiopg
Description: aiopg
        =======
        
        .. image:: https://travis-ci.org/aio-libs/aiopg.svg?branch=master
           :target: https://travis-ci.org/aio-libs/aiopg
        .. image:: https://coveralls.io/repos/aio-libs/aiopg/badge.svg
           :target: https://coveralls.io/r/aio-libs/aiopg
        
        **aiopg** is a library for accessing a PostgreSQL_ database
        from the asyncio_ (PEP-3156/tulip) framework. It wraps
        asynchronous features of the Psycopg database driver.
Example ------- :: import asyncio from aiopg.pool import create_pool dsn = 'dbname=jetty user=nick password=1234 host=localhost port=5432' @asyncio.coroutine def test_select(): pool = yield from create_pool(dsn) with (yield from pool) as conn: cur = yield from conn.cursor() yield from cur.execute('SELECT 1') ret = yield from cur.fetchone() assert ret == (1,), ret asyncio.get_event_loop().run_until_complete(test_select()) Example of SQLAlchemy optional integration ------------------------------------------- :: import asyncio from aiopg.sa import create_engine import sqlalchemy as sa metadata = sa.MetaData() tbl = sa.Table('tbl', metadata, sa.Column('id', sa.Integer, primary_key=True), sa.Column('val', sa.String(255))) @asyncio.coroutine def go(): engine = yield from create_engine(user='aiopg', database='aiopg', host='127.0.0.1', password='passwd') with (yield from engine) as conn: yield from conn.execute(tbl.insert().values(val='abc')) res = yield from conn.execute(tbl.select()) for row in res: print(row.id, row.val) asyncio.get_event_loop().run_until_complete(go()) .. _PostgreSQL: http://www.postgresql.org/ .. _asyncio: http://docs.python.org/3.4/library/asyncio.html Please use:: $ python3 runtests.py for executing project's unittests CHANGES ------- 0.7.0 (2015-04-22) ^^^^^^^^^^^^^^^^^^ * Get rid of resource leak on connection failure. * Report ResourceWarning on non-closed connections. * Deprecate iteration protocol support in cursor and ResultProxy. * Release sa connection to pool on `connection.close()`. 0.6.0 (2015-02-03) ^^^^^^^^^^^^^^^^^^ * Accept dict, list, tuple, named and positional parameters in `SAConnection.execute()` 0.5.2 (2014-12-08) ^^^^^^^^^^^^^^^^^^ * Minor release, fixes a bug that leaves connection in broken state after `cursor.execute()` failure. 0.5.1 (2014-10-31) ^^^^^^^^^^^^^^^^^^ * Fix a bug for processing transactions in line. 0.5.0 (2014-10-31) ^^^^^^^^^^^^^^^^^^ * Add .terminate() to Pool and Engine * Reimplement connection pool (now pool size cannot be greater than pool.maxsize) * Add .close() and .wait_closed() to Pool and Engine * Add minsize, maxsize, size and freesize properties to sa.Engine * Support *echo* parameter for logging executed SQL commands * Connection.close() is not a coroutine (but we keep backward compatibility). 0.4.1 (2014-10-02) ^^^^^^^^^^^^^^^^^^ * make cursor iterable * update docs 0.4.0 (2014-10-02) ^^^^^^^^^^^^^^^^^^ * add timeouts for database operations. * Autoregister psycopg2 support for json data type. * Support JSON in aiopg.sa * Support ARRAY in aiopg.sa * Autoregister hstore support if present in connected DB * Support HSTORE in aiopg.sa 0.3.2 (2014-07-07) ^^^^^^^^^^^^^^^^^^ * change signature to cursor.execute(operation, parameters=None) to follow psycopg2 convention. 0.3.1 (2014-07-04) ^^^^^^^^^^^^^^^^^^ * Forward arguments to cursor constructor for pooled connections. 0.3.0 (2014-06-22) ^^^^^^^^^^^^^^^^^^ * Allow executing SQLAlchemy DDL statements. * Fix bug with race conditions on acquiring/releasing connections from pool. 0.2.3 (2014-06-12) ^^^^^^^^^^^^^^^^^^ * Fix bug in connection pool. 0.2.2 (2014-06-07) ^^^^^^^^^^^^^^^^^^ * Fix bug with passing parameters into SAConnection.execute when executing raw SQL expression. 0.2.1 (2014-05-08) ^^^^^^^^^^^^^^^^^^ * Close connection with invalid transaction status on returning to pool. 0.2.0 (2014-05-04) ^^^^^^^^^^^^^^^^^^ * Implemented optional support for sqlalchemy functional sql layer. 
        0.1.0 (2014-04-06)
        ^^^^^^^^^^^^^^^^^^
        
        * Implemented plain connections: connect, Connection, Cursor.
        
        * Implemented database pools: create_pool and Pool.
        
Platform: POSIX
Classifier: License :: OSI Approved :: BSD License
Classifier: Intended Audience :: Developers
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Operating System :: POSIX
Classifier: Operating System :: MacOS :: MacOS X
Classifier: Environment :: Web Environment
Classifier: Development Status :: 4 - Beta
Classifier: Topic :: Database
Classifier: Topic :: Database :: Front-Ends
Requires: psycopg2
Provides: aiopg
aiopg-0.7.0/aiopg.egg-info/dependency_links.txt0000664000175000017500000000000112515777172022256 0ustar andrewandrew00000000000000
aiopg-0.7.0/MANIFEST.in0000664000175000017500000000015512354551123015141 0ustar andrewandrew00000000000000include LICENSE.txt
include CHANGES.txt
include README.rst
graft aiopg
global-exclude *.pyc
exclude tests/**
aiopg-0.7.0/setup.py0000664000175000017500000000365612450232027015122 0ustar andrewandrew00000000000000import os
import re
import sys
from setuptools import setup, find_packages

install_requires = ['psycopg2>=2.5.2']

PY_VER = sys.version_info

if PY_VER >= (3, 4):
    pass
elif PY_VER >= (3, 3):
    install_requires.append('asyncio')
else:
    raise RuntimeError("aiopg doesn't support Python earlier than 3.3")


def read(f):
    return open(os.path.join(os.path.dirname(__file__), f)).read().strip()

extras_require = {'sa': ['sqlalchemy>=0.9'], }


def read_version():
    regexp = re.compile(r"^__version__\W*=\W*'([\d.abrc]+)'")
    init_py = os.path.join(os.path.dirname(__file__),
                           'aiopg', '__init__.py')
    with open(init_py) as f:
        for line in f:
            match = regexp.match(line)
            if match is not None:
                return match.group(1)
        else:
            raise RuntimeError('Cannot find version in aiopg/__init__.py')


classifiers = [
    'License :: OSI Approved :: BSD License',
    'Intended Audience :: Developers',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'Operating System :: POSIX',
    'Operating System :: MacOS :: MacOS X',
    'Environment :: Web Environment',
    'Development Status :: 4 - Beta',
    'Topic :: Database',
    'Topic :: Database :: Front-Ends',
]


setup(name='aiopg',
      version=read_version(),
      description=('Postgres integration with asyncio.'),
      long_description='\n\n'.join((read('README.rst'),
                                    read('CHANGES.txt'))),
      classifiers=classifiers,
      platforms=['POSIX'],
      author='Andrew Svetlov',
      author_email='andrew.svetlov@gmail.com',
      url='http://aiopg.readthedocs.org',
      download_url='https://pypi.python.org/pypi/aiopg',
      license='BSD',
      packages=find_packages(),
      install_requires=install_requires,
      extras_require=extras_require,
      provides=['aiopg'],
      requires=['psycopg2'],
      include_package_data=True)
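# Installation sketch (illustrative comment, not executed by setuptools):
# the ``sa`` extra declared in extras_require above maps onto the standard
# pip extras syntax.
#
#     pip install aiopg        # core install, pulls in psycopg2>=2.5.2
#     pip install aiopg[sa]    # also installs sqlalchemy>=0.9 for aiopg.sa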
aiopg-0.7.0/CHANGES.txt0000664000175000017500000000456012515776704015225 0ustar andrewandrew00000000000000CHANGES
-------

0.7.0 (2015-04-22)
^^^^^^^^^^^^^^^^^^

* Get rid of resource leak on connection failure.

* Report ResourceWarning on non-closed connections.

* Deprecate iteration protocol support in cursor and ResultProxy.

* Release sa connection to pool on `connection.close()`.

0.6.0 (2015-02-03)
^^^^^^^^^^^^^^^^^^

* Accept dict, list, tuple, named and positional parameters in
  `SAConnection.execute()`

0.5.2 (2014-12-08)
^^^^^^^^^^^^^^^^^^

* Minor release, fixes a bug that leaves connection in broken state after
  `cursor.execute()` failure.

0.5.1 (2014-10-31)
^^^^^^^^^^^^^^^^^^

* Fix a bug for processing transactions in line.

0.5.0 (2014-10-31)
^^^^^^^^^^^^^^^^^^

* Add .terminate() to Pool and Engine

* Reimplement connection pool (now pool size cannot be greater than
  pool.maxsize)

* Add .close() and .wait_closed() to Pool and Engine

* Add minsize, maxsize, size and freesize properties to sa.Engine

* Support *echo* parameter for logging executed SQL commands

* Connection.close() is not a coroutine (but we keep backward compatibility).

0.4.1 (2014-10-02)
^^^^^^^^^^^^^^^^^^

* make cursor iterable

* update docs

0.4.0 (2014-10-02)
^^^^^^^^^^^^^^^^^^

* add timeouts for database operations.

* Autoregister psycopg2 support for json data type.

* Support JSON in aiopg.sa

* Support ARRAY in aiopg.sa

* Autoregister hstore support if present in connected DB

* Support HSTORE in aiopg.sa

0.3.2 (2014-07-07)
^^^^^^^^^^^^^^^^^^

* change signature to cursor.execute(operation, parameters=None) to follow
  psycopg2 convention.

0.3.1 (2014-07-04)
^^^^^^^^^^^^^^^^^^

* Forward arguments to cursor constructor for pooled connections.

0.3.0 (2014-06-22)
^^^^^^^^^^^^^^^^^^

* Allow executing SQLAlchemy DDL statements.

* Fix bug with race conditions on acquiring/releasing connections from pool.

0.2.3 (2014-06-12)
^^^^^^^^^^^^^^^^^^

* Fix bug in connection pool.

0.2.2 (2014-06-07)
^^^^^^^^^^^^^^^^^^

* Fix bug with passing parameters into SAConnection.execute when executing
  raw SQL expression.

0.2.1 (2014-05-08)
^^^^^^^^^^^^^^^^^^

* Close connection with invalid transaction status on returning to pool.

0.2.0 (2014-05-04)
^^^^^^^^^^^^^^^^^^

* Implemented optional support for sqlalchemy functional sql layer.

0.1.0 (2014-04-06)
^^^^^^^^^^^^^^^^^^

* Implemented plain connections: connect, Connection, Cursor.

* Implemented database pools: create_pool and Pool.