aioredis-0.2.4/0000775000175000017500000000000012607147634014130 5ustar alexeyalexey00000000000000aioredis-0.2.4/MANIFEST.in0000664000175000017500000000014212563036251015654 0ustar alexeyalexey00000000000000include LICENSE include CHANGES.txt include README.rst graft aioredis global-exclude *.pyc *.swp aioredis-0.2.4/setup.py0000664000175000017500000000357112607136511015640 0ustar alexeyalexey00000000000000import re import os.path import sys from setuptools import setup, find_packages install_requires = ['hiredis'] PY_VER = sys.version_info if PY_VER >= (3, 4): pass elif PY_VER >= (3, 3): install_requires.append('asyncio') else: raise RuntimeError("aioredis doesn't support Python version prior 3.3") def read(*parts): with open(os.path.join(*parts), 'rt') as f: return f.read().strip() def read_version(): regexp = re.compile(r"^__version__\W*=\W*'([\d.abrc]+)'") init_py = os.path.join(os.path.dirname(__file__), 'aioredis', '__init__.py') with open(init_py) as f: for line in f: match = regexp.match(line) if match is not None: return match.group(1) else: raise RuntimeError('Cannot find version in aioredis/__init__.py') classifiers = [ 'License :: OSI Approved :: MIT License', 'Development Status :: 4 - Beta', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Operating System :: POSIX', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', ] setup(name='aioredis', version=read_version(), description=("asyncio (PEP 3156) Redis support"), long_description="\n\n".join((read('README.rst'), read('CHANGES.txt'))), classifiers=classifiers, platforms=["POSIX"], author="Alexey Popravka", author_email="alexey.popravka@horsedevel.com", url="https://github.com/aio-libs/aioredis", license="MIT", packages=find_packages(exclude=["tests"]), 
install_requires=install_requires, include_package_data=True, ) aioredis-0.2.4/LICENSE0000664000175000017500000000207112563036251015126 0ustar alexeyalexey00000000000000The MIT License (MIT) Copyright (c) 2014 Alexey Popravka Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.aioredis-0.2.4/aioredis/0000775000175000017500000000000012607147634015727 5ustar alexeyalexey00000000000000aioredis-0.2.4/aioredis/connection.py0000664000175000017500000003001112577726331020436 0ustar alexeyalexey00000000000000import types import asyncio import hiredis from functools import partial from collections import deque from .util import ( encode_command, wait_ok, _NOTSET, coerced_keys_dict, Channel, decode, ) from .errors import RedisError, ProtocolError, ReplyError from .log import logger __all__ = ['create_connection', 'RedisConnection'] MAX_CHUNK_SIZE = 65536 _PUBSUB_COMMANDS = ( 'SUBSCRIBE', b'SUBSCRIBE', 'PSUBSCRIBE', b'PSUBSCRIBE', 'UNSUBSCRIBE', b'UNSUBSCRIBE', 'PUNSUBSCRIBE', b'PUNSUBSCRIBE', ) @asyncio.coroutine def 
create_connection(address, *, db=None, password=None, encoding=None, loop=None): """Creates redis connection. Opens connection to Redis server specified by address argument. Address argument is similar to socket address argument, ie: * when address is a tuple it represents (host, port) pair; * when address is a str it represents unix domain socket path. (no other address formats supported) Encoding argument can be used to decode byte-replies to strings. By default no decoding is done. Return value is RedisConnection instance. This function is a coroutine. """ assert isinstance(address, (tuple, list, str)), "tuple or str expected" if isinstance(address, (list, tuple)): host, port = address reader, writer = yield from asyncio.open_connection( host, port, loop=loop) else: reader, writer = yield from asyncio.open_unix_connection( address, loop=loop) conn = RedisConnection(reader, writer, encoding=encoding, loop=loop) try: if password is not None: yield from conn.auth(password) if db is not None: yield from conn.select(db) except Exception: conn.close() yield from conn.wait_closed() raise return conn class RedisConnection: """Redis connection.""" def __init__(self, reader, writer, *, encoding=None, loop=None): if loop is None: loop = asyncio.get_event_loop() self._reader = reader self._writer = writer self._loop = loop self._waiters = deque() self._parser = hiredis.Reader(protocolError=ProtocolError, replyError=ReplyError) self._reader_task = asyncio.Task(self._read_data(), loop=self._loop) self._db = 0 self._closing = False self._closed = False self._close_waiter = asyncio.Future(loop=self._loop) self._reader_task.add_done_callback(self._close_waiter.set_result) self._in_transaction = False self._transaction_error = None self._in_pubsub = 0 self._pubsub_channels = coerced_keys_dict() self._pubsub_patterns = coerced_keys_dict() self._encoding = encoding def __repr__(self): return ''.format(self._db) @asyncio.coroutine def _read_data(self): """Response reader task.""" 
while not self._reader.at_eof(): try: data = yield from self._reader.read(MAX_CHUNK_SIZE) except asyncio.CancelledError: break except Exception as exc: # XXX: for QUIT command connection error can be received # before response logger.error("Exception on data read %r", exc, exc_info=True) break self._parser.feed(data) while True: try: obj = self._parser.gets() except ProtocolError as exc: # ProtocolError is fatal # so connection must be closed self._closing = True self._loop.call_soon(self._do_close, exc) if self._in_transaction: self._transaction_error = exc return else: if obj is False: break if self._in_pubsub: self._process_pubsub(obj) else: self._process_data(obj) self._closing = True self._loop.call_soon(self._do_close, None) def _process_data(self, obj): """Processes command results.""" waiter, encoding, cb = self._waiters.popleft() if waiter.done(): logger.debug("Waiter future is already done %r", waiter) assert waiter.cancelled(), ( "waiting future is in wrong state", waiter, obj) return if isinstance(obj, RedisError): waiter.set_exception(obj) if self._in_transaction: self._transaction_error = obj else: if encoding is not None: try: obj = decode(obj, encoding) except Exception as exc: waiter.set_exception(exc) return waiter.set_result(obj) if cb is not None: cb(obj) def _process_pubsub(self, obj, *, _process_waiters=True): """Processes pubsub messages.""" kind, *pattern, chan, data = obj if _process_waiters and self._in_pubsub and self._waiters: self._process_data(obj) if kind in (b'subscribe', b'unsubscribe'): if kind == b'subscribe' and chan not in self._pubsub_channels: self._pubsub_channels[chan] = Channel(chan, is_pattern=False, loop=self._loop) elif kind == b'unsubscribe': ch = self._pubsub_channels.pop(chan, None) if ch: ch.close() self._in_pubsub = data elif kind in (b'psubscribe', b'punsubscribe'): if kind == b'psubscribe' and chan not in self._pubsub_patterns: self._pubsub_patterns[chan] = Channel(chan, is_pattern=True, loop=self._loop) elif kind 
== b'punsubscribe': ch = self._pubsub_patterns.pop(chan, None) if ch: ch.close() self._in_pubsub = data elif kind == b'message': self._pubsub_channels[chan].put_nowait(data) elif kind == b'pmessage': pattern = pattern[0] self._pubsub_patterns[pattern].put_nowait((chan, data)) else: logger.warning("Unknown pubsub message received %r", obj) def execute(self, command, *args, encoding=_NOTSET): """Executes redis command and returns Future waiting for the answer. Raises: * TypeError if any of args can not be encoded as bytes. * ReplyError on redis '-ERR' resonses. * ProtocolError when response can not be decoded meaning connection is broken. """ assert self._reader and not self._reader.at_eof(), ( "Connection closed or corrupted") if command is None: raise TypeError("command must not be None") if None in set(args): raise TypeError("args must not contain None") command = command.upper().strip() is_pubsub = command in _PUBSUB_COMMANDS if self._in_pubsub and not is_pubsub: raise RedisError("Connection in SUBSCRIBE mode") elif is_pubsub: logger.warning("Deprecated. Use `execute_pubsub` method directly") return self.execute_pubsub(command, *args) if command in ('SELECT', b'SELECT'): cb = partial(self._set_db, args=args) elif command in ('MULTI', b'MULTI'): cb = self._start_transaction elif command in ('EXEC', b'EXEC', 'DISCARD', b'DISCARD'): cb = self._end_transaction else: cb = None if encoding is _NOTSET: encoding = self._encoding fut = asyncio.Future(loop=self._loop) self._writer.write(encode_command(command, *args)) self._waiters.append((fut, encoding, cb)) return fut def execute_pubsub(self, command, *channels): """Executes redis (p)subscribe/(p)unsubscribe commands. Returns asyncio.gather coroutine waiting for all channels/patterns to receive answers. 
""" command = command.upper().strip() assert command in _PUBSUB_COMMANDS, ( "Pub/Sub command expected", command) if None in set(channels): raise TypeError("args must not contain None") if not len(channels): raise ValueError("No channels/patterns supplied") cmd = encode_command(command, *channels) res = [] for ch in channels: fut = asyncio.Future(loop=self._loop) res.append(fut) self._waiters.append((fut, None, self._update_pubsub)) self._writer.write(cmd) return asyncio.gather(*res, loop=self._loop) def close(self): """Close connection.""" self._do_close(None) def _do_close(self, exc): if self._closed: return self._closed = True self._closing = False self._writer.transport.close() self._reader_task.cancel() self._reader_task = None self._writer = None self._reader = None while self._waiters: waiter, *spam = self._waiters.popleft() logger.debug("Cancelling waiter %r", (waiter, spam)) if exc is None: waiter.cancel() else: waiter.set_exception(exc) # TODO: close all subscribed channels @property def closed(self): """True if connection is closed.""" closed = self._closing or self._closed if not closed and self._reader and self._reader.at_eof(): self._closing = closed = True self._loop.call_soon(self._do_close, None) return closed @asyncio.coroutine def wait_closed(self): yield from self._close_waiter @property def db(self): """Currently selected db index.""" return self._db @property def encoding(self): """Current set codec or None.""" return self._encoding def select(self, db): """Change the selected database for the current connection.""" if not isinstance(db, int): raise TypeError("DB must be of int type, not {!r}".format(db)) if db < 0: raise ValueError("DB must be greater or equal 0, got {!r}" .format(db)) fut = self.execute('SELECT', db) return wait_ok(fut) def _set_db(self, ok, args): assert ok in {b'OK', 'OK'}, ok self._db = args[0] def _start_transaction(self, ok): assert not self._in_transaction self._in_transaction = True self._transaction_error = None def 
_end_transaction(self, ok): assert self._in_transaction self._in_transaction = False self._transaction_error = None def _update_pubsub(self, obj): *head, subscriptions = obj self._in_pubsub, was_in_pubsub = subscriptions, self._in_pubsub if not was_in_pubsub: self._process_pubsub(obj, _process_waiters=False) @property def in_transaction(self): """Set to True when MULTI command was issued.""" return self._in_transaction @property def in_pubsub(self): """Indicates that connection is in PUB/SUB mode. Provides the number of subscribed channels. """ return self._in_pubsub @property def pubsub_channels(self): """Returns read-only channels dict.""" return types.MappingProxyType(self._pubsub_channels) @property def pubsub_patterns(self): """Returns read-only patterns dict.""" return types.MappingProxyType(self._pubsub_patterns) def auth(self, password): """Authenticate to server.""" fut = self.execute('AUTH', password) return wait_ok(fut) @asyncio.coroutine def get_atomic_connection(self): return self aioredis-0.2.4/aioredis/errors.py0000664000175000017500000000151512577726331017622 0ustar alexeyalexey00000000000000__all__ = [ 'RedisError', 'ProtocolError', 'ReplyError', 'PipelineError', 'MultiExecError', ] class RedisError(Exception): """Base exception class for aioredis exceptions.""" class ProtocolError(RedisError): """Raised when protocol error occurs.""" class ReplyError(RedisError): """Raised for redis error replies (-ERR).""" class PipelineError(ReplyError): """Raised if command within pipeline raised error.""" def __init__(self, errors): super().__init__('{} errors:'.format(self.__class__.__name__), errors) class MultiExecError(PipelineError): """Raised if command within MULTI/EXEC block caused error.""" class ChannelClosedError(RedisError): """Raised when Pub/Sub channel is unsubscribed and messages queue is empty. """ # TODO: add ConnectionClosed exception. 
aioredis-0.2.4/aioredis/commands/0000775000175000017500000000000012607147634017530 5ustar alexeyalexey00000000000000aioredis-0.2.4/aioredis/commands/string.py0000664000175000017500000002143512604743503021407 0ustar alexeyalexey00000000000000from aioredis.util import wait_convert, wait_ok, _NOTSET class StringCommandsMixin: """String commands mixin. For commands details see: http://redis.io/commands/#string """ SET_IF_NOT_EXIST = 'SET_IF_NOT_EXIST' # NX SET_IF_EXIST = 'SET_IF_EXIST' # XX def append(self, key, value): """Append a value to key.""" return self._conn.execute(b'APPEND', key, value) def bitcount(self, key, start=None, end=None): """Count set bits in a string. :raises TypeError: if only start or end specified. """ if start is None and end is not None: raise TypeError("both start and stop must be specified") elif start is not None and end is None: raise TypeError("both start and stop must be specified") elif start is not None and end is not None: args = (start, end) else: args = () return self._conn.execute(b'BITCOUNT', key, *args) def bitop_and(self, dest, key, *keys): """Perform bitwise AND operations between strings.""" return self._conn.execute(b'BITOP', b'AND', dest, key, *keys) def bitop_or(self, dest, key, *keys): """Perform bitwise OR operations between strings.""" return self._conn.execute(b'BITOP', b'OR', dest, key, *keys) def bitop_xor(self, dest, key, *keys): """Perform bitwise XOR operations between strings.""" return self._conn.execute(b'BITOP', b'XOR', dest, key, *keys) def bitop_not(self, dest, key): """Perform bitwise NOT operations between strings.""" return self._conn.execute(b'BITOP', b'NOT', dest, key) def bitpos(self, key, bit, start=None, end=None): """Find first bit set or clear in a string. 
:raises ValueError: if bit is not 0 or 1 """ if bit not in (1, 0): raise ValueError("bit argument must be either 1 or 0") bytes_range = [] if start is not None: bytes_range.append(start) if end is not None: if start is None: bytes_range = [0, end] else: bytes_range.append(end) return self._conn.execute(b'BITPOS', key, bit, *bytes_range) def decr(self, key): """Decrement the integer value of a key by one.""" return self._conn.execute(b'DECR', key) def decrby(self, key, decrement): """Decrement the integer value of a key by the given number. :raises TypeError: if decrement is not int """ if not isinstance(decrement, int): raise TypeError("decrement must be of type int") return self._conn.execute(b'DECRBY', key, decrement) def get(self, key, *, encoding=_NOTSET): """Get the value of a key.""" return self._conn.execute(b'GET', key, encoding=encoding) def getbit(self, key, offset): """Returns the bit value at offset in the string value stored at key. :raises TypeError: if offset is not int :raises ValueError: if offset is less then 0 """ if not isinstance(offset, int): raise TypeError("offset argument must be int") if offset < 0: raise ValueError("offset must be greater equal 0") return self._conn.execute(b'GETBIT', key, offset) def getrange(self, key, start, end, *, encoding=_NOTSET): """Get a substring of the string stored at a key. 
:raises TypeError: if start or end is not int """ if not isinstance(start, int): raise TypeError("start argument must be int") if not isinstance(end, int): raise TypeError("end argument must be int") return self._conn.execute(b'GETRANGE', key, start, end, encoding=encoding) def getset(self, key, value, *, encoding=_NOTSET): """Set the string value of a key and return its old value.""" return self._conn.execute(b'GETSET', key, value, encoding=encoding) def incr(self, key): """Increment the integer value of a key by one.""" return self._conn.execute(b'INCR', key) def incrby(self, key, increment): """Increment the integer value of a key by the given amount. :raises TypeError: if increment is not int """ if not isinstance(increment, int): raise TypeError("increment must be of type int") return self._conn.execute(b'INCRBY', key, increment) def incrbyfloat(self, key, increment): """Increment the float value of a key by the given amount. :raises TypeError: if increment is not int """ if not isinstance(increment, float): raise TypeError("increment must be of type int") fut = self._conn.execute(b'INCRBYFLOAT', key, increment) return wait_convert(fut, float) def mget(self, key, *keys, encoding=_NOTSET): """Get the values of all the given keys.""" return self._conn.execute(b'MGET', key, *keys, encoding=encoding) def mset(self, key, value, *pairs): """Set multiple keys to multiple values. :raises TypeError: if len of pairs is not event number """ if len(pairs) % 2 != 0: raise TypeError("length of pairs must be even number") fut = self._conn.execute(b'MSET', key, value, *pairs) return wait_ok(fut) def msetnx(self, key, value, *pairs): """Set multiple keys to multiple values, only if none of the keys exist. 
:raises TypeError: if len of pairs is not event number """ if len(pairs) % 2 != 0: raise TypeError("length of pairs must be even number") return self._conn.execute(b'MSETNX', key, value, *pairs) def psetex(self, key, milliseconds, value): """Set the value and expiration in milliseconds of a key. :raises TypeError: if milliseconds is not int """ if not isinstance(milliseconds, int): raise TypeError("milliseconds argument must be int") fut = self._conn.execute(b'PSETEX', key, milliseconds, value) return wait_ok(fut) def set(self, key, value, *, expire=0, pexpire=0, exist=None): """Set the string value of a key. :raises TypeError: if expire or pexpire is not int """ if expire and not isinstance(expire, int): raise TypeError("expire argument must be int") if pexpire and not isinstance(pexpire, int): raise TypeError("pexpire argument must be int") args = [] if expire: args[:] = [b'EX', expire] if pexpire: args[:] = [b'PX', pexpire] if exist is self.SET_IF_EXIST: args.append(b'XX') elif exist is self.SET_IF_NOT_EXIST: args.append(b'NX') fut = self._conn.execute(b'SET', key, value, *args) return wait_ok(fut) def setbit(self, key, offset, value): """Sets or clears the bit at offset in the string value stored at key. :raises TypeError: if offset is not int :raises ValueError: if offset is less then 0 or value is not 0 or 1 """ if not isinstance(offset, int): raise TypeError("offset argument must be int") if offset < 0: raise ValueError("offset must be greater equal 0") if value not in (0, 1): raise ValueError("value argument must be either 1 or 0") return self._conn.execute(b'SETBIT', key, offset, value) def setex(self, key, seconds, value): """Set the value and expiration of a key. If seconds is float it will be multiplied by 1000 coerced to int and passed to `psetex` method. 
:raises TypeError: if seconds is neither int nor float """ if isinstance(seconds, float): return self.psetex(key, int(seconds * 1000), value) if not isinstance(seconds, int): raise TypeError("milliseconds argument must be int") fut = self._conn.execute(b'SETEX', key, seconds, value) return wait_ok(fut) def setnx(self, key, value): """Set the value of a key, only if the key does not exist.""" fut = self._conn.execute(b'SETNX', key, value) return wait_convert(fut, bool) def setrange(self, key, offset, value): """Overwrite part of a string at key starting at the specified offset. :raises TypeError: if offset is not int :raises ValueError: if offset less then 0 """ if not isinstance(offset, int): raise TypeError("offset argument must be int") if offset < 0: raise ValueError("offset must be greater equal 0") return self._conn.execute(b'SETRANGE', key, offset, value) def strlen(self, key): """Get the length of the value stored in a key.""" return self._conn.execute(b'STRLEN', key) aioredis-0.2.4/aioredis/commands/generic.py0000664000175000017500000002213512607134125021510 0ustar alexeyalexey00000000000000from aioredis.util import wait_convert, wait_ok, _NOTSET, PY_35 if PY_35: from aioredis.util import _ScanIter class GenericCommandsMixin: """Generic commands mixin. For commands details see: http://redis.io/commands/#generic """ def delete(self, key, *keys): """Delete a key.""" fut = self._conn.execute(b'DEL', key, *keys) return wait_convert(fut, int) def dump(self, key): """Dump a key.""" return self._conn.execute(b'DUMP', key) def exists(self, key): """Check if key exists.""" fut = self._conn.execute(b'EXISTS', key) return wait_convert(fut, bool) def expire(self, key, timeout): """Set a timeout on key. if timeout is float it will be multiplyed by 1000 coerced to int and passed to `pexpire` method. Otherwise raises TypeError if timeout argument is not int. 
""" if isinstance(timeout, float): return self.pexpire(key, int(timeout * 1000)) if not isinstance(timeout, int): raise TypeError("timeout argument must be int, not {!r}" .format(timeout)) fut = self._conn.execute(b'EXPIRE', key, timeout) return wait_convert(fut, bool) def expireat(self, key, timestamp): """Set expire timestamp on key. if timeout is float it will be multiplyed by 1000 coerced to int and passed to `pexpire` method. Otherwise raises TypeError if timestamp argument is not int. """ if isinstance(timestamp, float): return self.pexpireat(key, int(timestamp * 1000)) if not isinstance(timestamp, int): raise TypeError("timestamp argument must be int, not {!r}" .format(timestamp)) fut = self._conn.execute(b'EXPIREAT', key, timestamp) return wait_convert(fut, bool) def keys(self, pattern, *, encoding=_NOTSET): """Returns all keys matching pattern.""" return self._conn.execute(b'KEYS', pattern, encoding=encoding) def migrate(self, host, port, key, dest_db, timeout, copy=False, replace=False): """Atomically transfer a key from a Redis instance to another one.""" if not isinstance(host, str): raise TypeError("host argument must be str") if not isinstance(timeout, int): raise TypeError("timeout argument must be int") if not isinstance(dest_db, int): raise TypeError("dest_db argument must be int") if not host: raise ValueError("Got empty host") if dest_db < 0: raise ValueError("dest_db must be greater equal 0") if timeout < 0: raise ValueError("timeout must be greater equal 0") flags = [] if copy: flags.append(b'COPY') if replace: flags.append(b'REPLACE') fut = self._conn.execute(b'MIGRATE', host, port, key, dest_db, timeout, *flags) return wait_ok(fut) def move(self, key, db): """Move key from currently selected database to specified destination. 
:raises TypeError: if db is not int :raises ValueError: if db is less then 0 """ if not isinstance(db, int): raise TypeError("db argument must be int, not {!r}".format(db)) if db < 0: raise ValueError("db argument must be not less then 0, {!r}" .format(db)) fut = self._conn.execute(b'MOVE', key, db) return wait_convert(fut, bool) def object_refcount(self, key): """Returns the number of references of the value associated with the specified key (OBJECT REFCOUNT). """ return self._conn.execute(b'OBJECT', b'REFCOUNT', key) def object_encoding(self, key): """Returns the kind of internal representation used in order to store the value associated with a key (OBJECT ENCODING). """ return self._conn.execute(b'OBJECT', b'ENCODING', key) def object_idletime(self, key): """Returns the number of seconds since the object is not requested by read or write operations (OBJECT IDLETIME). """ return self._conn.execute(b'OBJECT', b'IDLETIME', key) def persist(self, key): """Remove the existing timeout on key.""" fut = self._conn.execute(b'PERSIST', key) return wait_convert(fut, bool) def pexpire(self, key, timeout): """Set a milliseconds timeout on key. :raises TypeError: if timeout is not int """ if not isinstance(timeout, int): raise TypeError("timeout argument must be int, not {!r}" .format(timeout)) fut = self._conn.execute(b'PEXPIRE', key, timeout) return wait_convert(fut, bool) def pexpireat(self, key, timestamp): """Set expire timestamp on key, timestamp in milliseconds. :raises TypeError: if timeout is not int """ if not isinstance(timestamp, int): raise TypeError("timestamp argument must be int, not {!r}" .format(timestamp)) fut = self._conn.execute(b'PEXPIREAT', key, timestamp) return wait_convert(fut, bool) def pttl(self, key): """Returns time-to-live for a key, in milliseconds. Special return values (starting with Redis 2.8): * command returns -2 if the key does not exist. * command returns -1 if the key exists but has no associated expire. 
""" # TODO: maybe convert negative values to: # -2 to None - no key # -1 to False - no expire return self._conn.execute(b'PTTL', key) def randomkey(self, *, encoding=_NOTSET): """Return a random key from the currently selected database.""" return self._conn.execute(b'RANDOMKEY', encoding=encoding) def rename(self, key, newkey): """Renames key to newkey. :raises ValueError: if key == newkey """ if key == newkey: raise ValueError("key and newkey are the same") fut = self._conn.execute(b'RENAME', key, newkey) return wait_ok(fut) def renamenx(self, key, newkey): """Renames key to newkey only if newkey does not exist. :raises ValueError: if key == newkey """ if key == newkey: raise ValueError("key and newkey are the same") fut = self._conn.execute(b'RENAMENX', key, newkey) return wait_convert(fut, bool) def restore(self, key, ttl, value): """Creates a key associated with a value that is obtained via DUMP.""" return self._conn.execute(b'RESTORE', key, ttl, value) def scan(self, cursor=0, match=None, count=None): """Incrementally iterate the keys space. Usage example: >>> match = 'something*' >>> cur = b'0' >>> while cur: ... cur, keys = yield from redis.scan(cur, match=match) ... for key in keys: ... print('Matched:', key) """ args = [] if match is not None: args += [b'MATCH', match] if count is not None: args += [b'COUNT', count] fut = self._conn.execute(b'SCAN', cursor, *args) return wait_convert(fut, lambda o: (int(o[0]), o[1])) if PY_35: def iscan(self, *, match=None, count=None): """Incrementally iterate the keys space using async for. Usage example: >>> async for key in redis.iscan(match='something*'): ... 
print('Matched:', key) """ return _ScanIter(lambda cur: self.scan(cur, match=match, count=count)) def sort(self, key, *get_patterns, by=None, offset=None, count=None, asc=None, alpha=False, store=None): """Sort the elements in a list, set or sorted set.""" args = [] if by is not None: args += [b'BY', by] if offset is not None and count is not None: args += [b'LIMIT', offset, count] if get_patterns: args += sum(([b'GET', pattern] for pattern in get_patterns), []) if asc is not None: args += [asc is True and b'ASC' or b'DESC'] if alpha: args += [b'ALPHA'] if store is not None: args += [b'STORE', store] return self._conn.execute(b'SORT', key, *args) def ttl(self, key): """Returns time-to-live for a key, in seconds. Special return values (starting with Redis 2.8): * command returns -2 if the key does not exist. * command returns -1 if the key exists but has no associated expire. """ # TODO: maybe convert negative values to: # -2 to None - no key # -1 to False - no expire return self._conn.execute(b'TTL', key) def type(self, key): """Returns the string representation of the value's type stored at key. """ # NOTE: for non-existent keys TYPE returns b'none' return self._conn.execute(b'TYPE', key) aioredis-0.2.4/aioredis/commands/hyperloglog.py0000664000175000017500000000144012604743503022426 0ustar alexeyalexey00000000000000from aioredis.util import wait_ok class HyperLogLogCommandsMixin: """HyperLogLog commands mixin. For commands details see: http://redis.io/commands#hyperloglog """ def pfadd(self, key, value, *values): """Adds the specified elements to the specified HyperLogLog.""" return self._conn.execute(b'PFADD', key, value, *values) def pfcount(self, key, *keys): """Return the approximated cardinality of the set(s) observed by the HyperLogLog at key(s). 
""" return self._conn.execute(b'PFCOUNT', key, *keys) def pfmerge(self, destkey, sourcekey, *sourcekeys): """Merge N different HyperLogLogs into a single one.""" fut = self._conn.execute(b'PFMERGE', destkey, sourcekey, *sourcekeys) return wait_ok(fut) aioredis-0.2.4/aioredis/commands/pubsub.py0000664000175000017500000000601312604743503021374 0ustar alexeyalexey00000000000000import asyncio import json from aioredis.util import wait_make_dict class PubSubCommandsMixin: """Pub/Sub commands mixin. For commands details see: http://redis.io/commands/#pubsub """ def publish(self, channel, message): """Post a message to channel.""" return self._conn.execute(b'PUBLISH', channel, message) def publish_json(self, channel, obj): """Post a JSON-encoded message to channel.""" return self.publish(channel, json.dumps(obj)) def subscribe(self, channel, *channels): """Switch connection to Pub/Sub mode and subscribe to specified channels. Returns :func:`asyncio.gather()` coroutine which when done will return a list of subscribed channels. """ conn = self._conn return wait_return_channels( conn.execute_pubsub(b'SUBSCRIBE', channel, *channels), conn.pubsub_channels) def unsubscribe(self, channel, *channels): """Unsubscribe from specific channels.""" return self._conn.execute_pubsub(b'UNSUBSCRIBE', channel, *channels) def psubscribe(self, pattern, *patterns): """Switch connection to Pub/Sub mode and subscribe to specified patterns. Returns :func:`asyncio.gather()` coroutine which when done will return a list of subscribed patterns. 
""" conn = self._conn return wait_return_channels( conn.execute_pubsub(b'PSUBSCRIBE', pattern, *patterns), conn.pubsub_patterns) def punsubscribe(self, pattern, *patterns): """Unsubscribe from specific patterns.""" return self._conn.execute_pubsub(b'PUNSUBSCRIBE', pattern, *patterns) def pubsub_channels(self, pattern=None): """Lists the currently active channels.""" args = [b'PUBSUB', b'CHANNELS'] if pattern is not None: args.append(pattern) return self._conn.execute(*args) def pubsub_numsub(self, *channels): """Returns the number of subscribers for the specified channels.""" return wait_make_dict(self._conn.execute( b'PUBSUB', b'NUMSUB', *channels)) def pubsub_numpat(self): """Returns the number of subscriptions to patterns.""" return self._conn.execute(b'PUBSUB', b'NUMPAT') @property def channels(self): """Returns read-only channels dict. See :attr:`~aioredis.RedisConnection.pubsub_channels` """ return self._conn.pubsub_channels @property def patterns(self): """Returns read-only patterns dict. See :attr:`~aioredis.RedisConnection.pubsub_patterns` """ return self._conn.pubsub_patterns @property def in_pubsub(self): """Indicates that connection is in PUB/SUB mode. Provides the number of subscribed channels. """ return self._conn.in_pubsub @asyncio.coroutine def wait_return_channels(fut, channels_dict): res = yield from fut return [channels_dict[name] for cmd, name, count in res] aioredis-0.2.4/aioredis/commands/hash.py0000664000175000017500000000736512607134125021027 0ustar alexeyalexey00000000000000from aioredis.util import wait_convert, wait_make_dict, _NOTSET, PY_35 if PY_35: from aioredis.util import _ScanIterPairs class HashCommandsMixin: """Hash commands mixin. 
For commands details see: http://redis.io/commands#hash """ def hdel(self, key, field, *fields): """Delete one or more hash fields.""" return self._conn.execute(b'HDEL', key, field, *fields) def hexists(self, key, field): """Determine if hash field exists.""" fut = self._conn.execute(b'HEXISTS', key, field) return wait_convert(fut, bool) def hget(self, key, field, *, encoding=_NOTSET): """Get the value of a hash field.""" return self._conn.execute(b'HGET', key, field, encoding=encoding) def hgetall(self, key, *, encoding=_NOTSET): """Get all the fields and values in a hash.""" fut = self._conn.execute(b'HGETALL', key, encoding=encoding) return wait_make_dict(fut) def hincrby(self, key, field, increment=1): """Increment the integer value of a hash field by the given number.""" return self._conn.execute(b'HINCRBY', key, field, increment) def hincrbyfloat(self, key, field, increment=1.0): """Increment the float value of a hash field by the given number.""" fut = self._conn.execute(b'HINCRBYFLOAT', key, field, increment) return wait_convert(fut, float) def hkeys(self, key, *, encoding=_NOTSET): """Get all the fields in a hash.""" return self._conn.execute(b'HKEYS', key, encoding=encoding) def hlen(self, key): """Get the number of fields in a hash.""" return self._conn.execute(b'HLEN', key) def hmget(self, key, field, *fields, encoding=_NOTSET): """Get the values of all the given fields.""" return self._conn.execute(b'HMGET', key, field, *fields, encoding=encoding) # TODO: replace args with dict_or_pairs def hmset(self, key, field, value, *pairs): """Set multiple hash fields to multiple values.""" if len(pairs) % 2 != 0: raise TypeError("length of pairs must be even number") return self._conn.execute(b'HMSET', key, field, value, *pairs) def hset(self, key, field, value): """Set the string value of a hash field.""" return self._conn.execute(b'HSET', key, field, value) def hsetnx(self, key, field, value): """Set the value of a hash field, only if the field does not 
exist.""" return self._conn.execute(b'HSETNX', key, field, value) def hvals(self, key, *, encoding=_NOTSET): """Get all the values in a hash.""" return self._conn.execute(b'HVALS', key, encoding=encoding) def hscan(self, key, cursor=0, match=None, count=None): """Incrementally iterate hash fields and associated values.""" args = [key, cursor] match is not None and args.extend([b'MATCH', match]) count is not None and args.extend([b'COUNT', count]) fut = self._conn.execute(b'HSCAN', *args) return wait_convert(fut, lambda obj: (int(obj[0]), obj[1])) if PY_35: def ihscan(self, key, *, match=None, count=None): """Incrementally iterate sorted set items using async for. Usage example: >>> async for name, val in redis.ihscan(key, match='something*'): ... print('Matched:', name, '->', val) """ return _ScanIterPairs(lambda cur: self.hscan(key, cur, match=match, count=count)) def hstrlen(self, key, field): """Get the length of the value of a hash field.""" return self._conn.execute(b'HSTRLEN', key, field) aioredis-0.2.4/aioredis/commands/set.py0000664000175000017500000000661712607134125020676 0ustar alexeyalexey00000000000000from aioredis.util import wait_convert, _NOTSET, PY_35 if PY_35: from aioredis.util import _ScanIter class SetCommandsMixin: """Set commands mixin. 
For commands details see: http://redis.io/commands#set """ def sadd(self, key, member, *members): """Add one or more members to a set.""" return self._conn.execute(b'SADD', key, member, *members) def scard(self, key): """Get the number of members in a set.""" return self._conn.execute(b'SCARD', key) def sdiff(self, key, *keys): """Subtract multiple sets.""" return self._conn.execute(b'SDIFF', key, *keys) def sdiffstore(self, destkey, key, *keys): """Subtract multiple sets and store the resulting set in a key.""" return self._conn.execute(b'SDIFFSTORE', destkey, key, *keys) def sinter(self, key, *keys): """Intersect multiple sets.""" return self._conn.execute(b'SINTER', key, *keys) def sinterstore(self, destkey, key, *keys): """Intersect multiple sets and store the resulting set in a key.""" return self._conn.execute(b'SINTERSTORE', destkey, key, *keys) def sismember(self, key, member): """Determine if a given value is a member of a set.""" return self._conn.execute(b'SISMEMBER', key, member) def smembers(self, key, *, encoding=_NOTSET): """Get all the members in a set.""" return self._conn.execute(b'SMEMBERS', key, encoding=encoding) def smove(self, sourcekey, destkey, member): """Move a member from one set to another.""" return self._conn.execute(b'SMOVE', sourcekey, destkey, member) def spop(self, key, *, encoding=_NOTSET): """Remove and return a random member from a set.""" return self._conn.execute(b'SPOP', key, encoding=encoding) def srandmember(self, key, count=None, *, encoding=_NOTSET): """Get one or multiple random members from a set.""" args = [key] count is not None and args.append(count) return self._conn.execute(b'SRANDMEMBER', *args, encoding=encoding) def srem(self, key, member, *members): """Remove one or more members from a set.""" return self._conn.execute(b'SREM', key, member, *members) def sunion(self, key, *keys): """Add multiple sets.""" return self._conn.execute(b'SUNION', key, *keys) def sunionstore(self, destkey, key, *keys): """Add 
multiple sets and store the resulting set in a key.""" return self._conn.execute(b'SUNIONSTORE', destkey, key, *keys) def sscan(self, key, cursor=0, match=None, count=None): """Incrementally iterate Set elements.""" tokens = [key, cursor] match is not None and tokens.extend([b'MATCH', match]) count is not None and tokens.extend([b'COUNT', count]) fut = self._conn.execute(b'SSCAN', *tokens) return wait_convert(fut, lambda obj: (int(obj[0]), obj[1])) if PY_35: def isscan(self, key, *, match=None, count=None): """Incrementally iterate set elements using async for. Usage example: >>> async for val in redis.isscan(key, match='something*'): ... print('Matched:', val) """ return _ScanIter(lambda cur: self.sscan(key, cur, match=match, count=count)) aioredis-0.2.4/aioredis/commands/list.py0000664000175000017500000001361712604743503021057 0ustar alexeyalexey00000000000000from aioredis.util import _NOTSET, wait_ok class ListCommandsMixin: """List commands mixin. For commands details see: http://redis.io/commands#list """ def blpop(self, key, *keys, timeout=0, encoding=_NOTSET): """Remove and get the first element in a list, or block until one is available. :raises TypeError: if timeout is not int :raises ValueError: if timeout is less then 0 """ if not isinstance(timeout, int): raise TypeError("timeout argument must be int") if timeout < 0: raise ValueError("timeout must be greater equal 0") args = keys + (timeout,) return self._conn.execute(b'BLPOP', key, *args, encoding=encoding) def brpop(self, key, *keys, timeout=0, encoding=_NOTSET): """Remove and get the last element in a list, or block until one is available. 
:raises TypeError: if timeout is not int :raises ValueError: if timeout is less then 0 """ if not isinstance(timeout, int): raise TypeError("timeout argument must be int") if timeout < 0: raise ValueError("timeout must be greater equal 0") args = keys + (timeout,) return self._conn.execute(b'BRPOP', key, *args, encoding=encoding) def brpoplpush(self, sourcekey, destkey, timeout=0, encoding=_NOTSET): """Remove and get the last element in a list, or block until one is available. :raises TypeError: if timeout is not int :raises ValueError: if timeout is less then 0 """ if not isinstance(timeout, int): raise TypeError("timeout argument must be int") if timeout < 0: raise ValueError("timeout must be greater equal 0") return self._conn.execute(b'BRPOPLPUSH', sourcekey, destkey, timeout, encoding=encoding) def lindex(self, key, index, *, encoding=_NOTSET): """Get an element from a list by its index. :raises TypeError: if index is not int """ if not isinstance(index, int): raise TypeError("index argument must be int") return self._conn.execute(b'LINDEX', key, index, encoding=encoding) def linsert(self, key, pivot, value, before=False): """Inserts value in the list stored at key either before or after the reference value pivot. """ where = b'AFTER' if not before else b'BEFORE' return self._conn.execute(b'LINSERT', key, where, pivot, value) def llen(self, key): """Returns the length of the list stored at key.""" return self._conn.execute(b'LLEN', key) def lpop(self, key, *, encoding=_NOTSET): """Removes and returns the first element of the list stored at key.""" return self._conn.execute(b'LPOP', key, encoding=encoding) def lpush(self, key, value, *values): """Insert all the specified values at the head of the list stored at key. """ return self._conn.execute(b'LPUSH', key, value, *values) def lpushx(self, key, value): """Inserts value at the head of the list stored at key, only if key already exists and holds a list. 
""" return self._conn.execute(b'LPUSHX', key, value) def lrange(self, key, start, stop, *, encoding=_NOTSET): """Returns the specified elements of the list stored at key. :raises TypeError: if start or stop is not int """ if not isinstance(start, int): raise TypeError("start argument must be int") if not isinstance(stop, int): raise TypeError("stop argument must be int") return self._conn.execute(b'LRANGE', key, start, stop, encoding=encoding) def lrem(self, key, count, value): """Removes the first count occurrences of elements equal to value from the list stored at key. :raises TypeError: if count is not int """ if not isinstance(count, int): raise TypeError("count argument must be int") return self._conn.execute(b'LREM', key, count, value) def lset(self, key, index, value): """Sets the list element at index to value. :raises TypeError: if index is not int """ if not isinstance(index, int): raise TypeError("index argument must be int") return self._conn.execute(b'LSET', key, index, value) def ltrim(self, key, start, stop): """Trim an existing list so that it will contain only the specified range of elements specified. :raises TypeError: if start or stop is not int """ if not isinstance(start, int): raise TypeError("start argument must be int") if not isinstance(stop, int): raise TypeError("stop argument must be int") fut = self._conn.execute(b'LTRIM', key, start, stop) return wait_ok(fut) def rpop(self, key, *, encoding=_NOTSET): """Removes and returns the last element of the list stored at key.""" return self._conn.execute(b'RPOP', key, encoding=encoding) def rpoplpush(self, sourcekey, destkey, *, encoding=_NOTSET): """Atomically returns and removes the last element (tail) of the list stored at source, and pushes the element at the first element (head) of the list stored at destination. 
""" return self._conn.execute(b'RPOPLPUSH', sourcekey, destkey, encoding=encoding) def rpush(self, key, value, *values): """Insert all the specified values at the tail of the list stored at key. """ return self._conn.execute(b'RPUSH', key, value, *values) def rpushx(self, key, value): """Inserts value at the tail of the list stored at key, only if key already exists and holds a list. """ return self._conn.execute(b'RPUSHX', key, value) aioredis-0.2.4/aioredis/commands/scripting.py0000664000175000017500000000232412604743503022077 0ustar alexeyalexey00000000000000from aioredis.util import wait_ok class ScriptingCommandsMixin: """Set commands mixin. For commands details see: http://redis.io/commands#scripting """ def eval(self, script, keys=[], args=[]): """Execute a Lua script server side.""" return self._conn.execute(b'EVAL', script, len(keys), *(keys + args)) def evalsha(self, digest, keys=[], args=[]): """Execute a Lua script server side by its SHA1 digest.""" return self._conn.execute( b'EVALSHA', digest, len(keys), *(keys + args)) def script_exists(self, digest, *digests): """Check existence of scripts in the script cache.""" return self._conn.execute(b'SCRIPT', b'EXISTS', digest, *digests) def script_kill(self): """Kill the script currently in execution.""" fut = self._conn.execute(b'SCRIPT', b'KILL') return wait_ok(fut) def script_flush(self): """Remove all the scripts from the script cache.""" fut = self._conn.execute(b"SCRIPT", b"FLUSH") return wait_ok(fut) def script_load(self, script): """Load the specified Lua script into the script cache.""" return self._conn.execute(b"SCRIPT", b"LOAD", script) aioredis-0.2.4/aioredis/commands/server.py0000664000175000017500000001554212604743503021411 0ustar alexeyalexey00000000000000from collections import namedtuple from aioredis.util import wait_ok, wait_convert, wait_make_dict, _NOTSET class ServerCommandsMixin: """Server commands mixin. 
For commands details see: http://redis.io/commands/#server """ SHUTDOWN_SAVE = 'SHUTDOWN_SAVE' SHUTDOWN_NOSAVE = 'SHUTDOWN_NOSAVE' def bgrewriteaof(self): """Asynchronously rewrite the append-only file.""" fut = self._conn.execute(b'BGREWRITEAOF') return wait_ok(fut) def bgsave(self): """Asynchronously save the dataset to disk.""" fut = self._conn.execute(b'BGSAVE') return wait_ok(fut) def client_kill(self): """Kill the connection of a client. .. warning:: Not Implemented """ raise NotImplementedError def client_list(self): """Get the list of client connections.""" fut = self._conn.execute(b'CLIENT', b'LIST', encoding='utf-8') return wait_convert(fut, to_tuples) def client_getname(self, encoding=_NOTSET): """Get the current connection name.""" return self._conn.execute(b'CLIENT', b'GETNAME', encoding=encoding) def client_pause(self, timeout): """Stop processing commands from clients for *timeout* milliseconds. :raises TypeError: if timeout is not int :raises ValueError: if timeout is less then 0 """ if not isinstance(timeout, int): raise TypeError("timeout argument must be int") if timeout < 0: raise ValueError("timeout must be greater equal 0") fut = self._conn.execute(b'CLIENT', b'PAUSE', timeout) return wait_ok(fut) def client_setname(self, name): """Set the current connection name.""" fut = self._conn.execute(b'CLIENT', b'SETNAME', name) return wait_ok(fut) def config_get(self, parameter): """Get the value of a configuration parameter.""" if not isinstance(parameter, str): raise TypeError("parameter must be str") fut = self._conn.execute(b'CONFIG', b'GET', parameter) return wait_make_dict(fut) def config_rewrite(self): """Rewrite the configuration file with the in memory configuration.""" fut = self._conn.execute(b'CONFIG', b'REWRITE') return wait_ok(fut) def config_set(self, parameter, value): """Set a configuration parameter to the given value.""" if not isinstance(parameter, str): raise TypeError("parameter must be str") fut = self._conn.execute(b'CONFIG', 
b'SET', parameter, value) return wait_ok(fut) def config_resetstat(self): """Reset the stats returned by INFO.""" fut = self._conn.execute(b'CONFIG', b'RESETSTAT') return wait_ok(fut) def dbsize(self): """Return the number of keys in the selected database.""" return self._conn.execute(b'DBSIZE') def debug_object(self, key): """Get debugging information about a key.""" return self._conn.execute(b'DEBUG', b'OBJECT', key) def debug_segfault(self, key): """Make the server crash.""" return self._conn.execute(b'DEBUG', 'SEGFAULT') def flushall(self): """Remove all keys from all databases.""" fut = self._conn.execute(b'FLUSHALL') return wait_ok(fut) def flushdb(self): """Remove all keys from the current database.""" fut = self._conn.execute('FLUSHDB') return wait_ok(fut) def info(self, section): """Get information and statistics about the server.""" # TODO: check section fut = self._conn.execute(b'INFO', section, encoding='utf-8') return wait_convert(fut, parse_info) def lastsave(self): """Get the UNIX time stamp of the last successful save to disk.""" return self._conn.execute(b'LASTSAVE') def monitor(self): """Listen for all requests received by the server in real time. .. warning:: Will not be implemented for now. """ # NOTE: will not implement for now; raise NotImplementedError def role(self): """Return the role of the instance in the context of replication.""" return self._conn.execute(b'ROLE') def save(self): """Synchronously save the dataset to disk.""" return self._conn.execute(b'SAVE') def shutdown(self, save=None): """Synchronously save the dataset to disk and then shut down the server. """ if save is self.SHUTDOWN_SAVE: return self._conn.execute(b'SHUTDOWN', b'SAVE') elif save is self.SHUTDOWN_NOSAVE: return self._conn.execute(b'SHUTDOWN', b'NOSAVE') else: return self._conn.execute(b'SHUTDOWN') def slaveof(self, host=None, port=None): """Make the server a slave of another instance, or promote it as master. 
Calling slaveof without arguments will send ``SLAVEOF NO ONE``. """ if host is None and port is None: return self._conn.execute(b'SLAVEOF', b'NO', b'ONE') return self._conn.execute(b'SLAVEOF', host, port) def slowlog_get(self, length=None): """Returns the Redis slow queries log.""" if length is not None: if not isinstance(length, int): raise TypeError("length must be int or None") return self._conn.execute(b'SLOWLOG', b'GET', length) else: return self._conn.execute(b'SLOWLOG', b'GET') def slowlog_len(self, length=None): """Returns length of Redis slow queries log.""" return self._conn.execute(b'SLOWLOG', b'LEN') def slowlog_reset(self): """Resets Redis slow queries log.""" return self._conn.execute(b'SLOWLOG', b'RESET') def sync(self): """Redis-server internal command used for replication.""" return self._conn.execute(b'SYNC') def time(self): """Return current server time.""" fut = self._conn.execute(b'TIME') return wait_convert(fut, lambda obj: float(b'.'.join(obj))) def _split(s): k, v = s.split('=') return k.replace('-', '_'), v def to_tuples(value): lines = iter(value.splitlines(False)) line = next(lines) line = list(map(_split, line.split(' '))) ClientInfo = namedtuple('ClientInfo', ' '.join(k for k, v in line)) result = [ClientInfo(**dict(line))] for line in lines: result.append(ClientInfo(**dict(map(_split, line.split(' '))))) return result def parse_info(info): res = {} for block in info.split('\r\n\r\n'): block = iter(block.strip().splitlines()) section = next(block)[2:].lower() res[section] = tmp = {} for line in block: key, value = line.split(':') if ',' in line and '=' in line: value = dict(map(lambda i: i.split('='), value.split(','))) tmp[key] = value return res aioredis-0.2.4/aioredis/commands/__init__.py0000664000175000017500000001166712604743503021646 0ustar alexeyalexey00000000000000import asyncio from aioredis.connection import create_connection from aioredis.util import _NOTSET from .generic import GenericCommandsMixin from .string import 
StringCommandsMixin from .hash import HashCommandsMixin from .hyperloglog import HyperLogLogCommandsMixin from .set import SetCommandsMixin from .sorted_set import SortedSetCommandsMixin from .transaction import TransactionsCommandsMixin, Pipeline, MultiExec from .list import ListCommandsMixin from .scripting import ScriptingCommandsMixin from .server import ServerCommandsMixin from .pubsub import PubSubCommandsMixin from .cluster import ClusterCommandsMixin __all__ = ['create_redis', 'Redis', 'Pipeline', 'MultiExec'] class AutoConnector(object): closed = False def __init__(self, *conn_args, **conn_kwargs): self._conn_args = conn_args self._conn_kwargs = conn_kwargs self._conn = None self._loop = conn_kwargs.get('loop') self._lock = asyncio.Lock(loop=self._loop) def __repr__(self): return ''.format(self._conn) @asyncio.coroutine def execute(self, *args, **kwargs): conn = yield from self.get_atomic_connection() return (yield from conn.execute(*args, **kwargs)) @asyncio.coroutine def get_atomic_connection(self): if self._conn is None or self._conn.closed: with (yield from self._lock): if self._conn is None or self._conn.closed: conn = yield from create_connection( *self._conn_args, **self._conn_kwargs) self._conn = conn return self._conn class Redis(GenericCommandsMixin, StringCommandsMixin, HyperLogLogCommandsMixin, SetCommandsMixin, HashCommandsMixin, TransactionsCommandsMixin, SortedSetCommandsMixin, ListCommandsMixin, ScriptingCommandsMixin, ServerCommandsMixin, PubSubCommandsMixin, ClusterCommandsMixin): """High-level Redis interface. Gathers in one place Redis commands implemented in mixins. 
For commands details see: http://redis.io/commands/#connection """ def __init__(self, connection): self._conn = connection def __repr__(self): return ''.format(self._conn) def close(self): self._conn.close() @asyncio.coroutine def wait_closed(self): yield from self._conn.wait_closed() @property def db(self): """Currently selected db index.""" return self._conn.db @property def encoding(self): """Current set codec or None.""" return self._conn.encoding @property def connection(self): """:class:`aioredis.RedisConnection` instance.""" return self._conn @property def in_transaction(self): """Set to True when MULTI command was issued.""" return self._conn.in_transaction @property def closed(self): """True if connection is closed.""" return self._conn.closed def auth(self, password): """Authenticate to server. This method wraps call to :meth:`aioredis.RedisConnection.auth()` """ return self._conn.auth(password) def echo(self, message, *, encoding=_NOTSET): """Echo the given string.""" return self._conn.execute('ECHO', message, encoding=encoding) def ping(self, *, encoding=_NOTSET): """Ping the server.""" return self._conn.execute('PING', encoding=encoding) def quit(self): """Close the connection.""" return self._conn.execute('QUIT') def select(self, db): """Change the selected database for the current connection. This method wraps call to :meth:`aioredis.RedisConnection.select()` """ return self._conn.select(db) @asyncio.coroutine def create_redis(address, *, db=None, password=None, encoding=None, commands_factory=Redis, loop=None): """Creates high-level Redis interface. This function is a coroutine. """ conn = yield from create_connection(address, db=db, password=password, encoding=encoding, loop=loop) return commands_factory(conn) @asyncio.coroutine def create_reconnecting_redis(address, *, db=None, password=None, encoding=None, commands_factory=Redis, loop=None): """Creates high-level Redis interface. This function is a coroutine. 
""" # Note: this is not coroutine, but we may make it such. We may start # a first connection in it, or just resolve DNS. So let's keep it # coroutine for forward compatibility conn = AutoConnector(address, db=db, password=password, encoding=encoding, loop=loop) return commands_factory(conn) # make pyflakes happy (Pipeline, MultiExec) aioredis-0.2.4/aioredis/commands/transaction.py0000664000175000017500000002204512563036251022423 0ustar alexeyalexey00000000000000import asyncio import functools from ..errors import RedisError, PipelineError, MultiExecError from ..util import wait_ok class TransactionsCommandsMixin: """Transaction commands mixin. For commands details see: http://redis.io/commands/#transactions Transactions HOWTO: >>> tr = redis.multi_exec() >>> result_future1 = tr.incr('foo') >>> result_future2 = tr.incr('bar') >>> try: ... result = yield from tr.execute() ... except MultiExecError: ... pass # check what happened >>> result1 = yield from result_future1 >>> result2 = yield from result_future2 >>> assert result == [result1, result2] """ def unwatch(self): """Forget about all watched keys.""" fut = self._conn.execute(b'UNWATCH') return wait_ok(fut) def watch(self, key, *keys): """Watch the given keys to determine execution of the MULTI/EXEC block. """ fut = self._conn.execute(b'WATCH', key, *keys) return wait_ok(fut) def multi_exec(self): """Returns MULTI/EXEC pipeline wrapper. Usage: >>> tr = redis.multi_exec() >>> fut1 = tr.incr('foo') # NO `yield from` as it will block forever! >>> fut2 = tr.incr('bar') >>> result = yield from tr.execute() >>> result [1, 1] >>> yield from asyncio.gather(fut1, fut2) [1, 1] """ return MultiExec(self._conn, self.__class__, loop=self._conn._loop) def pipeline(self): """Returns :class:`Pipeline` object to execute bulk of commands. It is provided for convenience. Commands can be pipelined without it. Example: >>> pipe = redis.pipeline() >>> fut1 = pipe.incr('foo') # NO `yield from` as it will block forever! 
>>> fut2 = pipe.incr('bar') >>> result = yield from pipe.execute() >>> result [1, 1] >>> yield from asyncio.gather(fut1, fut2) [1, 1] >>> # >>> # The same can be done without pipeline: >>> # >>> fut1 = redis.incr('foo') # the 'INCRY foo' command already sent >>> fut2 = redis.incr('bar') >>> yield from asyncio.gather(fut1, fut2) [2, 2] """ return Pipeline(self._conn, self.__class__, loop=self._conn._loop) class _RedisBuffer: def __init__(self, pipeline, *, loop=None): if loop is None: loop = asyncio.get_event_loop() self._pipeline = pipeline self._loop = loop def execute(self, cmd, *args, **kw): fut = asyncio.Future(loop=self._loop) self._pipeline.append((fut, cmd, args, kw)) return fut # TODO: add here or remove in connection methods like `select`, `auth` etc class Pipeline: """Commands pipeline. Usage: >>> pipe = redis.pipeline() >>> fut1 = pipe.incr('foo') >>> fut2 = pipe.incr('bar') >>> yield from pipe.execute() [1, 1] >>> yield from fut1 1 >>> yield from fut2 1 """ error_class = PipelineError def __init__(self, connection, commands_factory=lambda conn: conn, *, loop=None): if loop is None: loop = asyncio.get_event_loop() self._conn = connection self._loop = loop self._pipeline = [] self._results = [] self._buffer = _RedisBuffer(self._pipeline, loop=loop) self._redis = commands_factory(self._buffer) self._done = False def __getattr__(self, name): assert not self._done, "Pipeline already executed. Create new one." attr = getattr(self._redis, name) if callable(attr): @functools.wraps(attr) def wrapper(*args, **kw): try: task = asyncio.async(attr(*args, **kw), loop=self._loop) except Exception as exc: task = asyncio.Future(loop=self._loop) task.set_exception(exc) self._results.append(task) return task return wrapper return attr def execute(self, *, return_exceptions=False): """Execute all buffered commands. Any exception that is raised by any command is caught and raised later when processing results. 
Exceptions can also be returned in result if `return_exceptions` flag is set to True. """ assert not self._done, "Pipeline already executed. Create new one." self._done = True if self._pipeline: return self._do_execute(return_exceptions=return_exceptions) else: return self._gather_result(return_exceptions) @asyncio.coroutine def _do_execute(self, *, return_exceptions=False): conn = yield from self._conn.get_atomic_connection() yield from asyncio.gather(*self._send_pipeline(conn), loop=self._loop, return_exceptions=True) return (yield from self._gather_result(return_exceptions)) @asyncio.coroutine def _gather_result(self, return_exceptions): errors = [] results = [] for fut in self._results: try: res = yield from fut results.append(res) except Exception as exc: errors.append(exc) results.append(exc) if errors and not return_exceptions: raise self.error_class(errors) return results def _send_pipeline(self, conn): for fut, cmd, args, kw in self._pipeline: try: result_fut = conn.execute(cmd, *args, **kw) result_fut.add_done_callback( functools.partial(self._check_result, waiter=fut)) except Exception as exc: fut.set_exception(exc) else: yield result_fut def _check_result(self, fut, waiter): if fut.cancelled(): waiter.cancel() elif fut.exception(): waiter.set_exception(fut.exception()) else: self._set_result(fut, waiter) def _set_result(self, fut, waiter): waiter.set_result(fut.result()) class MultiExec(Pipeline): """Multi/Exec pipeline wrapper. Usage: >>> tr = redis.multi_exec() >>> f1 = tr.incr('foo') >>> f2 = tr.incr('bar') >>> # A) >>> yield from tr.execute() >>> res1 = yield from f1 >>> res2 = yield from f2 >>> # or B) >>> res1, res2 = yield from tr.execute() and ofcourse try/except: >>> tr = redis.multi_exec() >>> f1 = tr.incr('1') # won't raise any exception (why?) >>> try: ... res = yield from tr.execute() ... except RedisError: ... pass >>> assert f1.done() >>> assert f1.result() is res >>> tr = redis.multi_exec() >>> wait_ok_coro = tr.mset('1') >>> try: ... 
ok1 = yield from tr.execute() ... except RedisError: ... pass # handle it >>> ok2 = yield from wait_ok_coro >>> # for this to work `wait_ok_coro` must be wrapped in Future """ error_class = MultiExecError @asyncio.coroutine def _do_execute(self, *, return_exceptions=False): self._waiters = waiters = [] conn = yield from self._conn.get_atomic_connection() multi = conn.execute('MULTI') coros = list(self._send_pipeline(conn)) exec_ = conn.execute('EXEC') try: yield from asyncio.gather(multi, *coros, loop=self._loop) except asyncio.CancelledError: pass finally: if self._conn.closed: for fut in waiters: fut.cancel() else: try: results = yield from exec_ except RedisError as err: for fut in waiters: fut.set_exception(err) else: assert len(results) == len(waiters), (results, waiters) self._resolve_waiters(results, return_exceptions) return (yield from self._gather_result( return_exceptions)) def _resolve_waiters(self, results, return_exceptions): errors = [] for val, fut in zip(results, self._waiters): if isinstance(val, RedisError): fut.set_exception(val) errors.append(val) else: fut.set_result(val) if errors and not return_exceptions: raise MultiExecError(errors) def _set_result(self, fut, waiter): # fut is done and must be 'QUEUED' if fut in self._waiters: self._waiters.remove(fut) waiter.set_result(fut.result()) elif fut.result() not in {b'QUEUED', 'QUEUED'}: waiter.set_result(fut.result()) else: fut = asyncio.Future(loop=self._loop) self._waiters.append(fut) fut.add_done_callback( functools.partial(self._check_result, waiter=waiter)) aioredis-0.2.4/aioredis/commands/cluster.py0000664000175000017500000000742312577726331021574 0ustar alexeyalexey00000000000000from aioredis.util import wait_ok class ClusterCommandsMixin: """Cluster commands mixin. 
For commands details see: http://redis.io/commands#cluster """ def cluster_add_slots(self, slot, *slots): """Assign new hash slots to receiving node.""" slots = (slot,) + slots if not all(isinstance(s, int) for s in slots): raise TypeError("All parameters must be of type int") fut = self._conn.execute(b'CLUSTER', b'ADDSLOTS', *slots) return wait_ok(fut) def cluster_count_failure_reports(self, node_id): """Return the number of failure reports active for a given node.""" return self._conn.execute( b'CLUSTER', b'COUNT-FAILURE-REPORTS', node_id) def cluster_count_key_in_slots(self, slot): """Return the number of local keys in the specified hash slot.""" if not isinstance(slot, int): raise TypeError("Expected slot to be of type int, got {}" .format(type(slot))) return self._conn.execute(b'CLUSTER', b'COUNTKEYSINSLOT', slot) def cluster_del_slots(self, slot, *slots): """Set hash slots as unbound in receiving node.""" slots = (slot,) + slots if not all(isinstance(s, int) for s in slots): raise TypeError("All parameters must be of type int") fut = self._conn.execute(b'CLUSTER', b'DELSLOTS', *slots) return wait_ok(fut) def cluster_failover(self): """Forces a slave to perform a manual failover of its master.""" pass # TODO: Implement def cluster_forget(self, node_id): """Remove a node from the nodes table.""" fut = self._conn.execute(b'CLUSTER', b'FORGET', node_id) return wait_ok(fut) def cluster_get_keys_in_slots(self, slot, count, *, encoding): """Return local key names in the specified hash slot.""" return self._conn.execute(b'CLUSTER', b'GETKEYSINSLOT', slot, count, encoding=encoding) def cluster_info(self): """Provides info about Redis Cluster node state.""" pass # TODO: Implement def cluster_keyslot(self, key): """Returns the hash slot of the specified key.""" return self._conn.execute(b'CLUSTER', b'KEYSLOT', key) def cluster_meet(self, ip, port): """Force a node cluster to handshake with another node.""" fut = self._conn.execute(b'CLUSTER', b'MEET', ip, port) return 
wait_ok(fut) def cluster_nodes(self): """Get Cluster config for the node.""" pass # TODO: Implement def cluster_replicate(self, node_id): """Reconfigure a node as a slave of the specified master node.""" fut = self._conn.execute(b'CLUSTER', b'REPLICATE', node_id) return wait_ok(fut) def cluster_reset(self, *, hard=False): """Reset a Redis Cluster node.""" reset = hard and b'HARD' or b'SOFT' fut = self._conn.execute(b'CLUSTER', b'RESET', reset) return wait_ok(fut) def cluster_save_config(self): """Force the node to save cluster state on disk.""" fut = self._conn.execute(b'CLUSTER', b'SAVECONFIG') return wait_ok(fut) def cluster_set_config_epoch(self, config_epoch): """Set the configuration epoch in a new node.""" fut = self._conn.execute(b'CLUSTER', b'SET-CONFIG-EPOCH', config_epoch) return wait_ok(fut) def cluster_setslot(self, slot, command, node_id): """Bind a hash slot to specified node.""" pass # TODO: Implement def cluster_slaves(self, node_id): """List slave nodes of the specified master node.""" pass # TODO: Implement def cluster_slots(self): """Get array of Cluster slot to node mappings.""" pass # TODO: Implement aioredis-0.2.4/aioredis/commands/sorted_set.py0000664000175000017500000003714312607134125022254 0ustar alexeyalexey00000000000000from aioredis.util import wait_convert, PY_35 if PY_35: from aioredis.util import _ScanIterPairs class SortedSetCommandsMixin: """Sorted Sets commands mixin. For commands details see: http://redis.io/commands/#sorted_set """ ZSET_EXCLUDE_MIN = 'ZSET_EXCLUDE_MIN' ZSET_EXCLUDE_MAX = 'ZSET_EXCLUDE_MAX' ZSET_EXCLUDE_BOTH = 'ZSET_EXCLUDE_BOTH' ZSET_AGGREGATE_SUM = 'ZSET_AGGREGATE_SUM' ZSET_AGGREGATE_MIN = 'ZSET_AGGREGATE_MIN' ZSET_AGGREGATE_MAX = 'ZSET_AGGREGATE_MAX' def zadd(self, key, score, member, *pairs): """Add one or more members to a sorted set or update its score. 
:raises TypeError: score not int or float :raises TypeError: length of pairs is not even number """ if not isinstance(score, (int, float)): raise TypeError("score argument must be int or float") if len(pairs) % 2 != 0: raise TypeError("length of pairs must be even number") scores = (item for i, item in enumerate(pairs) if i % 2 == 0) if any(not isinstance(s, (int, float)) for s in scores): raise TypeError("all scores must be int or float") return self._conn.execute(b'ZADD', key, score, member, *pairs) def zcard(self, key): """Get the number of members in a sorted set.""" return self._conn.execute(b'ZCARD', key) def zcount(self, key, min=float('-inf'), max=float('inf'), *, exclude=None): """Count the members in a sorted set with scores within the given values. :raises TypeError: min or max is not float or int :raises ValueError: if min grater then max """ if not isinstance(min, (int, float)): raise TypeError("min argument must be int or float") if not isinstance(max, (int, float)): raise TypeError("max argument must be int or float") if min > max: raise ValueError("min could not be grater then max") return self._conn.execute(b'ZCOUNT', key, *_encode_min_max(exclude, min, max)) def zincrby(self, key, increment, member): """Increment the score of a member in a sorted set. :raises TypeError: increment is not float or int """ if not isinstance(increment, (int, float)): raise TypeError("increment argument must be int or float") fut = self._conn.execute(b'ZINCRBY', key, increment, member) return wait_convert(fut, int_or_float) def zinterstore(self, destkey, key, *keys, with_weights=False, aggregate=None): """Intersect multiple sorted sets and store result in a new key. 
:param bool with_weights: when set to true each key must be a tuple in form of (key, weight) """ keys = (key,) + keys numkeys = len(keys) args = [] if with_weights: assert all(isinstance(val, (list, tuple)) for val in keys), ( "All key arguments must be (key, weight) tuples") weights = ['WEIGHTS'] for key, weight in keys: args.append(key) weights.append(weight) args.extend(weights) else: args.extend(keys) if aggregate is self.ZSET_AGGREGATE_SUM: args.extend(('AGGREGATE', 'SUM')) elif aggregate is self.ZSET_AGGREGATE_MAX: args.extend(('AGGREGATE', 'MAX')) elif aggregate is self.ZSET_AGGREGATE_MIN: args.extend(('AGGREGATE', 'MIN')) fut = self._conn.execute(b'ZINTERSTORE', destkey, numkeys, *args) return fut def zlexcount(self, key, min=b'-', max=b'+', include_min=True, include_max=True): """Count the number of members in a sorted set between a given lexicographical range. :raises TypeError: if min is not bytes :raises TypeError: if max is not bytes """ if not isinstance(min, bytes): # FIXME raise TypeError("min argument must be bytes") if not isinstance(max, bytes): # FIXME Why only bytes? raise TypeError("max argument must be bytes") if not min == b'-': min = (b'[' if include_min else b'(') + min if not max == b'+': max = (b'[' if include_max else b'(') + max return self._conn.execute(b'ZLEXCOUNT', key, min, max) def zrange(self, key, start=0, stop=-1, withscores=False): """Return a range of members in a sorted set, by index. 
:raises TypeError: if start is not int :raises TypeError: if stop is not int """ if not isinstance(start, int): raise TypeError("start argument must be int") if not isinstance(stop, int): raise TypeError("stop argument must be int") if withscores: args = [b'WITHSCORES'] else: args = [] fut = self._conn.execute(b'ZRANGE', key, start, stop, *args) if withscores: return wait_convert(fut, pairs_int_or_float) return fut def zrangebylex(self, key, min=b'-', max=b'+', include_min=True, include_max=True, offset=None, count=None): """Return a range of members in a sorted set, by lexicographical range. :raises TypeError: if min is not bytes :raises TypeError: if max is not bytes :raises TypeError: if both offset and count are not specified :raises TypeError: if offset is not bytes :raises TypeError: if count is not bytes """ if not isinstance(min, bytes): # FIXME raise TypeError("min argument must be bytes") if not isinstance(max, bytes): # FIXME raise TypeError("max argument must be bytes") if not min == b'-': min = (b'[' if include_min else b'(') + min if not max == b'+': max = (b'[' if include_max else b'(') + max if (offset is not None and count is None) or \ (count is not None and offset is None): raise TypeError("offset and count must both be specified") if offset is not None and not isinstance(offset, int): raise TypeError("offset argument must be int") if count is not None and not isinstance(count, int): raise TypeError("count argument must be int") args = [] if offset is not None and count is not None: args.extend([b'LIMIT', offset, count]) return self._conn.execute(b'ZRANGEBYLEX', key, min, max, *args) def zrangebyscore(self, key, min=float('-inf'), max=float('inf'), withscores=False, offset=None, count=None, *, exclude=None): """Return a range of memebers in a sorted set, by score. 
:raises TypeError: if min or max is not float or int :raises TypeError: if both offset and count are not specified :raises TypeError: if offset is not int :raises TypeError: if count is not int """ if not isinstance(min, (int, float)): raise TypeError("min argument must be int or float") if not isinstance(max, (int, float)): raise TypeError("max argument must be int or float") if (offset is not None and count is None) or \ (count is not None and offset is None): raise TypeError("offset and count must both be specified") if offset is not None and not isinstance(offset, int): raise TypeError("offset argument must be int") if count is not None and not isinstance(count, int): raise TypeError("count argument must be int") min, max = _encode_min_max(exclude, min, max) args = [] if withscores: args = [b'WITHSCORES'] if offset is not None and count is not None: args.extend([b'LIMIT', offset, count]) fut = self._conn.execute(b'ZRANGEBYSCORE', key, min, max, *args) if withscores: return wait_convert(fut, pairs_int_or_float) return fut def zrank(self, key, member): """Determine the index of a member in a sorted set.""" return self._conn.execute(b'ZRANK', key, member) def zrem(self, key, member, *members): """Remove one or more members from a sorted set.""" return self._conn.execute(b'ZREM', key, member, *members) def zremrangebylex(self, key, min=b'-', max=b'+', include_min=True, include_max=True,): """Remove all members in a sorted set between the given lexicographical range. 
:raises TypeError: if min is not bytes :raises TypeError: if max is not bytes """ if not isinstance(min, bytes): # FIXME raise TypeError("min argument must be bytes") if not isinstance(max, bytes): # FIXME raise TypeError("max argument must be bytes") if not min == b'-': min = (b'[' if include_min else b'(') + min if not max == b'+': max = (b'[' if include_max else b'(') + max return self._conn.execute(b'ZREMRANGEBYLEX', key, min, max) def zremrangebyrank(self, key, start, stop): """Remove all members in a sorted set within the given indexes. :raises TypeError: if start is not int :raises TypeError: if stop is not int """ if not isinstance(start, int): raise TypeError("start argument must be int") if not isinstance(stop, int): raise TypeError("stop argument must be int") return self._conn.execute(b'ZREMRANGEBYRANK', key, start, stop) def zremrangebyscore(self, key, min=float('-inf'), max=float('inf'), *, exclude=None): """Remove all members in a sorted set within the given scores. :raises TypeError: if min or max is not int or float """ if not isinstance(min, (int, float)): raise TypeError("min argument must be int or float") if not isinstance(max, (int, float)): raise TypeError("max argument must be int or float") min, max = _encode_min_max(exclude, min, max) return self._conn.execute(b'ZREMRANGEBYSCORE', key, min, max) def zrevrange(self, key, start, stop, withscores=False): """Return a range of members in a sorted set, by index, with scores ordered from high to low. 
:raises TypeError: if start or stop is not int """ if not isinstance(start, int): raise TypeError("start argument must be int") if not isinstance(stop, int): raise TypeError("stop argument must be int") if withscores: args = [b'WITHSCORES'] else: args = [] fut = self._conn.execute(b'ZREVRANGE', key, start, stop, *args) if withscores: return wait_convert(fut, pairs_int_or_float) return fut def zrevrangebyscore(self, key, max=float('inf'), min=float('-inf'), *, exclude=None, withscores=False, offset=None, count=None): """Return a range of members in a sorted set, by score, with scores ordered from high to low. :raises TypeError: if min or max is not float or int :raises TypeError: if both offset and count are not specified :raises TypeError: if offset is not int :raises TypeError: if count is not int """ if not isinstance(min, (int, float)): raise TypeError("min argument must be int or float") if not isinstance(max, (int, float)): raise TypeError("max argument must be int or float") if (offset is not None and count is None) or \ (count is not None and offset is None): raise TypeError("offset and count must both be specified") if offset is not None and not isinstance(offset, int): raise TypeError("offset argument must be int") if count is not None and not isinstance(count, int): raise TypeError("count argument must be int") min, max = _encode_min_max(exclude, min, max) args = [] if withscores: args = [b'WITHSCORES'] if offset is not None and count is not None: args.extend([b'LIMIT', offset, count]) fut = self._conn.execute(b'ZREVRANGEBYSCORE', key, max, min, *args) if withscores: return wait_convert(fut, pairs_int_or_float) return fut def zrevrank(self, key, member): """Determine the index of a member in a sorted set, with scores ordered from high to low. 
""" return self._conn.execute(b'ZREVRANK', key, member) def zscore(self, key, member): """Get the score associated with the given member in a sorted set.""" fut = self._conn.execute(b'ZSCORE', key, member) return wait_convert(fut, optional_int_or_float) def zunionstore(self, destkey, key, *keys, with_weights=False, aggregate=None): """Add multiple sorted sets and store result in a new key.""" keys = (key,) + keys numkeys = len(keys) args = [] if with_weights: assert all(isinstance(val, (list, tuple)) for val in keys), ( "All key arguments must be (key, weight) tuples") weights = ['WEIGHTS'] for key, weight in keys: args.append(key) weights.append(weight) args.extend(weights) else: args.extend(keys) if aggregate is self.ZSET_AGGREGATE_SUM: args.extend(('AGGREGATE', 'SUM')) elif aggregate is self.ZSET_AGGREGATE_MAX: args.extend(('AGGREGATE', 'MAX')) elif aggregate is self.ZSET_AGGREGATE_MIN: args.extend(('AGGREGATE', 'MIN')) fut = self._conn.execute(b'ZUNIONSTORE', destkey, numkeys, *args) return fut def zscan(self, key, cursor=0, match=None, count=None): """Incrementally iterate sorted sets elements and associated scores.""" args = [] if match is not None: args += [b'MATCH', match] if count is not None: args += [b'COUNT', count] fut = self._conn.execute(b'ZSCAN', key, cursor, *args) def _converter(obj): return (int(obj[0]), pairs_int_or_float(obj[1])) return wait_convert(fut, _converter) if PY_35: def izscan(self, key, *, match=None, count=None): """Incrementally iterate sorted set items using async for. Usage example: >>> async for val, score in redis.izscan(key, match='something*'): ... 
def _encode_min_max(flag, min, max):
    """Apply exclusive-range markers to min/max for score-range commands.

    *flag* is one of the SortedSetCommandsMixin.ZSET_EXCLUDE_* constants
    (or None); an excluded bound is prefixed with '(' per the Redis
    score-range syntax.
    """
    if flag is SortedSetCommandsMixin.ZSET_EXCLUDE_MIN:
        return '({}'.format(min), max
    elif flag is SortedSetCommandsMixin.ZSET_EXCLUDE_MAX:
        return min, '({}'.format(max)
    elif flag is SortedSetCommandsMixin.ZSET_EXCLUDE_BOTH:
        return '({}'.format(min), '({}'.format(max)
    return min, max


def int_or_float(value):
    """Convert a raw str/bytes Redis reply to int if possible, else float.

    :raises AssertionError: if value is neither str nor bytes
    :raises ValueError: if value parses as neither int nor float
    """
    # Fixed assert message: the original said 'raw_value must be bytes'
    # even though str values are explicitly accepted.
    assert isinstance(value, (str, bytes)), 'value must be str or bytes'
    try:
        return int(value)
    except ValueError:
        return float(value)


def optional_int_or_float(value):
    """Like int_or_float, but passes None through (missing member)."""
    if value is None:
        return value
    return int_or_float(value)


def pairs_int_or_float(value):
    """Convert a flat [member, score, member, score, ...] reply, coercing
    every score to int/float and leaving members untouched.

    The original built the result with ``sum(two_item_lists, [])``, which
    is quadratic in the number of pairs; a single linear pass is used
    instead.
    """
    it = iter(value)
    result = []
    for member, score in zip(it, it):
        result.append(member)
        result.append(int_or_float(score))
    return result
""" def __init__(self, address, db=0, password=None, encoding=None, *, minsize, maxsize, commands_factory, loop=None): if loop is None: loop = asyncio.get_event_loop() self._address = address self._db = db self._password = password self._encoding = encoding self._minsize = minsize self._factory = commands_factory self._loop = loop self._pool = collections.deque(maxlen=maxsize) self._used = set() self._acquiring = 0 self._cond = asyncio.Condition(loop=loop) @property def minsize(self): """Minimum pool size.""" return self._minsize @property def maxsize(self): """Maximum pool size.""" return self._pool.maxlen @property def size(self): """Current pool size.""" return self.freesize + len(self._used) + self._acquiring @property def freesize(self): """Current number of free connections.""" return len(self._pool) @asyncio.coroutine def clear(self): """Clear pool connections. Close and remove all free connections. """ with (yield from self._cond): waiters = [] while self._pool: conn = self._pool.popleft() conn.close() waiters.append(conn.wait_closed()) yield from asyncio.gather(*waiters, loop=self._loop) @property def db(self): """Currently selected db index.""" return self._db @property def encoding(self): """Current set codec or None.""" return self._encoding @asyncio.coroutine def select(self, db): """Changes db index for all free connections. All previously acquired connections will be closed when released. """ with (yield from self._cond): for i in range(self.freesize): yield from self._pool[i].select(db) else: self._db = db @asyncio.coroutine def acquire(self): """Acquires a connection from free pool. Creates new connection if needed. 
""" with (yield from self._cond): while True: yield from self._fill_free(override_min=True) if self.freesize: conn = self._pool.popleft() assert not conn.closed, conn assert conn not in self._used, (conn, self._used) self._used.add(conn) return conn else: yield from self._cond.wait() def release(self, conn): """Returns used connection back into pool. When returned connection has db index that differs from one in pool the connection will be closed and dropped. When queue of free connections is full the connection will be dropped. """ assert conn in self._used, "Invalid connection, maybe from other pool" self._used.remove(conn) if not conn.closed: if conn.in_transaction: logger.warning("Connection %r in transaction, closing it.", conn) conn.close() elif conn.db == self.db: if self.maxsize and self.freesize < self.maxsize: self._pool.append(conn) else: # consider this connection as old and close it. conn.close() else: conn.close() # FIXME: check event loop is not closed asyncio.async(self._wakeup(), loop=self._loop) def _drop_closed(self): for i in range(self.freesize): conn = self._pool[0] if conn.closed: self._pool.popleft() else: self._pool.rotate(1) @asyncio.coroutine def _fill_free(self, *, override_min): # drop closed connections first self._drop_closed() while self.size < self.minsize: self._acquiring += 1 try: conn = yield from self._create_new_connection() self._pool.append(conn) finally: self._acquiring -= 1 # connection may be closed at yeild point self._drop_closed() if self.freesize: return if override_min: while not self._pool and self.size < self.maxsize: self._acquiring += 1 try: conn = yield from self._create_new_connection() self._pool.append(conn) finally: self._acquiring -= 1 # connection may be closed at yeild point self._drop_closed() def _create_new_connection(self): return create_redis(self._address, db=self._db, password=self._password, encoding=self._encoding, commands_factory=self._factory, loop=self._loop) @asyncio.coroutine def 
class _ConnectionContextManager:
    """Plain context manager returned by ``yield from pool``: yields the
    acquired connection and releases it back to the pool on exit.
    """

    __slots__ = ('_pool', '_conn')

    def __init__(self, pool, conn):
        self._pool = pool
        self._conn = conn

    def __enter__(self):
        return self._conn

    def __exit__(self, exc_type, exc_value, tb):
        pool = self._pool
        conn = self._conn
        # Drop our references first so the manager cannot be reused even
        # if release() raises.
        self._pool = None
        self._conn = None
        pool.release(conn)
def encode_command(*args):
    """Encodes arguments into redis bulk-strings array.

    Raises TypeError if any of args not of bytes, str, int or float type.
    """
    # Exact-type dispatch (deliberately not isinstance): bool must NOT be
    # accepted as int, mirroring the module-level _converters table.
    converters = {
        bytes: lambda val: val,
        bytearray: lambda val: val,
        str: lambda val: val.encode('utf-8'),
        int: lambda val: str(val).encode('utf-8'),
        float: lambda val: str(val).encode('utf-8'),
    }
    buf = bytearray()

    def write(chunk):
        buf.extend(chunk + b'\r\n')

    write(b'*' + str(len(args)).encode('utf-8'))
    for arg in args:
        try:
            convert = converters[type(arg)]
        except KeyError:
            raise TypeError("Argument {!r} expected to be of bytes,"
                            " str, int or float type".format(arg))
        barg = convert(arg)
        write(b'$' + str(len(barg)).encode('utf-8'))
        write(barg)
    return buf


def decode(obj, encoding):
    """Decode a bytes reply (or bytes items one list level deep) to str."""
    if isinstance(obj, bytes):
        return obj.decode(encoding)
    if isinstance(obj, list):
        return [item.decode(encoding) if isinstance(item, bytes) else item
                for item in obj]
    return obj
""" assert decoder is None or callable(decoder), decoder if not self.is_active: raise ChannelClosedError() msg = yield from self._queue.get() if msg is None: return if self._is_pattern: dest_channel, msg = msg if encoding is not None: msg = msg.decode(encoding) if decoder is not None: msg = decoder(msg) if self._is_pattern: return dest_channel, msg return msg @asyncio.coroutine def get_json(self, encoding='utf-8'): """Shortcut to get JSON messages.""" return (yield from self.get(encoding=encoding, decoder=json.loads)) @asyncio.coroutine def wait_message(self): """Waits for message to become available in channel. Possible usage: >>> while (yield from ch.wait_message()): ... msg = yield from ch.get() """ if not self.is_active: return False if not self._queue.empty(): return True if self._waiter is None: self._waiter = asyncio.Future(loop=self._loop) yield from self._waiter return self.is_active # internale methods def put_nowait(self, data): self._queue.put_nowait(data) if self._waiter is not None: fut, self._waiter = self._waiter, None fut.set_result(None) def close(self): """Marks channel as inactive. Internal method, will be called from connection on `unsubscribe` command. 
""" if not self._closed: self.put_nowait(None) self._closed = True @asyncio.coroutine def wait_ok(fut): res = yield from fut if res in (b'QUEUED', 'QUEUED'): return res return res in (b'OK', 'OK') @asyncio.coroutine def wait_convert(fut, type_): result = yield from fut if result in (b'QUEUED', 'QUEUED'): return result return type_(result) @asyncio.coroutine def wait_make_dict(fut): res = yield from fut if res in (b'QUEUED', 'QUEUED'): return res it = iter(res) return dict(zip(it, it)) class coerced_keys_dict(dict): def __getitem__(self, other): if not isinstance(other, bytes): other = _converters[type(other)](other) return dict.__getitem__(self, other) def __contains__(self, other): if not isinstance(other, bytes): other = _converters[type(other)](other) return dict.__contains__(self, other) if PY_35: class _BaseScanIter: __slots__ = ('_scan', '_cur', '_ret') def __init__(self, scan): self._scan = scan self._cur = b'0' self._ret = [] @asyncio.coroutine def __aiter__(self): return self class _ScanIter(_BaseScanIter): @asyncio.coroutine def __anext__(self): while not self._ret and self._cur: self._cur, self._ret = yield from self._scan(self._cur) if not self._cur and not self._ret: raise StopAsyncIteration # noqa else: ret = self._ret.pop(0) return ret class _ScanIterPairs(_BaseScanIter): @asyncio.coroutine def __anext__(self): while not self._ret and self._cur: self._cur, ret = yield from self._scan(self._cur) self._ret = list(zip(ret[::2], ret[1::2])) if not self._cur and not self._ret: raise StopAsyncIteration # noqa else: ret = self._ret.pop(0) return ret aioredis-0.2.4/aioredis/__init__.py0000664000175000017500000000105012607147531020030 0ustar alexeyalexey00000000000000from .connection import RedisConnection, create_connection from .commands import Redis, create_redis, create_reconnecting_redis from .pool import RedisPool, create_pool from .util import Channel from .errors import ( RedisError, ProtocolError, ReplyError, PipelineError, MultiExecError, ) 
__version__ = '0.2.4' # make pyflakes happy (create_connection, RedisConnection, create_redis, create_reconnecting_redis, Redis, create_pool, RedisPool, Channel, RedisError, ProtocolError, ReplyError, PipelineError, MultiExecError) aioredis-0.2.4/aioredis/log.py0000664000175000017500000000006712577726331017070 0ustar alexeyalexey00000000000000import logging logger = logging.getLogger('aioredis') aioredis-0.2.4/aioredis.egg-info/0000775000175000017500000000000012607147634017421 5ustar alexeyalexey00000000000000aioredis-0.2.4/aioredis.egg-info/requires.txt0000664000175000017500000000001012607147634022010 0ustar alexeyalexey00000000000000hiredis aioredis-0.2.4/aioredis.egg-info/top_level.txt0000664000175000017500000000001112607147634022143 0ustar alexeyalexey00000000000000aioredis aioredis-0.2.4/aioredis.egg-info/SOURCES.txt0000664000175000017500000000127512607147634021312 0ustar alexeyalexey00000000000000CHANGES.txt LICENSE MANIFEST.in README.rst setup.py aioredis/__init__.py aioredis/connection.py aioredis/errors.py aioredis/log.py aioredis/pool.py aioredis/util.py aioredis.egg-info/PKG-INFO aioredis.egg-info/SOURCES.txt aioredis.egg-info/dependency_links.txt aioredis.egg-info/requires.txt aioredis.egg-info/top_level.txt aioredis/commands/__init__.py aioredis/commands/cluster.py aioredis/commands/generic.py aioredis/commands/hash.py aioredis/commands/hyperloglog.py aioredis/commands/list.py aioredis/commands/pubsub.py aioredis/commands/scripting.py aioredis/commands/server.py aioredis/commands/set.py aioredis/commands/sorted_set.py aioredis/commands/string.py aioredis/commands/transaction.pyaioredis-0.2.4/aioredis.egg-info/PKG-INFO0000664000175000017500000001622412607147634020523 0ustar alexeyalexey00000000000000Metadata-Version: 1.1 Name: aioredis Version: 0.2.4 Summary: asyncio (PEP 3156) Redis support Home-page: https://github.com/aio-libs/aioredis Author: Alexey Popravka Author-email: alexey.popravka@horsedevel.com License: MIT Description: aioredis ======== 
asyncio (PEP 3156) Redis client library. .. image:: https://travis-ci.org/aio-libs/aioredis.svg?branch=master :target: https://travis-ci.org/aio-libs/aioredis .. image:: https://coveralls.io/repos/aio-libs/aioredis/badge.png?branch=master :target: https://coveralls.io/r/aio-libs/aioredis?branch=master Features -------- ================================ ============================== hiredis_ parser Yes Pure-python parser TBD Low-level & High-level APIs Yes Connections Pool Yes Pipelining support Yes Pub/Sub support Yes Redis Cluster support WIP Trollius (python 2.7) No Tested python versions `3.3, 3.4, 3.5`_ Tested for Redis server `2.6, 2.8, 3.0`_ Support for dev Redis server through low-level API ================================ ============================== Documentation ------------- http://aioredis.readthedocs.org/ Usage examples -------------- Simple low-level interface: .. code:: python import asyncio import aioredis loop = asyncio.get_event_loop() @asyncio.coroutine def go(): conn = yield from aioredis.create_connection( ('localhost', 6379), loop=loop) yield from conn.execute('set', 'my-key', 'value') val = yield from conn.execute('get', 'my-key') print(val) conn.close() loop.run_until_complete(go()) # will print 'value' Simple high-level interface: .. code:: python import asyncio import aioredis loop = asyncio.get_event_loop() @asyncio.coroutine def go(): redis = yield from aioredis.create_redis( ('localhost', 6379), loop=loop) yield from redis.set('my-key', 'value') val = yield from redis.get('my-key') print(val) redis.close() loop.run_until_complete(go()) # will print 'value' Connections pool: .. 
code:: python import asyncio import aioredis loop = asyncio.get_event_loop() @asyncio.coroutine def go(): pool = yield from aioredis.create_pool( ('localhost', 6379), minsize=5, maxsize=10, loop=loop) with (yield from pool) as redis: # high-level redis API instance yield from redis.set('my-key', 'value') print((yield from redis.get('my-key'))) yield from pool.clear() # closing all open connections loop.run_until_complete(go()) Requirements ------------ * Python_ 3.3+ * asyncio_ or Python_ 3.4+ * hiredis_ .. note:: hiredis is preferred requirement. Pure-python fallback protocol parser is TBD. License ------- The aioredis is offered under MIT license. .. _Python: https://www.python.org .. _asyncio: https://pypi.python.org/pypi/asyncio .. _hiredis: https://pypi.python.org/pypi/hiredis .. _3.3, 3.4, 3.5: .. _2.6, 2.8, 3.0: .. _travis: https://travis-ci.org/aio-libs/aioredis Changes ------- 0.2.4 (2015-10-13) ^^^^^^^^^^^^^^^^^^ * Python 3.5 ``async`` support: - New scan commands API (``iscan``, ``izscan``, ``ihscan``); - Pool made awaitable (allowing ``with await pool: ...`` constructs); * Fixed dropping closed connections from free pool (see `#83 `_); * Docs updated; 0.2.3 (2015-08-14) ^^^^^^^^^^^^^^^^^^ * Redis cluster support work in progress; * Fixed pool issue causing pool growth over max size & ``acquire`` call hangs (see `#71 `_); * ``info`` server command result parsing implemented; * Fixed behavior of util functions (see `#70 `_); * ``hstrlen`` command added; * Few fixes in examples; * Few fixes in documentation; 0.2.2 (2015-07-07) ^^^^^^^^^^^^^^^^^^ * Decoding data with ``encoding`` paramter now takes into account list (array) replies (see `#68 `_); * ``encoding`` parameter added to following commands: - generic commands: keys, randomkey; - hash commands: hgetall, hkeys, hmget, hvals; - list commands: blpop, brpop, brpoplpush, lindex, lpop, lrange, rpop, rpoplpush; - set commands: smembers, spop, srandmember; - string commands: getrange, getset, mget; * 
Backward incompatibility: ``ltrim`` command now returns bool value instead of 'OK'; * Tests updated; 0.2.1 (2015-07-06) ^^^^^^^^^^^^^^^^^^ * Logging added (aioredis.log module); * Fixed issue with ``wait_message`` in pub/sub (see `#66 `_); 0.2.0 (2015-06-04) ^^^^^^^^^^^^^^^^^^ * Pub/Sub support added; * Fix in ``zrevrangebyscore`` command (see `#62 `_); * Fixes/tests/docs; Platform: POSIX Classifier: License :: OSI Approved :: MIT License Classifier: Development Status :: 4 - Beta Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Operating System :: POSIX Classifier: Environment :: Web Environment Classifier: Intended Audience :: Developers Classifier: Topic :: Software Development Classifier: Topic :: Software Development :: Libraries aioredis-0.2.4/aioredis.egg-info/dependency_links.txt0000664000175000017500000000000112607147634023467 0ustar alexeyalexey00000000000000 aioredis-0.2.4/CHANGES.txt0000664000175000017500000000345412607147464015750 0ustar alexeyalexey00000000000000Changes ------- 0.2.4 (2015-10-13) ^^^^^^^^^^^^^^^^^^ * Python 3.5 ``async`` support: - New scan commands API (``iscan``, ``izscan``, ``ihscan``); - Pool made awaitable (allowing ``with await pool: ...`` constructs); * Fixed dropping closed connections from free pool (see `#83 `_); * Docs updated; 0.2.3 (2015-08-14) ^^^^^^^^^^^^^^^^^^ * Redis cluster support work in progress; * Fixed pool issue causing pool growth over max size & ``acquire`` call hangs (see `#71 `_); * ``info`` server command result parsing implemented; * Fixed behavior of util functions (see `#70 `_); * ``hstrlen`` command added; * Few fixes in examples; * Few fixes in documentation; 0.2.2 (2015-07-07) ^^^^^^^^^^^^^^^^^^ * Decoding data with ``encoding`` paramter now takes into account list (array) replies (see `#68 `_); * 
``encoding`` parameter added to following commands: - generic commands: keys, randomkey; - hash commands: hgetall, hkeys, hmget, hvals; - list commands: blpop, brpop, brpoplpush, lindex, lpop, lrange, rpop, rpoplpush; - set commands: smembers, spop, srandmember; - string commands: getrange, getset, mget; * Backward incompatibility: ``ltrim`` command now returns bool value instead of 'OK'; * Tests updated; 0.2.1 (2015-07-06) ^^^^^^^^^^^^^^^^^^ * Logging added (aioredis.log module); * Fixed issue with ``wait_message`` in pub/sub (see `#66 `_); 0.2.0 (2015-06-04) ^^^^^^^^^^^^^^^^^^ * Pub/Sub support added; * Fix in ``zrevrangebyscore`` command (see `#62 `_); * Fixes/tests/docs; aioredis-0.2.4/setup.cfg0000664000175000017500000000007312607147634015751 0ustar alexeyalexey00000000000000[egg_info] tag_build = tag_svn_revision = 0 tag_date = 0 aioredis-0.2.4/PKG-INFO0000664000175000017500000001622412607147634015232 0ustar alexeyalexey00000000000000Metadata-Version: 1.1 Name: aioredis Version: 0.2.4 Summary: asyncio (PEP 3156) Redis support Home-page: https://github.com/aio-libs/aioredis Author: Alexey Popravka Author-email: alexey.popravka@horsedevel.com License: MIT Description: aioredis ======== asyncio (PEP 3156) Redis client library. .. image:: https://travis-ci.org/aio-libs/aioredis.svg?branch=master :target: https://travis-ci.org/aio-libs/aioredis .. 
image:: https://coveralls.io/repos/aio-libs/aioredis/badge.png?branch=master :target: https://coveralls.io/r/aio-libs/aioredis?branch=master Features -------- ================================ ============================== hiredis_ parser Yes Pure-python parser TBD Low-level & High-level APIs Yes Connections Pool Yes Pipelining support Yes Pub/Sub support Yes Redis Cluster support WIP Trollius (python 2.7) No Tested python versions `3.3, 3.4, 3.5`_ Tested for Redis server `2.6, 2.8, 3.0`_ Support for dev Redis server through low-level API ================================ ============================== Documentation ------------- http://aioredis.readthedocs.org/ Usage examples -------------- Simple low-level interface: .. code:: python import asyncio import aioredis loop = asyncio.get_event_loop() @asyncio.coroutine def go(): conn = yield from aioredis.create_connection( ('localhost', 6379), loop=loop) yield from conn.execute('set', 'my-key', 'value') val = yield from conn.execute('get', 'my-key') print(val) conn.close() loop.run_until_complete(go()) # will print 'value' Simple high-level interface: .. code:: python import asyncio import aioredis loop = asyncio.get_event_loop() @asyncio.coroutine def go(): redis = yield from aioredis.create_redis( ('localhost', 6379), loop=loop) yield from redis.set('my-key', 'value') val = yield from redis.get('my-key') print(val) redis.close() loop.run_until_complete(go()) # will print 'value' Connections pool: .. code:: python import asyncio import aioredis loop = asyncio.get_event_loop() @asyncio.coroutine def go(): pool = yield from aioredis.create_pool( ('localhost', 6379), minsize=5, maxsize=10, loop=loop) with (yield from pool) as redis: # high-level redis API instance yield from redis.set('my-key', 'value') print((yield from redis.get('my-key'))) yield from pool.clear() # closing all open connections loop.run_until_complete(go()) Requirements ------------ * Python_ 3.3+ * asyncio_ or Python_ 3.4+ * hiredis_ .. 
note:: hiredis is preferred requirement. Pure-python fallback protocol parser is TBD. License ------- The aioredis is offered under MIT license. .. _Python: https://www.python.org .. _asyncio: https://pypi.python.org/pypi/asyncio .. _hiredis: https://pypi.python.org/pypi/hiredis .. _3.3, 3.4, 3.5: .. _2.6, 2.8, 3.0: .. _travis: https://travis-ci.org/aio-libs/aioredis Changes ------- 0.2.4 (2015-10-13) ^^^^^^^^^^^^^^^^^^ * Python 3.5 ``async`` support: - New scan commands API (``iscan``, ``izscan``, ``ihscan``); - Pool made awaitable (allowing ``with await pool: ...`` constructs); * Fixed dropping closed connections from free pool (see `#83 `_); * Docs updated; 0.2.3 (2015-08-14) ^^^^^^^^^^^^^^^^^^ * Redis cluster support work in progress; * Fixed pool issue causing pool growth over max size & ``acquire`` call hangs (see `#71 `_); * ``info`` server command result parsing implemented; * Fixed behavior of util functions (see `#70 `_); * ``hstrlen`` command added; * Few fixes in examples; * Few fixes in documentation; 0.2.2 (2015-07-07) ^^^^^^^^^^^^^^^^^^ * Decoding data with ``encoding`` paramter now takes into account list (array) replies (see `#68 `_); * ``encoding`` parameter added to following commands: - generic commands: keys, randomkey; - hash commands: hgetall, hkeys, hmget, hvals; - list commands: blpop, brpop, brpoplpush, lindex, lpop, lrange, rpop, rpoplpush; - set commands: smembers, spop, srandmember; - string commands: getrange, getset, mget; * Backward incompatibility: ``ltrim`` command now returns bool value instead of 'OK'; * Tests updated; 0.2.1 (2015-07-06) ^^^^^^^^^^^^^^^^^^ * Logging added (aioredis.log module); * Fixed issue with ``wait_message`` in pub/sub (see `#66 `_); 0.2.0 (2015-06-04) ^^^^^^^^^^^^^^^^^^ * Pub/Sub support added; * Fix in ``zrevrangebyscore`` command (see `#62 `_); * Fixes/tests/docs; Platform: POSIX Classifier: License :: OSI Approved :: MIT License Classifier: Development Status :: 4 - Beta Classifier: Programming Language 
:: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Operating System :: POSIX Classifier: Environment :: Web Environment Classifier: Intended Audience :: Developers Classifier: Topic :: Software Development Classifier: Topic :: Software Development :: Libraries aioredis-0.2.4/README.rst0000664000175000017500000000571512607136404015620 0ustar alexeyalexey00000000000000aioredis ======== asyncio (PEP 3156) Redis client library. .. image:: https://travis-ci.org/aio-libs/aioredis.svg?branch=master :target: https://travis-ci.org/aio-libs/aioredis .. image:: https://coveralls.io/repos/aio-libs/aioredis/badge.png?branch=master :target: https://coveralls.io/r/aio-libs/aioredis?branch=master Features -------- ================================ ============================== hiredis_ parser Yes Pure-python parser TBD Low-level & High-level APIs Yes Connections Pool Yes Pipelining support Yes Pub/Sub support Yes Redis Cluster support WIP Trollius (python 2.7) No Tested python versions `3.3, 3.4, 3.5`_ Tested for Redis server `2.6, 2.8, 3.0`_ Support for dev Redis server through low-level API ================================ ============================== Documentation ------------- http://aioredis.readthedocs.org/ Usage examples -------------- Simple low-level interface: .. code:: python import asyncio import aioredis loop = asyncio.get_event_loop() @asyncio.coroutine def go(): conn = yield from aioredis.create_connection( ('localhost', 6379), loop=loop) yield from conn.execute('set', 'my-key', 'value') val = yield from conn.execute('get', 'my-key') print(val) conn.close() loop.run_until_complete(go()) # will print 'value' Simple high-level interface: .. 
code:: python import asyncio import aioredis loop = asyncio.get_event_loop() @asyncio.coroutine def go(): redis = yield from aioredis.create_redis( ('localhost', 6379), loop=loop) yield from redis.set('my-key', 'value') val = yield from redis.get('my-key') print(val) redis.close() loop.run_until_complete(go()) # will print 'value' Connections pool: .. code:: python import asyncio import aioredis loop = asyncio.get_event_loop() @asyncio.coroutine def go(): pool = yield from aioredis.create_pool( ('localhost', 6379), minsize=5, maxsize=10, loop=loop) with (yield from pool) as redis: # high-level redis API instance yield from redis.set('my-key', 'value') print((yield from redis.get('my-key'))) yield from pool.clear() # closing all open connections loop.run_until_complete(go()) Requirements ------------ * Python_ 3.3+ * asyncio_ or Python_ 3.4+ * hiredis_ .. note:: hiredis is preferred requirement. Pure-python fallback protocol parser is TBD. License ------- The aioredis is offered under MIT license. .. _Python: https://www.python.org .. _asyncio: https://pypi.python.org/pypi/asyncio .. _hiredis: https://pypi.python.org/pypi/hiredis .. _3.3, 3.4, 3.5: .. _2.6, 2.8, 3.0: .. _travis: https://travis-ci.org/aio-libs/aioredis